///////////////////////////////////////////////////////////////////////////////
//
/// \file       arm64.c
/// \brief      Filter for ARM64 binaries
///
/// This converts ARM64 relative addresses in the BL and ADRP immediates
/// to absolute values to increase redundancy of ARM64 code.
///
/// Converting B or ADR instructions was also tested but it's not useful.
/// A majority of the jumps for the B instruction are very small (+/- 0xFF).
/// These are typical for loops and if-statements. Encoding them to their
/// absolute address reduces redundancy since many of the small relative
/// jump values are repeated, but very few of the absolute addresses are.
//
//  Authors:    Lasse Collin
//              Jia Tan
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#include "simple_private.h"

static size_t
arm64_code(void *simple lzma_attribute((__unused__)),
		uint32_t now_pos, bool is_encoder,
		uint8_t *buffer, size_t size)
{
	size_t i;

	// Clang 14.0.6 on x86-64 makes this four times bigger and 40 % slower
	// with auto-vectorization that is enabled by default with -O2.
	// Such vectorization bloat happens with -O2 when targeting ARM64 too
	// but performance hasn't been tested.
#ifdef __clang__
#	pragma clang loop vectorize(disable)
#endif
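	// ARM64 instructions are fixed-size 32-bit little-endian words, so
	// the buffer is scanned four bytes at a time. now_pos is the offset
	// of buffer[0] in the uncompressed stream, so now_pos + i is the
	// position of the instruction being examined.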
	for (i = 0; i + 4 <= size; i += 4) {
		uint32_t pc = (uint32_t)(now_pos + i);
		uint32_t instr = read32le(buffer + i);

		if ((instr >> 26) == 0x25) {
			// BL instruction:
			// The full 26-bit immediate is converted.
			// The range is +/-128 MiB.
			//
			// Using the full range helps quite a lot with
			// big executables. Smaller range would reduce false
			// positives in non-code sections of the input though
			// so this is a compromise that slightly favors big
			// files. With the full range only six bits of the 32
			// need to match to trigger a conversion.
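			//
			// A BL is encoded as 100101 in the top six bits
			// followed by imm26, the signed branch offset in
			// 32-bit words. Adding pc (also in words) when
			// encoding turns the relative offset into the
			// absolute target address; subtracting it when
			// decoding restores the original offset. Both wrap
			// modulo 2^26, matching the +/-128 MiB reach.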
			const uint32_t src = instr;
			instr = 0x94000000;

			pc >>= 2;
			if (!is_encoder)
				pc = 0U - pc;

			instr |= (src + pc) & 0x03FFFFFF;
			write32le(buffer + i, instr);

		} else if ((instr & 0x9F000000) == 0x90000000) {
			// ADRP instruction:
			// Only values in the range +/-512 MiB are converted.
			//
			// Using less than the full +/-4 GiB range reduces
			// false positives on non-code sections of the input
			// while being excellent for executables up to 512 MiB.
			// The positive effect of ADRP conversion is smaller
			// than that of BL but it also doesn't hurt so much in
			// non-code sections of input because, with +/-512 MiB
			// range, nine bits of 32 need to match to trigger a
			// conversion (two 10-bit match choices = 9 bits).
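			//
			// The mask above matches ADRP: bit 31 is one and
			// bits 28:24 are 10000, while the immlo bits 30:29
			// and the destination register are ignored. ADRP
			// stores its 21-bit page offset split in two fields,
			// immlo in bits 30:29 and immhi in bits 23:5, so
			// reassemble it into src with immlo as the lowest
			// two bits.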
			const uint32_t src = ((instr >> 29) & 3)
					| ((instr >> 3) & 0x001FFFFC);

			// With the addition only one branch is needed to
			// check the +/- range. This is usually false when
			// processing ARM64 code so branch prediction will
			// handle it well in terms of performance.
			//
			//if ((src & 0x001E0000) != 0
			//		&& (src & 0x001E0000) != 0x001E0000)
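			//
			// Adding 0x00020000 biases src so that every offset
			// within +/-0x20000 pages (+/-512 MiB) leaves bits
			// 20:18 clear; any larger magnitude sets at least one
			// of them, so a single test covers both signs.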
			if ((src + 0x00020000) & 0x001C0000)
				continue;

			instr &= 0x9000001F;

			pc >>= 12;
			if (!is_encoder)
				pc = 0U - pc;

			const uint32_t dest = src + pc;
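
			// Put the result back into the instruction: the two
			// lowest bits of dest go to immlo, bits 17:2 go to
			// the low part of immhi, and bit 17 is sign-extended
			// over instruction bits 23:21 so the top of immhi
			// stays consistent with the +/-512 MiB range.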
			instr |= (dest & 3) << 29;
			instr |= (dest & 0x0003FFFC) << 3;
			instr |= (0U - (dest & 0x00020000)) & 0x00E00000;
			write32le(buffer + i, instr);
		}
	}

	return i;
}

static lzma_ret
arm64_coder_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_filter_info *filters, bool is_encoder)
{
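	// arm64_code needs no filter-specific state (hence the 0). The two
	// 4s tell the simple coder that at most four bytes may be left
	// unfiltered at the end of a buffer and that the data is handled in
	// four-byte aligned units.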
	return lzma_simple_coder_init(next, allocator, filters,
			&arm64_code, 0, 4, 4, is_encoder);
}

#ifdef HAVE_ENCODER_ARM64
extern lzma_ret
lzma_simple_arm64_encoder_init(lzma_next_coder *next,
		const lzma_allocator *allocator,
		const lzma_filter_info *filters)
{
	return arm64_coder_init(next, allocator, filters, true);
}
#endif

#ifdef HAVE_DECODER_ARM64
extern lzma_ret
lzma_simple_arm64_decoder_init(lzma_next_coder *next,
		const lzma_allocator *allocator,
		const lzma_filter_info *filters)
{
	return arm64_coder_init(next, allocator, filters, false);
}
#endif