//===-- udivsi3.S - 32-bit unsigned integer divide ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the __udivsi3 (32-bit unsigned integer divide)
// function for the ARM 32-bit architecture.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"

        .syntax unified
        .text

DEFINE_CODE_STATE

        .p2align 2
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_uidiv, __udivsi3)
@ unsigned int __udivsi3(unsigned int dividend, unsigned int divisor)
@   Calculate and return the quotient of the (unsigned) division.

DEFINE_COMPILERRT_FUNCTION(__udivsi3)
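        // Two implementations follow: targets with the integer divide extension
        // use the udiv instruction directly; everything else falls back to the
        // software binary long division further below.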
#if __ARM_ARCH_EXT_IDIV__
        tst      r1, r1
        beq      LOCAL_LABEL(divby0)
        udiv     r0, r0, r1
        bx       lr
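        // Division by zero: zero the result and, on EABI targets, branch to the
        // run-time ABI handler __aeabi_idiv0 so the platform can trap or
        // substitute a result.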
LOCAL_LABEL(divby0):
        // Use movs for compatibility with v8-m.base.
        movs     r0, #0
#  ifdef __ARM_EABI__
        b        __aeabi_idiv0
#  else
        JMP(lr)
#  endif

#else // ! __ARM_ARCH_EXT_IDIV__
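        // Early outs on the divisor: r1 == 0 is division by zero, and r1 == 1
        // means the quotient is the numerator itself, so r0 is returned unchanged.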
        cmp      r1, #1
        bcc      LOCAL_LABEL(divby0)
#if defined(USE_THUMB_1)
        bne      LOCAL_LABEL(num_neq_denom)
        JMP(lr)
LOCAL_LABEL(num_neq_denom):
#else
        IT(eq)
        JMPc(lr, eq)
#endif
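        // If the numerator is smaller than the denominator, the quotient is 0.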
        cmp      r0, r1
#if defined(USE_THUMB_1)
        bhs      LOCAL_LABEL(num_ge_denom)
        movs     r0, #0
        JMP(lr)
LOCAL_LABEL(num_ge_denom):
#else
        ITT(cc)
        movcc    r0, #0
        JMPc(lr, cc)
#endif

        // Implement division using the binary long division algorithm.
        //
        // r0 is the numerator, r1 the denominator.
        //
        // The code before JMP computes the correct shift I, so that
        // r0 and (r1 << I) have the highest bit set in the same position.
        // At the time of JMP, ip := .Ldiv0block - 12 * I.
        // This depends on the fixed instruction size of block:
        // for ARM mode this is 12 bytes; for Thumb-2 mode it is 14 bytes.
        //
        // block(shift) implements the test-and-update-quotient core.
        // It assumes (r1 << shift) can be computed without overflow and
        // that r0 < 2 * (r1 << shift). The quotient is stored in r3.
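        //
        // Roughly, in C (an illustrative sketch only, not part of the build):
        //
        //   unsigned q = 0;
        //   for (int shift = I; shift >= 0; --shift)
        //     if (r0 >= (r1 << shift)) {
        //       r0 -= r1 << shift;       // one subtraction per quotient bit
        //       q  |= 1u << shift;
        //     }
        //   return q;                    // each block() below is one unrolled iteration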

#  if defined(__ARM_FEATURE_CLZ)
        clz      ip, r0
        clz      r3, r1
        // r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3.
        sub      r3, r3, ip
#    if defined(USE_THUMB_2)
        adr      ip, LOCAL_LABEL(div0block) + 1
        sub      ip, ip, r3, lsl #1
#    else
        adr      ip, LOCAL_LABEL(div0block)
#    endif
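        // The next two subtractions take 4 * r3 + 8 * r3 = 12 * r3 off ip,
        // i.e. one 12-byte ARM-mode block per shift step (the Thumb-2 case
        // already subtracted its extra 2 * r3 above).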
        sub      ip, ip, r3, lsl #2
        sub      ip, ip, r3, lsl #3
        mov      r3, #0
        bx       ip
#  else // No CLZ Feature
#    if defined(USE_THUMB_2)
#      error THUMB mode requires CLZ or UDIV
#    endif
#    if defined(USE_THUMB_1)
#      define BLOCK_SIZE 10
#    else
#      define BLOCK_SIZE 12
#    endif
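        // Without CLZ, locate the shift by binary search: try halving shifts of
        // 16, 8, 4, 2 and 1; whenever the shifted-down numerator is still >= r1,
        // keep the shifted copy in r2 and step the block pointer back by the
        // matching multiple of BLOCK_SIZE. In Thumb-1 the pointer is kept in r0
        // (a low register, with bit 0 set for BX) and the original numerator is
        // parked in ip.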
        mov      r2, r0
#    if defined(USE_THUMB_1)
        mov      ip, r0
        adr      r0, LOCAL_LABEL(div0block)
        adds     r0, #1
#    else
        adr      ip, LOCAL_LABEL(div0block)
#    endif

        lsrs     r3, r2, #16
        cmp      r3, r1
#    if defined(USE_THUMB_1)
        blo      LOCAL_LABEL(skip_16)
        movs     r2, r3
        subs     r0, r0, #(16 * BLOCK_SIZE)
LOCAL_LABEL(skip_16):
#    else
        movhs    r2, r3
        subhs    ip, ip, #(16 * BLOCK_SIZE)
#    endif

        lsrs     r3, r2, #8
        cmp      r3, r1
#    if defined(USE_THUMB_1)
        blo      LOCAL_LABEL(skip_8)
        movs     r2, r3
        subs     r0, r0, #(8 * BLOCK_SIZE)
LOCAL_LABEL(skip_8):
#    else
        movhs    r2, r3
        subhs    ip, ip, #(8 * BLOCK_SIZE)
#    endif

        lsrs     r3, r2, #4
        cmp      r3, r1
#    if defined(USE_THUMB_1)
        blo      LOCAL_LABEL(skip_4)
        movs     r2, r3
        subs     r0, r0, #(4 * BLOCK_SIZE)
LOCAL_LABEL(skip_4):
#    else
        movhs    r2, r3
        subhs    ip, ip, #(4 * BLOCK_SIZE)
#    endif

        lsrs     r3, r2, #2
        cmp      r3, r1
#    if defined(USE_THUMB_1)
        blo      LOCAL_LABEL(skip_2)
        movs     r2, r3
        subs     r0, r0, #(2 * BLOCK_SIZE)
LOCAL_LABEL(skip_2):
#    else
        movhs    r2, r3
        subhs    ip, ip, #(2 * BLOCK_SIZE)
#    endif

        // Last block, no need to update r2 or r3.
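        // In Thumb-1 the jump target was built in r0, so hand it over to r2,
        // restore the numerator from ip, clear the quotient and branch through r2;
        // ARM mode simply adjusts ip and branches through it.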
#    if defined(USE_THUMB_1)
        lsrs     r3, r2, #1
        cmp      r3, r1
        blo      LOCAL_LABEL(skip_1)
        subs     r0, r0, #(1 * BLOCK_SIZE)
LOCAL_LABEL(skip_1):
        movs     r2, r0
        mov      r0, ip
        movs     r3, #0
        JMP(r2)
#    else
        cmp      r1, r2, lsr #1
        subls    ip, ip, #(1 * BLOCK_SIZE)
        movs     r3, #0
        JMP(ip)
#    endif
#  endif // __ARM_FEATURE_CLZ

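        // IMM expands to '#': writing "IMM shift" inside the block() macro below
        // avoids "#shift", which the C preprocessor would treat as its stringize
        // operator, while still producing an immediate after expansion.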
#define IMM #
        // Due to the limited branch range in Thumb-1, the divide-by-zero block
        // has to be placed close to the branches that reach it.
LOCAL_LABEL(divby0):
        movs     r0, #0
#  if defined(__ARM_EABI__)
        push     {r7, lr}
        bl       __aeabi_idiv0 // due to relocation limit, can't use b.
        pop      {r7, pc}
#  else
        JMP(lr)
#  endif

#if defined(USE_THUMB_1)
#define block(shift)                                      \
        lsls     r2, r1, IMM shift;                       \
        cmp      r0, r2;                                  \
        blo      LOCAL_LABEL(block_skip_##shift);         \
        subs     r0, r0, r2;                              \
        LOCAL_LABEL(block_skip_##shift) :;                \
        adcs     r3, r3 // same as ((r3 << 1) | Carry). Carry is set if r0 >= r2.

        // TODO: if the current location counter is not word aligned, we don't
        // need the .p2align and nop.
        // Label div0block must be word-aligned, so align at block 31 first:
        // 31 blocks of 10 bytes = 310 bytes, so a 2-byte nop after the .p2align
        // keeps div0block word-aligned.
        .p2align 2
        nop

#else
#define block(shift)                                      \
        cmp      r0, r1, lsl IMM shift;                   \
        ITT(hs);                                          \
        WIDE(addhs) r3, r3, IMM (1 << shift);             \
        WIDE(subhs) r0, r0, r1, lsl IMM shift
#endif

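        // One block per quotient bit, from bit 31 down to bit 0. The computed
        // branch above enters this chain at shift I, so the blocks for larger,
        // impossible shifts are never executed.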
        block(31)
        block(30)
        block(29)
        block(28)
        block(27)
        block(26)
        block(25)
        block(24)
        block(23)
        block(22)
        block(21)
        block(20)
        block(19)
        block(18)
        block(17)
        block(16)
        block(15)
        block(14)
        block(13)
        block(12)
        block(11)
        block(10)
        block(9)
        block(8)
        block(7)
        block(6)
        block(5)
        block(4)
        block(3)
        block(2)
        block(1)
LOCAL_LABEL(div0block):
        block(0)
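        // All bits processed: the quotient has been accumulated in r3.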
        mov      r0, r3
        JMP(lr)
#endif // __ARM_ARCH_EXT_IDIV__

END_COMPILERRT_FUNCTION(__udivsi3)

NO_EXEC_STACK_DIRECTIVE