// sme-libc-mem-routines.S

// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Routines taken from libc/AOR_v20.02/string/aarch64

#include "../assembly.h"

#ifdef __aarch64__

#define L(l) .L ## l

//
//  __arm_sc_memcpy / __arm_sc_memmove
//

#define dstin x0
#define src x1
#define count x2
#define dst x3
#define srcend1 x4
#define dstend1 x5
#define A_l x6
#define A_lw w6
#define A_h x7
#define B_l x8
#define B_lw w8
#define B_h x9
#define C_l x10
#define C_lw w10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l x14
#define E_h x15
#define F_l x16
#define F_h x17
#define G_l count
#define G_h dst
#define H_l src
#define H_h srcend1
#define tmp1 x14

/* This implementation handles overlaps and supports both memcpy and memmove
   from a single entry point. It uses unaligned accesses and branchless
   sequences to keep the code small, simple and improve performance.

   Copies are split into 3 main cases: small copies of up to 32 bytes, medium
   copies of up to 128 bytes, and large copies. The overhead of the overlap
   check is negligible since it is only required for large copies.

   Large copies use a software pipelined loop processing 64 bytes per
   iteration. The destination pointer is 16-byte aligned to minimize unaligned
   accesses. The loop tail is handled by always copying 64 bytes from the
   end.  */
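// For reference, a C-level sketch of the dispatch described above
// (copy_small, copy_medium, copy_long_* are hypothetical names for the
// assembly cases below, not real functions):
//
//   void *sketch_memmove(void *dstin, const void *src, size_t count) {
//     if (count <= 32)  return copy_small(dstin, src, count);    // 0..32
//     if (count <= 128) return copy_medium(dstin, src, count);   // 33..128
//     // Only large copies pay for the overlap check.
//     if ((uintptr_t)dstin - (uintptr_t)src < count)
//       return copy_long_backwards(dstin, src, count);
//     return copy_long_forwards(dstin, src, count);
//   }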
DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_sc_memcpy)
  add srcend1, src, count
  add dstend1, dstin, count
  cmp count, 128
  b.hi L(copy_long)
  cmp count, 32
  b.hi L(copy32_128)

  /* Small copies: 0..32 bytes. */
  cmp count, 16
  b.lo L(copy16)
  ldp A_l, A_h, [src]
  ldp D_l, D_h, [srcend1, -16]
  stp A_l, A_h, [dstin]
  stp D_l, D_h, [dstend1, -16]
  ret
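// A sketch of the trick used here and in the medium path below: for a
// 16..32 byte copy, one 16-byte block from the start and one from the end
// cover the whole range, because the two blocks overlap whenever count < 32.
// The memcpy calls merely model the two 16-byte accesses:
//
//   // Assumes 16 <= count <= 32.
//   memcpy(dstin, src, 16);                            // bytes [0, 16)
//   memcpy(dstin + count - 16, src + count - 16, 16);  // bytes [count-16, count)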
  /* Copy 8..15 bytes. */
L(copy16):
  tbz count, 3, L(copy8)
  ldr A_l, [src]
  ldr A_h, [srcend1, -8]
  str A_l, [dstin]
  str A_h, [dstend1, -8]
  ret

  .p2align 3
  /* Copy 4..7 bytes. */
L(copy8):
  tbz count, 2, L(copy4)
  ldr A_lw, [src]
  ldr B_lw, [srcend1, -4]
  str A_lw, [dstin]
  str B_lw, [dstend1, -4]
  ret

  /* Copy 0..3 bytes using a branchless sequence. */
L(copy4):
  cbz count, L(copy0)
  lsr tmp1, count, 1
  ldrb A_lw, [src]
  ldrb C_lw, [srcend1, -1]
  ldrb B_lw, [src, tmp1]
  strb A_lw, [dstin]
  strb B_lw, [dstin, tmp1]
  strb C_lw, [dstend1, -1]
L(copy0):
  ret
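// The 0..3 byte case above is branchless: it writes the first byte, the
// middle byte (offset count/2), and the last byte. For count == 1 all three
// stores hit byte 0; for count == 2 they hit bytes 0, 1, 1; for count == 3
// they hit bytes 0, 1, 2. A C sketch, assuming 1 <= count <= 3:
//
//   dst[0] = src[0];
//   dst[count >> 1] = src[count >> 1];
//   dst[count - 1] = src[count - 1];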
  .p2align 4
  /* Medium copies: 33..128 bytes. */
L(copy32_128):
  ldp A_l, A_h, [src]
  ldp B_l, B_h, [src, 16]
  ldp C_l, C_h, [srcend1, -32]
  ldp D_l, D_h, [srcend1, -16]
  cmp count, 64
  b.hi L(copy128)
  stp A_l, A_h, [dstin]
  stp B_l, B_h, [dstin, 16]
  stp C_l, C_h, [dstend1, -32]
  stp D_l, D_h, [dstend1, -16]
  ret

  .p2align 4
  /* Copy 65..128 bytes. */
L(copy128):
  ldp E_l, E_h, [src, 32]
  ldp F_l, F_h, [src, 48]
  cmp count, 96
  b.ls L(copy96)
  ldp G_l, G_h, [srcend1, -64]
  ldp H_l, H_h, [srcend1, -48]
  stp G_l, G_h, [dstend1, -64]
  stp H_l, H_h, [dstend1, -48]
L(copy96):
  stp A_l, A_h, [dstin]
  stp B_l, B_h, [dstin, 16]
  stp E_l, E_h, [dstin, 32]
  stp F_l, F_h, [dstin, 48]
  stp C_l, C_h, [dstend1, -32]
  stp D_l, D_h, [dstend1, -16]
  ret

  .p2align 4
  /* Copy more than 128 bytes. */
L(copy_long):
  /* Use backwards copy if there is an overlap. */
  sub tmp1, dstin, src
  cbz tmp1, L(copy0)
  cmp tmp1, count
  b.lo L(copy_long_backwards)
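// The overlap check relies on unsigned wraparound: the single unsigned
// comparison (dstin - src) < count is true exactly when the destination
// starts inside [src, src + count), i.e. when a forward copy would clobber
// source bytes before they are read. A C sketch of the same test:
//
//   if ((uintptr_t)dstin - (uintptr_t)src < (uintptr_t)count) {
//     /* Destination overlaps the source ahead of the read pointer:
//        copy backwards, from the end towards the start. */
//   }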
  /* Copy 16 bytes and then align dst to 16-byte alignment. */
  ldp D_l, D_h, [src]
  and tmp1, dstin, 15
  bic dst, dstin, 15
  sub src, src, tmp1
  add count, count, tmp1 /* Count is now 16 too large. */
  ldp A_l, A_h, [src, 16]
  stp D_l, D_h, [dstin]
  ldp B_l, B_h, [src, 32]
  ldp C_l, C_h, [src, 48]
  ldp D_l, D_h, [src, 64]!
  subs count, count, 128 + 16 /* Test and readjust count. */
  b.ls L(copy64_from_end)
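// A sketch of the alignment bookkeeping above: tmp1 is the destination
// misalignment, dst is rounded down to a 16-byte boundary, and src is
// rewound by the same amount so dst+k and src+k stay paired. The first
// unaligned 16-byte store already covers [dstin, dstin + 16), which is why
// count becomes "16 too large" once it is measured from the aligned base:
//
//   size_t tmp1 = (uintptr_t)dstin & 15;               // misalignment, 0..15
//   char  *dst  = (char *)((uintptr_t)dstin & ~15ul);  // aligned base
//   src   -= tmp1;   // keep source and destination offsets in lockstep
//   count += tmp1;   // now measured from dst, which sits 0..15 before dstin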
L(loop64):
  stp A_l, A_h, [dst, 16]
  ldp A_l, A_h, [src, 16]
  stp B_l, B_h, [dst, 32]
  ldp B_l, B_h, [src, 32]
  stp C_l, C_h, [dst, 48]
  ldp C_l, C_h, [src, 48]
  stp D_l, D_h, [dst, 64]!
  ldp D_l, D_h, [src, 64]!
  subs count, count, 64
  b.hi L(loop64)

  /* Write the last iteration and copy 64 bytes from the end. */
L(copy64_from_end):
  ldp E_l, E_h, [srcend1, -64]
  stp A_l, A_h, [dst, 16]
  ldp A_l, A_h, [srcend1, -48]
  stp B_l, B_h, [dst, 32]
  ldp B_l, B_h, [srcend1, -32]
  stp C_l, C_h, [dst, 48]
  ldp C_l, C_h, [srcend1, -16]
  stp D_l, D_h, [dst, 64]
  stp E_l, E_h, [dstend1, -64]
  stp A_l, A_h, [dstend1, -48]
  stp B_l, B_h, [dstend1, -32]
  stp C_l, C_h, [dstend1, -16]
  ret
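// The tail above avoids a byte-granular remainder loop: once the pipelined
// loop exits, the up to 64 leftover bytes are finished by one unaligned
// 64-byte copy ending exactly at dstend1. Its stores may overlap bytes the
// loop already wrote, which is harmless because all source data is loaded
// before it is overwritten. The final four ldp/stp pairs behave like:
//
//   // Assumes count > 128 overall, so this block never underruns dstin.
//   memcpy(dstend1 - 64, srcend1 - 64, 64);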
  .p2align 4
  /* Large backwards copy for overlapping copies.
     Copy 16 bytes and then align dst to 16-byte alignment. */
L(copy_long_backwards):
  ldp D_l, D_h, [srcend1, -16]
  and tmp1, dstend1, 15
  sub srcend1, srcend1, tmp1
  sub count, count, tmp1
  ldp A_l, A_h, [srcend1, -16]
  stp D_l, D_h, [dstend1, -16]
  ldp B_l, B_h, [srcend1, -32]
  ldp C_l, C_h, [srcend1, -48]
  ldp D_l, D_h, [srcend1, -64]!
  sub dstend1, dstend1, tmp1
  subs count, count, 128
  b.ls L(copy64_from_start)

L(loop64_backwards):
  stp A_l, A_h, [dstend1, -16]
  ldp A_l, A_h, [srcend1, -16]
  stp B_l, B_h, [dstend1, -32]
  ldp B_l, B_h, [srcend1, -32]
  stp C_l, C_h, [dstend1, -48]
  ldp C_l, C_h, [srcend1, -48]
  stp D_l, D_h, [dstend1, -64]!
  ldp D_l, D_h, [srcend1, -64]!
  subs count, count, 64
  b.hi L(loop64_backwards)

  /* Write the last iteration and copy 64 bytes from the start. */
L(copy64_from_start):
  ldp G_l, G_h, [src, 48]
  stp A_l, A_h, [dstend1, -16]
  ldp A_l, A_h, [src, 32]
  stp B_l, B_h, [dstend1, -32]
  ldp B_l, B_h, [src, 16]
  stp C_l, C_h, [dstend1, -48]
  ldp C_l, C_h, [src]
  stp D_l, D_h, [dstend1, -64]
  stp G_l, G_h, [dstin, 48]
  stp A_l, A_h, [dstin, 32]
  stp B_l, B_h, [dstin, 16]
  stp C_l, C_h, [dstin]
  ret
END_COMPILERRT_OUTLINE_FUNCTION(__arm_sc_memcpy)

DEFINE_COMPILERRT_FUNCTION_ALIAS(__arm_sc_memmove, __arm_sc_memcpy)

//
//  __arm_sc_memset
//

#define dstin x0
#define val x1
#define valw w1
#define count x2
#define dst x3
#define dstend2 x4
#define zva_val x5

DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_sc_memset)
#ifdef __ARM_FEATURE_SVE
  mov z0.b, valw
#else
  bfi valw, valw, #8, #8
  bfi valw, valw, #16, #16
  bfi val, val, #32, #32
  fmov d0, val
  fmov v0.d[1], val
#endif
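// In the non-SVE path, the three bfi (bitfield insert) instructions
// replicate the low byte of val across all 64 bits, and the two fmov
// instructions then splat that doubleword into both halves of v0. The same
// replication in C:
//
//   uint64_t v = val & 0xff;
//   v |= v << 8;    // bfi valw, valw, #8, #8
//   v |= v << 16;   // bfi valw, valw, #16, #16
//   v |= v << 32;   // bfi val, val, #32, #32
//   // v now holds the fill byte repeated eight times.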
  add dstend2, dstin, count
  cmp count, 96
  b.hi L(set_long)
  cmp count, 16
  b.hs L(set_medium)
  mov val, v0.D[0]

  /* Set 0..15 bytes. */
  tbz count, 3, 1f
  str val, [dstin]
  str val, [dstend2, -8]
  ret
  nop
1: tbz count, 2, 2f
  str valw, [dstin]
  str valw, [dstend2, -4]
  ret
2: cbz count, 3f
  strb valw, [dstin]
  tbz count, 1, 3f
  strh valw, [dstend2, -2]
3: ret
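// The 0..15 byte path above decodes count bit by bit: bit 3 selects a pair
// of possibly overlapping 8-byte stores (8..15 bytes), bit 2 a pair of
// 4-byte stores (4..7 bytes), and the trailing byte/halfword stores handle
// 1..3 bytes. A C sketch of that last case, assuming 1 <= count <= 3 and
// fill byte c:
//
//   dst[0] = c;                                        // always one byte
//   if (count & 2) { dst[count - 2] = dst[count - 1] = c; }  // 2..3 bytes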
  /* Set 16..96 bytes. */
L(set_medium):
  str q0, [dstin]
  tbnz count, 6, L(set96)
  str q0, [dstend2, -16]
  tbz count, 5, 1f
  str q0, [dstin, 16]
  str q0, [dstend2, -32]
1: ret

  .p2align 4
  /* Set 64..96 bytes. Write 64 bytes from the start and
     32 bytes from the end. */
L(set96):
  str q0, [dstin, 16]
  stp q0, q0, [dstin, 32]
  stp q0, q0, [dstend2, -32]
  ret

  .p2align 4
L(set_long):
  and valw, valw, 255
  bic dst, dstin, 15
  str q0, [dstin]
  cmp count, 160
  ccmp valw, 0, 0, hs
  b.ne L(no_zva)

#ifndef SKIP_ZVA_CHECK
  mrs zva_val, dczid_el0
  and zva_val, zva_val, 31
  cmp zva_val, 4 /* ZVA size is 64 bytes. */
  b.ne L(no_zva)
#endif
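// DC ZVA zeroes a whole block, so it is only usable when the fill value is
// zero; the cmp/ccmp pair above routes any nonzero fill, or any set shorter
// than 160 bytes, to the plain store loop. The dczid_el0 check verifies the
// block size: its low 4 bits encode log2 of the size in 4-byte words, so the
// value 4 means 16 words = 64 bytes, and masking with 31 also keeps the DZP
// bit, which must be clear for DC ZVA to be permitted. A C sketch of the
// gate (read_dczid_el0 is a hypothetical stand-in for the MRS):
//
//   uint64_t dczid = read_dczid_el0();
//   bool use_zva = count >= 160 && valw == 0 &&
//                  (dczid & 31) == 4;   // 64-byte ZVA line, DZP clear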
  str q0, [dst, 16]
  stp q0, q0, [dst, 32]
  bic dst, dst, 63
  sub count, dstend2, dst /* Count is now 64 too large. */
  sub count, count, 128 /* Adjust count and bias for loop. */

  .p2align 4
L(zva_loop):
  add dst, dst, 64
  dc zva, dst
  subs count, count, 64
  b.hi L(zva_loop)
  stp q0, q0, [dstend2, -64]
  stp q0, q0, [dstend2, -32]
  ret

L(no_zva):
  sub count, dstend2, dst /* Count is 16 too large. */
  sub dst, dst, 16 /* Dst is biased by -32. */
  sub count, count, 64 + 16 /* Adjust count and bias for loop. */
L(no_zva_loop):
  stp q0, q0, [dst, 32]
  stp q0, q0, [dst, 64]!
  subs count, count, 64
  b.hi L(no_zva_loop)
  stp q0, q0, [dstend2, -64]
  stp q0, q0, [dstend2, -32]
  ret
END_COMPILERRT_OUTLINE_FUNCTION(__arm_sc_memset)

#endif // __aarch64__