//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/riscv64.
//
//===----------------------------------------------------------------------===//
// About local register variables:
// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
//
// Kernel ABI (as established by kernel sources and glibc):
// - The syscall number is passed in a7
//   (http://man7.org/linux/man-pages/man2/syscall.2.html).
// - Results are returned in a0 and a1.
// - Arguments are passed in a0-a7 (see below).
//
// The argument-passing convention is also described in this forum thread:
// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ
// and was confirmed by inspecting glibc sources.
// Map a syscall name to its Linux syscall number.
#define SYSCALL(name) __NR_##name

// The kernel may read/write user memory on any syscall, so "memory" must be
// listed in the asm clobbers to keep the compiler from caching values across
// the ecall.
#define INTERNAL_SYSCALL_CLOBBERS "memory"

// 0-argument syscall: number in a7, result returned in a0.
static uptr __internal_syscall(u64 nr) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0");  // output only ("=r"): no argument goes in
  __asm__ volatile("ecall\n\t"
                   : "=r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper selected by the internal_syscall dispatcher below.
#define __internal_syscall0(n) (__internal_syscall)(n)
// 1-argument syscall: a0 carries the first argument in and the result out
// ("+r" read-write constraint).
static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; the cast lets callers pass any integer/pointer type.
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
// 2-argument syscall: arguments in a0-a1, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; casts let callers pass any integer/pointer types.
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))
// 3-argument syscall: arguments in a0-a2, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; casts let callers pass any integer/pointer types.
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
// 4-argument syscall: arguments in a0-a3, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; casts let callers pass any integer/pointer types.
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
// 5-argument syscall: arguments in a0-a4, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; casts let callers pass any integer/pointer types.
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))
// 6-argument syscall: arguments in a0-a5, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  register u64 a5 asm("a5") = arg6;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; casts let callers pass any integer/pointer types.
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))
// 7-argument syscall: arguments in a0-a6, result returned in a0.
// (a7 already holds the syscall number, so 7 is the maximum arity.)
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6, long arg7) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  register u64 a5 asm("a5") = arg6;
  register u64 a6 asm("a6") = arg7;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                     "r"(a6)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
// Fixed-arity wrapper; casts let callers pass any integer/pointer types.
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6), (long)(a7))
// Arity-counting trick: the caller's arguments shift the literals 7..0 so
// that the 9th parameter (n) lands on the count of syscall arguments (the
// first argument — the syscall number — is not counted).
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
// Two-level concatenation so that the NARGS expression is fully expanded
// before token pasting.
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
// Paste the argument count onto the base name and call the result, e.g.
// __SYSCALL_DISP(__internal_syscall, nr, x) -> __internal_syscall1(nr, x).
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
// Public entry point: internal_syscall(nr, args...) dispatches on arity.
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
  153. // Helper function used to avoid clobbering of errno.
  154. bool internal_iserror(uptr retval, int *rverrno) {
  155. if (retval >= (uptr)-4095) {
  156. if (rverrno)
  157. *rverrno = -retval;
  158. return true;
  159. }
  160. return false;
  161. }