//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for
// Linux/loongarch64.
//
//===----------------------------------------------------------------------===//

// About local register variables:
// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
//
// Kernel ABI:
// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
//    syscall number is placed in a7
//    parameters, if present, are placed in a0-a6
//    upon return:
//        the return value is placed in a0
//        t0-t8 should be considered clobbered
//        all other registers are preserved
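
// The wrappers below follow that ABI directly: each one binds the syscall
// number to $a7 and up to seven arguments to $a0-$a6 through local register
// variables, issues "syscall 0", and returns whatever the kernel left in $a0.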
#define SYSCALL(name) __NR_##name

#define INTERNAL_SYSCALL_CLOBBERS \
  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"

static uptr __internal_syscall(u64 nr) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0");
  __asm__ volatile("syscall 0\n\t"
                   : "=r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall0(n) (__internal_syscall)(n)

static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  register u64 a5 asm("$a5") = arg6;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6, long arg7) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  register u64 a5 asm("$a5") = arg6;
  register u64 a6 asm("$a6") = arg7;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                     "r"(a6)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6), (long)(a7))

#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
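
// The __SYSCALL_NARGS machinery counts how many arguments follow the syscall
// number and pastes that count onto __internal_syscall, so each call site
// selects the wrapper with the matching arity. As an illustrative expansion
// (fd is a hypothetical caller-side variable, not defined in this file):
//   internal_syscall(SYSCALL(close), fd)
// expands to
//   __internal_syscall1(__NR_close, fd)
// which in turn calls the one-argument overload with fd cast to u64.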

// Helper function used to avoid clobbering of errno.
bool internal_iserror(uptr retval, int *internal_errno) {
  if (retval >= (uptr)-4095) {
    if (internal_errno)
      *internal_errno = -retval;
    return true;
  }
  return false;
}
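
// Sketch of how a caller is expected to combine the two primitives; the names
// fd and err are hypothetical and used only for illustration:
//   uptr res = internal_syscall(SYSCALL(close), fd);
//   int err;
//   if (internal_iserror(res, &err)) {
//     // The kernel returned a value in [-4095, -1]; err now holds the
//     // positive errno without touching the libc errno.
//   }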