/* defer-taskrun.c — regression test for IORING_SETUP_DEFER_TASKRUN */
  1. #include "../config-host.h"
  2. // SPDX-License-Identifier: MIT
  3. #include <errno.h>
  4. #include <stdio.h>
  5. #include <unistd.h>
  6. #include <stdlib.h>
  7. #include <string.h>
  8. #include <sys/eventfd.h>
  9. #include <signal.h>
  10. #include <poll.h>
  11. #include <assert.h>
  12. #include <pthread.h>
  13. #include <sys/types.h>
  14. #include <sys/wait.h>
  15. #include "liburing.h"
  16. #include "test.h"
  17. #include "helpers.h"
/* Scratch file created by test_exec() when no filename argument is given. */
#define EXEC_FILENAME ".defer-taskrun"
/* Size of that scratch file and of the O_DIRECT read buffer (1 MiB). */
#define EXEC_FILESIZE (1U<<20)
  20. static bool can_read_t(int fd, int time)
  21. {
  22. int ret;
  23. struct pollfd p = {
  24. .fd = fd,
  25. .events = POLLIN,
  26. };
  27. ret = poll(&p, 1, time);
  28. return ret == 1;
  29. }
  30. static bool can_read(int fd)
  31. {
  32. return can_read_t(fd, 0);
  33. }
  34. static void eventfd_clear(int fd)
  35. {
  36. uint64_t val;
  37. int ret;
  38. assert(can_read(fd));
  39. ret = read(fd, &val, 8);
  40. assert(ret == 8);
  41. }
  42. static void eventfd_trigger(int fd)
  43. {
  44. uint64_t val = 1;
  45. int ret;
  46. ret = write(fd, &val, sizeof(val));
  47. assert(ret == sizeof(val));
  48. }
/*
 * Test assertion: if @x is false, print file/line and the failing
 * expression, then return -1 from the enclosing function. Note the early
 * return means resources acquired earlier in the caller are not released;
 * acceptable here since a failing test exits shortly after.
 */
#define CHECK(x) \
do { \
	if (!(x)) { \
		fprintf(stderr, "%s:%d %s failed\n", __FILE__, __LINE__, #x); \
		return -1; \
	} \
} while (0)
  56. static int test_eventfd(void)
  57. {
  58. struct io_uring ring;
  59. int ret;
  60. int fda, fdb;
  61. struct io_uring_cqe *cqe;
  62. ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
  63. IORING_SETUP_DEFER_TASKRUN);
  64. if (ret)
  65. return ret;
  66. fda = eventfd(0, EFD_NONBLOCK);
  67. fdb = eventfd(0, EFD_NONBLOCK);
  68. CHECK(fda >= 0 && fdb >= 0);
  69. ret = io_uring_register_eventfd(&ring, fda);
  70. if (ret)
  71. return ret;
  72. CHECK(!can_read(fda));
  73. CHECK(!can_read(fdb));
  74. io_uring_prep_poll_add(io_uring_get_sqe(&ring), fdb, POLLIN);
  75. io_uring_submit(&ring);
  76. CHECK(!can_read(fda)); /* poll should not have completed */
  77. io_uring_prep_nop(io_uring_get_sqe(&ring));
  78. io_uring_submit(&ring);
  79. CHECK(can_read(fda)); /* nop should have */
  80. CHECK(io_uring_peek_cqe(&ring, &cqe) == 0);
  81. CHECK(cqe->res == 0);
  82. io_uring_cqe_seen(&ring, cqe);
  83. eventfd_clear(fda);
  84. eventfd_trigger(fdb);
  85. /* can take time due to rcu_call */
  86. CHECK(can_read_t(fda, 1000));
  87. /* should not have processed the cqe yet */
  88. CHECK(io_uring_cq_ready(&ring) == 0);
  89. io_uring_get_events(&ring);
  90. CHECK(io_uring_cq_ready(&ring) == 1);
  91. io_uring_queue_exit(&ring);
  92. return 0;
  93. }
/* State shared between test_thread_shutdown() and its helper thread. */
struct thread_data {
	struct io_uring ring;	/* ring created R_DISABLED by the main thread */
	int efd;		/* eventfd the helper queues a read against */
	char buff[8];		/* destination buffer for that read */
};
/*
 * Helper thread body: enables the rings (making this thread the single
 * issuer for the SINGLE_ISSUER ring), then queues and submits a read of
 * the eventfd. The thread exits with that read still in flight.
 */
static void *thread(void *t)
{
	struct thread_data *td = t;

	io_uring_enable_rings(&td->ring);
	io_uring_prep_read(io_uring_get_sqe(&td->ring), td->efd, td->buff, sizeof(td->buff), 0);
	io_uring_submit(&td->ring);
	return NULL;
}
/*
 * Exercise single-issuer ownership across threads: create the ring
 * R_DISABLED, have a second thread enable it (becoming the issuer) and
 * submit a read, then verify the original thread is locked out — get_events
 * fails with -EBADFD while the ring is still disabled, and with -EEXIST
 * once the (now exited) helper thread owns it.
 * Returns 0 on success, non-zero on failure.
 */
static int test_thread_shutdown(void)
{
	pthread_t t1;
	int ret;
	struct thread_data td;
	struct io_uring_cqe *cqe;
	uint64_t val = 1;

	ret = io_uring_queue_init(8, &td.ring, IORING_SETUP_SINGLE_ISSUER |
				  IORING_SETUP_DEFER_TASKRUN |
				  IORING_SETUP_R_DISABLED);
	if (ret)
		return ret;

	/* ring still disabled: no task may run events yet */
	CHECK(io_uring_get_events(&td.ring) == -EBADFD);

	td.efd = eventfd(0, 0);
	CHECK(td.efd >= 0);

	/* helper becomes the issuer, queues a read, and exits */
	CHECK(pthread_create(&t1, NULL, thread, &td) == 0);
	CHECK(pthread_join(t1, NULL) == 0);

	/* this thread is not the issuer, so it cannot drive the ring */
	CHECK(io_uring_get_events(&td.ring) == -EEXIST);

	/* completing the read doesn't change that */
	CHECK(write(td.efd, &val, sizeof(val)) == sizeof(val));
	CHECK(io_uring_wait_cqe(&td.ring, &cqe) == -EEXIST);

	close(td.efd);
	io_uring_queue_exit(&td.ring);
	return 0;
}
  131. static int test_exec(const char *filename)
  132. {
  133. int ret;
  134. int fd;
  135. struct io_uring ring;
  136. pid_t fork_pid;
  137. static char * const new_argv[] = {"1", "2", "3", NULL};
  138. static char * const new_env[] = {NULL};
  139. char *buff;
  140. fork_pid = fork();
  141. CHECK(fork_pid >= 0);
  142. if (fork_pid > 0) {
  143. int wstatus;
  144. CHECK(waitpid(fork_pid, &wstatus, 0) != (pid_t)-1);
  145. if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) == T_EXIT_FAIL) {
  146. fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
  147. return -1;
  148. }
  149. return T_EXIT_PASS;
  150. }
  151. ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
  152. IORING_SETUP_DEFER_TASKRUN);
  153. if (ret)
  154. return ret;
  155. if (filename) {
  156. fd = open(filename, O_RDONLY | O_DIRECT);
  157. if (fd < 0 && errno == EINVAL)
  158. return T_EXIT_SKIP;
  159. } else {
  160. t_create_file(EXEC_FILENAME, EXEC_FILESIZE);
  161. fd = open(EXEC_FILENAME, O_RDONLY | O_DIRECT);
  162. if (fd < 0 && errno == EINVAL) {
  163. unlink(EXEC_FILENAME);
  164. return T_EXIT_SKIP;
  165. }
  166. unlink(EXEC_FILENAME);
  167. }
  168. buff = (char*)malloc(EXEC_FILESIZE);
  169. CHECK(posix_memalign((void **)&buff, 4096, EXEC_FILESIZE) == 0);
  170. CHECK(buff);
  171. CHECK(fd >= 0);
  172. io_uring_prep_read(io_uring_get_sqe(&ring), fd, buff, EXEC_FILESIZE, 0);
  173. io_uring_submit(&ring);
  174. ret = execve("/proc/self/exe", new_argv, new_env);
  175. /* if we get here it failed anyway */
  176. fprintf(stderr, "execve failed %d\n", ret);
  177. return T_EXIT_FAIL;
  178. }
/*
 * With IORING_SETUP_TASKRUN_FLAG, the kernel sets IORING_SQ_TASKRUN in the
 * SQ flags when deferred task work is pending. Verify the flag is raised
 * once the poll target becomes ready, that peek_cqe notices the flag and
 * fetches the completion, and that the flag is cleared afterwards.
 * Returns 0 on success, non-zero on failure.
 */
static int test_flag(void)
{
	struct io_uring ring;
	int ret;
	int fd;
	struct io_uring_cqe *cqe;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
				  IORING_SETUP_DEFER_TASKRUN |
				  IORING_SETUP_TASKRUN_FLAG);
	CHECK(!ret);

	fd = eventfd(0, EFD_NONBLOCK);
	CHECK(fd >= 0);

	io_uring_prep_poll_add(io_uring_get_sqe(&ring), fd, POLLIN);
	io_uring_submit(&ring);
	CHECK(!can_read(fd)); /* poll should not have completed */

	eventfd_trigger(fd);
	CHECK(can_read(fd));

	/* should not have processed the poll cqe yet */
	CHECK(io_uring_cq_ready(&ring) == 0);

	/* flag should be set */
	CHECK(IO_URING_READ_ONCE(*ring.sq.kflags) & IORING_SQ_TASKRUN);

	/* Specifically peek, knowing no cqe is ready in the CQ ring,
	 * but because the flag is set, liburing should try and get more
	 */
	ret = io_uring_peek_cqe(&ring, &cqe);

	CHECK(ret == 0 && cqe);
	CHECK(!(IO_URING_READ_ONCE(*ring.sq.kflags) & IORING_SQ_TASKRUN));

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}
  210. static int test_ring_shutdown(void)
  211. {
  212. struct io_uring ring;
  213. int ret;
  214. int fd[2];
  215. char buff = '\0';
  216. char send = 'X';
  217. ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
  218. IORING_SETUP_DEFER_TASKRUN |
  219. IORING_SETUP_TASKRUN_FLAG);
  220. CHECK(!ret);
  221. ret = t_create_socket_pair(fd, true);
  222. CHECK(!ret);
  223. io_uring_prep_recv(io_uring_get_sqe(&ring), fd[0], &buff, 1, 0);
  224. io_uring_submit(&ring);
  225. ret = write(fd[1], &send, 1);
  226. CHECK(ret == 1);
  227. /* should not have processed the poll cqe yet */
  228. CHECK(io_uring_cq_ready(&ring) == 0);
  229. io_uring_queue_exit(&ring);
  230. /* task work should have been processed by now */
  231. CHECK(buff = 'X');
  232. return 0;
  233. }
  234. static int test_drain(void)
  235. {
  236. struct io_uring ring;
  237. int ret, i, fd[2];
  238. struct io_uring_sqe *sqe;
  239. struct io_uring_cqe *cqe;
  240. struct iovec iovecs[128];
  241. char buff[ARRAY_SIZE(iovecs)];
  242. ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
  243. IORING_SETUP_DEFER_TASKRUN |
  244. IORING_SETUP_TASKRUN_FLAG);
  245. CHECK(!ret);
  246. for (i = 0; i < ARRAY_SIZE(iovecs); i++) {
  247. iovecs[i].iov_base = &buff[i];
  248. iovecs[i].iov_len = 1;
  249. }
  250. ret = t_create_socket_pair(fd, true);
  251. CHECK(!ret);
  252. sqe = io_uring_get_sqe(&ring);
  253. io_uring_prep_writev(sqe, fd[1], &iovecs[0], ARRAY_SIZE(iovecs), 0);
  254. sqe->flags |= IOSQE_IO_DRAIN;
  255. io_uring_submit(&ring);
  256. for (i = 0; i < ARRAY_SIZE(iovecs); i++)
  257. iovecs[i].iov_base = NULL;
  258. CHECK(io_uring_wait_cqe(&ring, &cqe) == 0);
  259. CHECK(cqe->res == 128);
  260. close(fd[0]);
  261. close(fd[1]);
  262. io_uring_queue_exit(&ring);
  263. return 0;
  264. }
/*
 * Entry point. Optional single argument: a filename (e.g. a null-blk
 * device) for test_exec()'s O_DIRECT read. Skips entirely if the kernel
 * lacks DEFER_TASKRUN support. Note test_exec() is only treated as a
 * failure on T_EXIT_FAIL, so its T_EXIT_SKIP result falls through to the
 * remaining tests.
 */
int main(int argc, char *argv[])
{
	int ret;
	const char *filename = NULL;

	if (argc > 2)
		return T_EXIT_SKIP;
	if (argc == 2) {
		/* This test exposes interesting behaviour with a null-blk
		 * device configured like:
		 * $ modprobe null-blk completion_nsec=100000000 irqmode=2
		 * and then run with $ defer-taskrun.t /dev/nullb0
		 */
		filename = argv[1];
	}

	if (!t_probe_defer_taskrun())
		return T_EXIT_SKIP;

	ret = test_thread_shutdown();
	if (ret) {
		fprintf(stderr, "test_thread_shutdown failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_exec(filename);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_exec failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_eventfd();
	if (ret) {
		fprintf(stderr, "eventfd failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_flag();
	if (ret) {
		fprintf(stderr, "flag failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_ring_shutdown();
	if (ret) {
		fprintf(stderr, "test_ring_shutdown failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_drain();
	if (ret) {
		fprintf(stderr, "test_drain failed\n");
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}