/* io-cancel.c */
  1. #include "../config-host.h"
  2. /* SPDX-License-Identifier: MIT */
  3. /*
  4. * Description: Basic IO cancel test
  5. */
  6. #include <errno.h>
  7. #include <stdio.h>
  8. #include <unistd.h>
  9. #include <stdlib.h>
  10. #include <string.h>
  11. #include <fcntl.h>
  12. #include <sys/types.h>
  13. #include <sys/time.h>
  14. #include <sys/wait.h>
  15. #include <poll.h>
  16. #include "helpers.h"
  17. #include "liburing.h"
  18. #define FILE_SIZE (128 * 1024)
  19. #define BS 4096
  20. #define BUFFERS (FILE_SIZE / BS)
  21. static struct iovec *vecs;
  22. static unsigned long long utime_since(const struct timeval *s,
  23. const struct timeval *e)
  24. {
  25. long long sec, usec;
  26. sec = e->tv_sec - s->tv_sec;
  27. usec = (e->tv_usec - s->tv_usec);
  28. if (sec > 0 && usec < 0) {
  29. sec--;
  30. usec += 1000000;
  31. }
  32. sec *= 1000000;
  33. return sec + usec;
  34. }
  35. static unsigned long long utime_since_now(struct timeval *tv)
  36. {
  37. struct timeval end;
  38. gettimeofday(&end, NULL);
  39. return utime_since(tv, &end);
  40. }
  41. static int start_io(struct io_uring *ring, int fd, int do_write)
  42. {
  43. struct io_uring_sqe *sqe;
  44. int i, ret;
  45. for (i = 0; i < BUFFERS; i++) {
  46. off_t offset;
  47. sqe = io_uring_get_sqe(ring);
  48. if (!sqe) {
  49. fprintf(stderr, "sqe get failed\n");
  50. goto err;
  51. }
  52. offset = BS * (rand() % BUFFERS);
  53. if (do_write) {
  54. io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
  55. } else {
  56. io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
  57. }
  58. sqe->user_data = i + 1;
  59. }
  60. ret = io_uring_submit(ring);
  61. if (ret != BUFFERS) {
  62. fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
  63. goto err;
  64. }
  65. return 0;
  66. err:
  67. return 1;
  68. }
  69. static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
  70. {
  71. struct io_uring_cqe *cqe;
  72. int i, ret;
  73. for (i = 0; i < nr_io; i++) {
  74. ret = io_uring_wait_cqe(ring, &cqe);
  75. if (ret) {
  76. fprintf(stderr, "wait_cqe=%d\n", ret);
  77. goto err;
  78. }
  79. if (do_partial && cqe->user_data) {
  80. if (!(cqe->user_data & 1)) {
  81. if (cqe->res != BS) {
  82. fprintf(stderr, "IO %d wasn't cancelled but got error %d\n", (unsigned) cqe->user_data, cqe->res);
  83. goto err;
  84. }
  85. }
  86. }
  87. io_uring_cqe_seen(ring, cqe);
  88. }
  89. return 0;
  90. err:
  91. return 1;
  92. }
  93. static int do_io(struct io_uring *ring, int fd, int do_write)
  94. {
  95. if (start_io(ring, fd, do_write))
  96. return 1;
  97. if (wait_io(ring, BUFFERS, 0))
  98. return 1;
  99. return 0;
  100. }
  101. static int start_cancel(struct io_uring *ring, int do_partial, int async_cancel)
  102. {
  103. struct io_uring_sqe *sqe;
  104. int i, ret, submitted = 0;
  105. for (i = 0; i < BUFFERS; i++) {
  106. if (do_partial && (i & 1))
  107. continue;
  108. sqe = io_uring_get_sqe(ring);
  109. if (!sqe) {
  110. fprintf(stderr, "sqe get failed\n");
  111. goto err;
  112. }
  113. io_uring_prep_cancel64(sqe, i + 1, 0);
  114. if (async_cancel)
  115. sqe->flags |= IOSQE_ASYNC;
  116. sqe->user_data = 0;
  117. submitted++;
  118. }
  119. ret = io_uring_submit(ring);
  120. if (ret != submitted) {
  121. fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
  122. goto err;
  123. }
  124. return 0;
  125. err:
  126. return 1;
  127. }
  128. /*
  129. * Test cancels. If 'do_partial' is set, then we only attempt to cancel half of
  130. * the submitted IO. This is done to verify that cancelling one piece of IO doesn't
  131. * impact others.
  132. */
  133. static int test_io_cancel(const char *file, int do_write, int do_partial,
  134. int async_cancel)
  135. {
  136. struct io_uring ring;
  137. struct timeval start_tv;
  138. unsigned long usecs;
  139. unsigned to_wait;
  140. int fd, ret;
  141. fd = open(file, O_RDWR | O_DIRECT);
  142. if (fd < 0) {
  143. if (errno == EINVAL)
  144. return T_EXIT_SKIP;
  145. perror("file open");
  146. goto err;
  147. }
  148. ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
  149. if (ret) {
  150. fprintf(stderr, "ring create failed: %d\n", ret);
  151. goto err;
  152. }
  153. if (do_io(&ring, fd, do_write))
  154. goto err;
  155. gettimeofday(&start_tv, NULL);
  156. if (do_io(&ring, fd, do_write))
  157. goto err;
  158. usecs = utime_since_now(&start_tv);
  159. if (start_io(&ring, fd, do_write))
  160. goto err;
  161. /* sleep for 1/3 of the total time, to allow some to start/complete */
  162. usleep(usecs / 3);
  163. if (start_cancel(&ring, do_partial, async_cancel))
  164. goto err;
  165. to_wait = BUFFERS;
  166. if (do_partial)
  167. to_wait += BUFFERS / 2;
  168. else
  169. to_wait += BUFFERS;
  170. if (wait_io(&ring, to_wait, do_partial))
  171. goto err;
  172. io_uring_queue_exit(&ring);
  173. close(fd);
  174. return 0;
  175. err:
  176. if (fd != -1)
  177. close(fd);
  178. return 1;
  179. }
/*
 * Verify that a cancel submitted on one ring cannot reach a request owned
 * by a different ring: the cancel must complete with -ENOENT, and the
 * targeted read must still be in flight afterwards.
 */
static int test_dont_cancel_another_ring(void)
{
	struct io_uring ring1, ring2;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, fds[2];
	/* 100ms wait used below to prove the read did NOT complete */
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };
	ret = io_uring_queue_init(8, &ring1, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	ret = io_uring_queue_init(8, &ring2, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	/* park a read on ring1; nothing writes the pipe, so it never completes */
	sqe = io_uring_get_sqe(&ring1);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;
	ret = io_uring_submit(&ring1);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}
	/* make sure it doesn't cancel requests of the other ctx */
	sqe = io_uring_get_sqe(&ring2);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	/* cancel by user_data 1 — matches ring1's read, but issued on ring2 */
	io_uring_prep_cancel64(sqe, 1, 0);
	sqe->user_data = 2;
	ret = io_uring_submit(&ring2);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}
	ret = io_uring_wait_cqe(&ring2, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	/* the cancel must not find anything to cancel on its own ring */
	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
			(int)cqe->user_data, (int)cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring2, cqe);
	/* the read on ring1 must still be pending: expect the wait to time out */
	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
	if (ret != -ETIME) {
		fprintf(stderr, "read got cancelled or wait failed\n");
		return 1;
	}
	io_uring_cqe_seen(&ring1, cqe);
	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring1);
	io_uring_queue_exit(&ring2);
	return 0;
}
/*
 * A request submitted by the parent must be cancelable by a forked child
 * sharing the same ring. The child issues the cancel, reaps both CQEs
 * (the cancelled read and the cancel itself), and exits; the parent checks
 * the child's exit status.
 */
static int test_cancel_req_across_fork(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, i, fds[2];
	pid_t p;
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	/* a read that never completes on its own: nothing writes the pipe */
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;
	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}
	if (p == 0) {
		/* child: cancel the parent's read through the shared ring */
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
			return 1;
		}
		io_uring_prep_cancel64(sqe, 1, 0);
		sqe->user_data = 2;
		ret = io_uring_submit(&ring);
		if (ret != 1) {
			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
			return 1;
		}
		/* reap both CQEs: the cancelled read and the cancel request */
		for (i = 0; i < 2; ++i) {
			ret = io_uring_wait_cqe(&ring, &cqe);
			if (ret) {
				fprintf(stderr, "wait_cqe=%d\n", ret);
				return 1;
			}
			switch (cqe->user_data) {
			case 1:
				/* the read must report interruption or cancellation */
				if (cqe->res != -EINTR &&
				    cqe->res != -ECANCELED) {
					fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			case 2:
				/* cancel: 0 on success, -EALREADY if already executing */
				if (cqe->res != -EALREADY && cqe->res) {
					fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			default:
				/* no other user_data values are ever submitted */
				fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
				exit(1);
			}
			io_uring_cqe_seen(&ring, cqe);
		}
		exit(0);
	} else {
		int wstatus;
		pid_t childpid;
		/* parent: reap the child, retrying if interrupted by a signal */
		do {
			childpid = waitpid(p, &wstatus, 0);
		} while (childpid == (pid_t)-1 && errno == EINTR);
		if (childpid == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}
	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
/*
 * Have a child submit a linked poll+timeout pair plus a standalone timeout,
 * then exit with all three in flight. On the child's exit, the poll and its
 * linked timeout must be cancelled (-ECANCELED), while the independent
 * timeout still fires normally (-ETIME). The parent reaps all three CQEs.
 */
static int test_cancel_inflight_exit(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret, i;
	pid_t p;
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}
	if (p == 0) {
		/* poll on the ring fd itself — never becomes readable here */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
		sqe->user_data = 1;
		sqe->flags |= IOSQE_IO_LINK;
		/* linked timeout, dies with the poll it is chained to */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 2;
		/* standalone timeout, should survive the child's exit */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 3;
		ret = io_uring_submit(&ring);
		if (ret != 3) {
			fprintf(stderr, "io_uring_submit() failed %s, ret %i\n", __FUNCTION__, ret);
			exit(1);
		}
		/* exit with requests in flight, triggering exit cancellation */
		exit(0);
	} else {
		int wstatus;
		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}
	/* parent reaps all three CQEs and checks each expected result */
	for (i = 0; i < 3; ++i) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
			fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
  409. static int test_sqpoll_cancel_iowq_requests(void)
  410. {
  411. struct io_uring ring;
  412. struct io_uring_sqe *sqe;
  413. int ret, fds[2];
  414. char buffer[16];
  415. ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
  416. if (ret) {
  417. fprintf(stderr, "ring create failed: %d\n", ret);
  418. return 1;
  419. }
  420. if (pipe(fds)) {
  421. perror("pipe");
  422. return 1;
  423. }
  424. /* pin both pipe ends via io-wq */
  425. sqe = io_uring_get_sqe(&ring);
  426. io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
  427. sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
  428. sqe->user_data = 1;
  429. sqe = io_uring_get_sqe(&ring);
  430. io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
  431. sqe->flags |= IOSQE_ASYNC;
  432. sqe->user_data = 2;
  433. ret = io_uring_submit(&ring);
  434. if (ret != 2) {
  435. fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
  436. return 1;
  437. }
  438. /* wait for sqpoll to kick in and submit before exit */
  439. sleep(1);
  440. io_uring_queue_exit(&ring);
  441. /* close the write end, so if ring is cancelled properly read() fails*/
  442. close(fds[1]);
  443. ret = read(fds[0], buffer, 10);
  444. close(fds[0]);
  445. return 0;
  446. }
  447. int main(int argc, char *argv[])
  448. {
  449. const char *fname = ".io-cancel-test";
  450. int i, ret;
  451. if (argc > 1)
  452. return T_EXIT_SKIP;
  453. if (test_dont_cancel_another_ring()) {
  454. fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
  455. return T_EXIT_FAIL;
  456. }
  457. if (test_cancel_req_across_fork()) {
  458. fprintf(stderr, "test_cancel_req_across_fork() failed\n");
  459. return T_EXIT_FAIL;
  460. }
  461. if (test_cancel_inflight_exit()) {
  462. fprintf(stderr, "test_cancel_inflight_exit() failed\n");
  463. return T_EXIT_FAIL;
  464. }
  465. if (test_sqpoll_cancel_iowq_requests()) {
  466. fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
  467. return T_EXIT_FAIL;
  468. }
  469. t_create_file(fname, FILE_SIZE);
  470. vecs = t_create_buffers(BUFFERS, BS);
  471. for (i = 0; i < 8; i++) {
  472. int write = (i & 1) != 0;
  473. int partial = (i & 2) != 0;
  474. int async = (i & 4) != 0;
  475. ret = test_io_cancel(fname, write, partial, async);
  476. if (ret == T_EXIT_FAIL) {
  477. fprintf(stderr, "test_io_cancel %d %d %d failed\n",
  478. write, partial, async);
  479. goto err;
  480. }
  481. }
  482. unlink(fname);
  483. return T_EXIT_PASS;
  484. err:
  485. unlink(fname);
  486. return T_EXIT_FAIL;
  487. }