io-cancel.c

#include "../config-host.h"
/* SPDX-License-Identifier: MIT */
/*
 * Description: Basic IO cancel test
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <poll.h>

#include "helpers.h"
#include "liburing.h"

#define FILE_SIZE	(128 * 1024)
#define BS		4096
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;

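/*
 * Queue BUFFERS reads or writes, each BS bytes at a random block-aligned
 * offset in the file, tagged with user_data 1..BUFFERS, and submit them
 * all in one io_uring_submit() call.
 */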
static int start_io(struct io_uring *ring, int fd, int do_write)
{
	struct io_uring_sqe *sqe;
	int i, ret;

	for (i = 0; i < BUFFERS; i++) {
		off_t offset;

		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		offset = BS * (rand() % BUFFERS);
		if (do_write) {
			io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
		} else {
			io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
		}
		sqe->user_data = i + 1;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	return 0;
err:
	return 1;
}

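/*
 * Reap nr_io completions. With 'do_partial', only the odd user_data values
 * are targeted by cancels, so any even-tagged completion must have
 * transferred a full buffer (res == BS). Cancel CQEs carry user_data 0 and
 * are skipped by the check below.
 */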
static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
{
	struct io_uring_cqe *cqe;
	int i, ret;

	for (i = 0; i < nr_io; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (do_partial && cqe->user_data) {
			if (!(cqe->user_data & 1)) {
				if (cqe->res != BS) {
					fprintf(stderr, "IO %u wasn't canceled but got error %d\n",
						(unsigned) cqe->user_data, cqe->res);
					goto err;
				}
			}
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
err:
	return 1;
}

static int do_io(struct io_uring *ring, int fd, int do_write)
{
	if (start_io(ring, fd, do_write))
		return 1;
	if (wait_io(ring, BUFFERS, 0))
		return 1;
	return 0;
}

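/*
 * Issue an IORING_OP_ASYNC_CANCEL for each inflight request (or only the
 * odd-tagged half when 'do_partial' is set), matching by user_data. The
 * cancel SQEs themselves use user_data 0; with 'async_cancel' they are
 * additionally forced through io-wq via IOSQE_ASYNC.
 */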
static int start_cancel(struct io_uring *ring, int do_partial, int async_cancel)
{
	struct io_uring_sqe *sqe;
	int i, ret, submitted = 0;

	for (i = 0; i < BUFFERS; i++) {
		if (do_partial && (i & 1))
			continue;
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		io_uring_prep_cancel64(sqe, i + 1, 0);
		if (async_cancel)
			sqe->flags |= IOSQE_ASYNC;
		sqe->user_data = 0;
		submitted++;
	}

	ret = io_uring_submit(ring);
	if (ret != submitted) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
		goto err;
	}
	return 0;
err:
	return 1;
}

/*
 * Test cancels. If 'do_partial' is set, then we only attempt to cancel half of
 * the submitted IO. This is done to verify that canceling one piece of IO doesn't
 * impact others.
 */
static int test_io_cancel(const char *file, int do_write, int do_partial,
			  int async_cancel)
{
	struct io_uring ring;
	struct timeval start_tv;
	unsigned long usecs;
	unsigned to_wait;
	int fd, ret;

	fd = open(file, O_RDWR | O_DIRECT);
	if (fd < 0) {
		if (errno == EINVAL)
			return T_EXIT_SKIP;
		perror("file open");
		goto err;
	}

	ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	if (do_io(&ring, fd, do_write))
		goto err;
	gettimeofday(&start_tv, NULL);
	if (do_io(&ring, fd, do_write))
		goto err;
	usecs = utime_since_now(&start_tv);

	if (start_io(&ring, fd, do_write))
		goto err;
	/* sleep for 1/3 of the total time, to allow some to start/complete */
	usleep(usecs / 3);
	if (start_cancel(&ring, do_partial, async_cancel))
		goto err;
	to_wait = BUFFERS;
	if (do_partial)
		to_wait += BUFFERS / 2;
	else
		to_wait += BUFFERS;
	if (wait_io(&ring, to_wait, do_partial))
		goto err;

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}

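/*
 * A cancel issued on ring2 must not touch requests submitted on ring1:
 * the cancel is expected to complete with -ENOENT, and the pipe read on
 * ring1 should still be pending afterwards (the timed wait returns -ETIME).
 */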
static int test_dont_cancel_another_ring(void)
{
	struct io_uring ring1, ring2;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, fds[2];
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };

	ret = io_uring_queue_init(8, &ring1, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	ret = io_uring_queue_init(8, &ring2, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring1);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring1);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	/* make sure it doesn't cancel requests of the other ctx */
	sqe = io_uring_get_sqe(&ring2);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_cancel64(sqe, 1, 0);
	sqe->user_data = 2;

	ret = io_uring_submit(&ring2);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	ret = io_uring_wait_cqe(&ring2, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
			(int)cqe->user_data, (int)cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring2, cqe);

	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
	if (ret != -ETIME) {
		fprintf(stderr, "read got canceled or wait failed\n");
		return 1;
	}
	io_uring_cqe_seen(&ring1, cqe);

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring1);
	io_uring_queue_exit(&ring2);
	return 0;
}

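/*
 * Cancel a request the parent submitted from a forked child that shares the
 * same ring. The read may complete with -EINTR or -ECANCELED, and the cancel
 * itself with 0 or -EALREADY, depending on how far the request had progressed.
 */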
static int test_cancel_req_across_fork(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, i, fds[2];
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
			return 1;
		}
		io_uring_prep_cancel64(sqe, 1, 0);
		sqe->user_data = 2;

		ret = io_uring_submit(&ring);
		if (ret != 1) {
			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
			return 1;
		}

		for (i = 0; i < 2; ++i) {
			ret = io_uring_wait_cqe(&ring, &cqe);
			if (ret) {
				fprintf(stderr, "wait_cqe=%d\n", ret);
				return 1;
			}
			switch (cqe->user_data) {
			case 1:
				if (cqe->res != -EINTR &&
				    cqe->res != -ECANCELED) {
					fprintf(stderr, "user_data %u res %i\n",
						(unsigned)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			case 2:
				if (cqe->res != -EALREADY && cqe->res) {
					fprintf(stderr, "user_data %u res %i\n",
						(unsigned)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			default:
				fprintf(stderr, "user_data %u res %i\n",
					(unsigned)cqe->user_data, cqe->res);
				exit(1);
			}
			io_uring_cqe_seen(&ring, cqe);
		}
		exit(0);
	} else {
		int wstatus;
		pid_t childpid;

		do {
			childpid = waitpid(p, &wstatus, 0);
		} while (childpid == (pid_t)-1 && errno == EINTR);

		if (childpid == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}

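/*
 * The child submits a poll linked to a timeout, plus a standalone timeout,
 * then exits immediately. The test expects the exiting task's linked pair to
 * be canceled (-ECANCELED for user_data 1 and 2) while the standalone timeout
 * still expires (-ETIME); the parent reaps all three CQEs from the shared ring.
 */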
static int test_cancel_inflight_exit(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret, i;
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
		sqe->user_data = 1;
		sqe->flags |= IOSQE_IO_LINK;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 2;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 3;

		ret = io_uring_submit(&ring);
		if (ret != 3) {
			fprintf(stderr, "io_uring_submit() failed %s, ret %i\n", __FUNCTION__, ret);
			exit(1);
		}
		exit(0);
	} else {
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	for (i = 0; i < 3; ++i) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
			fprintf(stderr, "user_data %u res %i\n",
				(unsigned)cqe->user_data, cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

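/*
 * With SQPOLL, queue a blocking pipe read and a write, both forced to io-wq,
 * then exit the ring while they are inflight. If teardown cancels the pinned
 * io-wq work as it should, the trailing read() on the pipe (whose write end
 * is closed first) returns instead of blocking the test forever.
 */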
static int test_sqpoll_cancel_iowq_requests(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int ret, fds[2];
	char buffer[16];

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	/* pin both pipe ends via io-wq */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 2;
	ret = io_uring_submit(&ring);
	if (ret != 2) {
		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
		return 1;
	}

	/* wait for sqpoll to kick in and submit before exit */
	sleep(1);
	io_uring_queue_exit(&ring);

	/* close the write end, so if ring is canceled properly read() fails */
	close(fds[1]);
	ret = read(fds[0], buffer, 10);
	close(fds[0]);
	return 0;
}

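/*
 * Run the standalone regression tests first, then sweep test_io_cancel()
 * over all eight combinations of write/partial/async cancel against a
 * temporary O_DIRECT test file.
 */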
int main(int argc, char *argv[])
{
	const char *fname = ".io-cancel-test";
	int i, ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	if (test_dont_cancel_another_ring()) {
		fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_cancel_req_across_fork()) {
		fprintf(stderr, "test_cancel_req_across_fork() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_cancel_inflight_exit()) {
		fprintf(stderr, "test_cancel_inflight_exit() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_sqpoll_cancel_iowq_requests()) {
		fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
		return T_EXIT_FAIL;
	}

	t_create_file(fname, FILE_SIZE);

	vecs = t_create_buffers(BUFFERS, BS);

	for (i = 0; i < 8; i++) {
		int write = (i & 1) != 0;
		int partial = (i & 2) != 0;
		int async = (i & 4) != 0;

		ret = test_io_cancel(fname, write, partial, async);
		if (ret == T_EXIT_FAIL) {
			fprintf(stderr, "test_io_cancel %d %d %d failed\n",
				write, partial, async);
			goto err;
		}
	}

	unlink(fname);
	return T_EXIT_PASS;
err:
	unlink(fname);
	return T_EXIT_FAIL;
}