#include "../config-host.h"
/* SPDX-License-Identifier: MIT */
/*
 * Description: basic read/write tests with buffered, O_DIRECT, and SQPOLL
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <sys/types.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <sys/resource.h>

#include "helpers.h"
#include "liburing.h"

#define FILE_SIZE	(256 * 1024)
#define BS		8192
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;
static int no_read;
static int no_buf_select;
static int warned;
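
/*
 * Replace the global iovec array with BUFFERS intentionally misaligned
 * buffers: each entry points at a random offset inside a 3 * BS allocation
 * and has a random length between 1 and BS.
 */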
static int create_nonaligned_buffers(void)
{
	int i;

	vecs = t_malloc(BUFFERS * sizeof(struct iovec));
	for (i = 0; i < BUFFERS; i++) {
		char *p = t_malloc(3 * BS);

		if (!p)
			return 1;
		vecs[i].iov_base = p + (rand() % BS);
		vecs[i].iov_len = 1 + (rand() % BS);
	}

	return 0;
}
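
/*
 * Core I/O helper: queues one read or write per buffer against 'file',
 * optionally using O_DIRECT, SQPOLL with registered files, fixed buffers,
 * non-vectored prep helpers, or provided-buffer selection, then checks
 * that every completion reports the expected length.
 */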
static int __test_io(const char *file, struct io_uring *ring, int write,
		     int buffered, int sqthread, int fixed, int nonvec,
		     int buf_select, int seq, int exp_len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int open_flags;
	int i, fd = -1, ret;
	off_t offset;

#ifdef VERBOSE
	fprintf(stdout, "%s: start %d/%d/%d/%d/%d: ", __FUNCTION__, write,
		buffered, sqthread, fixed, nonvec);
#endif
	if (write)
		open_flags = O_WRONLY;
	else
		open_flags = O_RDONLY;
	if (!buffered)
		open_flags |= O_DIRECT;

	if (fixed) {
		ret = t_register_buffers(ring, vecs, BUFFERS);
		if (ret == T_SETUP_SKIP)
			return 0;
		if (ret != T_SETUP_OK) {
			fprintf(stderr, "buffer reg failed: %d\n", ret);
			goto err;
		}
	}

	fd = open(file, open_flags);
	if (fd < 0) {
		if (errno == EINVAL)
			return 0;
		perror("file open");
		goto err;
	}

	if (sqthread) {
		ret = io_uring_register_files(ring, &fd, 1);
		if (ret) {
			fprintf(stderr, "file reg failed: %d\n", ret);
			goto err;
		}
	}

	offset = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		if (!seq)
			offset = BS * (rand() % BUFFERS);
		if (write) {
			int do_fixed = fixed;
			int use_fd = fd;

			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_write_fixed(sqe, use_fd,
							  vecs[i].iov_base,
							  vecs[i].iov_len,
							  offset, i);
			} else if (nonvec) {
				io_uring_prep_write(sqe, use_fd, vecs[i].iov_base,
						    vecs[i].iov_len, offset);
			} else {
				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
						     offset);
			}
		} else {
			int do_fixed = fixed;
			int use_fd = fd;

			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_read_fixed(sqe, use_fd,
							 vecs[i].iov_base,
							 vecs[i].iov_len,
							 offset, i);
			} else if (nonvec) {
				io_uring_prep_read(sqe, use_fd, vecs[i].iov_base,
						   vecs[i].iov_len, offset);
			} else {
				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
						    offset);
			}
		}
		sqe->user_data = i;
		if (sqthread)
			sqe->flags |= IOSQE_FIXED_FILE;
		if (buf_select) {
			if (nonvec)
				sqe->addr = 0;
			sqe->flags |= IOSQE_BUFFER_SELECT;
			sqe->buf_group = buf_select;
		}
		if (seq)
			offset += BS;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->res == -EINVAL && nonvec) {
			if (!warned) {
				fprintf(stdout, "Non-vectored IO not supported, skipping\n");
				warned = 1;
				no_read = 1;
			}
		} else if (exp_len == -1) {
			int iov_len = vecs[cqe->user_data].iov_len;

			if (cqe->res != iov_len) {
				fprintf(stderr, "cqe res %d, wanted %d\n",
					cqe->res, iov_len);
				goto err;
			}
		} else if (cqe->res != exp_len) {
			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, exp_len);
			goto err;
		}
		if (buf_select && exp_len == BS) {
			int bid = cqe->flags >> 16;
			unsigned char *ptr = vecs[bid].iov_base;
			int j;

			for (j = 0; j < BS; j++) {
				if (ptr[j] == cqe->user_data)
					continue;
				fprintf(stderr, "Data mismatch! bid=%d, wanted=%d, got=%d\n",
					bid, (int)cqe->user_data, ptr[j]);
				return 1;
			}
		}
		io_uring_cqe_seen(ring, cqe);
	}

	if (fixed) {
		ret = io_uring_unregister_buffers(ring);
		if (ret) {
			fprintf(stderr, "buffer unreg failed: %d\n", ret);
			goto err;
		}
	}
	if (sqthread) {
		ret = io_uring_unregister_files(ring);
		if (ret) {
			fprintf(stderr, "file unreg failed: %d\n", ret);
			goto err;
		}
	}

	close(fd);
#ifdef VERBOSE
	fprintf(stdout, "PASS\n");
#endif
	return 0;
err:
#ifdef VERBOSE
	fprintf(stderr, "FAILED\n");
#endif
	if (fd != -1)
		close(fd);
	return 1;
}
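
/*
 * Wrapper around __test_io() that creates (and tears down) a 64-entry ring,
 * using IORING_SETUP_SQPOLL when 'sqthread' is set.
 */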
static int test_io(const char *file, int write, int buffered, int sqthread,
		   int fixed, int nonvec, int exp_len)
{
	struct io_uring ring;
	int ret, ring_flags = 0;

	if (sqthread)
		ring_flags = IORING_SETUP_SQPOLL;

	ret = t_create_ring(64, &ring, ring_flags);
	if (ret == T_SETUP_SKIP)
		return 0;
	if (ret != T_SETUP_OK) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = __test_io(file, &ring, write, buffered, sqthread, fixed, nonvec,
			0, 0, exp_len);
	io_uring_queue_exit(&ring);
	return ret;
}
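
/*
 * Link a file write to a poll on an empty pipe and to a 1 second link
 * timeout, submit the chain, and reap all three completions.
 */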
static int read_poll_link(const char *file)
{
	struct __kernel_timespec ts;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int i, fd, ret, fds[2];

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	fd = open(file, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_writev(sqe, fd, &vecs[0], 1, 0);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 2;

	ts.tv_sec = 1;
	ts.tv_nsec = 0;
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 3;

	ret = io_uring_submit(&ring);
	if (ret != 3) {
		fprintf(stderr, "submitted %d\n", ret);
		return 1;
	}

	for (i = 0; i < 3; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	return 0;
}
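
/*
 * Probe the kernel for IORING_OP_READ support; the non-vectored read/write
 * cases are skipped when the probe (or the opcode) is unavailable.
 */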
static int has_nonvec_read(void)
{
	struct io_uring_probe *p;
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(1, &ring, 0);
	if (ret) {
		fprintf(stderr, "queue init failed: %d\n", ret);
		exit(ret);
	}

	p = t_calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = io_uring_register_probe(&ring, p, 256);
	/* if we don't have PROBE_REGISTER, we don't have OP_READ/WRITE */
	if (ret == -EINVAL) {
out:
		io_uring_queue_exit(&ring);
		return 0;
	} else if (ret) {
		fprintf(stderr, "register_probe: %d\n", ret);
		goto out;
	}

	if (p->ops_len <= IORING_OP_READ)
		goto out;
	if (!(p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
		goto out;
	io_uring_queue_exit(&ring);
	return 1;
}
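
/*
 * Issue an IORING_OP_READ against an eventfd and verify that the full
 * 8-byte counter value is returned (or skip if eventfd IO is unsupported).
 */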
static int test_eventfd_read(void)
{
	struct io_uring ring;
	int fd, ret;
	eventfd_t event;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (no_read)
		return 0;
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	fd = eventfd(1, 0);
	if (fd < 0) {
		perror("eventfd");
		return 1;
	}
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, &event, sizeof(eventfd_t), 0);
	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "submitted %d\n", ret);
		return 1;
	}
	eventfd_write(fd, 1);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}

	if (cqe->res == -EINVAL) {
		fprintf(stdout, "eventfd IO not supported, skipping\n");
	} else if (cqe->res != sizeof(eventfd_t)) {
		fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res,
			(int) sizeof(eventfd_t));
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);
	return 0;
}
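
/*
 * Provide buffers that are only half the iovec length and verify that a
 * buffer-selected read completes with the shortened length.
 */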
static int test_buf_select_short(const char *filename, int nonvec)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret, i, exp_len;

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	exp_len = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
					      vecs[i].iov_len / 2, 1, 1, i);
		if (!exp_len)
			exp_len = vecs[i].iov_len / 2;
	}

	ret = io_uring_submit(&ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	ret = __test_io(filename, &ring, 0, 0, 0, 0, nonvec, 1, 1, exp_len);

	io_uring_queue_exit(&ring);
	return ret;
}
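
/*
 * Register every iovec in 'vecs' as a provided buffer in group 'bgid',
 * one IORING_OP_PROVIDE_BUFFERS request per buffer.
 */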
static int provide_buffers_iovec(struct io_uring *ring, int bgid)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret;

	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
					      vecs[i].iov_len, 1, bgid, i);
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	return 0;
}
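
/*
 * Queue five 1-byte buffer-selected reads on an empty pipe, then write
 * "01234" and check that the data comes back in order, one byte per
 * selected buffer.
 */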
static int test_buf_select_pipe(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret, i;
	int fds[2];

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = provide_buffers_iovec(&ring, 0);
	if (ret) {
		fprintf(stderr, "provide buffers failed: %d\n", ret);
		return 1;
	}

	ret = pipe(fds);
	if (ret) {
		fprintf(stderr, "pipe failed: %d\n", ret);
		return 1;
	}

	for (i = 0; i < 5; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fds[0], NULL, 1 /* max read 1 per go */, -1);
		sqe->flags |= IOSQE_BUFFER_SELECT;
		sqe->buf_group = 0;
	}
	io_uring_submit(&ring);

	ret = write(fds[1], "01234", 5);
	if (ret != 5) {
		fprintf(stderr, "pipe write failed %d\n", ret);
		return 1;
	}

	for (i = 0; i < 5; i++) {
		const char *buff;

		if (io_uring_wait_cqe(&ring, &cqe)) {
			fprintf(stderr, "bad wait %d\n", i);
			return 1;
		}
		if (cqe->res != 1) {
			fprintf(stderr, "expected read %d\n", cqe->res);
			return 1;
		}
		if (!(cqe->flags & IORING_CQE_F_BUFFER)) {
			fprintf(stderr, "no buffer %d\n", cqe->res);
			return 1;
		}
		buff = vecs[cqe->flags >> 16].iov_base;
		if (*buff != '0' + i) {
			fprintf(stderr, "%d: expected %c, got %c\n", i, '0' + i, *buff);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
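
/*
 * Write a known per-buffer pattern to the file, clobber the buffers,
 * provide them to group 1, and read the file back with IOSQE_BUFFER_SELECT
 * so __test_io() can verify the pattern.
 */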
static int test_buf_select(const char *filename, int nonvec)
{
	struct io_uring_probe *p;
	struct io_uring ring;
	int ret, i;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	p = io_uring_get_probe_ring(&ring);
	if (!p || !io_uring_opcode_supported(p, IORING_OP_PROVIDE_BUFFERS)) {
		no_buf_select = 1;
		fprintf(stdout, "Buffer select not supported, skipping\n");
		return 0;
	}
	io_uring_free_probe(p);

	/*
	 * Write out data with known pattern
	 */
	for (i = 0; i < BUFFERS; i++)
		memset(vecs[i].iov_base, i, vecs[i].iov_len);

	ret = __test_io(filename, &ring, 1, 0, 0, 0, 0, 0, 1, BS);
	if (ret) {
		fprintf(stderr, "failed writing data\n");
		return 1;
	}

	for (i = 0; i < BUFFERS; i++)
		memset(vecs[i].iov_base, 0x55, vecs[i].iov_len);

	ret = provide_buffers_iovec(&ring, 1);
	if (ret)
		return ret;

	ret = __test_io(filename, &ring, 0, 0, 0, 0, nonvec, 1, 1, BS);
	io_uring_queue_exit(&ring);
	return ret;
}
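
/*
 * Provide BUFFERS buffers and then remove them again in batches of 'batch',
 * checking that each IORING_OP_REMOVE_BUFFERS completion reports the number
 * of buffers it removed.
 */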
static int test_rem_buf(int batch, int sqe_flags)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int left, ret, nr = 0;
	int bgid = 1;

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = provide_buffers_iovec(&ring, bgid);
	if (ret)
		return ret;

	left = BUFFERS;
	while (left) {
		int to_rem = (left < batch) ? left : batch;

		left -= to_rem;
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_remove_buffers(sqe, to_rem, bgid);
		sqe->user_data = to_rem;
		sqe->flags |= sqe_flags;
		++nr;
	}

	ret = io_uring_submit(&ring);
	if (ret != nr) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	for (; nr > 0; nr--) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if (cqe->res != cqe->user_data) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return ret;
}
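
/*
 * Ask IORING_OP_REMOVE_BUFFERS to drop 'to_rem' buffers from a group that
 * holds BUFFERS of them, and verify the completion reports
 * min(to_rem, BUFFERS).
 */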
static int test_rem_buf_single(int to_rem)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret, expected;
	int bgid = 1;

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = provide_buffers_iovec(&ring, bgid);
	if (ret)
		return ret;

	expected = (to_rem > BUFFERS) ? BUFFERS : to_rem;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_remove_buffers(sqe, to_rem, bgid);

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->res != expected) {
		fprintf(stderr, "cqe->res=%d, expected=%d\n", cqe->res, expected);
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return ret;
}
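
/*
 * Submit 100 chains of 100 linked IOSQE_ASYNC writes each and make sure
 * every completion reports a full BS-sized write, skipping if IOSQE_ASYNC
 * is not supported.
 */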
static int test_io_link(const char *file)
{
	const int nr_links = 100;
	const int link_len = 100;
	const int nr_sqes = nr_links * link_len;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int i, j, fd, ret;

	fd = open(file, O_WRONLY);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	ret = io_uring_queue_init(nr_sqes, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	for (i = 0; i < nr_links; ++i) {
		for (j = 0; j < link_len; ++j) {
			sqe = io_uring_get_sqe(&ring);
			if (!sqe) {
				fprintf(stderr, "sqe get failed\n");
				goto err;
			}
			io_uring_prep_writev(sqe, fd, &vecs[0], 1, 0);
			sqe->flags |= IOSQE_ASYNC;
			if (j != link_len - 1)
				sqe->flags |= IOSQE_IO_LINK;
		}
	}

	ret = io_uring_submit(&ring);
	if (ret != nr_sqes) {
		ret = io_uring_peek_cqe(&ring, &cqe);
		if (!ret && cqe->res == -EINVAL) {
			fprintf(stdout, "IOSQE_ASYNC not supported, skipped\n");
			goto out;
		}
		fprintf(stderr, "submit got %d, wanted %d\n", ret, nr_sqes);
		goto err;
	}

	for (i = 0; i < nr_sqes; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->res == -EINVAL) {
			if (!warned) {
				fprintf(stdout, "Non-vectored IO not supported, skipping\n");
				warned = 1;
				no_read = 1;
			}
		} else if (cqe->res != BS) {
			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, BS);
			goto err;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

out:
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}
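
/*
 * Lower RLIMIT_FSIZE to 128KB and submit 32 BS-sized writes covering 256KB:
 * the first 16 must succeed and the rest must fail with -EFBIG. Skipped
 * when not running as root.
 */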
static int test_write_efbig(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	struct rlimit rlim, old_rlim;
	int i, fd, ret;
	loff_t off;

	if (geteuid()) {
		fprintf(stdout, "Not root, skipping %s\n", __FUNCTION__);
		return 0;
	}

	if (getrlimit(RLIMIT_FSIZE, &old_rlim) < 0) {
		perror("getrlimit");
		return 1;
	}
	rlim = old_rlim;
	rlim.rlim_cur = 128 * 1024;
	rlim.rlim_max = 128 * 1024;
	if (setrlimit(RLIMIT_FSIZE, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	fd = open(".efbig", O_WRONLY | O_CREAT, 0644);
	if (fd < 0) {
		perror("file open");
		goto err;
	}
	unlink(".efbig");

	ret = io_uring_queue_init(32, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	off = 0;
	for (i = 0; i < 32; i++) {
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		io_uring_prep_writev(sqe, fd, &vecs[i], 1, off);
		io_uring_sqe_set_data64(sqe, i);
		off += BS;
	}

	ret = io_uring_submit(&ring);
	if (ret != 32) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, 32);
		goto err;
	}

	for (i = 0; i < 32; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->user_data < 16) {
			if (cqe->res != BS) {
				fprintf(stderr, "bad write: %d\n", cqe->res);
				goto err;
			}
		} else {
			if (cqe->res != -EFBIG) {
				fprintf(stderr, "Expected -EFBIG: %d\n", cqe->res);
				goto err;
			}
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	unlink(".efbig");

	if (setrlimit(RLIMIT_FSIZE, &old_rlim) < 0) {
		perror("setrlimit");
		return 1;
	}
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}
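
/*
 * Iterate over all combinations of write/buffered/sqthread/fixed/nonvec
 * I/O, then run the provided-buffer, link, eventfd, EFBIG, and
 * non-aligned buffer tests.
 */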
int main(int argc, char *argv[])
{
	int i, ret, nr;
	char buf[256];
	char *fname;

	if (argc > 1) {
		fname = argv[1];
	} else {
		srand((unsigned)time(NULL));
		snprintf(buf, sizeof(buf), ".basic-rw-%u-%u",
			 (unsigned)rand(), (unsigned)getpid());
		fname = buf;
		t_create_file(fname, FILE_SIZE);
	}

	signal(SIGXFSZ, SIG_IGN);

	vecs = t_create_buffers(BUFFERS, BS);

	/* if we don't have nonvec read, skip testing that */
	nr = has_nonvec_read() ? 32 : 16;

	for (i = 0; i < nr; i++) {
		int write = (i & 1) != 0;
		int buffered = (i & 2) != 0;
		int sqthread = (i & 4) != 0;
		int fixed = (i & 8) != 0;
		int nonvec = (i & 16) != 0;

		ret = test_io(fname, write, buffered, sqthread, fixed, nonvec,
			      BS);
		if (ret) {
			fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
				write, buffered, sqthread, fixed, nonvec);
			goto err;
		}
	}

	ret = test_buf_select(fname, 1);
	if (ret) {
		fprintf(stderr, "test_buf_select nonvec failed\n");
		goto err;
	}

	ret = test_buf_select(fname, 0);
	if (ret) {
		fprintf(stderr, "test_buf_select vec failed\n");
		goto err;
	}

	ret = test_buf_select_short(fname, 1);
	if (ret) {
		fprintf(stderr, "test_buf_select_short nonvec failed\n");
		goto err;
	}

	ret = test_buf_select_short(fname, 0);
	if (ret) {
		fprintf(stderr, "test_buf_select_short vec failed\n");
		goto err;
	}

	ret = test_buf_select_pipe();
	if (ret) {
		fprintf(stderr, "test_buf_select_pipe failed\n");
		goto err;
	}

	ret = test_eventfd_read();
	if (ret) {
		fprintf(stderr, "test_eventfd_read failed\n");
		goto err;
	}

	ret = read_poll_link(fname);
	if (ret) {
		fprintf(stderr, "read_poll_link failed\n");
		goto err;
	}

	ret = test_io_link(fname);
	if (ret) {
		fprintf(stderr, "test_io_link failed\n");
		goto err;
	}

	ret = test_write_efbig();
	if (ret) {
		fprintf(stderr, "test_write_efbig failed\n");
		goto err;
	}

	ret = test_rem_buf(1, 0);
	if (ret) {
		fprintf(stderr, "test_rem_buf by 1 failed\n");
		goto err;
	}

	ret = test_rem_buf(10, 0);
	if (ret) {
		fprintf(stderr, "test_rem_buf by 10 failed\n");
		goto err;
	}

	ret = test_rem_buf(2, IOSQE_IO_LINK);
	if (ret) {
		fprintf(stderr, "test_rem_buf link failed\n");
		goto err;
	}

	ret = test_rem_buf(2, IOSQE_ASYNC);
	if (ret) {
		fprintf(stderr, "test_rem_buf async failed\n");
		goto err;
	}

	srand((unsigned)time(NULL));
	if (create_nonaligned_buffers()) {
		fprintf(stderr, "non-aligned buffer creation failed\n");
		goto err;
	}

	/* test fixed bufs with non-aligned len/offset */
	for (i = 0; i < nr; i++) {
		int write = (i & 1) != 0;
		int buffered = (i & 2) != 0;
		int sqthread = (i & 4) != 0;
		int fixed = (i & 8) != 0;
		int nonvec = (i & 16) != 0;

		/* direct IO requires alignment, skip it */
		if (!buffered || !fixed || nonvec)
			continue;

		ret = test_io(fname, write, buffered, sqthread, fixed, nonvec,
			      -1);
		if (ret) {
			fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
				write, buffered, sqthread, fixed, nonvec);
			goto err;
		}
	}

	ret = test_rem_buf_single(BUFFERS + 1);
	if (ret) {
		fprintf(stderr, "test_rem_buf_single(BUFFERS + 1) failed\n");
		goto err;
	}

	if (fname != argv[1])
		unlink(fname);
	return 0;
err:
	if (fname != argv[1])
		unlink(fname);
	return 1;
}