accept.c

#include "../config-host.h"
/* SPDX-License-Identifier: MIT */
/*
 * Check that IORING_OP_ACCEPT works, and send some data across to verify we
 * didn't get a junk fd.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <signal.h>
#include <assert.h>
#include <limits.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/un.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include "helpers.h"
#include "liburing.h"

#define MAX_FDS 32
#define NOP_USER_DATA (1LLU << 50)
#define INITIAL_USER_DATA 1000

static int no_accept;
static int no_accept_multi;

struct data {
	char buf[128];
	struct iovec iov;
};

struct accept_test_args {
	int accept_should_error;
	bool fixed;
	bool nonblock;
	bool queue_accept_before_connect;
	bool multishot;
	int extra_loops;
	bool overflow;
};

static void close_fds(int fds[], int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		close(fds[i]);
}

static void close_sock_fds(int s_fd[], int c_fd[], int nr, bool fixed)
{
	if (!fixed)
		close_fds(s_fd, nr);
	close_fds(c_fd, nr);
}

static void queue_send(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;
	struct data *d;

	d = t_malloc(sizeof(*d));
	d->iov.iov_base = d->buf;
	d->iov.iov_len = sizeof(d->buf);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_writev(sqe, fd, &d->iov, 1, 0);
	sqe->user_data = 1;
}

static void queue_recv(struct io_uring *ring, int fd, bool fixed)
{
	struct io_uring_sqe *sqe;
	struct data *d;

	d = t_malloc(sizeof(*d));
	d->iov.iov_base = d->buf;
	d->iov.iov_len = sizeof(d->buf);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_readv(sqe, fd, &d->iov, 1, 0);
	sqe->user_data = 2;
	if (fixed)
		sqe->flags |= IOSQE_FIXED_FILE;
}

static void queue_accept_multishot(struct io_uring *ring, int fd,
				   int idx, bool fixed)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	int ret;

	if (fixed)
		io_uring_prep_multishot_accept_direct(sqe, fd, NULL, NULL, 0);
	else
		io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);

	io_uring_sqe_set_data64(sqe, idx);
	ret = io_uring_submit(ring);
	assert(ret != -1);
}

static void queue_accept_conn(struct io_uring *ring, int fd,
			      struct accept_test_args args)
{
	struct io_uring_sqe *sqe;
	int ret;
	int fixed_idx = args.fixed ? 0 : -1;
	int count = 1 + args.extra_loops;

	if (args.multishot) {
		queue_accept_multishot(ring, fd, INITIAL_USER_DATA, args.fixed);
		return;
	}

	while (count--) {
		sqe = io_uring_get_sqe(ring);
		if (fixed_idx < 0) {
			io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
		} else {
			io_uring_prep_accept_direct(sqe, fd, NULL, NULL,
						    0, fixed_idx);
		}
		ret = io_uring_submit(ring);
		assert(ret != -1);
	}
}

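/*
 * Reap one accept completion, skipping NOP completions that were only queued
 * to force CQ overflow. In multishot mode, re-arm the accept when the CQE no
 * longer carries IORING_CQE_F_MORE. For fixed (direct) accepts, a result of 0
 * is translated to the registered file slot index.
 */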
static int accept_conn(struct io_uring *ring, int fixed_idx, int *multishot, int fd)
{
	struct io_uring_cqe *pcqe;
	struct io_uring_cqe cqe;
	int ret;

	do {
		ret = io_uring_wait_cqe(ring, &pcqe);
		assert(!ret);
		cqe = *pcqe;
		io_uring_cqe_seen(ring, pcqe);
	} while (cqe.user_data == NOP_USER_DATA);

	if (*multishot) {
		if (!(cqe.flags & IORING_CQE_F_MORE)) {
			(*multishot)++;
			queue_accept_multishot(ring, fd, *multishot, fixed_idx == 0);
		} else {
			if (cqe.user_data != *multishot) {
				fprintf(stderr, "received multishot after told done!\n");
				return -ECANCELED;
			}
		}
	}

	ret = cqe.res;
	if (fixed_idx >= 0) {
		if (ret > 0) {
			if (!multishot) {
				close(ret);
				return -EINVAL;
			}
		} else if (!ret) {
			ret = fixed_idx;
		}
	}
	return ret;
}

static int start_accept_listen(struct sockaddr_in *addr, int port_off,
			       int extra_flags)
{
	int fd, ret;

	fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | extra_flags,
		    IPPROTO_TCP);

	int32_t val = 1;
	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
	assert(ret != -1);
	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
	assert(ret != -1);

	struct sockaddr_in laddr;

	if (!addr)
		addr = &laddr;

	addr->sin_family = AF_INET;
	addr->sin_addr.s_addr = inet_addr("127.0.0.1");
	assert(!t_bind_ephemeral_port(fd, addr));
	ret = listen(fd, 128);
	assert(ret != -1);

	return fd;
}

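/*
 * Create a TCP client socket and start a connect to the listener. The connect
 * is issued while the socket is temporarily non-blocking, so it is expected to
 * return -1 (in progress) immediately; the accept side completes the handshake.
 */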
static int set_client_fd(struct sockaddr_in *addr)
{
	int32_t val;
	int fd, ret;

	fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);

	val = 1;
	ret = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
	assert(ret != -1);

	int32_t flags = fcntl(fd, F_GETFL, 0);
	assert(flags != -1);

	flags |= O_NONBLOCK;
	ret = fcntl(fd, F_SETFL, flags);
	assert(ret != -1);
	ret = connect(fd, (struct sockaddr *)addr, sizeof(*addr));
	assert(ret == -1);

	flags = fcntl(fd, F_GETFL, 0);
	assert(flags != -1);

	flags &= ~O_NONBLOCK;
	ret = fcntl(fd, F_SETFL, flags);
	assert(ret != -1);
	return fd;
}

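/*
 * Fill the CQ ring with NOP completions so that subsequent accept completions
 * must go through the overflow path; clear_overflow() then reaps the NOPs.
 */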
static void cause_overflow(struct io_uring *ring)
{
	int i, ret;

	for (i = 0; i < ring->cq.ring_entries; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		io_uring_prep_nop(sqe);
		io_uring_sqe_set_data64(sqe, NOP_USER_DATA);
		ret = io_uring_submit(ring);
		assert(ret != -1);
	}
}

static void clear_overflow(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (!io_uring_peek_cqe(ring, &cqe)) {
		if (cqe->user_data != NOP_USER_DATA)
			break;
		io_uring_cqe_seen(ring, cqe);
	}
}

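/*
 * One iteration of the accept test: connect one client socket (or MAX_FDS of
 * them for multishot), accept them via io_uring, and for the single-shot case
 * push 128 bytes from client to server to verify the accepted fd actually
 * works.
 */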
static int test_loop(struct io_uring *ring,
		     struct accept_test_args args,
		     int recv_s0,
		     struct sockaddr_in *addr)
{
	struct io_uring_cqe *cqe;
	uint32_t head, count = 0;
	int i, ret, s_fd[MAX_FDS], c_fd[MAX_FDS], done = 0;
	bool fixed = args.fixed;
	bool multishot = args.multishot;
	uint32_t multishot_mask = 0;
	int nr_fds = multishot ? MAX_FDS : 1;
	int multishot_idx = multishot ? INITIAL_USER_DATA : 0;
	int err_ret = T_EXIT_FAIL;

	if (args.overflow)
		cause_overflow(ring);

	for (i = 0; i < nr_fds; i++) {
		c_fd[i] = set_client_fd(addr);
		if (args.overflow && i == nr_fds / 2)
			clear_overflow(ring);
	}

	if (!args.queue_accept_before_connect)
		queue_accept_conn(ring, recv_s0, args);

	for (i = 0; i < nr_fds; i++) {
		s_fd[i] = accept_conn(ring, fixed ? 0 : -1, &multishot_idx, recv_s0);
		if (s_fd[i] == -EINVAL) {
			if (args.accept_should_error)
				goto out;
			fprintf(stdout,
				"%s %s Accept not supported, skipping\n",
				fixed ? "Fixed" : "",
				multishot ? "Multishot" : "");
			if (multishot)
				no_accept_multi = 1;
			else
				no_accept = 1;
			ret = T_EXIT_SKIP;
			goto out;
		} else if (s_fd[i] < 0) {
			if (args.accept_should_error &&
			    (s_fd[i] == -EBADF || s_fd[i] == -EINVAL))
				goto out;
			fprintf(stderr, "%s %s Accept[%d] got %d\n",
				fixed ? "Fixed" : "",
				multishot ? "Multishot" : "",
				i, s_fd[i]);
			goto err;
		}

		if (multishot && fixed) {
			if (s_fd[i] >= MAX_FDS) {
				fprintf(stderr,
					"Fixed Multishot Accept[%d] got outbound index: %d\n",
					i, s_fd[i]);
				goto err;
			}
			/*
			 * for fixed multishot accept test, the file slots
			 * allocated are [0, 32), this means we finally end up
			 * with each bit of a u32 being 1.
			 */
			multishot_mask |= (1U << s_fd[i]);
		}
	}

	if (multishot) {
		if (fixed && (~multishot_mask != 0U)) {
			fprintf(stderr, "Fixed Multishot Accept misses events\n");
			goto err;
		}
		goto out;
	}

	queue_send(ring, c_fd[0]);
	queue_recv(ring, s_fd[0], fixed);

	ret = io_uring_submit_and_wait(ring, 2);
	assert(ret != -1);

	while (count < 2) {
		io_uring_for_each_cqe(ring, head, cqe) {
			if (cqe->res < 0) {
				fprintf(stderr, "Got cqe res %d, user_data %i\n",
					cqe->res, (int)cqe->user_data);
				done = 1;
				break;
			}
			assert(cqe->res == 128);
			count++;
		}
		assert(count <= 2);
		io_uring_cq_advance(ring, count);
		if (done)
			goto err;
	}

out:
	close_sock_fds(s_fd, c_fd, nr_fds, fixed);
	return T_EXIT_PASS;
err:
	close_sock_fds(s_fd, c_fd, nr_fds, fixed);
	return err_ret;
}

static int test(struct io_uring *ring, struct accept_test_args args)
{
	struct sockaddr_in addr;
	int ret = 0;
	int loop;
	int32_t recv_s0 = start_accept_listen(&addr, 0,
					      args.nonblock ? SOCK_NONBLOCK : 0);

	if (args.queue_accept_before_connect)
		queue_accept_conn(ring, recv_s0, args);
	for (loop = 0; loop < 1 + args.extra_loops; loop++) {
		ret = test_loop(ring, args, recv_s0, &addr);
		if (ret)
			break;
	}

	close(recv_s0);
	return ret;
}

static void sig_alrm(int sig)
{
	exit(0);
}

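/*
 * Submit an accept that will never complete, then exit via SIGALRM after one
 * second so the pending request has to be cleaned up on process teardown.
 */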
static int test_accept_pending_on_exit(void)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fd, ret;

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);
	fd = start_accept_listen(NULL, 0, 0);
	sqe = io_uring_get_sqe(&m_io_uring);
	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
	ret = io_uring_submit(&m_io_uring);
	assert(ret != -1);

	signal(SIGALRM, sig_alrm);
	alarm(1);
	ret = io_uring_wait_cqe(&m_io_uring, &cqe);
	assert(!ret);
	io_uring_cqe_seen(&m_io_uring, cqe);

	io_uring_queue_exit(&m_io_uring);
	return 0;
}

struct test_accept_many_args {
	unsigned int usecs;
	bool nonblock;
	bool single_sock;
	bool close_fds;
};

/*
 * Issue many accepts and check that cancellation on exit is handled correctly
 */
static int test_accept_many(struct test_accept_many_args args)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	unsigned long cur_lim;
	struct rlimit rlim;
	int *fds, i, ret;
	unsigned int nr = 128;
	int nr_socks = args.single_sock ? 1 : nr;

	if (getrlimit(RLIMIT_NPROC, &rlim) < 0) {
		perror("getrlimit");
		return 1;
	}

	cur_lim = rlim.rlim_cur;
	rlim.rlim_cur = nr / 4;

	if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	ret = io_uring_queue_init(2 * nr, &m_io_uring, 0);
	assert(ret >= 0);

	fds = t_calloc(nr_socks, sizeof(int));

	for (i = 0; i < nr_socks; i++)
		fds[i] = start_accept_listen(NULL, i,
					     args.nonblock ? SOCK_NONBLOCK : 0);

	for (i = 0; i < nr; i++) {
		int sock_idx = args.single_sock ? 0 : i;

		sqe = io_uring_get_sqe(&m_io_uring);
		io_uring_prep_accept(sqe, fds[sock_idx], NULL, NULL, 0);
		sqe->user_data = 1 + i;
		ret = io_uring_submit(&m_io_uring);
		assert(ret == 1);
	}

	if (args.usecs)
		usleep(args.usecs);

	if (args.close_fds)
		for (i = 0; i < nr_socks; i++)
			close(fds[i]);

	for (i = 0; i < nr; i++) {
		if (io_uring_peek_cqe(&m_io_uring, &cqe))
			break;
		if (cqe->res != -ECANCELED) {
			fprintf(stderr, "Expected cqe to be cancelled %d\n", cqe->res);
			ret = 1;
			goto out;
		}
		io_uring_cqe_seen(&m_io_uring, cqe);
	}
	ret = 0;
out:
	rlim.rlim_cur = cur_lim;
	if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	free(fds);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

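/*
 * Queue nr accept requests (optionally multishot), then cancel each of them,
 * either immediately or after a short delay, and check that both the accept
 * and the cancel completions carry sane result codes.
 */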
static int test_accept_cancel(unsigned usecs, unsigned int nr, bool multishot)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fd, i, ret;

	if (multishot && no_accept_multi)
		return T_EXIT_SKIP;

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);
	fd = start_accept_listen(NULL, 0, 0);

	for (i = 1; i <= nr; i++) {
		sqe = io_uring_get_sqe(&m_io_uring);
		if (!multishot)
			io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
		else
			io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);
		sqe->user_data = i;
		ret = io_uring_submit(&m_io_uring);
		assert(ret == 1);
	}

	if (usecs)
		usleep(usecs);

	for (i = 1; i <= nr; i++) {
		sqe = io_uring_get_sqe(&m_io_uring);
		io_uring_prep_cancel64(sqe, i, 0);
		sqe->user_data = nr + i;
		ret = io_uring_submit(&m_io_uring);
		assert(ret == 1);
	}
	for (i = 0; i < nr * 2; i++) {
		ret = io_uring_wait_cqe(&m_io_uring, &cqe);
		assert(!ret);
		/*
		 * Two cases here:
		 *
		 * 1) We cancel the accept4() before it got started, we should
		 *    get '0' for the cancel request and '-ECANCELED' for the
		 *    accept request.
		 * 2) We cancel the accept4() after it's already running, we
		 *    should get '-EALREADY' for the cancel request and
		 *    '-EINTR' for the accept request.
		 */
		if (cqe->user_data == 0) {
			fprintf(stderr, "unexpected 0 user data\n");
			goto err;
		} else if (cqe->user_data <= nr) {
			if (cqe->res != -EINTR && cqe->res != -ECANCELED) {
				fprintf(stderr, "Cancelled accept got %d\n", cqe->res);
				goto err;
			}
		} else if (cqe->user_data <= nr * 2) {
			if (cqe->res != -EALREADY && cqe->res != 0) {
				fprintf(stderr, "Cancel got %d\n", cqe->res);
				goto err;
			}
		}
		io_uring_cqe_seen(&m_io_uring, cqe);
	}
	io_uring_queue_exit(&m_io_uring);
	close(fd);
	return 0;
err:
	io_uring_queue_exit(&m_io_uring);
	close(fd);
	return 1;
}

static int test_accept(int count, bool before)
{
	struct io_uring m_io_uring;
	int ret;
	struct accept_test_args args = {
		.queue_accept_before_connect = before,
		.extra_loops = count - 1
	};

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);
	ret = test(&m_io_uring, args);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

static int test_multishot_accept(int count, bool before, bool overflow)
{
	struct io_uring m_io_uring;
	int ret;
	struct accept_test_args args = {
		.queue_accept_before_connect = before,
		.multishot = true,
		.extra_loops = count - 1,
		.overflow = overflow
	};

	if (no_accept_multi)
		return T_EXIT_SKIP;

	ret = io_uring_queue_init(MAX_FDS + 10, &m_io_uring, 0);
	assert(ret >= 0);
	ret = test(&m_io_uring, args);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

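/*
 * A multishot direct accept must use IORING_FILE_INDEX_ALLOC; passing an
 * explicit fixed file index should be rejected with -EINVAL.
 */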
static int test_accept_multishot_wrong_arg(void)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fd, ret;

	ret = io_uring_queue_init(4, &m_io_uring, 0);
	assert(ret >= 0);
	fd = start_accept_listen(NULL, 0, 0);
	sqe = io_uring_get_sqe(&m_io_uring);
	io_uring_prep_multishot_accept_direct(sqe, fd, NULL, NULL, 0);
	sqe->file_index = 1;
	ret = io_uring_submit(&m_io_uring);
	assert(ret == 1);

	ret = io_uring_wait_cqe(&m_io_uring, &cqe);
	assert(!ret);
	if (cqe->res != -EINVAL) {
		fprintf(stderr, "file index should be IORING_FILE_INDEX_ALLOC "
				"if it's an accept in multishot direct mode\n");
		goto err;
	}
	io_uring_cqe_seen(&m_io_uring, cqe);

	io_uring_queue_exit(&m_io_uring);
	close(fd);
	return 0;
err:
	io_uring_queue_exit(&m_io_uring);
	close(fd);
	return 1;
}

static int test_accept_nonblock(bool queue_before_connect, int count)
{
	struct io_uring m_io_uring;
	int ret;
	struct accept_test_args args = {
		.nonblock = true,
		.queue_accept_before_connect = queue_before_connect,
		.extra_loops = count - 1
	};

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);
	ret = test(&m_io_uring, args);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

static int test_accept_fixed(void)
{
	struct io_uring m_io_uring;
	int ret, fd = -1;
	struct accept_test_args args = {
		.fixed = true
	};

	ret = io_uring_queue_init(32, &m_io_uring, 0);
	assert(ret >= 0);
	ret = io_uring_register_files(&m_io_uring, &fd, 1);
	assert(ret == 0);
	ret = test(&m_io_uring, args);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

static int test_multishot_fixed_accept(void)
{
	struct io_uring m_io_uring;
	int ret, fd[MAX_FDS];
	struct accept_test_args args = {
		.fixed = true,
		.multishot = true
	};

	if (no_accept_multi)
		return T_EXIT_SKIP;

	memset(fd, -1, sizeof(fd));
	ret = io_uring_queue_init(MAX_FDS + 10, &m_io_uring, 0);
	assert(ret >= 0);
	ret = io_uring_register_files(&m_io_uring, fd, MAX_FDS);
	assert(ret == 0);
	ret = test(&m_io_uring, args);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

static int test_accept_sqpoll(void)
{
	struct io_uring m_io_uring;
	struct io_uring_params p = { };
	int ret;
	struct accept_test_args args = { };

	p.flags = IORING_SETUP_SQPOLL;
	ret = t_create_ring_params(32, &m_io_uring, &p);
	if (ret == T_SETUP_SKIP)
		return 0;
	else if (ret < 0)
		return ret;

	args.accept_should_error = 1;
	if (p.features & IORING_FEAT_SQPOLL_NONFIXED)
		args.accept_should_error = 0;

	ret = test(&m_io_uring, args);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}

int main(int argc, char *argv[])
{
	int ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	ret = test_accept(1, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept failed\n");
		return ret;
	}
	if (no_accept)
		return T_EXIT_SKIP;

	ret = test_accept(2, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept(2) failed\n");
		return ret;
	}

	ret = test_accept(2, true);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept(2, true) failed\n");
		return ret;
	}

	ret = test_accept_nonblock(false, 1);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_nonblock failed\n");
		return ret;
	}

	ret = test_accept_nonblock(true, 1);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_nonblock(before, 1) failed\n");
		return ret;
	}

	ret = test_accept_nonblock(true, 3);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_nonblock(before,3) failed\n");
		return ret;
	}

	ret = test_accept_fixed();
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_fixed failed\n");
		return ret;
	}

	ret = test_multishot_fixed_accept();
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_multishot_fixed_accept failed\n");
		return ret;
	}

	ret = test_accept_multishot_wrong_arg();
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_multishot_wrong_arg failed\n");
		return ret;
	}

	ret = test_accept_sqpoll();
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_sqpoll failed\n");
		return ret;
	}

	ret = test_accept_cancel(0, 1, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel nodelay failed\n");
		return ret;
	}

	ret = test_accept_cancel(10000, 1, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel delay failed\n");
		return ret;
	}

	ret = test_accept_cancel(0, 4, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel nodelay failed\n");
		return ret;
	}

	ret = test_accept_cancel(10000, 4, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel delay failed\n");
		return ret;
	}

	ret = test_accept_cancel(0, 1, true);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel multishot nodelay failed\n");
		return ret;
	}

	ret = test_accept_cancel(10000, 1, true);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel multishot delay failed\n");
		return ret;
	}

	ret = test_accept_cancel(0, 4, true);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel multishot nodelay failed\n");
		return ret;
	}

	ret = test_accept_cancel(10000, 4, true);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_cancel multishot delay failed\n");
		return ret;
	}

	ret = test_multishot_accept(1, true, true);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_multishot_accept(1, true, true) failed\n");
		return ret;
	}

	ret = test_multishot_accept(1, false, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_multishot_accept(1, false, false) failed\n");
		return ret;
	}

	ret = test_multishot_accept(1, true, false);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_multishot_accept(1, true, false) failed\n");
		return ret;
	}

	ret = test_accept_many((struct test_accept_many_args) {});
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_many failed\n");
		return ret;
	}

	ret = test_accept_many((struct test_accept_many_args) {
				.usecs = 100000 });
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_many(sleep) failed\n");
		return ret;
	}

	ret = test_accept_many((struct test_accept_many_args) {
				.nonblock = true });
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_many(nonblock) failed\n");
		return ret;
	}

	ret = test_accept_many((struct test_accept_many_args) {
				.nonblock = true,
				.single_sock = true,
				.close_fds = true });
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_many(nonblock,close) failed\n");
		return ret;
	}

	ret = test_accept_pending_on_exit();
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_accept_pending_on_exit failed\n");
		return ret;
	}
	return T_EXIT_PASS;
}