#include "../config-host.h"
// SPDX-License-Identifier: MIT

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <pthread.h>
#include <assert.h>

#include "liburing.h"
#include "helpers.h"
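
/* fake errno used by test() to signal that the kernel lacks multishot recv */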
#define ENORECVMULTISHOT 9999
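
/* points in the test flow where an early failure can be injected */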
enum early_error_t {
	ERROR_NONE = 0,
	ERROR_NOT_ENOUGH_BUFFERS,
	ERROR_EARLY_CLOSE_SENDER,
	ERROR_EARLY_CLOSE_RECEIVER,
	ERROR_EARLY_OVERFLOW,
	ERROR_EARLY_LAST
};
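
/* per-run configuration: socket type, CQE handling style and fault injection */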
struct args {
	bool stream;
	bool wait_each;
	bool recvmsg;
	enum early_error_t early_error;
	bool defer;
};
static int check_sockaddr(struct sockaddr_in *in)
{
	struct in_addr expected;

	inet_pton(AF_INET, "127.0.0.1", &expected);
	if (in->sin_family != AF_INET) {
		/* sin_family is in host byte order, print it directly */
		fprintf(stderr, "bad family %d\n", (int)in->sin_family);
		return -1;
	}
	if (memcmp(&expected, &in->sin_addr, sizeof(in->sin_addr))) {
		char buff[256];
		const char *addr = inet_ntop(AF_INET, &in->sin_addr, buff, sizeof(buff));

		fprintf(stderr, "unexpected address %s\n", addr ? addr : "INVALID");
		return -1;
	}
	return 0;
}
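
/*
 * Provides a group of buffers, arms a single multishot recv/recvmsg SQE on
 * one end of a socket pair, sends data from the other end and validates
 * every resulting CQE: flags, buffer selection, ordering and payload.
 */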
static int test(struct args *args)
{
	int const N = 8;
	int const N_BUFFS = N * 64;
	int const N_CQE_OVERFLOW = 4;
	int const min_cqes = 2;
	int const NAME_LEN = sizeof(struct sockaddr_storage);
	int const CONTROL_LEN = CMSG_ALIGN(sizeof(struct sockaddr_storage))
				+ sizeof(struct cmsghdr);
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fds[2], ret, i, j;
	int total_sent_bytes = 0, total_recv_bytes = 0, total_dropped_bytes = 0;
	int send_buff[256];
	int *sent_buffs[N_BUFFS];
	int *recv_buffs[N_BUFFS];
	int *at;
	struct io_uring_cqe recv_cqe[N_BUFFS];
	int recv_cqes = 0;
	bool early_error = false;
	bool early_error_started = false;
	struct __kernel_timespec timeout = {
		.tv_sec = 1,
	};
	struct msghdr msg;
	struct io_uring_params params = { };
	int n_sqe = 32;

	memset(recv_buffs, 0, sizeof(recv_buffs));

	if (args->defer)
		params.flags |= IORING_SETUP_SINGLE_ISSUER |
				IORING_SETUP_DEFER_TASKRUN;

	if (args->early_error == ERROR_EARLY_OVERFLOW) {
		params.flags |= IORING_SETUP_CQSIZE;
		params.cq_entries = N_CQE_OVERFLOW;
		n_sqe = N_CQE_OVERFLOW;
	}

	ret = io_uring_queue_init_params(n_sqe, &ring, &params);
	if (ret) {
		fprintf(stderr, "queue init failed: %d\n", ret);
		return ret;
	}

	ret = t_create_socket_pair(fds, args->stream);
	if (ret) {
		fprintf(stderr, "t_create_socket_pair failed: %d\n", ret);
		return ret;
	}

	if (!args->stream) {
		bool val = true;

		/* force some cmsgs to come back to us */
		ret = setsockopt(fds[0], IPPROTO_IP, IP_RECVORIGDSTADDR, &val,
				 sizeof(val));
		if (ret) {
			fprintf(stderr, "setsockopt failed %d\n", errno);
			goto cleanup;
		}
	}

	for (i = 0; i < ARRAY_SIZE(send_buff); i++)
		send_buff[i] = i;
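
	/* register buffers of varying sizes with buffer group 7 */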
	for (i = 0; i < ARRAY_SIZE(recv_buffs); i++) {
		/* prepare some different sized buffers */
		int buffer_size = (i % 2 == 0 && (args->stream || args->recvmsg)) ? 1 : N;

		buffer_size *= sizeof(int);
		if (args->recvmsg) {
			buffer_size +=
				sizeof(struct io_uring_recvmsg_out) +
				NAME_LEN +
				CONTROL_LEN;
		}

		recv_buffs[i] = malloc(buffer_size);

		if (i > 2 && args->early_error == ERROR_NOT_ENOUGH_BUFFERS)
			continue;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_provide_buffers(sqe, recv_buffs[i],
					      buffer_size, 1, 7, i);
		io_uring_sqe_set_data64(sqe, 0x999);
		memset(recv_buffs[i], 0xcc, buffer_size);
		ret = io_uring_submit_and_wait_timeout(&ring, &cqe, 1, &timeout, NULL);
		if (ret < 0) {
			fprintf(stderr, "provide buffers failed: %d\n", ret);
			ret = -1;
			goto cleanup;
		}
		io_uring_cqe_seen(&ring, cqe);
	}
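
	/*
	 * arm one multishot receive; with IOSQE_BUFFER_SELECT every completion
	 * consumes a buffer from group 7 and reports its id in cqe->flags
	 */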
	sqe = io_uring_get_sqe(&ring);
	if (args->recvmsg) {
		unsigned int flags = 0;

		if (!args->stream)
			flags |= MSG_TRUNC;

		memset(&msg, 0, sizeof(msg));
		msg.msg_namelen = NAME_LEN;
		msg.msg_controllen = CONTROL_LEN;
		io_uring_prep_recvmsg_multishot(sqe, fds[0], &msg, flags);
	} else {
		io_uring_prep_recv_multishot(sqe, fds[0], NULL, 0, 0);
	}
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;
	io_uring_sqe_set_data64(sqe, 1234);
	io_uring_submit(&ring);
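
	/* send N chunks of increasing size, injecting any early error at i == 2 */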
	at = &send_buff[0];
	total_sent_bytes = 0;
	for (i = 0; i < N; i++) {
		int to_send = sizeof(*at) * (i + 1);

		total_sent_bytes += to_send;
		sent_buffs[i] = at;
		if (send(fds[1], at, to_send, 0) != to_send) {
			if (early_error_started)
				break;
			fprintf(stderr, "send failed %d\n", errno);
			ret = -1;
			goto cleanup;
		}
		if (i == 2) {
			if (args->early_error == ERROR_EARLY_CLOSE_RECEIVER) {
				/* allow previous sends to complete */
				usleep(1000);
				io_uring_get_events(&ring);

				sqe = io_uring_get_sqe(&ring);
				io_uring_prep_cancel64(sqe, 1234, 0);
				io_uring_sqe_set_data64(sqe, 0x888);
				sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;
				io_uring_submit(&ring);
				early_error_started = true;

				/* allow the cancel to complete */
				usleep(1000);
				io_uring_get_events(&ring);
			}
			if (args->early_error == ERROR_EARLY_CLOSE_SENDER) {
				early_error_started = true;
				shutdown(fds[1], SHUT_RDWR);
				close(fds[1]);
			}
		}
		at += (i + 1);
		if (args->wait_each) {
			ret = io_uring_wait_cqes(&ring, &cqe, 1, &timeout, NULL);
			if (ret) {
				fprintf(stderr, "wait_each failed: %d\n", ret);
				ret = -1;
				goto cleanup;
			}
			while (io_uring_peek_cqe(&ring, &cqe) == 0) {
				recv_cqe[recv_cqes++] = *cqe;
				if (!(cqe->flags & IORING_CQE_F_MORE))
					early_error = true;
				io_uring_cqe_seen(&ring, cqe);
			}
			if (early_error)
				break;
		}
	}
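
	/* close the sender, then drain all outstanding completions */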
	close(fds[1]);

	/* allow sends to finish */
	usleep(1000);

	if ((args->stream && !early_error) || recv_cqes < min_cqes) {
		ret = io_uring_wait_cqes(&ring, &cqe, 1, &timeout, NULL);
		if (ret && ret != -ETIME) {
			fprintf(stderr, "wait final failed: %d\n", ret);
			ret = -1;
			goto cleanup;
		}
	}

	while (io_uring_peek_cqe(&ring, &cqe) == 0) {
		recv_cqe[recv_cqes++] = *cqe;
		io_uring_cqe_seen(&ring, cqe);
	}

	ret = -1;
	at = &send_buff[0];

	if (recv_cqes < min_cqes) {
		if (recv_cqes > 0 && recv_cqe[0].res == -EINVAL) {
			ret = -ENORECVMULTISHOT;
			goto cleanup;
		}
		/* some kernels apparently don't check ->ioprio, skip */
		ret = -ENORECVMULTISHOT;
		goto cleanup;
	}
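
	/*
	 * validate the collected CQEs: only the final CQE may drop
	 * IORING_CQE_F_MORE, every data CQE must carry a selected buffer,
	 * and each payload must match what was sent
	 */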
	for (i = 0; i < recv_cqes; i++) {
		cqe = &recv_cqe[i];
		bool const is_last = i == recv_cqes - 1;
		/*
		 * Older kernels could terminate multishot early due to overflow,
		 * but later ones will not. So discriminate based on the MORE flag.
		 */
		bool const early_last = args->early_error == ERROR_EARLY_OVERFLOW &&
					!args->wait_each &&
					i == N_CQE_OVERFLOW &&
					!(cqe->flags & IORING_CQE_F_MORE);
		bool const should_be_last =
			(cqe->res <= 0) ||
			(args->stream && is_last) ||
			early_last;
		int *this_recv;
		int orig_payload_size = cqe->res;

		if (should_be_last) {
			int used_res = cqe->res;

			if (!is_last) {
				fprintf(stderr, "not last cqe had error %d\n", i);
				goto cleanup;
			}

			switch (args->early_error) {
			case ERROR_NOT_ENOUGH_BUFFERS:
				if (cqe->res != -ENOBUFS) {
					fprintf(stderr,
						"ERROR_NOT_ENOUGH_BUFFERS: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_EARLY_OVERFLOW:
				if (cqe->res < 0) {
					fprintf(stderr,
						"ERROR_EARLY_OVERFLOW: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_EARLY_CLOSE_RECEIVER:
				if (cqe->res != -ECANCELED) {
					fprintf(stderr,
						"ERROR_EARLY_CLOSE_RECEIVER: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_NONE:
			case ERROR_EARLY_CLOSE_SENDER:
				if (args->recvmsg && (cqe->flags & IORING_CQE_F_BUFFER)) {
					void *buff = recv_buffs[cqe->flags >> 16];
					struct io_uring_recvmsg_out *o =
						io_uring_recvmsg_validate(buff, cqe->res, &msg);

					if (!o) {
						fprintf(stderr, "invalid buff\n");
						goto cleanup;
					}
					if (o->payloadlen != 0) {
						fprintf(stderr, "expected 0 payloadlen, got %u\n",
							o->payloadlen);
						goto cleanup;
					}
					used_res = 0;
				} else if (cqe->res != 0) {
					fprintf(stderr, "early error: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_EARLY_LAST:
				fprintf(stderr, "bad error_early\n");
				goto cleanup;
			}

			if (cqe->res <= 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
				fprintf(stderr, "final BUFFER flag set\n");
				goto cleanup;
			}

			if (cqe->flags & IORING_CQE_F_MORE) {
				fprintf(stderr, "final MORE flag set\n");
				goto cleanup;
			}

			if (used_res <= 0)
				continue;
		} else {
			if (!(cqe->flags & IORING_CQE_F_MORE)) {
				fprintf(stderr, "MORE flag not set\n");
				goto cleanup;
			}
		}

		if (!(cqe->flags & IORING_CQE_F_BUFFER)) {
			fprintf(stderr, "BUFFER flag not set\n");
			goto cleanup;
		}

		this_recv = recv_buffs[cqe->flags >> 16];

		if (args->recvmsg) {
			struct io_uring_recvmsg_out *o = io_uring_recvmsg_validate(
				this_recv, cqe->res, &msg);

			if (!o) {
				fprintf(stderr, "bad recvmsg\n");
				goto cleanup;
			}
			orig_payload_size = o->payloadlen;

			if (!args->stream) {
				struct cmsghdr *cmsg;

				if (o->namelen < sizeof(struct sockaddr_in)) {
					fprintf(stderr, "bad addr len %d\n",
						o->namelen);
					goto cleanup;
				}
				if (check_sockaddr((struct sockaddr_in *)io_uring_recvmsg_name(o)))
					goto cleanup;

				cmsg = io_uring_recvmsg_cmsg_firsthdr(o, &msg);
				if (!cmsg ||
				    cmsg->cmsg_level != IPPROTO_IP ||
				    cmsg->cmsg_type != IP_RECVORIGDSTADDR) {
					fprintf(stderr, "bad cmsg\n");
					goto cleanup;
				}
				if (check_sockaddr((struct sockaddr_in *)CMSG_DATA(cmsg)))
					goto cleanup;
				cmsg = io_uring_recvmsg_cmsg_nexthdr(o, &msg, cmsg);
				if (cmsg) {
					fprintf(stderr, "unexpected extra cmsg\n");
					goto cleanup;
				}
			}

			this_recv = (int *)io_uring_recvmsg_payload(o, &msg);
			cqe->res = io_uring_recvmsg_payload_length(o, cqe->res, &msg);
			if (o->payloadlen != cqe->res) {
				if (!(o->flags & MSG_TRUNC)) {
					fprintf(stderr, "expected truncated flag\n");
					goto cleanup;
				}
				total_dropped_bytes += (o->payloadlen - cqe->res);
			}
		}

		total_recv_bytes += cqe->res;

		if (cqe->res % 4 != 0) {
			/*
			 * doesn't seem to happen in practice, would need some
			 * work to remove this requirement
			 */
			fprintf(stderr, "unexpectedly unaligned buffer cqe->res=%d\n", cqe->res);
			goto cleanup;
		}

		/*
		 * for tcp: check buffers arrived in order
		 * for udp: locate the matching sent buffer based on size
		 */
		if (!args->stream) {
			int sent_idx = orig_payload_size / sizeof(*at) - 1;

			if (sent_idx < 0 || sent_idx > N) {
				fprintf(stderr, "Bad sent idx: %d\n", sent_idx);
				goto cleanup;
			}
			at = sent_buffs[sent_idx];
		}
		for (j = 0; j < cqe->res / 4; j++) {
			int sent = *at++;
			int recv = *this_recv++;

			if (sent != recv) {
				fprintf(stderr, "recv=%d sent=%d\n", recv, sent);
				goto cleanup;
			}
		}
	}
	if (args->early_error == ERROR_NONE &&
	    total_recv_bytes + total_dropped_bytes < total_sent_bytes) {
		fprintf(stderr,
			"missing recv: recv=%d dropped=%d sent=%d\n",
			total_recv_bytes, total_dropped_bytes, total_sent_bytes);
		goto cleanup;
	}

	ret = 0;
cleanup:
	for (i = 0; i < ARRAY_SIZE(recv_buffs); i++)
		free(recv_buffs[i]);
	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return ret;
}
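
/*
 * Checks that a multishot recv terminates with -ENOBUFS once the
 * deliberately undersized buffer group runs dry.
 */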
static int test_enobuf(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqes[16];
	char buffs[256];
	int ret, i, fds[2];

	if (t_create_ring(8, &ring, 0) != T_SETUP_OK) {
		fprintf(stderr, "ring create\n");
		return -1;
	}

	ret = t_create_socket_pair(fds, false);
	if (ret) {
		fprintf(stderr, "t_create_socket_pair\n");
		return ret;
	}

	sqe = io_uring_get_sqe(&ring);
	assert(sqe);
	/* deliberately only 2 provided buffers */
	io_uring_prep_provide_buffers(sqe, &buffs[0], 1, 2, 0, 0);
	io_uring_sqe_set_data64(sqe, 0);

	sqe = io_uring_get_sqe(&ring);
	assert(sqe);
	io_uring_prep_recv_multishot(sqe, fds[0], NULL, 0, 0);
	io_uring_sqe_set_data64(sqe, 1);
	sqe->buf_group = 0;
	sqe->flags |= IOSQE_BUFFER_SELECT;

	ret = io_uring_submit(&ring);
	if (ret != 2) {
		fprintf(stderr, "bad submit %d\n", ret);
		return -1;
	}

	/* send 3 datagrams, so the third receive has no buffer left */
	for (i = 0; i < 3; i++) {
		do {
			ret = write(fds[1], "?", 1);
		} while (ret == -1 && errno == EINTR);
	}

	ret = io_uring_wait_cqes(&ring, &cqes[0], 4, NULL, NULL);
	if (ret) {
		fprintf(stderr, "wait cqes\n");
		return ret;
	}

	ret = io_uring_peek_batch_cqe(&ring, &cqes[0], 4);
	if (ret != 4) {
		fprintf(stderr, "peek batch cqes\n");
		return -1;
	}

	/* provide buffers */
	assert(cqes[0]->user_data == 0);
	assert(cqes[0]->res == 0);

	/* valid recv */
	assert(cqes[1]->user_data == 1);
	assert(cqes[2]->user_data == 1);
	assert(cqes[1]->res == 1);
	assert(cqes[2]->res == 1);
	assert(cqes[1]->flags & (IORING_CQE_F_BUFFER | IORING_CQE_F_MORE));
	assert(cqes[2]->flags & (IORING_CQE_F_BUFFER | IORING_CQE_F_MORE));

	/* missing buffer */
	assert(cqes[3]->user_data == 1);
	assert(cqes[3]->res == -ENOBUFS);
	assert(!(cqes[3]->flags & (IORING_CQE_F_BUFFER | IORING_CQE_F_MORE)));

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
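
/* run test() over every combination of stream/wait_each/recvmsg/defer,
 * crossed with each early-error injection mode */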
int main(int argc, char *argv[])
{
	int ret;
	int loop;
	int early_error = 0;
	bool has_defer;

	if (argc > 1)
		return T_EXIT_SKIP;

	has_defer = t_probe_defer_taskrun();

	for (loop = 0; loop < 16; loop++) {
		struct args a = {
			.stream = loop & 0x01,
			.wait_each = loop & 0x02,
			.recvmsg = loop & 0x04,
			.defer = loop & 0x08,
		};

		if (a.defer && !has_defer)
			continue;

		for (early_error = 0; early_error < ERROR_EARLY_LAST; early_error++) {
			a.early_error = (enum early_error_t)early_error;
			ret = test(&a);
			if (ret) {
				if (ret == -ENORECVMULTISHOT) {
					if (loop == 0)
						return T_EXIT_SKIP;
					fprintf(stderr,
						"ENORECVMULTISHOT received but loop>0\n");
				}
				fprintf(stderr,
					"test stream=%d wait_each=%d recvmsg=%d early_error=%d "
					"defer=%d failed\n",
					a.stream, a.wait_each, a.recvmsg, a.early_error, a.defer);
				return T_EXIT_FAIL;
			}
		}
	}

	ret = test_enobuf();
	if (ret) {
		fprintf(stderr, "test_enobuf() failed: %d\n", ret);
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}