recv-multishot.c

#include "../config-host.h"
// SPDX-License-Identifier: MIT

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <pthread.h>
#include <assert.h>

#include "liburing.h"
#include "helpers.h"
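
/*
 * Description: exercise multishot recv and recvmsg on stream and datagram
 * socket pairs, covering provided-buffer exhaustion, early close of either
 * end, CQ ring overflow, and DEFER_TASKRUN rings.
 */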

#define ENORECVMULTISHOT	9999

enum early_error_t {
	ERROR_NONE = 0,
	ERROR_NOT_ENOUGH_BUFFERS,
	ERROR_EARLY_CLOSE_SENDER,
	ERROR_EARLY_CLOSE_RECEIVER,
	ERROR_EARLY_OVERFLOW,
	ERROR_EARLY_LAST
};

struct args {
	bool stream;
	bool wait_each;
	bool recvmsg;
	enum early_error_t early_error;
	bool defer;
};

static int check_sockaddr(struct sockaddr_in *in)
{
	struct in_addr expected;

	inet_pton(AF_INET, "127.0.0.1", &expected);
	if (in->sin_family != AF_INET) {
		fprintf(stderr, "bad family %d\n", (int)htons(in->sin_family));
		return -1;
	}
	if (memcmp(&expected, &in->sin_addr, sizeof(in->sin_addr))) {
		char buff[256];
		const char *addr = inet_ntop(AF_INET, &in->sin_addr, buff, sizeof(buff));

		fprintf(stderr, "unexpected address %s\n", addr ? addr : "INVALID");
		return -1;
	}
	return 0;
}
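
/*
 * Arm one multishot recv/recvmsg on a socket pair, feed it N sends of
 * increasing size, optionally inject an early-termination condition, then
 * validate every CQE (flags, selected buffer, payload contents) against
 * what was sent.
 */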
static int test(struct args *args)
{
	int const N = 8;
	int const N_BUFFS = N * 64;
	int const N_CQE_OVERFLOW = 4;
	int const min_cqes = args->early_error ? 2 : 8;
	int const NAME_LEN = sizeof(struct sockaddr_storage);
	int const CONTROL_LEN = CMSG_ALIGN(sizeof(struct sockaddr_storage))
				+ sizeof(struct cmsghdr);
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fds[2], ret, i, j;
	int total_sent_bytes = 0, total_recv_bytes = 0, total_dropped_bytes = 0;
	int send_buff[256];
	int *sent_buffs[N_BUFFS];
	int *recv_buffs[N_BUFFS];
	int *at;
	struct io_uring_cqe recv_cqe[N_BUFFS];
	int recv_cqes = 0;
	bool early_error = false;
	bool early_error_started = false;
	struct __kernel_timespec timeout = {
		.tv_sec = 1,
	};
	struct msghdr msg;
	struct io_uring_params params = { };
	int n_sqe = 32;

	memset(recv_buffs, 0, sizeof(recv_buffs));

	if (args->defer)
		params.flags |= IORING_SETUP_SINGLE_ISSUER |
				IORING_SETUP_DEFER_TASKRUN;

	if (args->early_error == ERROR_EARLY_OVERFLOW) {
		params.flags |= IORING_SETUP_CQSIZE;
		params.cq_entries = N_CQE_OVERFLOW;
		n_sqe = N_CQE_OVERFLOW;
	}

	ret = io_uring_queue_init_params(n_sqe, &ring, &params);
	if (ret) {
		fprintf(stderr, "queue init failed: %d\n", ret);
		return ret;
	}

	ret = t_create_socket_pair(fds, args->stream);
	if (ret) {
		fprintf(stderr, "t_create_socket_pair failed: %d\n", ret);
		return ret;
	}

	if (!args->stream) {
		bool val = true;

		/* force some cmsgs to come back to us */
		ret = setsockopt(fds[0], IPPROTO_IP, IP_RECVORIGDSTADDR, &val,
				 sizeof(val));
		if (ret) {
			fprintf(stderr, "setsockopt failed %d\n", errno);
			goto cleanup;
		}
	}

	for (i = 0; i < ARRAY_SIZE(send_buff); i++)
		send_buff[i] = i;

	for (i = 0; i < ARRAY_SIZE(recv_buffs); i++) {
		/* prepare some different sized buffers */
		int buffer_size = (i % 2 == 0 && (args->stream || args->recvmsg)) ? 1 : N;

		buffer_size *= sizeof(int);
		if (args->recvmsg) {
			buffer_size +=
				sizeof(struct io_uring_recvmsg_out) +
				NAME_LEN +
				CONTROL_LEN;
		}

		recv_buffs[i] = malloc(buffer_size);

		if (i > 2 && args->early_error == ERROR_NOT_ENOUGH_BUFFERS)
			continue;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_provide_buffers(sqe, recv_buffs[i],
					      buffer_size, 1, 7, i);
		io_uring_sqe_set_data64(sqe, 0x999);
		memset(recv_buffs[i], 0xcc, buffer_size);

		ret = io_uring_submit_and_wait_timeout(&ring, &cqe, 1, &timeout, NULL);
		if (ret < 0) {
			fprintf(stderr, "provide buffers failed: %d\n", ret);
			ret = -1;
			goto cleanup;
		}
		io_uring_cqe_seen(&ring, cqe);
	}
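
	/* arm a single multishot recv/recvmsg, selecting from buffer group 7 */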
	sqe = io_uring_get_sqe(&ring);
	if (args->recvmsg) {
		unsigned int flags = 0;

		if (!args->stream)
			flags |= MSG_TRUNC;

		memset(&msg, 0, sizeof(msg));
		msg.msg_namelen = NAME_LEN;
		msg.msg_controllen = CONTROL_LEN;
		io_uring_prep_recvmsg_multishot(sqe, fds[0], &msg, flags);
	} else {
		io_uring_prep_recv_multishot(sqe, fds[0], NULL, 0, 0);
	}
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;
	io_uring_sqe_set_data64(sqe, 1234);
	io_uring_submit(&ring);

	at = &send_buff[0];
	total_sent_bytes = 0;
	for (i = 0; i < N; i++) {
		int to_send = sizeof(*at) * (i + 1);

		total_sent_bytes += to_send;
		sent_buffs[i] = at;
		if (send(fds[1], at, to_send, 0) != to_send) {
			if (early_error_started)
				break;
			fprintf(stderr, "send failed %d\n", errno);
			ret = -1;
			goto cleanup;
		}

		if (i == 2) {
			if (args->early_error == ERROR_EARLY_CLOSE_RECEIVER) {
				/* allow previous sends to complete */
				usleep(1000);
				io_uring_get_events(&ring);

				/* cancel the multishot recv armed with user_data 1234 */
				sqe = io_uring_get_sqe(&ring);
				io_uring_prep_cancel64(sqe, 1234, 0);
				io_uring_sqe_set_data64(sqe, 0x888);
				sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;
				io_uring_submit(&ring);
				early_error_started = true;

				/* allow the cancel to complete */
				usleep(1000);
				io_uring_get_events(&ring);
			}
			if (args->early_error == ERROR_EARLY_CLOSE_SENDER) {
				early_error_started = true;
				shutdown(fds[1], SHUT_RDWR);
				close(fds[1]);
			}
		}

		at += (i + 1);

		if (args->wait_each) {
			ret = io_uring_wait_cqes(&ring, &cqe, 1, &timeout, NULL);
			if (ret) {
				fprintf(stderr, "wait_each failed: %d\n", ret);
				ret = -1;
				goto cleanup;
			}
			while (io_uring_peek_cqe(&ring, &cqe) == 0) {
				recv_cqe[recv_cqes++] = *cqe;
				if (!(cqe->flags & IORING_CQE_F_MORE))
					early_error = true;
				io_uring_cqe_seen(&ring, cqe);
			}
			if (early_error)
				break;
		}
	}
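
	/* done sending: close the write side and give the tail of the data
	 * time to land */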
	close(fds[1]);

	/* allow sends to finish */
	usleep(1000);

	if ((args->stream && !early_error) || recv_cqes < min_cqes) {
		unsigned int to_wait = 1;

		if (recv_cqes < min_cqes)
			to_wait = min_cqes - recv_cqes;
		ret = io_uring_wait_cqes(&ring, &cqe, to_wait, &timeout, NULL);
		if (ret && ret != -ETIME) {
			fprintf(stderr, "wait final failed: %d\n", ret);
			ret = -1;
			goto cleanup;
		}
	}

	while (io_uring_peek_cqe(&ring, &cqe) == 0) {
		recv_cqe[recv_cqes++] = *cqe;
		io_uring_cqe_seen(&ring, cqe);
	}

	ret = -1;
	at = &send_buff[0];

	if (recv_cqes < min_cqes) {
		/*
		 * Either the kernel lacks multishot recv entirely (the first
		 * CQE fails with -EINVAL), or it apparently doesn't check
		 * ->ioprio: skip in both cases.
		 */
		ret = -ENORECVMULTISHOT;
		goto cleanup;
	}
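
	/* walk the captured CQEs: only the final one may terminate the
	 * multishot (no MORE flag); the rest must carry a selected buffer */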
	for (i = 0; i < recv_cqes; i++) {
		cqe = &recv_cqe[i];

		bool const is_last = i == recv_cqes - 1;
		/*
		 * Older kernels could terminate multishot early due to overflow,
		 * but later ones will not. So discriminate based on the MORE flag.
		 */
		bool const early_last = args->early_error == ERROR_EARLY_OVERFLOW &&
					!args->wait_each &&
					i >= N_CQE_OVERFLOW &&
					!(cqe->flags & IORING_CQE_F_MORE);
		bool const should_be_last =
			(cqe->res <= 0) ||
			(args->stream && is_last) ||
			early_last;
		int *this_recv;
		int orig_payload_size = cqe->res;

		if (should_be_last) {
			int used_res = cqe->res;

			if (!is_last) {
				fprintf(stderr, "not last cqe had error %d\n", i);
				goto cleanup;
			}

			switch (args->early_error) {
			case ERROR_NOT_ENOUGH_BUFFERS:
				if (cqe->res != -ENOBUFS) {
					fprintf(stderr,
						"ERROR_NOT_ENOUGH_BUFFERS: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_EARLY_OVERFLOW:
				if (cqe->res < 0) {
					fprintf(stderr,
						"ERROR_EARLY_OVERFLOW: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_EARLY_CLOSE_RECEIVER:
				if (cqe->res != -ECANCELED) {
					fprintf(stderr,
						"ERROR_EARLY_CLOSE_RECEIVER: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_NONE:
			case ERROR_EARLY_CLOSE_SENDER:
				if (args->recvmsg && (cqe->flags & IORING_CQE_F_BUFFER)) {
					void *buff = recv_buffs[cqe->flags >> 16];
					struct io_uring_recvmsg_out *o =
						io_uring_recvmsg_validate(buff, cqe->res, &msg);

					if (!o) {
						fprintf(stderr, "invalid buff\n");
						goto cleanup;
					}
					if (o->payloadlen != 0) {
						fprintf(stderr, "expected 0 payloadlen, got %u\n",
							o->payloadlen);
						goto cleanup;
					}
					used_res = 0;
				} else if (cqe->res != 0) {
					fprintf(stderr, "early error: res %d\n", cqe->res);
					goto cleanup;
				}
				break;
			case ERROR_EARLY_LAST:
				fprintf(stderr, "bad error_early\n");
				goto cleanup;
			}

			if (cqe->res <= 0 && cqe->flags & IORING_CQE_F_BUFFER) {
				fprintf(stderr, "final BUFFER flag set\n");
				goto cleanup;
			}

			if (cqe->flags & IORING_CQE_F_MORE) {
				fprintf(stderr, "final MORE flag set\n");
				goto cleanup;
			}

			if (used_res <= 0)
				continue;
		} else {
			if (!(cqe->flags & IORING_CQE_F_MORE)) {
				fprintf(stderr, "MORE flag not set\n");
				goto cleanup;
			}
		}

		if (!(cqe->flags & IORING_CQE_F_BUFFER)) {
			fprintf(stderr, "BUFFER flag not set\n");
			goto cleanup;
		}

		this_recv = recv_buffs[cqe->flags >> 16];

		if (args->recvmsg) {
			struct io_uring_recvmsg_out *o = io_uring_recvmsg_validate(
				this_recv, cqe->res, &msg);

			if (!o) {
				fprintf(stderr, "bad recvmsg\n");
				goto cleanup;
			}
			orig_payload_size = o->payloadlen;

			if (!args->stream) {
				struct cmsghdr *cmsg;

				if (o->namelen < sizeof(struct sockaddr_in)) {
					fprintf(stderr, "bad addr len %d\n",
						o->namelen);
					goto cleanup;
				}
				if (check_sockaddr((struct sockaddr_in *)io_uring_recvmsg_name(o)))
					goto cleanup;

				cmsg = io_uring_recvmsg_cmsg_firsthdr(o, &msg);
				if (!cmsg ||
				    cmsg->cmsg_level != IPPROTO_IP ||
				    cmsg->cmsg_type != IP_RECVORIGDSTADDR) {
					fprintf(stderr, "bad cmsg\n");
					goto cleanup;
				}
				if (check_sockaddr((struct sockaddr_in *)CMSG_DATA(cmsg)))
					goto cleanup;
				cmsg = io_uring_recvmsg_cmsg_nexthdr(o, &msg, cmsg);
				if (cmsg) {
					fprintf(stderr, "unexpected extra cmsg\n");
					goto cleanup;
				}
			}

			this_recv = (int *)io_uring_recvmsg_payload(o, &msg);
			cqe->res = io_uring_recvmsg_payload_length(o, cqe->res, &msg);
			if (o->payloadlen != cqe->res) {
				if (!(o->flags & MSG_TRUNC)) {
					fprintf(stderr, "expected truncated flag\n");
					goto cleanup;
				}
				total_dropped_bytes += (o->payloadlen - cqe->res);
			}
		}

		total_recv_bytes += cqe->res;

		if (cqe->res % 4 != 0) {
			/*
			 * doesn't seem to happen in practice, would need some
			 * work to remove this requirement
			 */
			fprintf(stderr, "unexpectedly unaligned buffer cqe->res=%d\n",
				cqe->res);
			goto cleanup;
		}

		/*
		 * for tcp: check the data arrived in order
		 * for udp: find the matching sent buffer by payload size,
		 * then validate the data
		 */
		if (!args->stream) {
			int sent_idx = orig_payload_size / sizeof(*at) - 1;

			if (sent_idx < 0 || sent_idx > N) {
				fprintf(stderr, "Bad sent idx: %d\n", sent_idx);
				goto cleanup;
			}
			at = sent_buffs[sent_idx];
		}
		for (j = 0; j < cqe->res / 4; j++) {
			int sent = *at++;
			int recv = *this_recv++;

			if (sent != recv) {
				fprintf(stderr, "recv=%d sent=%d\n", recv, sent);
				goto cleanup;
			}
		}
	}
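
	/* with no injected error, every sent byte must show up as received
	 * or be accounted for as a truncation drop */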
	if (args->early_error == ERROR_NONE &&
	    total_recv_bytes + total_dropped_bytes < total_sent_bytes) {
		fprintf(stderr,
			"missing recv: recv=%d dropped=%d sent=%d\n",
			total_recv_bytes, total_dropped_bytes, total_sent_bytes);
		goto cleanup;
	}

	ret = 0;
cleanup:
	for (i = 0; i < ARRAY_SIZE(recv_buffs); i++)
		free(recv_buffs[i]);
	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);

	return ret;
}
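
/*
 * Provide only two single-byte buffers, send three datagrams, and verify
 * the multishot recv completes twice and then terminates with -ENOBUFS.
 */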
static int test_enobuf(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqes[16];
	char buffs[256];
	int ret, i, fds[2];

	if (t_create_ring(8, &ring, 0) != T_SETUP_OK) {
		fprintf(stderr, "ring create\n");
		return -1;
	}

	ret = t_create_socket_pair(fds, false);
	if (ret) {
		fprintf(stderr, "t_create_socket_pair\n");
		return ret;
	}

	sqe = io_uring_get_sqe(&ring);
	assert(sqe);
	/* deliberately only 2 provided buffers */
	io_uring_prep_provide_buffers(sqe, &buffs[0], 1, 2, 0, 0);
	io_uring_sqe_set_data64(sqe, 0);

	sqe = io_uring_get_sqe(&ring);
	assert(sqe);
	io_uring_prep_recv_multishot(sqe, fds[0], NULL, 0, 0);
	io_uring_sqe_set_data64(sqe, 1);
	sqe->buf_group = 0;
	sqe->flags |= IOSQE_BUFFER_SELECT;

	ret = io_uring_submit(&ring);
	if (ret != 2) {
		fprintf(stderr, "bad submit %d\n", ret);
		return -1;
	}

	for (i = 0; i < 3; i++) {
		do {
			ret = write(fds[1], "?", 1);
		} while (ret == -1 && errno == EINTR);
	}

	ret = io_uring_wait_cqes(&ring, &cqes[0], 4, NULL, NULL);
	if (ret) {
		fprintf(stderr, "wait cqes\n");
		return ret;
	}

	ret = io_uring_peek_batch_cqe(&ring, &cqes[0], 4);
	if (ret != 4) {
		fprintf(stderr, "peek batch cqes\n");
		return -1;
	}

	/* provide buffers */
	assert(cqes[0]->user_data == 0);
	assert(cqes[0]->res == 0);

	/* valid recv */
	assert(cqes[1]->user_data == 1);
	assert(cqes[2]->user_data == 1);
	assert(cqes[1]->res == 1);
	assert(cqes[2]->res == 1);
	assert(cqes[1]->flags & (IORING_CQE_F_BUFFER | IORING_CQE_F_MORE));
	assert(cqes[2]->flags & (IORING_CQE_F_BUFFER | IORING_CQE_F_MORE));

	/* missing buffer */
	assert(cqes[3]->user_data == 1);
	assert(cqes[3]->res == -ENOBUFS);
	assert(!(cqes[3]->flags & (IORING_CQE_F_BUFFER | IORING_CQE_F_MORE)));

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
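
/*
 * Run every combination of stream/wait_each/recvmsg/defer, crossed with
 * every early-error mode. -ENORECVMULTISHOT on the very first combination
 * means the kernel lacks multishot recv, so the whole test is skipped.
 */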
int main(int argc, char *argv[])
{
	int ret;
	int loop;
	int early_error = 0;
	bool has_defer;

	if (argc > 1)
		return T_EXIT_SKIP;

	has_defer = t_probe_defer_taskrun();

	for (loop = 0; loop < 16; loop++) {
		struct args a = {
			.stream = loop & 0x01,
			.wait_each = loop & 0x02,
			.recvmsg = loop & 0x04,
			.defer = loop & 0x08,
		};

		if (a.defer && !has_defer)
			continue;

		for (early_error = 0; early_error < ERROR_EARLY_LAST; early_error++) {
			a.early_error = (enum early_error_t)early_error;
			ret = test(&a);
			if (ret) {
				if (ret == -ENORECVMULTISHOT) {
					if (loop == 0)
						return T_EXIT_SKIP;
					fprintf(stderr,
						"ENORECVMULTISHOT received but loop>0\n");
				}
				fprintf(stderr,
					"test stream=%d wait_each=%d recvmsg=%d early_error=%d "
					"defer=%d failed\n",
					a.stream, a.wait_each, a.recvmsg, a.early_error, a.defer);
				return T_EXIT_FAIL;
			}
		}
	}

	ret = test_enobuf();
	if (ret) {
		fprintf(stderr, "test_enobuf() failed: %d\n", ret);
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}