/* recvsend_bundle.c */
  1. #include "../config-host.h"
  2. /* SPDX-License-Identifier: MIT */
  3. /*
  4. * Simple test case showing using send and recv bundles
  5. */
  6. #include <errno.h>
  7. #include <stdio.h>
  8. #include <stdlib.h>
  9. #include <string.h>
  10. #include <unistd.h>
  11. #include <arpa/inet.h>
  12. #include <sys/types.h>
  13. #include <sys/socket.h>
  14. #include <pthread.h>
#define MSG_SIZE 128
#define NR_MIN_MSGS 4
#define NR_MAX_MSGS 32
/* number of unsigned longs carried in one message */
#define SEQ_SIZE (MSG_SIZE / sizeof(unsigned long))

static int nr_msgs;		/* messages sent per test round (NR_MIN/NR_MAX) */
static int use_tcp;		/* 1 = SOCK_STREAM, 0 = SOCK_DGRAM */
static int classic_buffers;	/* 1 = legacy provided buffers, 0 = buffer ring */

#define RECV_BIDS 8192
#define RECV_BID_MASK (RECV_BIDS - 1)

#include "liburing.h"
#include "helpers.h"

#define PORT 10202
#define HOST "127.0.0.1"
static int use_port = PORT;	/* bumped for each UDP run to avoid reuse issues */

#define SEND_BGID 7
#define RECV_BGID 8

/* set when the kernel rejects bundle/multishot send (-EINVAL or missing feature) */
static int no_send_mshot;
/*
 * State shared between the sending (main) thread and the receiving thread.
 * The four barriers sequence the handshake; all are initialized for 2 waiters.
 */
struct recv_data {
	pthread_barrier_t connect;	/* receiver is listening/bound, sender may connect */
	pthread_barrier_t startup;	/* connection established on both sides */
	pthread_barrier_t barrier;	/* sender queued first send; receiver then arms recv */
	pthread_barrier_t finish;	/* both sides done with the round */
	unsigned long seq;		/* next expected sequence value on the recv side */
	int recv_bytes;			/* bytes the receiver still expects */
	int accept_fd;			/* accepted TCP fd, or the bound UDP fd */
	int abort;			/* tells the receiver to give up waiting */
	unsigned int max_sends;		/* cap on plain send() calls used to pre-fill the socket */
	int to_eagain;			/* how many pre-fill sends completed before EAGAIN */
	void *recv_buf;			/* base of the receive provided-buffer pool */
	int send_bundle;		/* use send bundles (TCP only) */
	int recv_bundle;		/* use recv bundles (TCP only) */
};
  47. static int arm_recv(struct io_uring *ring, struct recv_data *rd)
  48. {
  49. struct io_uring_sqe *sqe;
  50. int ret;
  51. sqe = io_uring_get_sqe(ring);
  52. io_uring_prep_recv_multishot(sqe, rd->accept_fd, NULL, 0, 0);
  53. if (rd->recv_bundle && use_tcp)
  54. sqe->ioprio |= IORING_RECVSEND_BUNDLE;
  55. sqe->buf_group = RECV_BGID;
  56. sqe->flags |= IOSQE_BUFFER_SELECT;
  57. sqe->user_data = 2;
  58. ret = io_uring_submit(ring);
  59. if (ret != 1) {
  60. fprintf(stderr, "submit failed: %d\n", ret);
  61. return 1;
  62. }
  63. return 0;
  64. }
/*
 * Set up the receive-side socket on use_port. For TCP: bind, listen and
 * accept the sender's connection; for UDP the bound socket is used
 * directly. Synchronizes with the sender through rd->connect, rd->startup
 * and rd->barrier, then arms the first multishot recv.
 *
 * On success returns 0, stores the listening/bound socket in *sock and
 * the data fd in rd->accept_fd. Returns 1 on error (sockfd closed).
 */
static int recv_prep(struct io_uring *ring, struct recv_data *rd, int *sock)
{
	struct sockaddr_in saddr;
	int sockfd, ret, val, use_fd;
	socklen_t socklen;

	memset(&saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
	saddr.sin_port = htons(use_port);

	if (use_tcp)
		sockfd = socket(AF_INET, SOCK_STREAM, 0);
	else
		sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd < 0) {
		perror("socket");
		return 1;
	}

	val = 1;
	setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));

	ret = bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
	if (ret < 0) {
		perror("bind");
		goto err;
	}

	if (use_tcp) {
		ret = listen(sockfd, 1);
		if (ret < 0) {
			perror("listen");
			goto err;
		}

		/* sender connect()s once both sides pass this barrier */
		pthread_barrier_wait(&rd->connect);
		/* sender sets abort if the kernel lacks bundle support */
		if (rd->abort)
			goto err;

		socklen = sizeof(saddr);
		use_fd = accept(sockfd, (struct sockaddr *)&saddr, &socklen);
		if (use_fd < 0) {
			perror("accept");
			goto err;
		}
	} else {
		/* UDP: no accept, receive on the bound socket itself */
		use_fd = sockfd;
		pthread_barrier_wait(&rd->connect);
	}

	rd->accept_fd = use_fd;
	pthread_barrier_wait(&rd->startup);
	/* wait until the sender has queued its first send before arming recv */
	pthread_barrier_wait(&rd->barrier);

	if (arm_recv(ring, rd))
		goto err;

	*sock = sockfd;
	return 0;
err:
	close(sockfd);
	return 1;
}
  119. static int verify_seq(struct recv_data *rd, void *verify_ptr, int verify_sz,
  120. int start_bid)
  121. {
  122. unsigned long *seqp;
  123. int seq_size = verify_sz / sizeof(unsigned long);
  124. int i;
  125. seqp = verify_ptr;
  126. for (i = 0; i < seq_size; i++) {
  127. if (rd->seq != *seqp) {
  128. fprintf(stderr, "bid=%d, got seq %lu, wanted %lu, offset %d\n", start_bid, *seqp, rd->seq, i);
  129. return 0;
  130. }
  131. seqp++;
  132. rd->seq++;
  133. }
  134. return 1;
  135. }
  136. static int recv_get_cqe(struct io_uring *ring, struct recv_data *rd,
  137. struct io_uring_cqe **cqe)
  138. {
  139. struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000LL };
  140. int ret;
  141. do {
  142. ret = io_uring_wait_cqe_timeout(ring, cqe, &ts);
  143. if (!ret)
  144. return 0;
  145. if (ret == -ETIME) {
  146. if (rd->abort)
  147. break;
  148. continue;
  149. }
  150. fprintf(stderr, "wait recv: %d\n", ret);
  151. break;
  152. } while (1);
  153. return 1;
  154. }
  155. static int do_recv(struct io_uring *ring, struct recv_data *rd)
  156. {
  157. struct io_uring_cqe *cqe;
  158. int bid, next_bid = 0;
  159. void *verify_ptr;
  160. int verify_sz = 0;
  161. int verify_bid = 0;
  162. verify_ptr = malloc(rd->recv_bytes);
  163. do {
  164. if (recv_get_cqe(ring, rd, &cqe))
  165. break;
  166. if (cqe->res == -EINVAL) {
  167. fprintf(stdout, "recv not supported, skipping\n");
  168. return 0;
  169. }
  170. if (cqe->res < 0) {
  171. fprintf(stderr, "failed recv cqe: %d\n", cqe->res);
  172. goto err;
  173. }
  174. if (!(cqe->flags & IORING_CQE_F_BUFFER)) {
  175. fprintf(stderr, "no buffer set in recv\n");
  176. goto err;
  177. }
  178. bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
  179. if (bid != next_bid) {
  180. fprintf(stderr, "got bid %d, wanted %d\n", bid, next_bid);
  181. goto err;
  182. }
  183. if (!rd->recv_bundle && cqe->res > MSG_SIZE) {
  184. fprintf(stderr, "recv got wrong length: %d\n", cqe->res);
  185. goto err;
  186. }
  187. if (!(verify_sz % MSG_SIZE)) {
  188. if (!verify_seq(rd, verify_ptr, verify_sz, verify_bid))
  189. goto err;
  190. verify_bid += verify_sz / MSG_SIZE;
  191. verify_bid &= RECV_BID_MASK;
  192. verify_sz = 0;
  193. } else {
  194. memcpy(verify_ptr + verify_sz, rd->recv_buf + (bid * MSG_SIZE), cqe->res);
  195. verify_sz += cqe->res;
  196. }
  197. next_bid = bid + ((cqe->res + MSG_SIZE - 1) / MSG_SIZE);
  198. next_bid &= RECV_BID_MASK;
  199. rd->recv_bytes -= cqe->res;
  200. io_uring_cqe_seen(ring, cqe);
  201. if (!(cqe->flags & IORING_CQE_F_MORE) && rd->recv_bytes) {
  202. if (arm_recv(ring, rd))
  203. goto err;
  204. }
  205. } while (rd->recv_bytes);
  206. if (verify_sz && !(verify_sz % MSG_SIZE) &&
  207. !verify_seq(rd, verify_ptr, verify_sz, verify_bid))
  208. goto err;
  209. pthread_barrier_wait(&rd->finish);
  210. return 0;
  211. err:
  212. pthread_barrier_wait(&rd->finish);
  213. return 1;
  214. }
  215. static int provide_classic_buffers(struct io_uring *ring, void *buf, int nbufs, int bgid)
  216. {
  217. struct io_uring_sqe *sqe;
  218. struct io_uring_cqe *cqe;
  219. int ret;
  220. sqe = io_uring_get_sqe(ring);
  221. io_uring_prep_provide_buffers(sqe, buf, MSG_SIZE, nbufs, bgid, 0);
  222. io_uring_submit(ring);
  223. ret = io_uring_wait_cqe(ring, &cqe);
  224. if (ret) {
  225. fprintf(stderr, "provide buffer wait: %d\n", ret);
  226. return 1;
  227. }
  228. if (cqe->res) {
  229. fprintf(stderr, "provide buffers fail: %d\n", cqe->res);
  230. return 1;
  231. }
  232. io_uring_cqe_seen(ring, cqe);
  233. return 0;
  234. }
  235. static void *recv_fn(void *data)
  236. {
  237. struct recv_data *rd = data;
  238. struct io_uring_params p = { };
  239. struct io_uring ring;
  240. struct io_uring_buf_ring *br;
  241. void *buf = NULL, *ptr;
  242. int ret, sock, i;
  243. p.cq_entries = 4096;
  244. p.flags = IORING_SETUP_CQSIZE;
  245. ret = t_create_ring_params(16, &ring, &p);
  246. if (ret == T_SETUP_SKIP) {
  247. ret = 0;
  248. goto err;
  249. } else if (ret < 0) {
  250. goto err;
  251. }
  252. if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), MSG_SIZE * RECV_BIDS))
  253. goto err;
  254. if (!classic_buffers) {
  255. br = io_uring_setup_buf_ring(&ring, RECV_BIDS, RECV_BGID, 0, &ret);
  256. if (!br) {
  257. if (ret != -EINVAL)
  258. fprintf(stderr, "failed setting up recv ring %d\n", ret);
  259. goto err;
  260. }
  261. ptr = buf;
  262. for (i = 0; i < RECV_BIDS; i++) {
  263. io_uring_buf_ring_add(br, ptr, MSG_SIZE, i, RECV_BID_MASK, i);
  264. ptr += MSG_SIZE;
  265. }
  266. io_uring_buf_ring_advance(br, RECV_BIDS);
  267. rd->recv_buf = buf;
  268. } else {
  269. ret = provide_classic_buffers(&ring, buf, RECV_BIDS, RECV_BGID);
  270. if (ret) {
  271. fprintf(stderr, "failed providing classic buffers\n");
  272. goto err;
  273. }
  274. }
  275. ret = recv_prep(&ring, rd, &sock);
  276. if (ret) {
  277. fprintf(stderr, "recv_prep failed: %d\n", ret);
  278. goto err;
  279. }
  280. ret = do_recv(&ring, rd);
  281. close(sock);
  282. close(rd->accept_fd);
  283. io_uring_queue_exit(&ring);
  284. err:
  285. free(buf);
  286. return (void *)(intptr_t)ret;
  287. }
/*
 * Send nr_msgs messages with a single bundled send SQE picking buffers
 * from the SEND_BGID group, then reap completions until all bytes are
 * accounted for. A bundle send may complete in several CQEs; every CQE
 * except the last must carry IORING_CQE_F_MORE.
 *
 * Returns 0 on success (or on unsupported kernels, which also sets
 * no_send_mshot and rd->abort), 1 on error.
 */
static int __do_send_bundle(struct recv_data *rd, struct io_uring *ring, int sockfd)
{
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int bytes_needed = MSG_SIZE * nr_msgs;
	int i, ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_send_bundle(sqe, sockfd, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = SEND_BGID;
	sqe->user_data = 1;

	ret = io_uring_submit(ring);
	if (ret != 1)
		return 1;

	/* the receiver arms its multishot recv after this rendezvous */
	pthread_barrier_wait(&rd->barrier);

	for (i = 0; i < nr_msgs; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait send: %d\n", ret);
			return 1;
		}
		/* -EINVAL on the very first CQE means no bundle send support */
		if (!i && cqe->res == -EINVAL) {
			rd->abort = 1;
			no_send_mshot = 1;
			break;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "bad send cqe res: %d\n", cqe->res);
			return 1;
		}
		bytes_needed -= cqe->res;
		if (!bytes_needed) {
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		/* partial progress must be flagged as a continuing completion */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			fprintf(stderr, "expected more, but MORE not set\n");
			return 1;
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}
  331. static int __do_send(struct recv_data *rd, struct io_uring *ring, int sockfd)
  332. {
  333. struct io_uring_cqe *cqe;
  334. struct io_uring_sqe *sqe;
  335. int bytes_needed = MSG_SIZE * nr_msgs;
  336. int i, ret;
  337. for (i = 0; i < nr_msgs; i++) {
  338. sqe = io_uring_get_sqe(ring);
  339. io_uring_prep_send(sqe, sockfd, NULL, 0, 0);
  340. sqe->user_data = 10 + i;
  341. sqe->flags |= IOSQE_BUFFER_SELECT;
  342. sqe->buf_group = SEND_BGID;
  343. ret = io_uring_submit(ring);
  344. if (ret != 1)
  345. return 1;
  346. if (!i)
  347. pthread_barrier_wait(&rd->barrier);
  348. ret = io_uring_wait_cqe(ring, &cqe);
  349. if (ret) {
  350. fprintf(stderr, "send wait cqe %d\n", ret);
  351. return 1;
  352. }
  353. if (!i && cqe->res == -EINVAL) {
  354. rd->abort = 1;
  355. no_send_mshot = 1;
  356. break;
  357. }
  358. if (cqe->res != MSG_SIZE) {
  359. fprintf(stderr, "send failed cqe: %d\n", cqe->res);
  360. return 1;
  361. }
  362. if (cqe->res < 0) {
  363. fprintf(stderr, "bad send cqe res: %d\n", cqe->res);
  364. return 1;
  365. }
  366. bytes_needed -= cqe->res;
  367. io_uring_cqe_seen(ring, cqe);
  368. if (!bytes_needed)
  369. break;
  370. }
  371. return 0;
  372. }
/*
 * Sender side, run from the main thread. Creates its own ring, fills a
 * SEND_BGID provided-buffer group (buffer ring or classic, matching
 * 'classic_buffers') with sequenced messages, connects to the receiver,
 * optionally pre-fills the socket with plain send() calls until EAGAIN
 * (rd->max_sends bound, count reported in rd->to_eagain), then transmits
 * nr_msgs messages via buffer-select send - bundled if requested and TCP.
 *
 * Returns 0 on success or graceful skip, 1 on error.
 */
static int do_send(struct recv_data *rd)
{
	struct sockaddr_in saddr;
	struct io_uring ring;
	unsigned long seq_buf[SEQ_SIZE], send_seq;
	struct io_uring_params p = { };
	struct io_uring_buf_ring *br;
	int sockfd, ret, len, i;
	socklen_t optlen;
	void *buf = NULL, *ptr;

	ret = io_uring_queue_init_params(16, &ring, &p);
	if (ret) {
		fprintf(stderr, "queue init failed: %d\n", ret);
		return 1;
	}
	/* no bundle support at all: flag skip and release the receiver */
	if (!(p.features & IORING_FEAT_RECVSEND_BUNDLE)) {
		rd->abort = 1;
		no_send_mshot = 1;
		pthread_barrier_wait(&rd->connect);
		return 0;
	}

	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), MSG_SIZE * nr_msgs))
		return 1;

	if (!classic_buffers) {
		br = io_uring_setup_buf_ring(&ring, nr_msgs, SEND_BGID, 0, &ret);
		if (!br) {
			if (ret == -EINVAL) {
				fprintf(stderr, "einval on br setup\n");
				return 0;
			}
			fprintf(stderr, "failed setting up send ring %d\n", ret);
			return 1;
		}
		ptr = buf;
		for (i = 0; i < nr_msgs; i++) {
			io_uring_buf_ring_add(br, ptr, MSG_SIZE, i, nr_msgs - 1, i);
			ptr += MSG_SIZE;
		}
		io_uring_buf_ring_advance(br, nr_msgs);
	} else {
		ret = provide_classic_buffers(&ring, buf, nr_msgs, SEND_BGID);
		if (ret) {
			fprintf(stderr, "failed providing classic buffers\n");
			return ret;
		}
	}

	memset(&saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	saddr.sin_port = htons(use_port);
	inet_pton(AF_INET, HOST, &saddr.sin_addr);

	if (use_tcp)
		sockfd = socket(AF_INET, SOCK_STREAM, 0);
	else
		sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd < 0) {
		perror("socket");
		goto err2;
	}

	/* receiver is listening/bound once this barrier is passed */
	pthread_barrier_wait(&rd->connect);

	ret = connect(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
	if (ret < 0) {
		perror("connect");
		goto err;
	}

	pthread_barrier_wait(&rd->startup);

	optlen = sizeof(len);
	len = 1024 * MSG_SIZE;
	setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, &len, optlen);

	/* almost fill queue, leave room for one message */
	send_seq = 0;
	rd->to_eagain = 0;
	while (rd->max_sends && rd->max_sends--) {
		for (i = 0; i < SEQ_SIZE; i++)
			seq_buf[i] = send_seq++;

		ret = send(sockfd, seq_buf, sizeof(seq_buf), MSG_DONTWAIT);
		if (ret < 0) {
			if (errno == EAGAIN) {
				/* this message was never sent, roll the sequence back */
				send_seq -= SEQ_SIZE;
				break;
			}
			perror("send");
			return 1;
		} else if (ret != sizeof(seq_buf)) {
			fprintf(stderr, "short %d send\n", ret);
			return 1;
		}

		rd->to_eagain++;
		rd->recv_bytes += sizeof(seq_buf);
	}

	/* stamp the provided buffers with the continuing sequence values */
	ptr = buf;
	for (i = 0; i < nr_msgs; i++) {
		unsigned long *pseq = ptr;
		int j;

		for (j = 0; j < SEQ_SIZE; j++)
			pseq[j] = send_seq++;
		ptr += MSG_SIZE;
	}

	/* prepare more messages, sending with bundle */
	rd->recv_bytes += (nr_msgs * MSG_SIZE);
	if (rd->send_bundle && use_tcp)
		ret = __do_send_bundle(rd, &ring, sockfd);
	else
		ret = __do_send(rd, &ring, sockfd);
	if (ret)
		goto err;

	pthread_barrier_wait(&rd->finish);

	close(sockfd);
	io_uring_queue_exit(&ring);
	free(buf);
	return 0;
err:
	close(sockfd);
err2:
	io_uring_queue_exit(&ring);
	pthread_barrier_wait(&rd->finish);
	free(buf);
	return 1;
}
/*
 * Run one sender/receiver round.
 *
 * backlog/max_sends: pre-fill the socket with up to max_sends plain
 * send() calls before the bundle send (TCP only; skipped for UDP).
 * to_eagain: if non-NULL, receives how many pre-fill sends completed
 * before EAGAIN. send_bundle/recv_bundle select bundle mode per side.
 *
 * Returns 0/T_EXIT_PASS on success, non-zero on failure.
 */
static int test(int backlog, unsigned int max_sends, int *to_eagain,
		int send_bundle, int recv_bundle)
{
	pthread_t recv_thread;
	struct recv_data rd;
	int ret;
	void *retval;

	/* backlog not reliable on UDP, skip it */
	if ((backlog || max_sends) && !use_tcp)
		return T_EXIT_PASS;

	memset(&rd, 0, sizeof(rd));
	/* all barriers rendezvous the two threads (sender + receiver) */
	pthread_barrier_init(&rd.connect, NULL, 2);
	pthread_barrier_init(&rd.startup, NULL, 2);
	pthread_barrier_init(&rd.barrier, NULL, 2);
	pthread_barrier_init(&rd.finish, NULL, 2);
	rd.max_sends = max_sends;
	if (to_eagain)
		*to_eagain = 0;
	rd.send_bundle = send_bundle;
	rd.recv_bundle = recv_bundle;

	ret = pthread_create(&recv_thread, NULL, recv_fn, &rd);
	if (ret) {
		fprintf(stderr, "Thread create failed: %d\n", ret);
		return 1;
	}

	ret = do_send(&rd);
	/* unsupported kernel: tell the receiver to bail and report success */
	if (no_send_mshot) {
		fprintf(stderr, "no_send_mshot, aborting (ignore other errors)\n");
		rd.abort = 1;
		pthread_join(recv_thread, &retval);
		return 0;
	}
	if (ret)
		return ret;

	pthread_join(recv_thread, &retval);
	if (to_eagain)
		*to_eagain = rd.to_eagain;
	/* receiver's exit status was smuggled through the thread return value */
	return (intptr_t)retval;
}
  530. static int run_tests(int is_udp)
  531. {
  532. int ret, eagain_hit;
  533. nr_msgs = NR_MIN_MSGS;
  534. /* test basic send bundle first */
  535. ret = test(0, 0, NULL, 0, 0);
  536. if (ret) {
  537. fprintf(stderr, "test a failed\n");
  538. return T_EXIT_FAIL;
  539. }
  540. if (no_send_mshot)
  541. return T_EXIT_SKIP;
  542. /* test recv bundle */
  543. ret = test(0, 0, NULL, 0, 1);
  544. if (ret) {
  545. fprintf(stderr, "test b failed\n");
  546. return T_EXIT_FAIL;
  547. }
  548. /* test bundling recv and send */
  549. ret = test(0, 0, NULL, 1, 1);
  550. if (ret) {
  551. fprintf(stderr, "test c failed\n");
  552. return T_EXIT_FAIL;
  553. }
  554. /* test bundling with full socket */
  555. ret = test(1, 1000000, &eagain_hit, 1, 1);
  556. if (ret) {
  557. fprintf(stderr, "test d failed\n");
  558. return T_EXIT_FAIL;
  559. }
  560. /* test bundling with almost full socket */
  561. ret = test(1, eagain_hit - (nr_msgs / 2), NULL, 1, 1);
  562. if (ret) {
  563. fprintf(stderr, "test e failed\n");
  564. return T_EXIT_FAIL;
  565. }
  566. /* test recv bundle with almost full socket */
  567. ret = test(1, eagain_hit - (nr_msgs / 2), NULL, 0, 1);
  568. if (ret) {
  569. fprintf(stderr, "test f failed\n");
  570. return T_EXIT_FAIL;
  571. }
  572. if (is_udp)
  573. return T_EXIT_PASS;
  574. /* test send bundle with almost full socket */
  575. ret = test(1, eagain_hit - (nr_msgs / 2), &eagain_hit, 1, 0);
  576. if (ret) {
  577. fprintf(stderr, "test g failed\n");
  578. return T_EXIT_FAIL;
  579. }
  580. /* now repeat the last three tests, but with > FAST_UIOV segments */
  581. nr_msgs = NR_MAX_MSGS;
  582. /* test bundling with almost full socket */
  583. ret = test(1, eagain_hit - (nr_msgs / 2), NULL, 1, 1);
  584. if (ret) {
  585. fprintf(stderr, "test h failed\n");
  586. return T_EXIT_FAIL;
  587. }
  588. /* test recv bundle with almost full socket */
  589. ret = test(1, eagain_hit - (nr_msgs / 2), NULL, 0, 1);
  590. if (ret) {
  591. fprintf(stderr, "test i failed\n");
  592. return T_EXIT_FAIL;
  593. }
  594. /* test send bundle with almost full socket */
  595. ret = test(1, eagain_hit - (nr_msgs / 2), &eagain_hit, 1, 0);
  596. if (ret) {
  597. fprintf(stderr, "test j failed\n");
  598. return T_EXIT_FAIL;
  599. }
  600. return T_EXIT_PASS;
  601. }
  602. static int test_tcp(void)
  603. {
  604. int ret;
  605. use_tcp = 1;
  606. ret = run_tests(false);
  607. if (ret == T_EXIT_FAIL)
  608. fprintf(stderr, "TCP test case (classic=%d) failed\n", classic_buffers);
  609. return ret;
  610. }
  611. static int test_udp(void)
  612. {
  613. int ret;
  614. use_tcp = 0;
  615. use_port++;
  616. ret = run_tests(true);
  617. if (ret == T_EXIT_FAIL)
  618. fprintf(stderr, "UDP test case (classic=%d) failed\n", classic_buffers);
  619. return ret;
  620. }
  621. int main(int argc, char *argv[])
  622. {
  623. int ret;
  624. if (argc > 1)
  625. return T_EXIT_SKIP;
  626. ret = test_tcp();
  627. if (ret != T_EXIT_PASS)
  628. return ret;
  629. ret = test_udp();
  630. if (ret != T_EXIT_PASS)
  631. return ret;
  632. classic_buffers = 1;
  633. ret = test_tcp();
  634. if (ret != T_EXIT_PASS)
  635. return ret;
  636. ret = test_udp();
  637. if (ret != T_EXIT_PASS)
  638. return ret;
  639. return T_EXIT_PASS;
  640. }