poll-mshot-overflow.c

#include "../config-host.h"
// SPDX-License-Identifier: MIT

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <poll.h>
#include <sys/wait.h>

#include "liburing.h"
#include "helpers.h"
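
/*
 * Description: test multishot poll when the CQ ring overflows. Runs on
 * both plain rings and DEFER_TASKRUN rings, and verifies that a multishot
 * poll whose completion overflows either keeps IORING_CQE_F_MORE set or
 * terminates cleanly without it.
 */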

static int check_final_cqe(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int count = 0;
	bool signalled_no_more = false;

	while (!io_uring_peek_cqe(ring, &cqe)) {
		if (cqe->user_data == 1) {
			count++;
			/* no poll CQE may follow one that cleared MORE */
			if (signalled_no_more) {
				fprintf(stderr, "signalled no more!\n");
				return T_EXIT_FAIL;
			}
			if (!(cqe->flags & IORING_CQE_F_MORE))
				signalled_no_more = true;
		} else if (cqe->user_data != 3) {
			/* user_data 3 is the poll remove request */
			fprintf(stderr, "%d: got unexpected %d\n", count, (int)cqe->user_data);
			return T_EXIT_FAIL;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	if (!count) {
		fprintf(stderr, "no cqe\n");
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}
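
/*
 * Set up a CQ ring of only 2 entries, arm a multishot poll, then submit
 * two nops so the poll completion has to go to the overflow list. Finally
 * cancel the poll and check that the CQE flags stayed consistent.
 */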
static int test(bool defer_taskrun)
{
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct io_uring ring;
	int pipe1[2];
	int ret, i;

	if (pipe(pipe1) != 0) {
		perror("pipe");
		return T_EXIT_FAIL;
	}

	struct io_uring_params params = {
		/* cheat: use the existence of SINGLE_ISSUER to know if this
		 * behaviour is updated
		 */
		.flags = IORING_SETUP_CQSIZE | IORING_SETUP_SINGLE_ISSUER,
		.cq_entries = 2
	};

	if (defer_taskrun)
		params.flags |= IORING_SETUP_SINGLE_ISSUER |
				IORING_SETUP_DEFER_TASKRUN;

	ret = io_uring_queue_init_params(2, &ring, &params);
	if (ret)
		return T_EXIT_SKIP;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "get sqe failed\n");
		return T_EXIT_FAIL;
	}

	io_uring_prep_poll_multishot(sqe, pipe1[0], POLLIN);
	io_uring_sqe_set_data64(sqe, 1);

	if (io_uring_cq_ready(&ring)) {
		fprintf(stderr, "unexpected cqe\n");
		return T_EXIT_FAIL;
	}

	/* the first submit here also pushes the pending poll SQE */
	for (i = 0; i < 2; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		io_uring_sqe_set_data64(sqe, 2);
		io_uring_submit(&ring);
	}

	do {
		errno = 0;
		ret = write(pipe1[1], "foo", 3);
	} while (ret == -1 && errno == EINTR);

	if (ret <= 0) {
		fprintf(stderr, "write failed: %d\n", errno);
		return T_EXIT_FAIL;
	}

	/* should have 2 cqes + 1 overflow now, so take out two cqes */
	for (i = 0; i < 2; i++) {
		if (io_uring_peek_cqe(&ring, &cqe)) {
			fprintf(stderr, "unexpectedly no cqe\n");
			return T_EXIT_FAIL;
		}
		if (cqe->user_data != 2) {
			fprintf(stderr, "unexpected user_data\n");
			return T_EXIT_FAIL;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	/* make sure everything is processed */
	io_uring_get_events(&ring);

	/* now remove the poll */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_remove(sqe, 1);
	io_uring_sqe_set_data64(sqe, 3);
	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "bad poll remove\n");
		return T_EXIT_FAIL;
	}

	ret = check_final_cqe(&ring);

	close(pipe1[0]);
	close(pipe1[1]);
	io_uring_queue_exit(&ring);

	return ret;
}
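
/*
 * Arm a multishot poll against a 2-entry CQ ring and trigger it 8 times.
 * On kernels that downgrade a multishot poll when its completion
 * overflows, the final CQE must clear IORING_CQE_F_MORE; every earlier
 * CQE must still carry it.
 */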
static int test_downgrade(bool support_defer)
{
	struct io_uring_cqe cqes[128];
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct io_uring ring;
	int fds[2];
	int ret, i, cqe_count, tmp = 0, more_cqe_count;

	if (pipe(fds) != 0) {
		perror("pipe");
		return -1;
	}

	struct io_uring_params params = {
		.flags = IORING_SETUP_CQSIZE,
		.cq_entries = 2
	};

	ret = io_uring_queue_init_params(2, &ring, &params);
	if (ret) {
		fprintf(stderr, "queue init: %d\n", ret);
		return -1;
	}

	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "get sqe failed\n");
		return -1;
	}

	io_uring_prep_poll_multishot(sqe, fds[0], POLLIN);
	io_uring_sqe_set_data64(sqe, 1);
	io_uring_submit(&ring);

	/* trigger the poll 8 times, draining the pipe each time so that
	 * every write generates a fresh POLLIN event
	 */
	for (i = 0; i < 8; i++) {
		ret = write(fds[1], &tmp, sizeof(tmp));
		if (ret != sizeof(tmp)) {
			perror("write");
			return -1;
		}
		ret = read(fds[0], &tmp, sizeof(tmp));
		if (ret != sizeof(tmp)) {
			perror("read");
			return -1;
		}
	}

	cqe_count = 0;
	while (!io_uring_peek_cqe(&ring, &cqe)) {
		cqes[cqe_count++] = *cqe;
		io_uring_cqe_seen(&ring, cqe);
	}

	/* Some kernels might allow the multishot poll to keep overflowing,
	 * but if they don't, the final CQE must drop the MORE flag.
	 */
	if (cqe_count < 3) {
		fprintf(stderr, "too few cqes: %d\n", cqe_count);
		return -1;
	} else if (cqe_count == 8) {
		more_cqe_count = cqe_count;
		/* downgrade is only expected on kernels that support
		 * DEFER_TASKRUN
		 */
		if (support_defer) {
			fprintf(stderr, "did not downgrade on overflow\n");
			return -1;
		}
	} else {
		more_cqe_count = cqe_count - 1;
		cqe = &cqes[cqe_count - 1];
		if (cqe->flags & IORING_CQE_F_MORE) {
			fprintf(stderr, "incorrect MORE flag %x\n", cqe->flags);
			return -1;
		}
	}

	for (i = 0; i < more_cqe_count; i++) {
		cqe = &cqes[i];
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			fprintf(stderr, "missing MORE flag\n");
			return -1;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "bad res: %d\n", cqe->res);
			return -1;
		}
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
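
/*
 * Run the downgrade test first, then the overflow test both without and
 * (where supported) with DEFER_TASKRUN.
 */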
int main(int argc, char *argv[])
{
	int ret;
	bool support_defer;

	if (argc > 1)
		return T_EXIT_SKIP;

	support_defer = t_probe_defer_taskrun();
	ret = test_downgrade(support_defer);
	if (ret) {
		fprintf(stderr, "%s: test_downgrade(%d) failed\n", argv[0], support_defer);
		return T_EXIT_FAIL;
	}

	ret = test(false);
	if (ret == T_EXIT_SKIP)
		return ret;
	if (ret != T_EXIT_PASS) {
		fprintf(stderr, "%s: test(false) failed\n", argv[0]);
		return ret;
	}

	if (support_defer) {
		ret = test(true);
		if (ret != T_EXIT_PASS) {
			fprintf(stderr, "%s: test(true) failed\n", argv[0]);
			return ret;
		}
	}

	return ret;
}