#include "../config-host.h"
/* SPDX-License-Identifier: MIT */
/*
 * Description: exercise futex wait/wake/waitv
 *
 */
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <linux/futex.h>

#include "liburing.h"
#include "helpers.h"

#define LOOPS	500
#define NFUTEX	8
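
/*
 * Fallback definitions of the futex2 flags, for builds against kernel
 * headers that don't provide them yet.
 */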
#ifndef FUTEX2_SIZE_U8
#define FUTEX2_SIZE_U8		0x00
#define FUTEX2_SIZE_U16		0x01
#define FUTEX2_SIZE_U32		0x02
#define FUTEX2_SIZE_U64		0x03
#define FUTEX2_NUMA		0x04
			/* 0x08 */
			/* 0x10 */
			/* 0x20 */
			/* 0x40 */
#define FUTEX2_PRIVATE		FUTEX_PRIVATE_FLAG

#define FUTEX2_SIZE_MASK	0x03
#endif

static int no_futex;
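
/*
 * Waker thread: set the futex value and submit a single futex wake for it
 * on a ring private to this thread.
 */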
static void *fwake(void *data)
{
	unsigned int *futex = data;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(1, &ring, 0);
	if (ret) {
		fprintf(stderr, "queue init: %d\n", ret);
		return NULL;
	}

	*futex = 1;
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_futex_wake(sqe, futex, 1, FUTEX_BITSET_MATCH_ANY,
				 FUTEX2_SIZE_U32, 0);
	sqe->user_data = 3;
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait: %d\n", ret);
		return NULL;
	}
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return NULL;
}
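
/*
 * Submit a single futex wait (or a waitv over NFUTEX futexes), spawn one
 * waker thread per futex, and race an async cancel against the wakes.
 * Both the wait and the cancel must complete, and no spurious CQEs may be
 * left behind.
 */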
static int __test(struct io_uring *ring, int vectored, int async,
		  int async_cancel)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct futex_waitv fw[NFUTEX];
	unsigned int *futex;
	pthread_t threads[NFUTEX];
	void *tret;
	int ret, i, nfutex;

	nfutex = NFUTEX;
	if (!vectored)
		nfutex = 1;

	futex = calloc(nfutex, sizeof(*futex));
	for (i = 0; i < nfutex; i++) {
		fw[i].val = 0;
		fw[i].uaddr = (unsigned long) &futex[i];
		fw[i].flags = FUTEX2_SIZE_U32;
		fw[i].__reserved = 0;
	}

	sqe = io_uring_get_sqe(ring);
	if (vectored)
		io_uring_prep_futex_waitv(sqe, fw, nfutex, 0);
	else
		io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
					 FUTEX2_SIZE_U32, 0);
	if (async)
		sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;
	io_uring_submit(ring);
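
	/* Kick off one waker thread per futex */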
	for (i = 0; i < nfutex; i++)
		pthread_create(&threads[i], NULL, fwake, &futex[i]);
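
	/* Queue a cancel for the wait as well, racing it against the wakes */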
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_cancel64(sqe, 1, 0);
	if (async_cancel)
		sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 2;
	io_uring_submit(ring);

	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "parent wait %d\n", ret);
			return 1;
		}
		if (cqe->res == -EINVAL || cqe->res == -EOPNOTSUPP) {
			no_futex = 1;
			free(futex);
			return 0;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	ret = io_uring_peek_cqe(ring, &cqe);
	if (!ret) {
		fprintf(stderr, "peek found cqe!\n");
		return 1;
	}

	for (i = 0; i < nfutex; i++)
		pthread_join(threads[i], &tret);

	free(futex);
	return 0;
}
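
/*
 * Run __test() LOOPS times on a ring created with the given setup flags,
 * periodically flipping async submission of the wait and of the cancel.
 */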
static int test(int flags, int vectored)
{
	struct io_uring ring;
	int ret, i;

	ret = io_uring_queue_init(8, &ring, flags);
	if (ret)
		return ret;

	for (i = 0; i < LOOPS; i++) {
		int async_cancel = !(i % 2);
		int async_wait = !(i % 3);

		ret = __test(&ring, vectored, async_wait, async_cancel);
		if (ret) {
			fprintf(stderr, "flags=%x, failed=%d\n", flags, i);
			break;
		}
		if (no_futex)
			break;
	}

	io_uring_queue_exit(&ring);
	return ret;
}
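
/*
 * Queue two waits on the same futex but issue only a single wake: exactly
 * one wait and the wake must complete, and the leftover wait is then
 * cancelled synchronously.
 */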
static int test_order(int vectored, int async)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct futex_waitv fw = { };
	struct io_uring_sync_cancel_reg reg = { };
	struct io_uring ring;
	unsigned int *futex;
	int ret, i;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	futex = malloc(sizeof(*futex));
	*futex = 0;

	fw.uaddr = (unsigned long) futex;
	fw.flags = FUTEX2_SIZE_U32;

	/*
	 * Submit two futex waits
	 */
	sqe = io_uring_get_sqe(&ring);
	if (!vectored)
		io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
					 FUTEX2_SIZE_U32, 0);
	else
		io_uring_prep_futex_waitv(sqe, &fw, 1, 0);
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	if (!vectored)
		io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
					 FUTEX2_SIZE_U32, 0);
	else
		io_uring_prep_futex_waitv(sqe, &fw, 1, 0);
	sqe->user_data = 2;
	io_uring_submit(&ring);

	/*
	 * Now submit wake for just one futex
	 */
	*futex = 1;
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_futex_wake(sqe, futex, 1, FUTEX_BITSET_MATCH_ANY,
				 FUTEX2_SIZE_U32, 0);
	sqe->user_data = 100;
	if (async)
		sqe->flags |= IOSQE_ASYNC;
	io_uring_submit(&ring);

	/*
	 * We expect to find completions for the first futex wait, and
	 * the futex wake. We should not see the last futex wait.
	 */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait %d\n", ret);
			return 1;
		}
		if (cqe->user_data == 1 || cqe->user_data == 100) {
			io_uring_cqe_seen(&ring, cqe);
			continue;
		}
		fprintf(stderr, "unexpected cqe %lu, res %d\n",
			(unsigned long) cqe->user_data, cqe->res);
		return 1;
	}

	ret = io_uring_peek_cqe(&ring, &cqe);
	if (ret != -EAGAIN) {
		fprintf(stderr, "Unexpected cqe available: %d\n", cqe->res);
		return 1;
	}
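
	/*
	 * The second wait (user_data 2) should still be pending; cancel it
	 * synchronously so the ring can be torn down cleanly.
	 */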
	reg.addr = 2;
	ret = io_uring_register_sync_cancel(&ring, &reg);
	if (ret != 1) {
		fprintf(stderr, "Failed to cancel pending futex wait: %d\n", ret);
		return 1;
	}

	io_uring_queue_exit(&ring);
	free(futex);
	return 0;
}
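
/*
 * Queue two waits on the same futex and wake both with a single wake
 * (count 2): all three requests must complete successfully.
 */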
static int test_multi_wake(int vectored)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct futex_waitv fw;
	struct io_uring ring;
	unsigned int *futex;
	int ret, i;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	futex = malloc(sizeof(*futex));
	*futex = 0;

	fw.val = 0;
	fw.uaddr = (unsigned long) futex;
	fw.flags = FUTEX2_SIZE_U32;
	fw.__reserved = 0;

	/*
	 * Submit two futex waits
	 */
	sqe = io_uring_get_sqe(&ring);
	if (!vectored)
		io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
					 FUTEX2_SIZE_U32, 0);
	else
		io_uring_prep_futex_waitv(sqe, &fw, 1, 0);
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	if (!vectored)
		io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
					 FUTEX2_SIZE_U32, 0);
	else
		io_uring_prep_futex_waitv(sqe, &fw, 1, 0);
	sqe->user_data = 2;
	io_uring_submit(&ring);

	/*
	 * Now submit wake for both futexes
	 */
	*futex = 1;
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_futex_wake(sqe, futex, 2, FUTEX_BITSET_MATCH_ANY,
				 FUTEX2_SIZE_U32, 0);
	sqe->user_data = 100;
	io_uring_submit(&ring);
	/*
	 * We expect to find completions for both futex waits, and
	 * the futex wake.
	 */
	for (i = 0; i < 3; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait %d\n", ret);
			return 1;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "cqe error %d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	ret = io_uring_peek_cqe(&ring, &cqe);
	if (!ret) {
		fprintf(stderr, "peek found cqe!\n");
		return 1;
	}

	io_uring_queue_exit(&ring);
	free(futex);
	return 0;
}

/*
 * Test that waking 0 futexes returns 0
 */
static int test_wake_zero(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	unsigned int *futex;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	futex = malloc(sizeof(*futex));
	*futex = 0;

	sqe = io_uring_get_sqe(&ring);
	sqe->user_data = 1;
	io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
				 FUTEX2_SIZE_U32, 0);
	io_uring_submit(&ring);

	sqe = io_uring_get_sqe(&ring);
	sqe->user_data = 2;
	io_uring_prep_futex_wake(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY,
				 FUTEX2_SIZE_U32, 0);
	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait: %d\n", ret);
		return 1;
	}
	/*
	 * Should get zero res and it should be the wake
	 */
	if (cqe->res || cqe->user_data != 2) {
		fprintf(stderr, "cqe res %d, data %ld\n", cqe->res,
			(long) cqe->user_data);
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);

	/*
	 * Should not have the wait complete
	 */
	ret = io_uring_peek_cqe(&ring, &cqe);
	if (!ret) {
		fprintf(stderr, "peek found cqe!\n");
		return 1;
	}

	io_uring_queue_exit(&ring);
	free(futex);
	return 0;
}

/*
 * Test invalid wait/wake/waitv flags
 */
static int test_invalid(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct futex_waitv fw;
	struct io_uring ring;
	unsigned int *futex;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	futex = malloc(sizeof(*futex));
	*futex = 0;

	sqe = io_uring_get_sqe(&ring);
	sqe->user_data = 1;
	io_uring_prep_futex_wait(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY, 0x1000,
				 0);
	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait: %d\n", ret);
		return 1;
	}
	/*
	 * Wait with an invalid flag should fail with -EINVAL
	 */
	if (cqe->res != -EINVAL) {
		fprintf(stderr, "wait cqe res %d\n", cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);

	sqe = io_uring_get_sqe(&ring);
	sqe->user_data = 1;
	io_uring_prep_futex_wake(sqe, futex, 0, FUTEX_BITSET_MATCH_ANY, 0x1000,
				 0);
	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait: %d\n", ret);
		return 1;
	}
	/*
	 * Wake with an invalid flag should fail with -EINVAL
	 */
	if (cqe->res != -EINVAL) {
		fprintf(stderr, "wake cqe res %d\n", cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);

	fw.val = 0;
	fw.uaddr = (unsigned long) futex;
	fw.flags = FUTEX2_SIZE_U32 | 0x1000;
	fw.__reserved = 0;

	sqe = io_uring_get_sqe(&ring);
	sqe->user_data = 1;
	io_uring_prep_futex_waitv(sqe, &fw, 1, 0);
	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait: %d\n", ret);
		return 1;
	}
	/*
	 * Waitv with an invalid flag in the vector should fail with -EINVAL
	 */
	if (cqe->res != -EINVAL) {
		fprintf(stderr, "waitv cqe res %d\n", cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	free(futex);
	return 0;
}

int main(int argc, char *argv[])
{
	int ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	ret = test(0, 0);
	if (ret) {
		fprintf(stderr, "test 0 0 failed\n");
		return T_EXIT_FAIL;
	}
	if (no_futex)
		return T_EXIT_SKIP;

	ret = test(0, 1);
	if (ret) {
		fprintf(stderr, "test 0 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_wake_zero();
	if (ret) {
		fprintf(stderr, "wake 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_invalid();
	if (ret) {
		fprintf(stderr, "test invalid failed\n");
		return T_EXIT_FAIL;
	}

	ret = test(IORING_SETUP_SQPOLL, 0);
	if (ret) {
		fprintf(stderr, "test sqpoll 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test(IORING_SETUP_SQPOLL, 1);
	if (ret) {
		fprintf(stderr, "test sqpoll 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test(IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN, 0);
	if (ret) {
		fprintf(stderr, "test single coop 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test(IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN, 1);
	if (ret) {
		fprintf(stderr, "test single coop 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test(IORING_SETUP_COOP_TASKRUN, 0);
	if (ret) {
		fprintf(stderr, "test taskrun 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test(IORING_SETUP_COOP_TASKRUN, 1);
	if (ret) {
		fprintf(stderr, "test taskrun 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_order(0, 0);
	if (ret) {
		fprintf(stderr, "test_order 0 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_order(1, 0);
	if (ret) {
		fprintf(stderr, "test_order 1 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_order(0, 1);
	if (ret) {
		fprintf(stderr, "test_order 0 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_order(1, 1);
	if (ret) {
		fprintf(stderr, "test_order 1 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_multi_wake(0);
	if (ret) {
		fprintf(stderr, "multi_wake 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_multi_wake(1);
	if (ret) {
		fprintf(stderr, "multi_wake 1 failed\n");
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}