
/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef EVENT__HAVE_KQUEUE

#include <sys/types.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef EVENT__HAVE_INTTYPES_H
#include <inttypes.h>
#endif

/* Some platforms apparently define the udata field of struct kevent as
 * intptr_t, whereas others define it as void*.  There doesn't seem to be an
 * easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(__NetBSD__)
#define PTR_TO_UDATA(x)	((typeof(((struct kevent *)0)->udata))(x))
#define INT_TO_UDATA(x)	((typeof(((struct kevent *)0)->udata))(intptr_t)(x))
#elif defined(EVENT__HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__) && !defined(__CloudABI__)
#define PTR_TO_UDATA(x)	((intptr_t)(x))
#define INT_TO_UDATA(x)	((intptr_t)(x))
#else
#define PTR_TO_UDATA(x)	(x)
#define INT_TO_UDATA(x)	((void*)(x))
#endif

#include "event-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "evthread-internal.h"
#include "changelist-internal.h"
#include "kqueue-internal.h"

#define NEVENT		64
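
/* Per-base state for the kqueue backend.  NEVENT above is the initial
 * size of both arrays.  "changes" queues kevents we still have to hand
 * to the kernel; "events" receives what the kernel reports back; "kq"
 * is the kqueue fd itself; "notify_event_added" records whether the
 * EVFILT_USER wakeup event has been registered; and "pid" remembers
 * which process created the kqueue, since kqueue descriptors are not
 * inherited across fork(). */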
struct kqop {
	struct kevent *changes;
	int changes_size;

	struct kevent *events;
	int events_size;
	int kq;
	int notify_event_added;
	pid_t pid;
};

static void kqop_free(struct kqop *kqop);

static void *kq_init(struct event_base *);
static int kq_sig_add(struct event_base *, int, short, short, void *);
static int kq_sig_del(struct event_base *, int, short, short, void *);
static int kq_dispatch(struct event_base *, struct timeval *);
static void kq_dealloc(struct event_base *);
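
/* Positional initializers for struct eventop (see event-internal.h):
 * name, init, add, del, dispatch, dealloc, need_reinit, supported
 * features, and the size of the per-fd data the changelist keeps for
 * us.  For fd events we delegate add/del to the generic changelist
 * code; kqsigops handles only signal add/del, so most of its slots
 * are NULL. */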
const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add_,
	event_changelist_del_,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE
};

static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,
	kq_sig_add,
	kq_sig_del,
	NULL,
	NULL,
	1 /* need reinit */,
	0,
	0
};

static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

	/* Initialize the kernel queue */
	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;
	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

	/* Check for Mac OS X kqueue bug. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    (int)kqueueop->events[0].ident != -1 ||
	    !(kqueueop->events[0].flags & EV_ERROR)) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		goto err;
	}

	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}
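
/* Magic value stored in a kevent's udata field when the change is an
 * EV_ADD, so that errors the kernel reports back can be told apart
 * from errors on an EV_DELETE (whose udata stays zero).  See the
 * EPERM/EPIPE handling in kq_dispatch(). */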
#define ADD_UDATA 0x30303

static void
kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
{
	memset(out, 0, sizeof(struct kevent));
	out->ident = fd;
	out->filter = filter;

	if (change & EV_CHANGE_ADD) {
		out->flags = EV_ADD;
		/* We set a magic number here so that we can tell 'add'
		 * errors from 'del' errors. */
		out->udata = INT_TO_UDATA(ADD_UDATA);
		if (change & EV_ET)
			out->flags |= EV_CLEAR;
#ifdef NOTE_EOF
		/* Make it behave like select() and poll() */
		if (filter == EVFILT_READ)
			out->fflags = NOTE_EOF;
#endif
	} else {
		EVUTIL_ASSERT(change & EV_CHANGE_DEL);
		out->flags = EV_DELETE;
	}
}
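
/* Translate the base's pending changelist into kqop->changes, growing
 * the array as needed.  A single changelist entry can produce up to
 * two kevents (one for EVFILT_READ and one for EVFILT_WRITE), which is
 * why the size check below leaves one slot of headroom.  Returns the
 * number of kevents written, or -1 on overflow or allocation failure. */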
static int
kq_build_changes_list(const struct event_changelist *changelist,
    struct kqop *kqop)
{
	int i;
	int n_changes = 0;

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *in_ch = &changelist->changes[i];
		struct kevent *out_ch;
		if (n_changes >= kqop->changes_size - 1) {
			int newsize;
			struct kevent *newchanges;

			if (kqop->changes_size > INT_MAX / 2 ||
			    (size_t)kqop->changes_size * 2 > EV_SIZE_MAX /
			    sizeof(struct kevent)) {
				event_warnx("%s: int overflow", __func__);
				return (-1);
			}

			newsize = kqop->changes_size * 2;
			newchanges = mm_realloc(kqop->changes,
			    newsize * sizeof(struct kevent));
			if (newchanges == NULL) {
				event_warn("%s: realloc", __func__);
				return (-1);
			}
			kqop->changes = newchanges;
			kqop->changes_size = newsize;
		}
		if (in_ch->read_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
			    in_ch->read_change);
		}
		if (in_ch->write_change) {
			out_ch = &kqop->changes[n_changes++];
			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
			    in_ch->write_change);
		}
	}
	return n_changes;
}
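
/* Resize the kernel-result buffer to hold new_size kevents.  Returns 0
 * on success; on realloc failure, returns -1 and leaves the old buffer
 * intact. */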
static int
kq_grow_events(struct kqop *kqop, size_t new_size)
{
	struct kevent *newresult;

	newresult = mm_realloc(kqop->events,
	    new_size * sizeof(struct kevent));

	if (newresult) {
		kqop->events = newresult;
		kqop->events_size = new_size;
		return 0;
	} else {
		return -1;
	}
}
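
/* Flush the pending changelist to the kernel and wait up to *tv for
 * events.  We "steal" the changes array before dropping the base lock
 * around the kevent() call, so that a second (buggy, concurrent)
 * dispatch cannot reuse it while we are unlocked. */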
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all_(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;
#if defined(__FreeBSD__)
			/*
			 * This currently occurs if an FD is closed
			 * before the EV_DELETE makes it out via kevent().
			 * The FreeBSD capabilities code sees the blank
			 * capability set and rejects the request to
			 * modify an event.
			 *
			 * To be strictly correct - when an FD is closed,
			 * all the registered events are also removed.
			 * Queuing EV_DELETE to a closed FD is wrong.
			 * The event(s) should just be deleted from
			 * the pending changelist.
			 */
			case ENOTCAPABLE:
				continue;
#endif

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'.  It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
#ifdef EVFILT_USER
		} else if (events[i].filter == EVFILT_USER) {
			base->is_notify_pending = 0;
#endif
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active_(base, events[i].ident, 1);
		} else {
			evmap_io_active_(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}

static void
kqop_free(struct kqop *kqop)
{
	if (kqop->changes)
		mm_free(kqop->changes);
	if (kqop->events)
		mm_free(kqop->events);
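	/* Only close the kqueue fd if we are still the process that
	 * created it: kqueue descriptors are not inherited by the
	 * children of a fork(). */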
	if (kqop->kq >= 0 && kqop->pid == getpid())
		close(kqop->kq);
	memset(kqop, 0, sizeof(struct kqop));
	mm_free(kqop);
}

static void
kq_dealloc(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
	evsig_dealloc_(base);
	kqop_free(kqop);
}

/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_ADD;

	/* Be ready for the signal if it is sent any
	 * time between now and the next call to
	 * kq_dispatch. */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	/* We can set the handler for most signals to SIG_IGN and
	 * still have them reported to us in the queue.  However,
	 * if the handler for SIGCHLD is SIG_IGN, the system reaps
	 * zombie processes for us, and we don't get any notification.
	 * This appears to be the only signal with this quirk. */
	if (evsig_set_handler_(base, nsignal,
		nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
		return (-1);

	return (0);
}

static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
{
	struct kqop *kqop = base->evbase;
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
	(void)p;

	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);

	memset(&kev, 0, sizeof(kev));
	kev.ident = nsignal;
	kev.filter = EVFILT_SIGNAL;
	kev.flags = EV_DELETE;

	/* Because we insert signal events
	 * immediately, we need to delete them
	 * immediately, too */
	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
		return (-1);

	if (evsig_restore_handler_(base, nsignal) == -1)
		return (-1);

	return (0);
}

/* OSX 10.6 and FreeBSD 8.1 add support for EVFILT_USER, which we can use
 * to wake up the event loop from another thread. */

/* Magic number we use for our filter ID. */
#define NOTIFY_IDENT 42

int
event_kq_add_notify_event_(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
#endif

	if (kqop->notify_event_added)
		return 0;

#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	memset(&kev, 0, sizeof(kev));
	kev.ident = NOTIFY_IDENT;
	kev.filter = EVFILT_USER;
	kev.flags = EV_ADD | EV_CLEAR;

	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
		event_warn("kevent: adding EVFILT_USER event");
		return -1;
	}

	kqop->notify_event_added = 1;

	return 0;
#else
	return -1;
#endif
}

int
event_kq_notify_base_(struct event_base *base)
{
	struct kqop *kqop = base->evbase;
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	struct kevent kev;
	struct timespec timeout = { 0, 0 };
#endif
	if (! kqop->notify_event_added)
		return -1;

#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
	memset(&kev, 0, sizeof(kev));
	kev.ident = NOTIFY_IDENT;
	kev.filter = EVFILT_USER;
	kev.fflags = NOTE_TRIGGER;

	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
		event_warn("kevent: triggering EVFILT_USER event");
		return -1;
	}

	return 0;
#else
	return -1;
#endif
}

#endif /* EVENT__HAVE_KQUEUE */