evmap.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062
  1. /*
  2. * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. * 3. The name of the author may not be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  16. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  17. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  18. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  19. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  20. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  21. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  22. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  23. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  24. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include "event2/event-config.h"
  27. #include "evconfig-private.h"
  28. #ifdef _WIN32
  29. #include <winsock2.h>
  30. #define WIN32_LEAN_AND_MEAN
  31. #include <windows.h>
  32. #undef WIN32_LEAN_AND_MEAN
  33. #endif
  34. #include <sys/types.h>
  35. #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
  36. #include <sys/time.h>
  37. #endif
  38. #include <sys/queue.h>
  39. #include <stdio.h>
  40. #include <stdlib.h>
  41. #ifndef _WIN32
  42. #include <unistd.h>
  43. #endif
  44. #include <errno.h>
  45. #include <limits.h>
  46. #include <signal.h>
  47. #include <string.h>
  48. #include <time.h>
  49. #include "event-internal.h"
  50. #include "evmap-internal.h"
  51. #include "mm-internal.h"
  52. #include "changelist-internal.h"
/** An entry for an evmap_io list: notes all the events that want to read or
    write on a given fd, and the number of each.
 */
struct evmap_io {
	struct event_dlist events;	/* every event registered on this fd */
	ev_uint16_t nread;	/* number of events interested in EV_READ */
	ev_uint16_t nwrite;	/* number of events interested in EV_WRITE */
	ev_uint16_t nclose;	/* number of events interested in EV_CLOSED */
};
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;	/* every event registered on this signal */
};
  67. /* On some platforms, fds start at 0 and increment by 1 as they are
  68. allocated, and old numbers get used. For these platforms, we
  69. implement io maps just like signal maps: as an array of pointers to
  70. struct evmap_io. But on other platforms (windows), sockets are not
  71. 0-indexed, not necessarily consecutive, and not necessarily reused.
  72. There, we use a hashtable to implement evmap_io.
  73. */
  74. #ifdef EVMAP_USE_HT
  75. struct event_map_entry {
  76. HT_ENTRY(event_map_entry) map_node;
  77. evutil_socket_t fd;
  78. union { /* This is a union in case we need to make more things that can
  79. be in the hashtable. */
  80. struct evmap_io evmap_io;
  81. } ent;
  82. };
  83. /* Helper used by the event_io_map hashtable code; tries to return a good hash
  84. * of the fd in e->fd. */
  85. static inline unsigned
  86. hashsocket(struct event_map_entry *e)
  87. {
  88. /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
  89. * matter. Our hashtable implementation really likes low-order bits,
  90. * though, so let's do the rotate-and-add trick. */
  91. unsigned h = (unsigned) e->fd;
  92. h += (h >> 2) | (h << 30);
  93. return h;
  94. }
  95. /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
  96. * have the same e->fd. */
  97. static inline int
  98. eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
  99. {
  100. return e1->fd == e2->fd;
  101. }
/* Declare and generate the hashtable implementation for event_io_map, keyed
 * and hashed on the socket; load factor 0.5, using libevent's mm_* allocators. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)
  105. #define GET_IO_SLOT(x, map, slot, type) \
  106. do { \
  107. struct event_map_entry key_, *ent_; \
  108. key_.fd = slot; \
  109. ent_ = HT_FIND(event_io_map, map, &key_); \
  110. (x) = ent_ ? &ent_->ent.type : NULL; \
  111. } while (0);
/* As GET_IO_SLOT, but construct the entry for 'slot' if it is not present,
 * by allocating enough memory for a 'struct event_map_entry' plus
 * 'fdinfo_len' extra bytes of backend-private per-fd data, and initializing
 * the embedded evmap_io with 'ctor'.  Makes the *enclosing function* return
 * -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
		    });							\
		(x) = &ent_->ent.type;					\
	} while (0)
/* Initialize an event_io_map (hashtable variant) to the empty state. */
void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}
/* Free every entry in the io map, then release the table's own storage. */
void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		/* Advance *before* freeing: HT_NEXT_RMV unlinks *ent. */
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
  145. #endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type' (plus
   'fdinfo_len' bytes of backend-private data), and initializing the new
   value by calling the function 'ctor' on it.  Makes the *enclosing
   function* return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions: on these platforms fds are
   small dense integers, so a flat array works for both maps. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
/* Offset of the backend's per-fd data within an evmap_io allocation. */
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Initialize an event_io_map (array variant) to the empty state. */
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
/* Free all entries and storage held by an event_io_map (array variant). */
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif
  185. /** Expand 'map' with new entries of width 'msize' until it is big enough
  186. to store a value in 'slot'.
  187. */
  188. static int
  189. evmap_make_space(struct event_signal_map *map, int slot, int msize)
  190. {
  191. if (map->nentries <= slot) {
  192. int nentries = map->nentries ? map->nentries : 32;
  193. void **tmp;
  194. if (slot > INT_MAX / 2)
  195. return (-1);
  196. while (nentries <= slot)
  197. nentries <<= 1;
  198. if (nentries > INT_MAX / msize)
  199. return (-1);
  200. tmp = (void **)mm_realloc(map->entries, nentries * msize);
  201. if (tmp == NULL)
  202. return (-1);
  203. memset(&tmp[map->nentries], 0,
  204. (nentries - map->nentries) * msize);
  205. map->nentries = nentries;
  206. map->entries = tmp;
  207. }
  208. return (0);
  209. }
  210. void
  211. evmap_signal_initmap_(struct event_signal_map *ctx)
  212. {
  213. ctx->nentries = 0;
  214. ctx->entries = NULL;
  215. }
  216. void
  217. evmap_signal_clear_(struct event_signal_map *ctx)
  218. {
  219. if (ctx->entries != NULL) {
  220. int i;
  221. for (i = 0; i < ctx->nentries; ++i) {
  222. if (ctx->entries[i] != NULL)
  223. mm_free(ctx->entries[i]);
  224. }
  225. mm_free(ctx->entries);
  226. ctx->entries = NULL;
  227. }
  228. ctx->nentries = 0;
  229. }
  230. /* code specific to file descriptors */
  231. /** Constructor for struct evmap_io */
  232. static void
  233. evmap_io_init(struct evmap_io *entry)
  234. {
  235. LIST_INIT(&entry->events);
  236. entry->nread = 0;
  237. entry->nwrite = 0;
  238. entry->nclose = 0;
  239. }
/* Register 'ev' for IO on 'fd' in the base's io map, telling the backend
 * about any newly interesting event kinds.
 * Return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* Array variant: make sure the slot array covers 'fd'. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	/* Find (or create) the evmap_io for this fd; on allocation failure
	 * the macro makes this function return -1. */
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
	    evsel->fdinfo_len);

	/* Work on int copies of the ev_uint16_t counters so we can detect
	 * overflow past 0xffff before storing them back. */
	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;
	/* 'old' is the interest set the backend already knows about. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;
	/* 'res' collects the kinds that just became interesting (count went
	 * from 0 to 1) and therefore must be reported to the backend. */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (ev->ev_events & EV_CLOSED) {
		if (++nclose == 1)
			res |= EV_CLOSED;
	}
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	/* In debug mode, reject mixing edge-triggered and level-triggered
	 * events on the same fd. */
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		/* Backend-private per-fd data sits just past the evmap_io. */
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	/* Commit the counters only after the backend call succeeded. */
	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	ctx->nclose = (ev_uint16_t) nclose;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}
  311. /* return -1 on error, 0 on success if nothing changed in the event backend,
  312. * and 1 on success if something did. */
  313. int
  314. evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
  315. {
  316. const struct eventop *evsel = base->evsel;
  317. struct event_io_map *io = &base->io;
  318. struct evmap_io *ctx;
  319. int nread, nwrite, nclose, retval = 0;
  320. short res = 0, old = 0;
  321. if (fd < 0)
  322. return 0;
  323. EVUTIL_ASSERT(fd == ev->ev_fd);
  324. #ifndef EVMAP_USE_HT
  325. if (fd >= io->nentries)
  326. return (-1);
  327. #endif
  328. GET_IO_SLOT(ctx, io, fd, evmap_io);
  329. nread = ctx->nread;
  330. nwrite = ctx->nwrite;
  331. nclose = ctx->nclose;
  332. if (nread)
  333. old |= EV_READ;
  334. if (nwrite)
  335. old |= EV_WRITE;
  336. if (nclose)
  337. old |= EV_CLOSED;
  338. if (ev->ev_events & EV_READ) {
  339. if (--nread == 0)
  340. res |= EV_READ;
  341. EVUTIL_ASSERT(nread >= 0);
  342. }
  343. if (ev->ev_events & EV_WRITE) {
  344. if (--nwrite == 0)
  345. res |= EV_WRITE;
  346. EVUTIL_ASSERT(nwrite >= 0);
  347. }
  348. if (ev->ev_events & EV_CLOSED) {
  349. if (--nclose == 0)
  350. res |= EV_CLOSED;
  351. EVUTIL_ASSERT(nclose >= 0);
  352. }
  353. if (res) {
  354. void *extra = ((char*)ctx) + sizeof(struct evmap_io);
  355. if (evsel->del(base, ev->ev_fd,
  356. old, (ev->ev_events & EV_ET) | res, extra) == -1) {
  357. retval = -1;
  358. } else {
  359. retval = 1;
  360. }
  361. }
  362. ctx->nread = nread;
  363. ctx->nwrite = nwrite;
  364. ctx->nclose = nclose;
  365. LIST_REMOVE(ev, ev_io_next);
  366. return (retval);
  367. }
/* Activate every event registered on 'fd' whose interest set overlaps
 * 'events' (EV_ET is masked out of the match but passed through to the
 * activation flags via ev_events). */
void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	/* Array variant: out-of-range fds cannot have any events. */
	if (fd < 0 || fd >= io->nentries)
		return;
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	if (NULL == ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & (events & ~EV_ET))
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}
  386. /* code specific to signals */
  387. static void
  388. evmap_signal_init(struct evmap_signal *entry)
  389. {
  390. LIST_INIT(&entry->events);
  391. }
  392. int
  393. evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
  394. {
  395. const struct eventop *evsel = base->evsigsel;
  396. struct event_signal_map *map = &base->sigmap;
  397. struct evmap_signal *ctx = NULL;
  398. if (sig < 0 || sig >= NSIG)
  399. return (-1);
  400. if (sig >= map->nentries) {
  401. if (evmap_make_space(
  402. map, sig, sizeof(struct evmap_signal *)) == -1)
  403. return (-1);
  404. }
  405. GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
  406. base->evsigsel->fdinfo_len);
  407. if (LIST_EMPTY(&ctx->events)) {
  408. if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
  409. == -1)
  410. return (-1);
  411. }
  412. LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
  413. return (1);
  414. }
  415. int
  416. evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
  417. {
  418. const struct eventop *evsel = base->evsigsel;
  419. struct event_signal_map *map = &base->sigmap;
  420. struct evmap_signal *ctx;
  421. if (sig < 0 || sig >= map->nentries)
  422. return (-1);
  423. GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
  424. LIST_REMOVE(ev, ev_signal_next);
  425. if (LIST_FIRST(&ctx->events) == NULL) {
  426. if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
  427. return (-1);
  428. }
  429. return (1);
  430. }
  431. void
  432. evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
  433. {
  434. struct event_signal_map *map = &base->sigmap;
  435. struct evmap_signal *ctx;
  436. struct event *ev;
  437. if (sig < 0 || sig >= map->nentries)
  438. return;
  439. GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
  440. if (!ctx)
  441. return;
  442. LIST_FOREACH(ev, &ctx->events, ev_signal_next)
  443. event_active_nolock_(ev, EV_SIGNAL, ncalls);
  444. }
  445. void *
  446. evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
  447. {
  448. struct evmap_io *ctx;
  449. GET_IO_SLOT(ctx, map, fd, evmap_io);
  450. if (ctx)
  451. return ((char*)ctx) + sizeof(struct evmap_io);
  452. else
  453. return NULL;
  454. }
/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ or EV_WRITE events.  For each
 * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
 * function, base is the event_base, fd is the file descriptor, evmap_io
 * is an evmap_io structure containing a list of events pending on the
 * file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd.  Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
	/* The two variants below open the same loop with different iteration
	 * machinery; the shared loop body follows the #endif. */
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}
/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal.  Otherwise, return the
 * same value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}
  525. /* Helper for evmap_reinit_: tell the backend to add every fd for which we have
  526. * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
  527. * EV_ET. */
  528. static int
  529. evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
  530. struct evmap_io *ctx, void *arg)
  531. {
  532. const struct eventop *evsel = base->evsel;
  533. void *extra;
  534. int *result = arg;
  535. short events = 0;
  536. struct event *ev;
  537. EVUTIL_ASSERT(ctx);
  538. extra = ((char*)ctx) + sizeof(struct evmap_io);
  539. if (ctx->nread)
  540. events |= EV_READ;
  541. if (ctx->nwrite)
  542. events |= EV_WRITE;
  543. if (ctx->nclose)
  544. events |= EV_CLOSED;
  545. if (evsel->fdinfo_len)
  546. memset(extra, 0, evsel->fdinfo_len);
  547. if (events &&
  548. (ev = LIST_FIRST(&ctx->events)) &&
  549. (ev->ev_events & EV_ET))
  550. events |= EV_ET;
  551. if (evsel->add(base, fd, 0, events, extra) == -1)
  552. *result = -1;
  553. return 0;
  554. }
  555. /* Helper for evmap_reinit_: tell the backend to add every signal for which we
  556. * have pending events. */
  557. static int
  558. evmap_signal_reinit_iter_fn(struct event_base *base,
  559. int signum, struct evmap_signal *ctx, void *arg)
  560. {
  561. const struct eventop *evsel = base->evsigsel;
  562. int *result = arg;
  563. if (!LIST_EMPTY(&ctx->events)) {
  564. if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
  565. *result = -1;
  566. }
  567. return 0;
  568. }
  569. int
  570. evmap_reinit_(struct event_base *base)
  571. {
  572. int result = 0;
  573. evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
  574. if (result < 0)
  575. return -1;
  576. evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
  577. if (result < 0)
  578. return -1;
  579. return 0;
  580. }
  581. /* Helper for evmap_delete_all_: delete every event in an event_dlist. */
  582. static int
  583. delete_all_in_dlist(struct event_dlist *dlist)
  584. {
  585. struct event *ev;
  586. while ((ev = LIST_FIRST(dlist)))
  587. event_del(ev);
  588. return 0;
  589. }
  590. /* Helper for evmap_delete_all_: delete every event pending on an fd. */
  591. static int
  592. evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
  593. struct evmap_io *io_info, void *arg)
  594. {
  595. return delete_all_in_dlist(&io_info->events);
  596. }
  597. /* Helper for evmap_delete_all_: delete every event pending on a signal. */
  598. static int
  599. evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
  600. struct evmap_signal *sig_info, void *arg)
  601. {
  602. return delete_all_in_dlist(&sig_info->events);
  603. }
/* Delete every event registered in 'base', signals first and then fds. */
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}
/** Per-fd structure for use with changelists.  It keeps track, for each fd
 * or signal using the changelist, of where its entry in the changelist is.
 * Stored in the fdinfo bytes that follow each evmap_io / evmap_signal.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
  617. void
  618. event_changelist_init_(struct event_changelist *changelist)
  619. {
  620. changelist->changes = NULL;
  621. changelist->changes_size = 0;
  622. changelist->n_changes = 0;
  623. }
/** Helper: return the changelist_fdinfo corresponding to a given change.
 * The fdinfo lives in the bytes just past the evmap_signal or evmap_io for
 * the change's fd/signal; which map to look in is encoded in read_change's
 * EV_CHANGE_SIGNAL bit. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
  641. /** Callback helper for event_changelist_assert_ok */
  642. static int
  643. event_changelist_assert_ok_foreach_iter_fn(
  644. struct event_base *base,
  645. evutil_socket_t fd, struct evmap_io *io, void *arg)
  646. {
  647. struct event_changelist *changelist = &base->changelist;
  648. struct event_changelist_fdinfo *f;
  649. f = (void*)
  650. ( ((char*)io) + sizeof(struct evmap_io) );
  651. if (f->idxplus1) {
  652. struct event_change *c = &changelist->changes[f->idxplus1 - 1];
  653. EVUTIL_ASSERT(c->fd == fd);
  654. }
  655. return 0;
  656. }
/** Make sure that the changelist is consistent with the evmap structures:
 * every change's fdinfo points back at that change, and every fdinfo that
 * claims a changelist slot points at a change for the same fd. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward direction: change -> fdinfo -> same index. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}
	/* Reverse direction: fdinfo -> change -> same fd. */
	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}
/* event_changelist_check: full consistency check in DEBUG_CHANGELIST
 * builds, a no-op otherwise. */
#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base) event_changelist_assert_ok((base))
#else
#define event_changelist_check(base) ((void)0)
#endif
/* Discard every pending change, clearing each fd's/signal's back-pointer
 * into the changelist as we go. */
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;	/* 0 means no changelist entry */
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
/* Release the changelist's storage and reset it to the empty state. */
void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}
  704. /** Increase the size of 'changelist' to hold more changes. */
  705. static int
  706. event_changelist_grow(struct event_changelist *changelist)
  707. {
  708. int new_size;
  709. struct event_change *new_changes;
  710. if (changelist->changes_size < 64)
  711. new_size = 64;
  712. else
  713. new_size = changelist->changes_size * 2;
  714. new_changes = mm_realloc(changelist->changes,
  715. new_size * sizeof(struct event_change));
  716. if (EVUTIL_UNLIKELY(new_changes == NULL))
  717. return (-1);
  718. changelist->changes = new_changes;
  719. changelist->changes_size = new_size;
  720. return (0);
  721. }
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL if the changelist could not
 * be grown.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No entry yet for this fd: append a zeroed one. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;	/* record back-pointer (index+1) */

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the existing entry; it must be for the same fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
/* eventop 'add' implementation for changelist-based backends: record that
 * 'events' were added on 'fd' (with prior interest 'old'); 'p' is the fd's
 * event_changelist_fdinfo.  Returns 0 on success, -1 on failure. */
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */
	if (events & (EV_READ|EV_SIGNAL))
		change->read_change = evchange;
	if (events & EV_WRITE)
		change->write_change = evchange;
	if (events & EV_CLOSED)
		change->close_change = evchange;

	event_changelist_check(base);
	return (0);
}
/* eventop 'del' implementation for changelist-based backends: record that
 * 'events' were removed on 'fd' (with prior interest 'old'); 'p' is the fd's
 * event_changelist_fdinfo.  Returns 0 on success, -1 on failure. */
int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = del;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = del;
	}
	if (events & EV_CLOSED) {
		if (!(change->old_events & EV_CLOSED))
			change->close_change = 0;
		else
			change->close_change = del;
	}

	event_changelist_check(base);
	return (0);
}
  822. /* Helper for evmap_check_integrity_: verify that all of the events pending on
  823. * given fd are set up correctly, and that the nread and nwrite counts on that
  824. * fd are correct. */
  825. static int
  826. evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
  827. struct evmap_io *io_info, void *arg)
  828. {
  829. struct event *ev;
  830. int n_read = 0, n_write = 0, n_close = 0;
  831. /* First, make sure the list itself isn't corrupt. Otherwise,
  832. * running LIST_FOREACH could be an exciting adventure. */
  833. EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);
  834. LIST_FOREACH(ev, &io_info->events, ev_io_next) {
  835. EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
  836. EVUTIL_ASSERT(ev->ev_fd == fd);
  837. EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
  838. EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
  839. if (ev->ev_events & EV_READ)
  840. ++n_read;
  841. if (ev->ev_events & EV_WRITE)
  842. ++n_write;
  843. if (ev->ev_events & EV_CLOSED)
  844. ++n_close;
  845. }
  846. EVUTIL_ASSERT(n_read == io_info->nread);
  847. EVUTIL_ASSERT(n_write == io_info->nwrite);
  848. EVUTIL_ASSERT(n_close == io_info->nclose);
  849. return 0;
  850. }
  851. /* Helper for evmap_check_integrity_: verify that all of the events pending
  852. * on given signal are set up correctly. */
  853. static int
  854. evmap_signal_check_integrity_fn(struct event_base *base,
  855. int signum, struct evmap_signal *sig_info, void *arg)
  856. {
  857. struct event *ev;
  858. /* First, make sure the list itself isn't corrupt. */
  859. EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
  860. LIST_FOREACH(ev, &sig_info->events, ev_io_next) {
  861. EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
  862. EVUTIL_ASSERT(ev->ev_fd == signum);
  863. EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
  864. EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
  865. }
  866. return 0;
  867. }
  868. void
  869. evmap_check_integrity_(struct event_base *base)
  870. {
  871. evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
  872. evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);
  873. if (base->evsel->add == event_changelist_add_)
  874. event_changelist_assert_ok(base);
  875. }
/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn; /* callback invoked once per event */
	void *arg;                      /* opaque user argument passed through to fn */
};
  882. /* Helper for evmap_foreach_event_: calls a provided function on every event
  883. * pending on a given fd. */
  884. static int
  885. evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
  886. struct evmap_io *io_info, void *arg)
  887. {
  888. struct evmap_foreach_event_helper *h = arg;
  889. struct event *ev;
  890. int r;
  891. LIST_FOREACH(ev, &io_info->events, ev_io_next) {
  892. if ((r = h->fn(base, ev, h->arg)))
  893. return r;
  894. }
  895. return 0;
  896. }
  897. /* Helper for evmap_foreach_event_: calls a provided function on every event
  898. * pending on a given signal. */
  899. static int
  900. evmap_signal_foreach_event_fn(struct event_base *base, int signum,
  901. struct evmap_signal *sig_info, void *arg)
  902. {
  903. struct event *ev;
  904. struct evmap_foreach_event_helper *h = arg;
  905. int r;
  906. LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
  907. if ((r = h->fn(base, ev, h->arg)))
  908. return r;
  909. }
  910. return 0;
  911. }
  912. int
  913. evmap_foreach_event_(struct event_base *base,
  914. event_base_foreach_event_cb fn, void *arg)
  915. {
  916. struct evmap_foreach_event_helper h;
  917. int r;
  918. h.fn = fn;
  919. h.arg = arg;
  920. if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
  921. return r;
  922. return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
  923. }