event-internal.h

/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVENT_INTERNAL_H_INCLUDED_
#define EVENT_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"
/* map union members back */

/* mutually exclusive */
#define ev_signal_next	ev_.ev_signal.ev_signal_next
#define ev_io_next	ev_.ev_io.ev_io_next
#define ev_io_timeout	ev_.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	ev_.ev_signal.ev_ncalls
#define ev_pncalls	ev_.ev_signal.ev_pncalls

#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg
/** @name Event closure codes

    Possible values for evcb_closure in struct event_callback

    @{
 */
/** A regular event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT 0
/** A signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_SIGNAL 1
/** A persistent non-signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_PERSIST 2
/** A simple callback. Uses the evcb_selfcb callback. */
#define EV_CLOSURE_CB_SELF 3
/** A finalizing callback. Uses the evcb_cbfinalize callback. */
#define EV_CLOSURE_CB_FINALIZE 4
/** A finalizing event. Uses the evcb_evfinalize callback. */
#define EV_CLOSURE_EVENT_FINALIZE 5
/** A finalizing event that should get freed after. Uses the evcb_evfinalize
 * callback. */
#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
/** @} */
/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the fd
	 * is added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
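
/* Illustrative sketch (not part of this header): roughly how a backend might
 * fill in a struct eventop.  The example_* names below are hypothetical; the
 * real implementations live in backend files such as select.c and epoll.c.
 *
 *	static void *example_init(struct event_base *base);
 *	static int example_add(struct event_base *base, evutil_socket_t fd,
 *	    short old, short events, void *fdinfo);
 *	static int example_del(struct event_base *base, evutil_socket_t fd,
 *	    short old, short events, void *fdinfo);
 *	static int example_dispatch(struct event_base *base, struct timeval *tv);
 *	static void example_dealloc(struct event_base *base);
 *
 *	const struct eventop exampleops = {
 *		"example",
 *		example_init,
 *		example_add,
 *		example_del,
 *		example_dispatch,
 *		example_dealloc,
 *		1,			// need_reinit after fork
 *		EV_FEATURE_FDS,		// features
 *		0			// fdinfo_len: no per-fd extra data
 *	};
 */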
#ifdef _WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like Windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif
/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff
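
/* Illustrative sketch (hypothetical 'base', 'ev' and 'tv' names): common
 * timeouts are set up through the public event_base_init_common_timeout()
 * call, which returns a "magic" timeval whose low tv_usec bits (see the mask
 * above) still hold the real microseconds while the high bits tag the queue:
 *
 *	struct timeval tv = { 5, 0 };
 *	const struct timeval *common;
 *
 *	common = event_base_init_common_timeout(base, &tv);
 *	event_add(ev, common);	// ev now waits on this queue, not the minheap
 */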
struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
TAILQ_HEAD(evcallback_list, event_callback);

/* Sets up an event for processing once */
struct event_once {
	LIST_ENTRY(event_once) next_once;
	struct event ev;

	void (*cb)(evutil_socket_t, short, void *);
	void *arg;
};
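
/* Illustrative sketch: struct event_once is the internal bookkeeping behind
 * the public event_base_once() call; the once_cb/base names are hypothetical.
 *
 *	static void once_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		// runs at most once; the event_once record is freed afterwards
 *	}
 *
 *	struct timeval one_sec = { 1, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &one_sec);
 */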
struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Maximum number of virtual events active */
	int virtual_event_count_max;
	/** Number of total events added to this event_base */
	int event_count;
	/** Maximum number of total events added to this event_base */
	int event_count_max;
	/** Number of total events active in this event_base */
	int event_count_active;
	/** Maximum number of total events active in this event_base */
	int event_count_active_max;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/** Set to the number of deferred_cbs we've made 'active' in the
	 * loop.  This is a hack to prevent starvation; it would be smarter
	 * to just use event_config_set_max_dispatch_interval's max_callbacks
	 * feature */
	int n_deferreds_queued;

	/* Active event management. */
	/** An array of nactivequeues queues for active event_callbacks (ones
	 * that have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct evcallback_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;
	/** A list of event_callbacks that should become active the next time
	 * we process events, but not this time. */
	struct evcallback_list active_later_queue;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

	struct evutil_monotonic_timer monotonic_timer;

	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif
	/** The event whose callback is executing right now */
	struct event_callback *current_event;

#ifdef _WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	struct timeval max_dispatch_time;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	/* Notify the main thread to wake up, break out of the loop, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);

	/** Saved seed for weak random number generator.  Some backends use
	 * this to produce fairness among sockets.  Protected by th_base_lock. */
	struct evutil_weakrand_state weakrand_seed;

	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;
};
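
/* Illustrative sketch: the activequeues array above holds one queue per
 * priority.  Its length (nactivequeues) is set by the public
 * event_base_priority_init() call, and each event's priority selects the
 * queue it lands on when activated (hypothetical 'base' and 'ev'):
 *
 *	event_base_priority_init(base, 3);	// nactivequeues becomes 3
 *	event_priority_set(ev, 0);	// runs before priorities 1 and 2
 */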
struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	struct timeval max_dispatch_interval;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};
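
/* Illustrative sketch: how these fields get populated through the public
 * configuration API before event_base_new_with_config() copies them into a
 * new base (the values below are arbitrary examples):
 *
 *	struct event_config *cfg = event_config_new();
 *	struct timeval ten_msec = { 0, 10 * 1000 };
 *
 *	event_config_avoid_method(cfg, "select");	// adds an 'entries' node
 *	event_config_require_features(cfg, EV_FEATURE_O1);
 *	event_config_set_max_dispatch_interval(cfg, &ten_msec, 16, 1);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */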
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#ifndef LIST_END
#define LIST_END(head)			NULL
#endif

#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)				\
	for ((var) = TAILQ_FIRST(head);				\
	     (var) != TAILQ_END(head);				\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {		\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;	\
	(elm)->field.tqe_next = (listelm);			\
	*(listelm)->field.tqe_prev = (elm);			\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;	\
} while (0)
#endif
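
/* Illustrative sketch: these fallbacks are used just like their
 * <sys/queue.h> counterparts, e.g. to walk an active-callback queue
 * (hypothetical 'base'):
 *
 *	struct event_callback *evcb;
 *	TAILQ_FOREACH(evcb, &base->activequeues[0], evcb_active_next) {
 *		// inspect each callback pending at priority 0
 *	}
 */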
#define N_ACTIVE_CALLBACKS(base)			\
	((base)->event_count_active)

int evsig_set_handler_(struct event_base *base, int evsignal,
    void (*fn)(int));
int evsig_restore_handler_(struct event_base *base, int evsignal);

int event_add_nolock_(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);

/** Argument for event_del_nolock_.  Tells event_del not to block on the event
 * if it's running in another thread. */
#define EVENT_DEL_NOBLOCK 0
/** Argument for event_del_nolock_.  Tells event_del to block on the event
 * if it's running in another thread, regardless of its value for EV_FINALIZE
 */
#define EVENT_DEL_BLOCK 1
/** Argument for event_del_nolock_.  Tells event_del to block on the event
 * if it is running in another thread and it doesn't have EV_FINALIZE set.
 */
#define EVENT_DEL_AUTOBLOCK 2
/** Argument for event_del_nolock_.  Tells event_del to proceed even if the
 * event is set up for finalization rather than for regular use. */
#define EVENT_DEL_EVEN_IF_FINALIZING 3
int event_del_nolock_(struct event *ev, int blocking);
int event_remove_timer_nolock_(struct event *ev);
void event_active_nolock_(struct event *ev, int res, short count);
EVENT2_EXPORT_SYMBOL
int event_callback_activate_(struct event_base *, struct event_callback *);
int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
int event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb);

void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
EVENT2_EXPORT_SYMBOL
void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));

EVENT2_EXPORT_SYMBOL
void event_active_later_(struct event *ev, int res);
void event_active_later_nolock_(struct event *ev, int res);
int event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb);
int event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing);
void event_callback_init_(struct event_base *base,
    struct event_callback *cb);

/* FIXME document. */
EVENT2_EXPORT_SYMBOL
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
EVENT2_EXPORT_SYMBOL
void event_base_assert_ok_(struct event_base *base);
void event_base_assert_ok_nolock_(struct event_base *base);
/* Helper function: Call 'fn' exactly once for every inserted or active event
 * in the event_base 'base'.
 *
 * If fn returns 0, continue on to the next event.  Otherwise, return the same
 * value that fn returned.
 *
 * Requires that 'base' be locked.
 */
int event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb cb, void *arg);
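
/* Illustrative sketch of a callback suitable for the iterator above (and for
 * its public, locking wrapper event_base_foreach_event()); count_events_cb is
 * a hypothetical name:
 *
 *	static int count_events_cb(const struct event_base *base,
 *	    const struct event *ev, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;	// nonzero would stop the iteration early
 *	}
 */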
/* Cleanup function to reset debug mode during shutdown.
 *
 * Calling this function doesn't mean it'll be possible to re-enable
 * debug mode if any events were added.
 */
void event_disable_debug_mode(void);

#ifdef __cplusplus
}
#endif

#endif /* EVENT_INTERNAL_H_INCLUDED_ */