poll.c

/*	$OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/

/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef EVENT__HAVE_POLL

#include <sys/types.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <poll.h>
#include <signal.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

#include "event-internal.h"
#include "evsignal-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "time-internal.h"

/* Since Linux 2.6.17, poll can report a peer's half-closed connection
 * via the POLLRDHUP flag on a read event.
 */
#if !defined(POLLRDHUP)
#define POLLRDHUP 0
#define EARLY_CLOSE_IF_HAVE_RDHUP 0
#else
#define EARLY_CLOSE_IF_HAVE_RDHUP EV_FEATURE_EARLY_CLOSE
#endif
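
/* When the system headers lack POLLRDHUP, it is defined to 0 above, so the
 * EV_CLOSED handling below compiles to a no-op and EARLY_CLOSE_IF_HAVE_RDHUP
 * keeps the early-close feature out of the flags advertised in pollops. */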

struct pollidx {
	int idxplus1;
};
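
/* One struct pollidx is kept per registered fd by the evmap layer (its size
 * is the last field of pollops below).  idxplus1 is the fd's slot in
 * event_set plus one, so a value of zero means the fd has no pollfd slot
 * assigned yet. */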

struct pollop {
	int event_count;		/* Highest number alloc */
	int nfds;			/* Highest number used */
	int realloc_copy;		/* True iff we must realloc
					 * event_set_copy */
	struct pollfd *event_set;
	struct pollfd *event_set_copy;
};

static void *poll_init(struct event_base *);
static int poll_add(struct event_base *, int, short old, short events, void *idx);
static int poll_del(struct event_base *, int, short old, short events, void *idx);
static int poll_dispatch(struct event_base *, struct timeval *);
static void poll_dealloc(struct event_base *);
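
/* Backend descriptor handed to event_base.  The trailing fields mean: the
 * backend's state must be rebuilt after fork() (need_reinit), which feature
 * flags this backend supports, and how many bytes of per-fd bookkeeping
 * (a struct pollidx) the evmap layer should allocate for us. */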
const struct eventop pollops = {
	"poll",
	poll_init,
	poll_add,
	poll_del,
	poll_dispatch,
	poll_dealloc,
	1, /* need_reinit */
	EV_FEATURE_FDS|EARLY_CLOSE_IF_HAVE_RDHUP,
	sizeof(struct pollidx),
};

static void *
poll_init(struct event_base *base)
{
	struct pollop *pollop;

	if (!(pollop = mm_calloc(1, sizeof(struct pollop))))
		return (NULL);
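
	/* poll() cannot wait on signals directly, so hook up libevent's
	 * shared signal-handling machinery, and seed the weak RNG that
	 * poll_dispatch uses to randomize its scan start. */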
	evsig_init_(base);
	evutil_weakrand_seed_(&base->weakrand_seed, 0);

	return (pollop);
}

#ifdef CHECK_INVARIANTS
static void
poll_check_ok(struct pollop *pop)
{
	int i, idx;
	struct event *ev;

	for (i = 0; i < pop->fd_count; ++i) {
		idx = pop->idxplus1_by_fd[i]-1;
		if (idx < 0)
			continue;
		EVUTIL_ASSERT(pop->event_set[idx].fd == i);
	}
	for (i = 0; i < pop->nfds; ++i) {
		struct pollfd *pfd = &pop->event_set[i];
		EVUTIL_ASSERT(pop->idxplus1_by_fd[pfd->fd] == i+1);
	}
}
#else
#define poll_check_ok(pop)
#endif

static int
poll_dispatch(struct event_base *base, struct timeval *tv)
{
	int res, i, j, nfds;
	long msec = -1;
	struct pollop *pop = base->evbase;
	struct pollfd *event_set;

	poll_check_ok(pop);

	nfds = pop->nfds;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->th_base_lock) {
		/* If we're using this backend in a multithreaded setting,
		 * then we need to work on a copy of event_set, so that we can
		 * let other threads modify the main event_set while we're
		 * polling. If we're not multithreaded, then we'll skip the
		 * copy step here to save memory and time. */
		if (pop->realloc_copy) {
			struct pollfd *tmp = mm_realloc(pop->event_set_copy,
			    pop->event_count * sizeof(struct pollfd));
			if (tmp == NULL) {
				event_warn("realloc");
				return -1;
			}
			pop->event_set_copy = tmp;
			pop->realloc_copy = 0;
		}
		memcpy(pop->event_set_copy, pop->event_set,
		    sizeof(struct pollfd)*nfds);
		event_set = pop->event_set_copy;
	} else {
		event_set = pop->event_set;
	}
#else
	event_set = pop->event_set;
#endif

	if (tv != NULL) {
		msec = evutil_tv_to_msec_(tv);
		if (msec < 0 || msec > INT_MAX)
			msec = INT_MAX;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = poll(event_set, nfds, msec);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("poll");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: poll reports %d", __func__, res));

	if (res == 0 || nfds == 0)
		return (0);
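
	/* Start the scan at a random slot so that, across many calls, ready
	 * events are not always activated in the same lowest-slot-first
	 * order. */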
	i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
	for (j = 0; j < nfds; j++) {
		int what;
		if (++i == nfds)
			i = 0;
		what = event_set[i].revents;
		if (!what)
			continue;

		res = 0;

		/* If the file gets closed notify */
		if (what & (POLLHUP|POLLERR|POLLNVAL))
			what |= POLLIN|POLLOUT;
		if (what & POLLIN)
			res |= EV_READ;
		if (what & POLLOUT)
			res |= EV_WRITE;
		if (what & POLLRDHUP)
			res |= EV_CLOSED;
		if (res == 0)
			continue;

		evmap_io_active_(base, event_set[i].fd, res);
	}

	return (0);
}

static int
poll_add(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE|EV_CLOSED)))
		return (0);

	poll_check_ok(pop);
	if (pop->nfds + 1 >= pop->event_count) {
		struct pollfd *tmp_event_set;
		int tmp_event_count;

		if (pop->event_count < 32)
			tmp_event_count = 32;
		else
			tmp_event_count = pop->event_count * 2;

		/* We need more file descriptors */
		tmp_event_set = mm_realloc(pop->event_set,
		    tmp_event_count * sizeof(struct pollfd));
		if (tmp_event_set == NULL) {
			event_warn("realloc");
			return (-1);
		}
		pop->event_set = tmp_event_set;
		pop->event_count = tmp_event_count;
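		/* The shadow array used by the threaded dispatch path is now
		 * too small; flag it so poll_dispatch reallocates it before
		 * the next copy. */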
		pop->realloc_copy = 1;
	}

	i = idx->idxplus1 - 1;
	if (i >= 0) {
		pfd = &pop->event_set[i];
	} else {
		i = pop->nfds++;
		pfd = &pop->event_set[i];
		pfd->events = 0;
		pfd->fd = fd;
		idx->idxplus1 = i + 1;
	}

	pfd->revents = 0;
	if (events & EV_WRITE)
		pfd->events |= POLLOUT;
	if (events & EV_READ)
		pfd->events |= POLLIN;
	if (events & EV_CLOSED)
		pfd->events |= POLLRDHUP;
	poll_check_ok(pop);

	return (0);
}

/*
 * Nothing to be done here.
 */
static int
poll_del(struct event_base *base, int fd, short old, short events, void *idx_)
{
	struct pollop *pop = base->evbase;
	struct pollfd *pfd = NULL;
	struct pollidx *idx = idx_;
	int i;

	EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
	if (!(events & (EV_READ|EV_WRITE|EV_CLOSED)))
		return (0);

	poll_check_ok(pop);
	i = idx->idxplus1 - 1;
	if (i < 0)
		return (-1);

	/* Do we still want to read or write? */
	pfd = &pop->event_set[i];
	if (events & EV_READ)
		pfd->events &= ~POLLIN;
	if (events & EV_WRITE)
		pfd->events &= ~POLLOUT;
	if (events & EV_CLOSED)
		pfd->events &= ~POLLRDHUP;
	poll_check_ok(pop);
	if (pfd->events)
		/* Another event cares about that fd. */
		return (0);

	/* Okay, so we aren't interested in that fd anymore. */
	idx->idxplus1 = 0;

	--pop->nfds;
	if (i != pop->nfds) {
		/*
		 * Shift the last pollfd down into the now-unoccupied
		 * position.
		 */
		memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
		    sizeof(struct pollfd));
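		/* The pollfd we just moved belongs to a different fd; look up
		 * that fd's pollidx record and point it at its new slot. */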
		idx = evmap_io_get_fdinfo_(&base->io, pop->event_set[i].fd);
		EVUTIL_ASSERT(idx);
		EVUTIL_ASSERT(idx->idxplus1 == pop->nfds + 1);
		idx->idxplus1 = i + 1;
	}

	poll_check_ok(pop);
	return (0);
}

static void
poll_dealloc(struct event_base *base)
{
	struct pollop *pop = base->evbase;

	evsig_dealloc_(base);
	if (pop->event_set)
		mm_free(pop->event_set);
	if (pop->event_set_copy)
		mm_free(pop->event_set_copy);

	memset(pop, 0, sizeof(struct pollop));
	mm_free(pop);
}

#endif /* EVENT__HAVE_POLL */