
/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _WIN32
#include <winsock2.h>
#endif

#include "event2/util.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/bufferevent_compat.h"
#include "event2/event.h"
#include "event-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "evbuffer-internal.h"
#include "util-internal.h"
static void bufferevent_cancel_all_(struct bufferevent *bev);
static void bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_);

void
bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	BEV_LOCK(bufev);
	if (!bufev_private->read_suspended)
		bufev->be_ops->disable(bufev, EV_READ);
	bufev_private->read_suspended |= what;
	BEV_UNLOCK(bufev);
}

void
bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	BEV_LOCK(bufev);
	bufev_private->read_suspended &= ~what;
	if (!bufev_private->read_suspended && (bufev->enabled & EV_READ))
		bufev->be_ops->enable(bufev, EV_READ);
	BEV_UNLOCK(bufev);
}

void
bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	BEV_LOCK(bufev);
	if (!bufev_private->write_suspended)
		bufev->be_ops->disable(bufev, EV_WRITE);
	bufev_private->write_suspended |= what;
	BEV_UNLOCK(bufev);
}

void
bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	BEV_LOCK(bufev);
	bufev_private->write_suspended &= ~what;
	if (!bufev_private->write_suspended && (bufev->enabled & EV_WRITE))
		bufev->be_ops->enable(bufev, EV_WRITE);
	BEV_UNLOCK(bufev);
}
/**
 * A bufferevent implementation can sometimes overrun its high watermark
 * (the OpenSSL filter is one example).  When that happens, reading is
 * suspended, and if the user's read callback does not drain enough data
 * to get back under the watermark, the read callback would never be
 * called again.
 *
 * To avoid that, we re-schedule the read callback here, but only from
 * the user-callback paths, to avoid scheduling it multiple times:
 * - after data has been added to the input buffer
 * - after data has been drained from it (in the user's read callback)
 */
static void bufferevent_inbuf_wm_check(struct bufferevent *bev)
{
	if (!bev->wm_read.high)
		return;
	if (!(bev->enabled & EV_READ))
		return;
	if (evbuffer_get_length(bev->input) < bev->wm_read.high)
		return;

	bufferevent_trigger(bev, EV_READ, BEV_OPT_DEFER_CALLBACKS);
}

/* Callback to implement watermarks on the input buffer.  Only enabled
 * if the watermark is set. */
static void
bufferevent_inbuf_wm_cb(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bufev = arg;
	size_t size;

	size = evbuffer_get_length(buf);

	if (size >= bufev->wm_read.high)
		bufferevent_wm_suspend_read(bufev);
	else
		bufferevent_wm_unsuspend_read(bufev);
}

static void
bufferevent_run_deferred_callbacks_locked(struct event_callback *cb, void *arg)
{
	struct bufferevent_private *bufev_private = arg;
	struct bufferevent *bufev = &bufev_private->bev;

	BEV_LOCK(bufev);
	if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
	    bufev->errorcb) {
		/* The "connected" happened before any reads or writes, so
		   send it first. */
		bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
		bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
	}
	if (bufev_private->readcb_pending && bufev->readcb) {
		bufev_private->readcb_pending = 0;
		bufev->readcb(bufev, bufev->cbarg);
		bufferevent_inbuf_wm_check(bufev);
	}
	if (bufev_private->writecb_pending && bufev->writecb) {
		bufev_private->writecb_pending = 0;
		bufev->writecb(bufev, bufev->cbarg);
	}
	if (bufev_private->eventcb_pending && bufev->errorcb) {
		short what = bufev_private->eventcb_pending;
		int err = bufev_private->errno_pending;
		bufev_private->eventcb_pending = 0;
		bufev_private->errno_pending = 0;
		EVUTIL_SET_SOCKET_ERROR(err);
		bufev->errorcb(bufev, what, bufev->cbarg);
	}
	bufferevent_decref_and_unlock_(bufev);
}

static void
bufferevent_run_deferred_callbacks_unlocked(struct event_callback *cb, void *arg)
{
	struct bufferevent_private *bufev_private = arg;
	struct bufferevent *bufev = &bufev_private->bev;

	BEV_LOCK(bufev);
#define UNLOCKED(stmt) \
	do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)

	if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
	    bufev->errorcb) {
		/* The "connected" happened before any reads or writes, so
		   send it first. */
		bufferevent_event_cb errorcb = bufev->errorcb;
		void *cbarg = bufev->cbarg;
		bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
		UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
	}
	if (bufev_private->readcb_pending && bufev->readcb) {
		bufferevent_data_cb readcb = bufev->readcb;
		void *cbarg = bufev->cbarg;
		bufev_private->readcb_pending = 0;
		UNLOCKED(readcb(bufev, cbarg));
		bufferevent_inbuf_wm_check(bufev);
	}
	if (bufev_private->writecb_pending && bufev->writecb) {
		bufferevent_data_cb writecb = bufev->writecb;
		void *cbarg = bufev->cbarg;
		bufev_private->writecb_pending = 0;
		UNLOCKED(writecb(bufev, cbarg));
	}
	if (bufev_private->eventcb_pending && bufev->errorcb) {
		bufferevent_event_cb errorcb = bufev->errorcb;
		void *cbarg = bufev->cbarg;
		short what = bufev_private->eventcb_pending;
		int err = bufev_private->errno_pending;
		bufev_private->eventcb_pending = 0;
		bufev_private->errno_pending = 0;
		EVUTIL_SET_SOCKET_ERROR(err);
		UNLOCKED(errorcb(bufev, what, cbarg));
	}
	bufferevent_decref_and_unlock_(bufev);
#undef UNLOCKED
}

#define SCHEDULE_DEFERRED(bevp)						\
	do {								\
		if (event_deferred_cb_schedule_(			\
			    (bevp)->bev.ev_base,			\
			    &(bevp)->deferred))				\
			bufferevent_incref_(&(bevp)->bev);		\
	} while (0)
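
/*
 * Illustrative sketch, not part of this file: creating a bufferevent with
 * BEV_OPT_DEFER_CALLBACKS.  With that option, the run_* helpers below do
 * not invoke user callbacks inline; they set the pending flags and schedule
 * the deferred callback via SCHEDULE_DEFERRED, so user code runs from the
 * event loop rather than from arbitrary call depths.  The example_* names
 * are invented; bufferevent_socket_new() and bufferevent_setcb() are the
 * real public API.
 */
#if 0
static void
example_read_cb(struct bufferevent *bev, void *ctx)
{
	/* With BEV_OPT_DEFER_CALLBACKS this runs from the event loop,
	 * not from inside the call that made data available. */
}

static struct bufferevent *
example_make_deferred_bev(struct event_base *base, evutil_socket_t fd)
{
	struct bufferevent *bev =
	    bufferevent_socket_new(base, fd, BEV_OPT_DEFER_CALLBACKS);
	if (bev)
		bufferevent_setcb(bev, example_read_cb, NULL, NULL, NULL);
	return bev;
}
#endif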
void
bufferevent_run_readcb_(struct bufferevent *bufev, int options)
{
	/* Requires that we hold the lock and a reference */
	struct bufferevent_private *p = BEV_UPCAST(bufev);
	if (bufev->readcb == NULL)
		return;
	if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
		p->readcb_pending = 1;
		SCHEDULE_DEFERRED(p);
	} else {
		bufev->readcb(bufev, bufev->cbarg);
		bufferevent_inbuf_wm_check(bufev);
	}
}

void
bufferevent_run_writecb_(struct bufferevent *bufev, int options)
{
	/* Requires that we hold the lock and a reference */
	struct bufferevent_private *p = BEV_UPCAST(bufev);
	if (bufev->writecb == NULL)
		return;
	if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
		p->writecb_pending = 1;
		SCHEDULE_DEFERRED(p);
	} else {
		bufev->writecb(bufev, bufev->cbarg);
	}
}

#define BEV_TRIG_ALL_OPTS (			\
	BEV_TRIG_IGNORE_WATERMARKS|		\
	BEV_TRIG_DEFER_CALLBACKS		\
	)

void
bufferevent_trigger(struct bufferevent *bufev, short iotype, int options)
{
	bufferevent_incref_and_lock_(bufev);
	bufferevent_trigger_nolock_(bufev, iotype, options&BEV_TRIG_ALL_OPTS);
	bufferevent_decref_and_unlock_(bufev);
}
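
/*
 * Illustrative sketch, not part of this file: bufferevent_trigger() lets
 * callers run the read or write callback as if I/O had just happened, e.g.
 * after stuffing data into the input buffer by some out-of-band means.
 * The example function and its "bev" argument are invented; the flags are
 * the real BEV_TRIG_* options accepted above.
 */
#if 0
static void
example_retrigger_read(struct bufferevent *bev)
{
	/* Run the read callback now, honoring watermarks: */
	bufferevent_trigger(bev, EV_READ, 0);

	/* Or defer it to the event loop and bypass watermark checks: */
	bufferevent_trigger(bev, EV_READ,
	    BEV_TRIG_IGNORE_WATERMARKS|BEV_TRIG_DEFER_CALLBACKS);
}
#endif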
void
bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options)
{
	/* Requires that we hold the lock and a reference */
	struct bufferevent_private *p = BEV_UPCAST(bufev);
	if (bufev->errorcb == NULL)
		return;
	if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
		p->eventcb_pending |= what;
		p->errno_pending = EVUTIL_SOCKET_ERROR();
		SCHEDULE_DEFERRED(p);
	} else {
		bufev->errorcb(bufev, what, bufev->cbarg);
	}
}

void
bufferevent_trigger_event(struct bufferevent *bufev, short what, int options)
{
	bufferevent_incref_and_lock_(bufev);
	bufferevent_run_eventcb_(bufev, what, options&BEV_TRIG_ALL_OPTS);
	bufferevent_decref_and_unlock_(bufev);
}

int
bufferevent_init_common_(struct bufferevent_private *bufev_private,
    struct event_base *base,
    const struct bufferevent_ops *ops,
    enum bufferevent_options options)
{
	struct bufferevent *bufev = &bufev_private->bev;

	if (!bufev->input) {
		if ((bufev->input = evbuffer_new()) == NULL)
			goto err;
	}

	if (!bufev->output) {
		if ((bufev->output = evbuffer_new()) == NULL)
			goto err;
	}

	bufev_private->refcnt = 1;
	bufev->ev_base = base;

	/* Disable timeouts. */
	evutil_timerclear(&bufev->timeout_read);
	evutil_timerclear(&bufev->timeout_write);

	bufev->be_ops = ops;

	if (bufferevent_ratelim_init_(bufev_private))
		goto err;
	/*
	 * Start out with EV_WRITE enabled, so that calling
	 * bufferevent_write() will trigger a write.  Reading must be
	 * enabled explicitly, since otherwise no data would be available
	 * anyway.
	 */
	bufev->enabled = EV_WRITE;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (options & BEV_OPT_THREADSAFE) {
		if (bufferevent_enable_locking_(bufev, NULL) < 0)
			goto err;
	}
#endif
	if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
	    == BEV_OPT_UNLOCK_CALLBACKS) {
		event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
		goto err;
	}
	if (options & BEV_OPT_UNLOCK_CALLBACKS)
		event_deferred_cb_init_(
		    &bufev_private->deferred,
		    event_base_get_npriorities(base) / 2,
		    bufferevent_run_deferred_callbacks_unlocked,
		    bufev_private);
	else
		event_deferred_cb_init_(
		    &bufev_private->deferred,
		    event_base_get_npriorities(base) / 2,
		    bufferevent_run_deferred_callbacks_locked,
		    bufev_private);

	bufev_private->options = options;

	evbuffer_set_parent_(bufev->input, bufev);
	evbuffer_set_parent_(bufev->output, bufev);

	return 0;

err:
	if (bufev->input) {
		evbuffer_free(bufev->input);
		bufev->input = NULL;
	}
	if (bufev->output) {
		evbuffer_free(bufev->output);
		bufev->output = NULL;
	}
	return -1;
}

void
bufferevent_setcb(struct bufferevent *bufev,
    bufferevent_data_cb readcb, bufferevent_data_cb writecb,
    bufferevent_event_cb eventcb, void *cbarg)
{
	BEV_LOCK(bufev);

	bufev->readcb = readcb;
	bufev->writecb = writecb;
	bufev->errorcb = eventcb;
	bufev->cbarg = cbarg;
	BEV_UNLOCK(bufev);
}
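
/*
 * Illustrative sketch, not part of this file: the usual bufferevent_setcb()
 * pattern, here for a trivial echo server.  The echo_* names are invented
 * for the example; the libevent calls are the public API defined in this
 * file and in event2/bufferevent.h.
 */
#if 0
static void
echo_read_cb(struct bufferevent *bev, void *ctx)
{
	/* Move everything from the input buffer to the output buffer. */
	evbuffer_add_buffer(bufferevent_get_output(bev),
	    bufferevent_get_input(bev));
}

static void
echo_event_cb(struct bufferevent *bev, short what, void *ctx)
{
	if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR))
		bufferevent_free(bev);
}

static void
echo_setup(struct bufferevent *bev)
{
	bufferevent_setcb(bev, echo_read_cb, NULL, echo_event_cb, NULL);
	/* EV_WRITE is on by default (see bufferevent_init_common_()
	 * above); EV_READ must be enabled explicitly. */
	bufferevent_enable(bev, EV_READ|EV_WRITE);
}
#endif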
void
bufferevent_getcb(struct bufferevent *bufev,
    bufferevent_data_cb *readcb_ptr,
    bufferevent_data_cb *writecb_ptr,
    bufferevent_event_cb *eventcb_ptr,
    void **cbarg_ptr)
{
	BEV_LOCK(bufev);
	if (readcb_ptr)
		*readcb_ptr = bufev->readcb;
	if (writecb_ptr)
		*writecb_ptr = bufev->writecb;
	if (eventcb_ptr)
		*eventcb_ptr = bufev->errorcb;
	if (cbarg_ptr)
		*cbarg_ptr = bufev->cbarg;

	BEV_UNLOCK(bufev);
}

struct evbuffer *
bufferevent_get_input(struct bufferevent *bufev)
{
	return bufev->input;
}

struct evbuffer *
bufferevent_get_output(struct bufferevent *bufev)
{
	return bufev->output;
}

struct event_base *
bufferevent_get_base(struct bufferevent *bufev)
{
	return bufev->ev_base;
}

int
bufferevent_get_priority(const struct bufferevent *bufev)
{
	if (event_initialized(&bufev->ev_read)) {
		return event_get_priority(&bufev->ev_read);
	} else {
		return event_base_get_npriorities(bufev->ev_base) / 2;
	}
}

int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
	if (evbuffer_add(bufev->output, data, size) == -1)
		return (-1);

	return 0;
}

int
bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
	if (evbuffer_add_buffer(bufev->output, buf) == -1)
		return (-1);

	return 0;
}

size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
	return (evbuffer_remove(bufev->input, data, size));
}

int
bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
	return (evbuffer_add_buffer(buf, bufev->input));
}
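
/*
 * Illustrative sketch, not part of this file: bufferevent_write() only
 * queues data on the output buffer (the transport drains it later), and
 * bufferevent_read() drains at most "size" bytes from the input buffer.
 * The example function and its "bev" argument are invented.
 */
#if 0
static void
example_io(struct bufferevent *bev)
{
	char buf[128];
	size_t n;

	if (bufferevent_write(bev, "ping\n", 5) < 0) {
		/* Nothing was queued; handle the error. */
	}

	/* n is the number of bytes actually removed from the input
	 * buffer; it may be 0 if no data was available. */
	n = bufferevent_read(bev, buf, sizeof(buf));
	(void)n;
}
#endif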
int
bufferevent_enable(struct bufferevent *bufev, short event)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	short impl_events = event;
	int r = 0;

	bufferevent_incref_and_lock_(bufev);
	if (bufev_private->read_suspended)
		impl_events &= ~EV_READ;
	if (bufev_private->write_suspended)
		impl_events &= ~EV_WRITE;

	bufev->enabled |= event;

	if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
		r = -1;
	if (r)
		event_debug(("%s: cannot enable 0x%hx on %p", __func__, event, bufev));

	bufferevent_decref_and_unlock_(bufev);
	return r;
}

int
bufferevent_set_timeouts(struct bufferevent *bufev,
    const struct timeval *tv_read,
    const struct timeval *tv_write)
{
	int r = 0;
	BEV_LOCK(bufev);
	if (tv_read) {
		bufev->timeout_read = *tv_read;
	} else {
		evutil_timerclear(&bufev->timeout_read);
	}
	if (tv_write) {
		bufev->timeout_write = *tv_write;
	} else {
		evutil_timerclear(&bufev->timeout_write);
	}

	if (bufev->be_ops->adj_timeouts)
		r = bufev->be_ops->adj_timeouts(bufev);
	BEV_UNLOCK(bufev);

	return r;
}
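
/*
 * Illustrative sketch, not part of this file: set a 30-second read timeout
 * and no write timeout on an assumed bufferevent "bev".  When the read
 * timeout fires, the event callback receives
 * BEV_EVENT_TIMEOUT|BEV_EVENT_READING and reading is disabled (see the
 * generic timeout callbacks below).
 */
#if 0
static void
example_timeouts(struct bufferevent *bev)
{
	struct timeval read_tv = { 30, 0 };

	bufferevent_set_timeouts(bev, &read_tv, NULL);
}
#endif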
/* Obsolete; use bufferevent_set_timeouts */
void
bufferevent_settimeout(struct bufferevent *bufev,
    int timeout_read, int timeout_write)
{
	struct timeval tv_read, tv_write;
	struct timeval *ptv_read = NULL, *ptv_write = NULL;

	memset(&tv_read, 0, sizeof(tv_read));
	memset(&tv_write, 0, sizeof(tv_write));

	if (timeout_read) {
		tv_read.tv_sec = timeout_read;
		ptv_read = &tv_read;
	}
	if (timeout_write) {
		tv_write.tv_sec = timeout_write;
		ptv_write = &tv_write;
	}

	bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
}

int
bufferevent_disable_hard_(struct bufferevent *bufev, short event)
{
	int r = 0;
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

	BEV_LOCK(bufev);
	bufev->enabled &= ~event;

	bufev_private->connecting = 0;
	if (bufev->be_ops->disable(bufev, event) < 0)
		r = -1;

	BEV_UNLOCK(bufev);
	return r;
}

int
bufferevent_disable(struct bufferevent *bufev, short event)
{
	int r = 0;

	BEV_LOCK(bufev);
	bufev->enabled &= ~event;

	if (bufev->be_ops->disable(bufev, event) < 0)
		r = -1;
	if (r)
		event_debug(("%s: cannot disable 0x%hx on %p", __func__, event, bufev));

	BEV_UNLOCK(bufev);
	return r;
}

/*
 * Sets the water marks
 */

void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
    size_t lowmark, size_t highmark)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

	BEV_LOCK(bufev);
	if (events & EV_WRITE) {
		bufev->wm_write.low = lowmark;
		bufev->wm_write.high = highmark;
	}

	if (events & EV_READ) {
		bufev->wm_read.low = lowmark;
		bufev->wm_read.high = highmark;

		if (highmark) {
			/* There is now a new high-water mark for read:
			   enable the evbuffer callback if needed, and check
			   whether reading should be suspended or
			   unsuspended. */
			if (bufev_private->read_watermarks_cb == NULL) {
				bufev_private->read_watermarks_cb =
				    evbuffer_add_cb(bufev->input,
				    bufferevent_inbuf_wm_cb,
				    bufev);
			}
			evbuffer_cb_set_flags(bufev->input,
			    bufev_private->read_watermarks_cb,
			    EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);

			if (evbuffer_get_length(bufev->input) >= highmark)
				bufferevent_wm_suspend_read(bufev);
			else if (evbuffer_get_length(bufev->input) < highmark)
				bufferevent_wm_unsuspend_read(bufev);
		} else {
			/* There is now no high-water mark for read. */
			if (bufev_private->read_watermarks_cb)
				evbuffer_cb_clear_flags(bufev->input,
				    bufev_private->read_watermarks_cb,
				    EVBUFFER_CB_ENABLED);
			bufferevent_wm_unsuspend_read(bufev);
		}
	}
	BEV_UNLOCK(bufev);
}
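
/*
 * Illustrative sketch, not part of this file: typical read-watermark use on
 * an assumed bufferevent "bev".  With a low mark of 4, the read callback is
 * not invoked until at least 4 bytes (say, a fixed-size header) have been
 * buffered; with a high mark of 64KB, reading is suspended via the
 * suspend/unsuspend machinery above once 64KB sit undrained in the input
 * buffer.
 */
#if 0
static void
example_watermarks(struct bufferevent *bev)
{
	bufferevent_setwatermark(bev, EV_READ, 4, 64*1024);

	/* Later, drop the high mark but keep the low mark: */
	bufferevent_setwatermark(bev, EV_READ, 4, 0);
}
#endif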
int
bufferevent_getwatermark(struct bufferevent *bufev, short events,
    size_t *lowmark, size_t *highmark)
{
	if (events == EV_WRITE) {
		BEV_LOCK(bufev);
		if (lowmark)
			*lowmark = bufev->wm_write.low;
		if (highmark)
			*highmark = bufev->wm_write.high;
		BEV_UNLOCK(bufev);
		return 0;
	}

	if (events == EV_READ) {
		BEV_LOCK(bufev);
		if (lowmark)
			*lowmark = bufev->wm_read.low;
		if (highmark)
			*highmark = bufev->wm_read.high;
		BEV_UNLOCK(bufev);
		return 0;
	}
	return -1;
}

int
bufferevent_flush(struct bufferevent *bufev,
    short iotype,
    enum bufferevent_flush_mode mode)
{
	int r = -1;
	BEV_LOCK(bufev);
	if (bufev->be_ops->flush)
		r = bufev->be_ops->flush(bufev, iotype, mode);
	BEV_UNLOCK(bufev);
	return r;
}

void
bufferevent_incref_and_lock_(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	BEV_LOCK(bufev);
	++bufev_private->refcnt;
}

#if 0
static void
bufferevent_transfer_lock_ownership_(struct bufferevent *donor,
    struct bufferevent *recipient)
{
	struct bufferevent_private *d = BEV_UPCAST(donor);
	struct bufferevent_private *r = BEV_UPCAST(recipient);
	if (d->lock != r->lock)
		return;
	if (r->own_lock)
		return;
	if (d->own_lock) {
		d->own_lock = 0;
		r->own_lock = 1;
	}
}
#endif
int
bufferevent_decref_and_unlock_(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
	int n_cbs = 0;
#define MAX_CBS 16
	struct event_callback *cbs[MAX_CBS];

	EVUTIL_ASSERT(bufev_private->refcnt > 0);

	if (--bufev_private->refcnt) {
		BEV_UNLOCK(bufev);
		return 0;
	}

	if (bufev->be_ops->unlink)
		bufev->be_ops->unlink(bufev);

	/* Okay, we're out of references. Let's finalize this once all the
	 * callbacks are done running. */
	cbs[0] = &bufev->ev_read.ev_evcallback;
	cbs[1] = &bufev->ev_write.ev_evcallback;
	cbs[2] = &bufev_private->deferred;
	n_cbs = 3;
	if (bufev_private->rate_limiting) {
		struct event *e = &bufev_private->rate_limiting->refill_bucket_event;
		if (event_initialized(e))
			cbs[n_cbs++] = &e->ev_evcallback;
	}
	n_cbs += evbuffer_get_callbacks_(bufev->input, cbs+n_cbs, MAX_CBS-n_cbs);
	n_cbs += evbuffer_get_callbacks_(bufev->output, cbs+n_cbs, MAX_CBS-n_cbs);

	event_callback_finalize_many_(bufev->ev_base, n_cbs, cbs,
	    bufferevent_finalize_cb_);

#undef MAX_CBS
	BEV_UNLOCK(bufev);

	return 1;
}

static void
bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_)
{
	struct bufferevent *bufev = arg_;
	struct bufferevent *underlying;
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

	BEV_LOCK(bufev);
	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group_internal_(bufev, 0);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	BEV_UNLOCK(bufev);

	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need the
	 * reference to it.  We wait this long mainly in case our lock is
	 * shared with underlying.
	 *
	 * The 'destruct' function will also drop a reference to underlying
	 * if BEV_OPT_CLOSE_ON_FREE is set.
	 *
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches.
	 */
	if (underlying)
		bufferevent_decref_(underlying);
}
int
bufferevent_decref(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	return bufferevent_decref_and_unlock_(bufev);
}

void
bufferevent_free(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
	bufferevent_cancel_all_(bufev);
	bufferevent_decref_and_unlock_(bufev);
}

void
bufferevent_incref(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);

	/* XXX: now that this function is public, we might want to
	 * - return the count from this function
	 * - create a new function to atomically grab the current refcount
	 */
	BEV_LOCK(bufev);
	++bufev_private->refcnt;
	BEV_UNLOCK(bufev);
}

int
bufferevent_enable_locking_(struct bufferevent *bufev, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	struct bufferevent *underlying;

	if (BEV_UPCAST(bufev)->lock)
		return -1;
	underlying = bufferevent_get_underlying(bufev);

	if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
		lock = BEV_UPCAST(underlying)->lock;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	} else if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 1;
	} else {
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	}
	evbuffer_enable_locking(bufev->input, lock);
	evbuffer_enable_locking(bufev->output, lock);

	if (underlying && !BEV_UPCAST(underlying)->lock)
		bufferevent_enable_locking_(underlying, lock);

	return 0;
#endif
}

int
bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
{
	union bufferevent_ctrl_data d;
	int res = -1;
	d.fd = fd;
	BEV_LOCK(bev);
	if (bev->be_ops->ctrl)
		res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
	if (res)
		event_debug(("%s: cannot set fd for %p to "EV_SOCK_FMT, __func__, bev, fd));
	BEV_UNLOCK(bev);
	return res;
}

evutil_socket_t
bufferevent_getfd(struct bufferevent *bev)
{
	union bufferevent_ctrl_data d;
	int res = -1;
	d.fd = -1;
	BEV_LOCK(bev);
	if (bev->be_ops->ctrl)
		res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
	if (res)
		event_debug(("%s: cannot get fd for %p", __func__, bev));
	BEV_UNLOCK(bev);
	return (res<0) ? -1 : d.fd;
}

enum bufferevent_options
bufferevent_get_options_(struct bufferevent *bev)
{
	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
	enum bufferevent_options options;

	BEV_LOCK(bev);
	options = bev_p->options;
	BEV_UNLOCK(bev);
	return options;
}

static void
bufferevent_cancel_all_(struct bufferevent *bev)
{
	union bufferevent_ctrl_data d;
	memset(&d, 0, sizeof(d));
	BEV_LOCK(bev);
	if (bev->be_ops->ctrl)
		bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
	BEV_UNLOCK(bev);
}
short
bufferevent_get_enabled(struct bufferevent *bufev)
{
	short r;
	BEV_LOCK(bufev);
	r = bufev->enabled;
	BEV_UNLOCK(bufev);
	return r;
}

struct bufferevent *
bufferevent_get_underlying(struct bufferevent *bev)
{
	union bufferevent_ctrl_data d;
	int res = -1;
	d.ptr = NULL;
	BEV_LOCK(bev);
	if (bev->be_ops->ctrl)
		res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
	BEV_UNLOCK(bev);
	return (res<0) ? NULL : d.ptr;
}

static void
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
	struct bufferevent *bev = ctx;
	bufferevent_incref_and_lock_(bev);
	bufferevent_disable(bev, EV_READ);
	bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0);
	bufferevent_decref_and_unlock_(bev);
}

static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
	struct bufferevent *bev = ctx;
	bufferevent_incref_and_lock_(bev);
	bufferevent_disable(bev, EV_WRITE);
	bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0);
	bufferevent_decref_and_unlock_(bev);
}

void
bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
{
	event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE,
	    bufferevent_generic_read_timeout_cb, bev);
	event_assign(&bev->ev_write, bev->ev_base, -1, EV_FINALIZE,
	    bufferevent_generic_write_timeout_cb, bev);
}

int
bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
{
	const short enabled = bev->enabled;
	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
	int r1=0, r2=0;
	if ((enabled & EV_READ) && !bev_p->read_suspended &&
	    evutil_timerisset(&bev->timeout_read))
		r1 = event_add(&bev->ev_read, &bev->timeout_read);
	else
		r1 = event_del(&bev->ev_read);

	if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
	    evutil_timerisset(&bev->timeout_write) &&
	    evbuffer_get_length(bev->output))
		r2 = event_add(&bev->ev_write, &bev->timeout_write);
	else
		r2 = event_del(&bev->ev_write);
	if (r1 < 0 || r2 < 0)
		return -1;
	return 0;
}

int
bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev)
{
	int r = 0;
	if (event_pending(&bev->ev_read, EV_READ, NULL)) {
		if (evutil_timerisset(&bev->timeout_read)) {
			if (bufferevent_add_event_(&bev->ev_read, &bev->timeout_read) < 0)
				r = -1;
		} else {
			event_remove_timer(&bev->ev_read);
		}
	}
	if (event_pending(&bev->ev_write, EV_WRITE, NULL)) {
		if (evutil_timerisset(&bev->timeout_write)) {
			if (bufferevent_add_event_(&bev->ev_write, &bev->timeout_write) < 0)
				r = -1;
		} else {
			event_remove_timer(&bev->ev_write);
		}
	}
	return r;
}

int
bufferevent_add_event_(struct event *ev, const struct timeval *tv)
{
	if (!evutil_timerisset(tv))
		return event_add(ev, NULL);
	else
		return event_add(ev, tv);
}
/* For use by user programs only; internally, we should be calling
   either bufferevent_incref_and_lock_(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
	bufferevent_incref_and_lock_(bev);
}

void
bufferevent_unlock(struct bufferevent *bev)
{
	bufferevent_decref_and_unlock_(bev);
}
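
/*
 * Illustrative sketch, not part of this file: for a bufferevent created
 * with BEV_OPT_THREADSAFE, another thread can bracket buffer access with
 * this lock/unlock pair.  bufferevent_lock() also takes a reference, so the
 * bufferevent cannot be finalized while it is held.  The example function
 * and its arguments are invented.
 */
#if 0
static void
example_write_from_other_thread(struct bufferevent *bev,
    const void *msg, size_t msg_len)
{
	bufferevent_lock(bev);
	bufferevent_write(bev, msg, msg_len);
	bufferevent_unlock(bev);
}
#endif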