/* evthread.c */
  1. /*
  2. * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. * 3. The name of the author may not be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  16. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  17. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  18. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  19. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  20. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  21. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  22. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  23. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  24. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include "event2/event-config.h"
  27. #include "evconfig-private.h"
  28. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  29. #include "event2/thread.h"
  30. #include <stdlib.h>
  31. #include <string.h>
  32. #include "log-internal.h"
  33. #include "mm-internal.h"
  34. #include "util-internal.h"
  35. #include "evthread-internal.h"
#ifdef EVTHREAD_EXPOSE_STRUCTS
/* GLOBAL expands to nothing (external linkage) when the structs are
 * exposed, and to `static` (file-local) otherwise. */
#define GLOBAL
#else
#define GLOBAL static
#endif
#ifndef EVENT__DISABLE_DEBUG_MODE
/* Set (in evthreadimpl_lock_alloc_/evthreadimpl_cond_alloc_ below) once a
 * lock or condition has been allocated while debug mode is on; used to
 * reject evthread initialization that happens too late. */
extern int event_debug_created_threadable_ctx_;
/* Nonzero while debug mode is on (set elsewhere in the library). */
extern int event_debug_mode_on_;
#endif
/* globals */
/* Nonzero once evthread_enable_lock_debugging() has installed the
 * debug_lock_* wrappers defined below. */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
/* Active lock callbacks.  While lock debugging is enabled, this table
 * holds the debug wrappers and the real callbacks live in
 * original_lock_fns_. */
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
0, 0, NULL, NULL, NULL, NULL
};
/* Function returning the calling thread's id, or NULL if never set. */
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
/* Active condition-variable callbacks. */
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
0, NULL, NULL, NULL, NULL
};
/* Used for debugging */
/* Saved copies of the real (user-supplied) callbacks while the debug
 * wrappers are installed in the tables above. */
static struct evthread_lock_callbacks original_lock_fns_ = {
0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
0, NULL, NULL, NULL, NULL
};
/** Set the function used to identify the running thread.  The debug-lock
 * code uses it to track which thread holds each lock; if it is never set,
 * held-by checks are skipped. */
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}
  66. struct evthread_lock_callbacks *evthread_get_lock_callbacks()
  67. {
  68. return evthread_lock_debugging_enabled_
  69. ? &original_lock_fns_ : &evthread_lock_fns_;
  70. }
  71. struct evthread_condition_callbacks *evthread_get_condition_callbacks()
  72. {
  73. return evthread_lock_debugging_enabled_
  74. ? &original_cond_fns_ : &evthread_cond_fns_;
  75. }
/* Turn off the lock-debugging flag.  Note this only flips the flag; it
 * does not restore original_lock_fns_/original_cond_fns_ into the active
 * tables. */
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}
  80. int
  81. evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
  82. {
  83. struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
  84. #ifndef EVENT__DISABLE_DEBUG_MODE
  85. if (event_debug_mode_on_) {
  86. if (event_debug_created_threadable_ctx_) {
  87. event_errx(1, "evthread initialization must be called BEFORE anything else!");
  88. }
  89. }
  90. #endif
  91. if (!cbs) {
  92. if (target->alloc)
  93. event_warnx("Trying to disable lock functions after "
  94. "they have been set up will probaby not work.");
  95. memset(target, 0, sizeof(evthread_lock_fns_));
  96. return 0;
  97. }
  98. if (target->alloc) {
  99. /* Uh oh; we already had locking callbacks set up.*/
  100. if (target->lock_api_version == cbs->lock_api_version &&
  101. target->supported_locktypes == cbs->supported_locktypes &&
  102. target->alloc == cbs->alloc &&
  103. target->free == cbs->free &&
  104. target->lock == cbs->lock &&
  105. target->unlock == cbs->unlock) {
  106. /* no change -- allow this. */
  107. return 0;
  108. }
  109. event_warnx("Can't change lock callbacks once they have been "
  110. "initialized.");
  111. return -1;
  112. }
  113. if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
  114. memcpy(target, cbs, sizeof(evthread_lock_fns_));
  115. return event_global_setup_locks_(1);
  116. } else {
  117. return -1;
  118. }
  119. }
  120. int
  121. evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
  122. {
  123. struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
  124. #ifndef EVENT__DISABLE_DEBUG_MODE
  125. if (event_debug_mode_on_) {
  126. if (event_debug_created_threadable_ctx_) {
  127. event_errx(1, "evthread initialization must be called BEFORE anything else!");
  128. }
  129. }
  130. #endif
  131. if (!cbs) {
  132. if (target->alloc_condition)
  133. event_warnx("Trying to disable condition functions "
  134. "after they have been set up will probaby not "
  135. "work.");
  136. memset(target, 0, sizeof(evthread_cond_fns_));
  137. return 0;
  138. }
  139. if (target->alloc_condition) {
  140. /* Uh oh; we already had condition callbacks set up.*/
  141. if (target->condition_api_version == cbs->condition_api_version &&
  142. target->alloc_condition == cbs->alloc_condition &&
  143. target->free_condition == cbs->free_condition &&
  144. target->signal_condition == cbs->signal_condition &&
  145. target->wait_condition == cbs->wait_condition) {
  146. /* no change -- allow this. */
  147. return 0;
  148. }
  149. event_warnx("Can't change condition callbacks once they "
  150. "have been initialized.");
  151. return -1;
  152. }
  153. if (cbs->alloc_condition && cbs->free_condition &&
  154. cbs->signal_condition && cbs->wait_condition) {
  155. memcpy(target, cbs, sizeof(evthread_cond_fns_));
  156. }
  157. if (evthread_lock_debugging_enabled_) {
  158. evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
  159. evthread_cond_fns_.free_condition = cbs->free_condition;
  160. evthread_cond_fns_.signal_condition = cbs->signal_condition;
  161. }
  162. return 0;
  163. }
/* Magic value stored in every live debug lock; checked by the debug
 * wrappers to catch use of freed or corrupted locks. */
#define DEBUG_LOCK_SIG 0xdeb0b10c
/* Wrapper around a real lock that tracks ownership and recursion depth. */
struct debug_lock {
	unsigned signature;   /* DEBUG_LOCK_SIG while the lock is valid */
	unsigned locktype;    /* EVTHREAD_LOCKTYPE_* flags passed at alloc */
	unsigned long held_by; /* id of the holding thread, 0 if unheld/unknown */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;            /* recursion depth; 0 when unheld */
	void *lock;           /* underlying real lock, or NULL if none */
};
  174. static void *
  175. debug_lock_alloc(unsigned locktype)
  176. {
  177. struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
  178. if (!result)
  179. return NULL;
  180. if (original_lock_fns_.alloc) {
  181. if (!(result->lock = original_lock_fns_.alloc(
  182. locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
  183. mm_free(result);
  184. return NULL;
  185. }
  186. } else {
  187. result->lock = NULL;
  188. }
  189. result->signature = DEBUG_LOCK_SIG;
  190. result->locktype = locktype;
  191. result->count = 0;
  192. result->held_by = 0;
  193. return result;
  194. }
  195. static void
  196. debug_lock_free(void *lock_, unsigned locktype)
  197. {
  198. struct debug_lock *lock = lock_;
  199. EVUTIL_ASSERT(lock->count == 0);
  200. EVUTIL_ASSERT(locktype == lock->locktype);
  201. EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
  202. if (original_lock_fns_.free) {
  203. original_lock_fns_.free(lock->lock,
  204. lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
  205. }
  206. lock->lock = NULL;
  207. lock->count = -100;
  208. lock->signature = 0x12300fda;
  209. mm_free(lock);
  210. }
  211. static void
  212. evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
  213. {
  214. EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
  215. ++lock->count;
  216. if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
  217. EVUTIL_ASSERT(lock->count == 1);
  218. if (evthread_id_fn_) {
  219. unsigned long me;
  220. me = evthread_id_fn_();
  221. if (lock->count > 1)
  222. EVUTIL_ASSERT(lock->held_by == me);
  223. lock->held_by = me;
  224. }
  225. }
  226. static int
  227. debug_lock_lock(unsigned mode, void *lock_)
  228. {
  229. struct debug_lock *lock = lock_;
  230. int res = 0;
  231. if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
  232. EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
  233. else
  234. EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
  235. if (original_lock_fns_.lock)
  236. res = original_lock_fns_.lock(mode, lock->lock);
  237. if (!res) {
  238. evthread_debug_lock_mark_locked(mode, lock);
  239. }
  240. return res;
  241. }
  242. static void
  243. evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
  244. {
  245. EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
  246. if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
  247. EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
  248. else
  249. EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
  250. if (evthread_id_fn_) {
  251. unsigned long me;
  252. me = evthread_id_fn_();
  253. EVUTIL_ASSERT(lock->held_by == me);
  254. if (lock->count == 1)
  255. lock->held_by = 0;
  256. }
  257. --lock->count;
  258. EVUTIL_ASSERT(lock->count >= 0);
  259. }
  260. static int
  261. debug_lock_unlock(unsigned mode, void *lock_)
  262. {
  263. struct debug_lock *lock = lock_;
  264. int res = 0;
  265. evthread_debug_lock_mark_unlocked(mode, lock);
  266. if (original_lock_fns_.unlock)
  267. res = original_lock_fns_.unlock(mode, lock->lock);
  268. return res;
  269. }
/* Wait on cond_ with a debug lock held.  The real wait_condition
 * releases and re-acquires the *underlying* lock (lock->lock), so the
 * debug bookkeeping must be cleared before the wait and restored after,
 * or the held-by/count checks would be wrong while we sleep. */
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	/* Waiting on a condition requires the lock to be held on entry. */
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	/* The lock is re-acquired on wakeup; mark it held again. */
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}
/* misspelled version for backward compatibility */
/* Thin forwarder; kept so existing callers of the old name keep linking. */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}
  289. void
  290. evthread_enable_lock_debugging(void)
  291. {
  292. struct evthread_lock_callbacks cbs = {
  293. EVTHREAD_LOCK_API_VERSION,
  294. EVTHREAD_LOCKTYPE_RECURSIVE,
  295. debug_lock_alloc,
  296. debug_lock_free,
  297. debug_lock_lock,
  298. debug_lock_unlock
  299. };
  300. if (evthread_lock_debugging_enabled_)
  301. return;
  302. memcpy(&original_lock_fns_, &evthread_lock_fns_,
  303. sizeof(struct evthread_lock_callbacks));
  304. memcpy(&evthread_lock_fns_, &cbs,
  305. sizeof(struct evthread_lock_callbacks));
  306. memcpy(&original_cond_fns_, &evthread_cond_fns_,
  307. sizeof(struct evthread_condition_callbacks));
  308. evthread_cond_fns_.wait_condition = debug_cond_wait;
  309. evthread_lock_debugging_enabled_ = 1;
  310. /* XXX return value should get checked. */
  311. event_global_setup_locks_(0);
  312. }
  313. int
  314. evthread_is_debug_lock_held_(void *lock_)
  315. {
  316. struct debug_lock *lock = lock_;
  317. if (! lock->count)
  318. return 0;
  319. if (evthread_id_fn_) {
  320. unsigned long me = evthread_id_fn_();
  321. if (lock->held_by != me)
  322. return 0;
  323. }
  324. return 1;
  325. }
/* Return the real lock wrapped by a debug lock (may be NULL if no real
 * locking callbacks were installed when the debug lock was created). */
void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}
  332. void *
  333. evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
  334. {
  335. /* there are four cases here:
  336. 1) we're turning on debugging; locking is not on.
  337. 2) we're turning on debugging; locking is on.
  338. 3) we're turning on locking; debugging is not on.
  339. 4) we're turning on locking; debugging is on. */
  340. if (!enable_locks && original_lock_fns_.alloc == NULL) {
  341. /* Case 1: allocate a debug lock. */
  342. EVUTIL_ASSERT(lock_ == NULL);
  343. return debug_lock_alloc(locktype);
  344. } else if (!enable_locks && original_lock_fns_.alloc != NULL) {
  345. /* Case 2: wrap the lock in a debug lock. */
  346. struct debug_lock *lock;
  347. EVUTIL_ASSERT(lock_ != NULL);
  348. if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
  349. /* We can't wrap it: We need a recursive lock */
  350. original_lock_fns_.free(lock_, locktype);
  351. return debug_lock_alloc(locktype);
  352. }
  353. lock = mm_malloc(sizeof(struct debug_lock));
  354. if (!lock) {
  355. original_lock_fns_.free(lock_, locktype);
  356. return NULL;
  357. }
  358. lock->lock = lock_;
  359. lock->locktype = locktype;
  360. lock->count = 0;
  361. lock->held_by = 0;
  362. return lock;
  363. } else if (enable_locks && ! evthread_lock_debugging_enabled_) {
  364. /* Case 3: allocate a regular lock */
  365. EVUTIL_ASSERT(lock_ == NULL);
  366. return evthread_lock_fns_.alloc(locktype);
  367. } else {
  368. /* Case 4: Fill in a debug lock with a real lock */
  369. struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
  370. EVUTIL_ASSERT(enable_locks &&
  371. evthread_lock_debugging_enabled_);
  372. EVUTIL_ASSERT(lock->locktype == locktype);
  373. if (!lock->lock) {
  374. lock->lock = original_lock_fns_.alloc(
  375. locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
  376. if (!lock->lock) {
  377. lock->count = -200;
  378. mm_free(lock);
  379. return NULL;
  380. }
  381. }
  382. return lock;
  383. }
  384. }
  385. #ifndef EVTHREAD_EXPOSE_STRUCTS
  386. unsigned long
  387. evthreadimpl_get_id_()
  388. {
  389. return evthread_id_fn_ ? evthread_id_fn_() : 1;
  390. }
  391. void *
  392. evthreadimpl_lock_alloc_(unsigned locktype)
  393. {
  394. #ifndef EVENT__DISABLE_DEBUG_MODE
  395. if (event_debug_mode_on_) {
  396. event_debug_created_threadable_ctx_ = 1;
  397. }
  398. #endif
  399. return evthread_lock_fns_.alloc ?
  400. evthread_lock_fns_.alloc(locktype) : NULL;
  401. }
/* Free a lock via the installed callbacks; a no-op if none are set. */
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
  408. int
  409. evthreadimpl_lock_lock_(unsigned mode, void *lock)
  410. {
  411. if (evthread_lock_fns_.lock)
  412. return evthread_lock_fns_.lock(mode, lock);
  413. else
  414. return 0;
  415. }
  416. int
  417. evthreadimpl_lock_unlock_(unsigned mode, void *lock)
  418. {
  419. if (evthread_lock_fns_.unlock)
  420. return evthread_lock_fns_.unlock(mode, lock);
  421. else
  422. return 0;
  423. }
  424. void *
  425. evthreadimpl_cond_alloc_(unsigned condtype)
  426. {
  427. #ifndef EVENT__DISABLE_DEBUG_MODE
  428. if (event_debug_mode_on_) {
  429. event_debug_created_threadable_ctx_ = 1;
  430. }
  431. #endif
  432. return evthread_cond_fns_.alloc_condition ?
  433. evthread_cond_fns_.alloc_condition(condtype) : NULL;
  434. }
/* Free a condition variable via the installed callbacks; a no-op if none
 * are set. */
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
  441. int
  442. evthreadimpl_cond_signal_(void *cond, int broadcast)
  443. {
  444. if (evthread_cond_fns_.signal_condition)
  445. return evthread_cond_fns_.signal_condition(cond, broadcast);
  446. else
  447. return 0;
  448. }
  449. int
  450. evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
  451. {
  452. if (evthread_cond_fns_.wait_condition)
  453. return evthread_cond_fns_.wait_condition(cond, lock, tv);
  454. else
  455. return 0;
  456. }
/* Return nonzero if the debug-lock wrappers are currently installed. */
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}
/* Return nonzero if lock callbacks have been installed (i.e. the library
 * will actually lock anything). */
int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
  467. #endif
  468. #endif