/* Locking in multithreaded situations.
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
   gthr-win32.h.  */

#include <config.h>

#include "glthread/lock.h"

/* ========================================================================= */

#if USE_POSIX_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if HAVE_PTHREAD_RWLOCK

#  if !defined PTHREAD_RWLOCK_INITIALIZER

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_rwlock_init (&lock->rwlock, NULL);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_rwlock_unlock (&lock->rwlock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_rwlock_destroy (&lock->rwlock);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else
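/* No native pthread_rwlock_t: emulate a read-write lock with a mutex and two
   condition variables.  State encoding (see the struct in glthread/lock.h):
     runcount > 0    that many readers currently hold the lock,
     runcount == 0   the lock is free,
     runcount == -1  one writer holds the lock.
   waiting_writers_count is the number of threads blocked in wrlock; it is
   used to give waiting writers priority over newly arriving readers.  */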
int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->lock, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_readers, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_writers, NULL);
  if (err != 0)
    return err;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  /* POSIX says: "It is implementation-defined whether the calling thread
     acquires the lock when a writer does not hold the lock and there are
     writers blocked on the lock."  Let's say, no: give the writers a higher
     priority.  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_destroy (&lock->lock);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_readers);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_writers);
  if (err != 0)
    return err;
  return 0;
}

# endif
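/* Rough usage sketch, through the wrapper macros from "glthread/lock.h"
   (callers normally use those macros rather than the *_multithreaded
   functions above directly; the lock and function names here are made up):

     gl_rwlock_define_initialized (static, my_table_lock)

     void
     lookup_entry (void)
     {
       gl_rwlock_rdlock (my_table_lock);
       ... read the shared table ...
       gl_rwlock_unlock (my_table_lock);
     }

     void
     update_entry (void)
     {
       gl_rwlock_wrlock (my_table_lock);
       ... modify the shared table ...
       gl_rwlock_unlock (my_table_lock);
     }
*/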
/* --------------------- gl_recursive_lock_t datatype --------------------- */

# if HAVE_PTHREAD_MUTEX_RECURSIVE

#  if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (lock, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  return 0;
}

#  else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (&lock->recmutex, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_mutex_unlock (&lock->recmutex);
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_mutex_destroy (&lock->recmutex);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else
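/* No recursive mutex support in the pthreads library: emulate it with a
   plain mutex plus an owner/depth pair.  Only the outermost lock call takes
   the mutex; nested calls from the owning thread merely increment 'depth'.  */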
int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->mutex, NULL);
  if (err != 0)
    return err;
  lock->owner = (pthread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != pthread_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (pthread_t) 0;
      return pthread_mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (pthread_t) 0)
    return EBUSY;
  return pthread_mutex_destroy (&lock->mutex);
}

# endif

/* -------------------------- gl_once_t datatype -------------------------- */
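/* glthread_once_singlethreaded is the fallback used when the pthread library
   is not actually in use (see the gl_once macro in glthread/lock.h): it only
   needs to move once_control away from its initial state and tell the caller
   whether to run the initialization function.  Illustrative use through the
   macros (the variable and function names here are made up):

     gl_once_define (static, my_once)

     static void my_init (void) { ... }

     void
     some_entry_point (void)
     {
       gl_once (my_once, my_init);
       ...
     }
*/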
static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *) once_control;
  if (*firstbyte == *(const char *) &fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.  */
      *firstbyte = ~ *(const char *) &fresh_once;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_PTH_THREADS
/* Use the GNU Pth threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

/* -------------------------- gl_once_t datatype -------------------------- */
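/* pth_once calls its callback with a 'void *' argument, whereas the glthread
   API takes a plain 'void (*) (void)' initialization function.  The
   glthread_once_call trampoline below bridges the two: the address of the
   function pointer is passed as the callback argument and dereferenced
   there.  */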
static void
glthread_once_call (void *arg)
{
  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
  void (*initfunction) (void) = *gl_once_temp_addr;
  initfunction ();
}

int
glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
{
  void (*temp) (void) = initfunction;
  return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
}

int
glthread_once_singlethreaded (pth_once_t *once_control)
{
  /* We know that pth_once_t is an integer type.  */
  if (*once_control == PTH_ONCE_INIT)
    {
      /* First time use of once_control.  Invert the marker.  */
      *once_control = ~ PTH_ONCE_INIT;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_SOLARIS_THREADS
/* Use the old Solaris threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
  if (err != 0)
    return err;
  lock->owner = (thread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  if (lock->owner != self)
    {
      int err;

      err = mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != thr_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (thread_t) 0;
      return mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (thread_t) 0)
    return EBUSY;
  return mutex_destroy (&lock->mutex);
}

/* -------------------------- gl_once_t datatype -------------------------- */

int
glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
{
  if (!once_control->inited)
    {
      int err;

      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      err = mutex_lock (&once_control->mutex);
      if (err != 0)
        return err;
      if (!once_control->inited)
        {
          once_control->inited = 1;
          initfunction ();
        }
      return mutex_unlock (&once_control->mutex);
    }
  else
    return 0;
}

int
glthread_once_singlethreaded (gl_once_t *once_control)
{
  /* We know that gl_once_t contains an integer type.  */
  if (!once_control->inited)
    {
      /* First time use of once_control.  Invert the marker.  */
      once_control->inited = ~ 0;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_WINDOWS_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */
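/* The Windows locks are initialized lazily, since CRITICAL_SECTION has no
   static initializer.  Each lock carries a spin guard (see the guard fields
   in glthread/lock.h): 'guard.started' starts out negative, so the one
   thread whose InterlockedIncrement yields 0 performs the initialization and
   then sets 'guard.done'; any other thread that arrives before that just
   yields the CPU until 'guard.done' becomes nonzero.  The rwlock and
   recursive lock code further below uses the same pattern.  */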
void
glthread_lock_init_func (gl_lock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_lock_lock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_unlock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_destroy_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* In this file, the waitqueues are implemented as circular arrays.  */
#define gl_waitqueue_t gl_carray_waitqueue_t
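/* A waitqueue is a circular buffer of event HANDLEs: 'array' has room for
   'alloc' entries, and the 'count' live entries start at index 'offset' and
   wrap around modulo 'alloc'.  Threads enqueue themselves at the tail and
   are woken in FIFO order from the head.  */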
static void
gl_waitqueue_init (gl_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
static HANDLE
gl_waitqueue_add (gl_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory.  */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         starts at offset 0.  */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one,
     does not matter, since we will wait on it only once.  */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == INVALID_HANDLE_VALUE)
    /* No way to allocate an event.  */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}

/* Notifies the first thread from a wait queue and dequeues it.  */
static void
gl_waitqueue_notify_first (gl_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all.  */
static void
gl_waitqueue_notify_all (gl_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}

void
glthread_rwlock_init_func (gl_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  gl_waitqueue_init (&lock->waiting_readers);
  gl_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}
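/* Hand-off protocol: when the releasing thread (in glthread_rwlock_unlock_func)
   decides to wake waiters, it also updates 'runcount' on their behalf before
   signalling their events.  A woken reader or writer therefore returns from
   WaitForSingleObject already owning the lock and does not re-enter the
   critical section.  */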
int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  if (!(lock->runcount + 1 > 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_destroy_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}

/* --------------------- gl_recursive_lock_t datatype --------------------- */
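/* Like the POSIX fallback above: the CRITICAL_SECTION is taken only by the
   outermost acquisition, while 'owner' and 'depth' track the owning thread
   and its nesting level.  */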
void
glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
{
  lock->owner = 0;
  lock->depth = 0;
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    DWORD self = GetCurrentThreadId ();
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      {
        lock->depth--;
        return EAGAIN;
      }
  }
  return 0;
}

int
glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
  return 0;
}

int
glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* -------------------------- gl_once_t datatype -------------------------- */
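/* One-time execution without a statically initializable CRITICAL_SECTION:
   the one thread whose InterlockedIncrement of 'started' yields 0 creates
   and enters the critical section, runs the init function, and publishes the
   result by stepping 'inited' from negative to 0 and finally to 1.  Late
   arrivals first spin until 'inited' becomes non-negative (i.e. the critical
   section exists and is held) and then block on it until the initialization
   has finished.  */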
void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}

#endif

/* ========================================================================= */