rrdenglocking.c

// SPDX-License-Identifier: GPL-3.0-or-later
#include "rrdengine.h"

struct page_cache_descr *rrdeng_create_pg_cache_descr(struct rrdengine_instance *ctx)
{
    struct page_cache_descr *pg_cache_descr;

    pg_cache_descr = mallocz(sizeof(*pg_cache_descr));
    rrd_stat_atomic_add(&ctx->stats.page_cache_descriptors, 1);
    pg_cache_descr->page = NULL;
    pg_cache_descr->flags = 0;
    pg_cache_descr->prev = pg_cache_descr->next = NULL;
    pg_cache_descr->refcnt = 0;
    pg_cache_descr->waiters = 0;
    fatal_assert(0 == uv_cond_init(&pg_cache_descr->cond));
    fatal_assert(0 == uv_mutex_init(&pg_cache_descr->mutex));

    return pg_cache_descr;
}
void rrdeng_destroy_pg_cache_descr(struct rrdengine_instance *ctx, struct page_cache_descr *pg_cache_descr)
{
    uv_cond_destroy(&pg_cache_descr->cond);
    uv_mutex_destroy(&pg_cache_descr->mutex);
    freez(pg_cache_descr);
    rrd_stat_atomic_add(&ctx->stats.page_cache_descriptors, -1);
}
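
/*
 * A note on the lock word (a sketch, not part of the original file): the
 * functions below treat descr->pg_cache_descr_state as a packed word, with a
 * user count stored in the bits at and above PG_CACHE_DESCR_SHIFT and the
 * flag bits (PG_CACHE_DESCR_LOCKED, PG_CACHE_DESCR_ALLOCATED,
 * PG_CACHE_DESCR_DESTROY) below it, selected by PG_CACHE_DESCR_FLAGS_MASK;
 * the authoritative macro definitions live elsewhere in the engine's headers.
 * The hypothetical helper below only mirrors the packing that the lock and
 * unlock paths do inline, and is not used by this file.
 */
static inline unsigned long example_pack_descr_state(unsigned long users, unsigned long flags)
{
    /* illustrative only: users in the high bits, flags in the low bits */
    return (users << PG_CACHE_DESCR_SHIFT) | (flags & PG_CACHE_DESCR_FLAGS_MASK);
}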
/* also allocates page cache descriptor if missing */
void rrdeng_page_descr_mutex_lock(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
{
    unsigned long old_state, old_users, new_state, ret_state;
    struct page_cache_descr *pg_cache_descr = NULL;
    uint8_t we_locked;

    we_locked = 0;
    while (1) { /* spin */
        old_state = descr->pg_cache_descr_state;
        old_users = old_state >> PG_CACHE_DESCR_SHIFT;

        if (unlikely(we_locked)) {
            /* we installed the descriptor in a previous iteration while holding
             * the transient lock; convert LOCKED into 1 user + ALLOCATED */
            fatal_assert(old_state & PG_CACHE_DESCR_LOCKED);
            new_state = (1 << PG_CACHE_DESCR_SHIFT) | PG_CACHE_DESCR_ALLOCATED;
            ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
            if (old_state == ret_state) {
                /* success */
                break;
            }
            continue; /* spin */
        }
        if (old_state & PG_CACHE_DESCR_LOCKED) {
            fatal_assert(0 == old_users);
            continue; /* spin */
        }
        if (0 == old_state) {
            /* no page cache descriptor has been allocated */

            if (NULL == pg_cache_descr) {
                pg_cache_descr = rrdeng_create_pg_cache_descr(ctx);
            }
            new_state = PG_CACHE_DESCR_LOCKED;
            ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, 0, new_state);
            if (0 == ret_state) {
                we_locked = 1;
                descr->pg_cache_descr = pg_cache_descr;
                pg_cache_descr->descr = descr;
                pg_cache_descr = NULL; /* make sure we don't free pg_cache_descr */
                /* retry */
                continue;
            }
            continue; /* spin */
        }
        /* page cache descriptor is already allocated */
        if (unlikely(!(old_state & PG_CACHE_DESCR_ALLOCATED))) {
            fatal("Invalid page cache descriptor locking state:%#lX", old_state);
        }
        new_state = (old_users + 1) << PG_CACHE_DESCR_SHIFT;
        new_state |= old_state & PG_CACHE_DESCR_FLAGS_MASK;
        ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
        if (old_state == ret_state) {
            /* success */
            break;
        }
        /* spin */
    }
    if (pg_cache_descr) {
        /* another thread beat us to the allocation; discard our copy */
        rrdeng_destroy_pg_cache_descr(ctx, pg_cache_descr);
    }
    pg_cache_descr = descr->pg_cache_descr;
    uv_mutex_lock(&pg_cache_descr->mutex);
}
void rrdeng_page_descr_mutex_unlock(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
{
    unsigned long old_state, new_state, ret_state, old_users;
    struct page_cache_descr *pg_cache_descr, *delete_pg_cache_descr = NULL;
    uint8_t we_locked;

    uv_mutex_unlock(&descr->pg_cache_descr->mutex);
    we_locked = 0;
    while (1) { /* spin */
        old_state = descr->pg_cache_descr_state;
        old_users = old_state >> PG_CACHE_DESCR_SHIFT;

        if (unlikely(we_locked)) {
            /* we hold the transient lock for a deferred destruction;
             * reset the state word and free the descriptor */
            fatal_assert(0 == old_users);
            ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, 0);
            if (old_state == ret_state) {
                /* success */
                rrdeng_destroy_pg_cache_descr(ctx, delete_pg_cache_descr);
                return;
            }
            continue; /* spin */
        }
        if (old_state & PG_CACHE_DESCR_LOCKED) {
            fatal_assert(0 == old_users);
            continue; /* spin */
        }
        fatal_assert(old_state & PG_CACHE_DESCR_ALLOCATED);
        pg_cache_descr = descr->pg_cache_descr;
        /* caller is the only page cache descriptor user and there are no pending references on the page */
        if ((old_state & PG_CACHE_DESCR_DESTROY) && (1 == old_users) &&
            !pg_cache_descr->flags && !pg_cache_descr->refcnt) {
            fatal_assert(!pg_cache_descr->waiters);

            new_state = PG_CACHE_DESCR_LOCKED;
            ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
            if (old_state == ret_state) {
                we_locked = 1;
                delete_pg_cache_descr = pg_cache_descr;
                descr->pg_cache_descr = NULL;
                /* retry */
                continue;
            }
            continue; /* spin */
        }
        fatal_assert(old_users > 0);
        new_state = (old_users - 1) << PG_CACHE_DESCR_SHIFT;
        new_state |= old_state & PG_CACHE_DESCR_FLAGS_MASK;
        ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
        if (old_state == ret_state) {
            /* success */
            break;
        }
        /* spin */
    }
}
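
/*
 * Usage sketch (hypothetical, not part of the original file): accesses to the
 * fields of descr->pg_cache_descr are meant to happen between a lock/unlock
 * pair, which pins the page cache descriptor (user count > 0) and holds its
 * mutex, so it cannot be freed underneath the caller.
 */
static inline void example_access_pg_cache_descr(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
{
    struct page_cache_descr *pg_cache_descr;

    rrdeng_page_descr_mutex_lock(ctx, descr);
    pg_cache_descr = descr->pg_cache_descr;
    (void)pg_cache_descr; /* fields such as page, flags, refcnt and waiters are safe to touch here */
    rrdeng_page_descr_mutex_unlock(ctx, descr);
}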
/*
 * Tries to deallocate the page cache descriptor. If it fails, it postpones
 * deallocation by setting the PG_CACHE_DESCR_DESTROY flag, which a different
 * context will eventually clear after performing the deallocation.
 */
void rrdeng_try_deallocate_pg_cache_descr(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
{
    unsigned long old_state, new_state, ret_state, old_users;
    struct page_cache_descr *pg_cache_descr = NULL;
    uint8_t just_locked, can_free, must_unlock;

    just_locked = 0;
    can_free = 0;
    must_unlock = 0;
    while (1) { /* spin */
        old_state = descr->pg_cache_descr_state;
        old_users = old_state >> PG_CACHE_DESCR_SHIFT;

        if (unlikely(just_locked)) {
            fatal_assert(0 == old_users);

            must_unlock = 1;
            just_locked = 0;
            /* Try to deallocate if there are no pending references on the page */
            if (!pg_cache_descr->flags && !pg_cache_descr->refcnt) {
                fatal_assert(!pg_cache_descr->waiters);

                descr->pg_cache_descr = NULL;
                can_free = 1;
                /* success */
                continue;
            }
            continue; /* spin */
        }
        if (unlikely(must_unlock)) {
            fatal_assert(0 == old_users);

            if (can_free) {
                /* success */
                new_state = 0;
            } else {
                new_state = old_state | PG_CACHE_DESCR_DESTROY;
                new_state &= ~PG_CACHE_DESCR_LOCKED;
            }
            ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
            if (old_state == ret_state) {
                /* unlocked */
                if (can_free)
                    rrdeng_destroy_pg_cache_descr(ctx, pg_cache_descr);
                return;
            }
            continue; /* spin */
        }
        if (!(old_state & PG_CACHE_DESCR_ALLOCATED)) {
            /* don't do anything */
            return;
        }
        if (old_state & PG_CACHE_DESCR_LOCKED) {
            fatal_assert(0 == old_users);
            continue; /* spin */
        }
        /* caller is the only page cache descriptor user */
        if (0 == old_users) {
            new_state = old_state | PG_CACHE_DESCR_LOCKED;
            ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
            if (old_state == ret_state) {
                just_locked = 1;
                pg_cache_descr = descr->pg_cache_descr;
                /* retry */
                continue;
            }
            continue; /* spin */
        }
        if (old_state & PG_CACHE_DESCR_DESTROY) {
            /* don't do anything */
            return;
        }
        /* plant PG_CACHE_DESCR_DESTROY so that other contexts eventually free the page cache descriptor */
        new_state = old_state | PG_CACHE_DESCR_DESTROY;
        ret_state = ulong_compare_and_swap(&descr->pg_cache_descr_state, old_state, new_state);
        if (old_state == ret_state) {
            /* success */
            return;
        }
        /* spin */
    }
}
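
/*
 * Teardown sketch (hypothetical, not part of the original file): an owner
 * dropping a page calls rrdeng_try_deallocate_pg_cache_descr(). If the
 * descriptor is uncontended and the page has no pending references it is
 * freed immediately; otherwise PG_CACHE_DESCR_DESTROY is planted and the
 * last rrdeng_page_descr_mutex_unlock() performs the deallocation instead.
 */
static inline void example_drop_pg_cache_descr(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr)
{
    /* either frees now or defers the free to the last unlocker */
    rrdeng_try_deallocate_pg_cache_descr(ctx, descr);
}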