// rrdcollector.c
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #include "rrdcollector.h"
  3. #include "rrdcollector-internals.h"
// Each function points to this collector structure
// so that when the collector exits, all of them will
// be invalidated (running == false)
// The last function using this collector
// frees the structure too (or when the collector calls
// rrdset_collector_finished()).
struct rrd_collector {
    int32_t refcount;            // references held by charts/functions; -1 means "being freed"
    int32_t refcount_dispatcher; // in-flight dispatcher (cancellation) references; -1 blocks new ones
    pid_t tid;                   // thread id of the owning collector thread
    bool running;                // true while the collector thread is alive; cleared on exit
};
// Each thread that adds RRDSET functions has to call
// rrdset_collector_started() and rrdset_collector_finished()
// to create the collector structure.
// Per-thread pointer to this thread's collector; NULL until rrd_collector_started().
__thread struct rrd_collector *thread_rrd_collector = NULL;
  20. inline bool rrd_collector_running(struct rrd_collector *rdc) {
  21. return __atomic_load_n(&rdc->running, __ATOMIC_RELAXED);
  22. }
  23. inline pid_t rrd_collector_tid(struct rrd_collector *rdc) {
  24. return rdc->tid;
  25. }
  26. bool rrd_collector_dispatcher_acquire(struct rrd_collector *rdc) {
  27. int32_t expected = __atomic_load_n(&rdc->refcount_dispatcher, __ATOMIC_RELAXED);
  28. int32_t wanted;
  29. do {
  30. if(expected < 0)
  31. return false;
  32. wanted = expected + 1;
  33. } while(!__atomic_compare_exchange_n(&rdc->refcount_dispatcher, &expected, wanted, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
  34. return true;
  35. }
  36. void rrd_collector_dispatcher_release(struct rrd_collector *rdc) {
  37. __atomic_sub_fetch(&rdc->refcount_dispatcher, 1, __ATOMIC_RELAXED);
  38. }
  39. static void rrd_collector_free(struct rrd_collector *rdc) {
  40. if(rdc->running)
  41. return;
  42. int32_t expected = 0;
  43. if(!__atomic_compare_exchange_n(&rdc->refcount, &expected, -1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
  44. // the collector is still referenced by charts.
  45. // leave it hanging there, the last chart will actually free it.
  46. return;
  47. }
  48. // we can free it now
  49. freez(rdc);
  50. }
  51. // called once per collector
  52. void rrd_collector_started(void) {
  53. if(!thread_rrd_collector)
  54. thread_rrd_collector = callocz(1, sizeof(struct rrd_collector));
  55. thread_rrd_collector->tid = gettid();
  56. __atomic_store_n(&thread_rrd_collector->running, true, __ATOMIC_RELAXED);
  57. }
// called once per collector
// Tears down this thread's collector: clears 'running', waits for all
// in-flight dispatcher references to drain (poisoning the counter so no
// new ones can be taken), then drops our ownership of the structure.
void rrd_collector_finished(void) {
    if(!thread_rrd_collector)
        return;
    // Stop new acquisitions: rrd_collector_acquire() checks this flag.
    __atomic_store_n(&thread_rrd_collector->running, false, __ATOMIC_RELAXED);
    // wait for any cancellation requests to be dispatched;
    // the problem is that cancellation requests require a structure allocated by the collector,
    // so, while cancellation requests are being dispatched, this structure is accessed.
    // delaying the exit of the thread is required to avoid cleaning up this structure.
    int32_t expected = 0;
    // Spin until refcount_dispatcher reaches 0, then atomically swap it to
    // -1 so rrd_collector_dispatcher_acquire() refuses further references.
    while(!__atomic_compare_exchange_n(&thread_rrd_collector->refcount_dispatcher, &expected, -1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
        // A failed CAS stored the current (non-zero) count into 'expected';
        // reset it to 0 so the next attempt again tests for "fully drained".
        expected = 0;
        sleep_usec(1 * USEC_PER_MS);
    }
    // Free now if no chart holds a reference; otherwise the last
    // rrd_collector_release() will free it.
    rrd_collector_free(thread_rrd_collector);
    thread_rrd_collector = NULL;
}
  75. bool rrd_collector_acquire(struct rrd_collector *rdc) {
  76. int32_t expected = __atomic_load_n(&rdc->refcount, __ATOMIC_RELAXED), wanted = 0;
  77. do {
  78. if(expected < 0 || !rrd_collector_running(rdc))
  79. return false;
  80. wanted = expected + 1;
  81. } while(!__atomic_compare_exchange_n(&rdc->refcount, &expected, wanted, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
  82. return true;
  83. }
  84. struct rrd_collector *rrd_collector_acquire_current_thread(void) {
  85. rrd_collector_started();
  86. if(!rrd_collector_acquire(thread_rrd_collector))
  87. internal_fatal(true, "FUNCTIONS: Trying to acquire a the current thread collector, that is currently exiting.");
  88. return thread_rrd_collector;
  89. }
  90. void rrd_collector_release(struct rrd_collector *rdc) {
  91. if(unlikely(!rdc)) return;
  92. int32_t expected = __atomic_load_n(&rdc->refcount, __ATOMIC_RELAXED), wanted = 0;
  93. do {
  94. if(expected < 0)
  95. return;
  96. if(expected == 0) {
  97. internal_fatal(true, "FUNCTIONS: Trying to release a collector that is not acquired.");
  98. return;
  99. }
  100. wanted = expected - 1;
  101. } while(!__atomic_compare_exchange_n(&rdc->refcount, &expected, wanted, false, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
  102. if(wanted == 0)
  103. rrd_collector_free(rdc);
  104. }