// SPDX-License-Identifier: GPL-3.0-or-later
#include "../libnetdata.h"

#ifdef NETDATA_TRACE_RWLOCKS

#ifndef NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC
#define NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC 10
#endif

#ifndef NETDATA_TRACE_RWLOCKS_HOLD_TIME_TO_IGNORE_USEC
#define NETDATA_TRACE_RWLOCKS_HOLD_TIME_TO_IGNORE_USEC 10000
#endif

#ifndef NETDATA_THREAD_LOCKS_ARRAY_SIZE
#define NETDATA_THREAD_LOCKS_ARRAY_SIZE 10
#endif

#endif // NETDATA_TRACE_RWLOCKS
// ----------------------------------------------------------------------------
// automatic thread cancelability management, based on locks

static __thread int netdata_thread_first_cancelability = 0;
static __thread int netdata_thread_nested_disables = 0;

static __thread size_t netdata_locks_acquired_rwlocks = 0;
static __thread size_t netdata_locks_acquired_mutexes = 0;

inline void netdata_thread_disable_cancelability(void) {
    if(!netdata_thread_nested_disables) {
        int old;
        int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);

        if(ret != 0)
            netdata_log_error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d",
                              netdata_thread_tag(), ret);

        netdata_thread_first_cancelability = old;
    }

    netdata_thread_nested_disables++;
}
inline void netdata_thread_enable_cancelability(void) {
    if(unlikely(netdata_thread_nested_disables < 1)) {
        internal_fatal(true, "THREAD_CANCELABILITY: trying to enable cancelability, but it was not disabled");

        netdata_log_error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d "
                          "on thread %s - results will be undefined - please report this!",
                          netdata_thread_nested_disables, netdata_thread_tag());

        netdata_thread_nested_disables = 1;
    }

    if(netdata_thread_nested_disables == 1) {
        int old = 1;
        int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old);
        if(ret != 0)
            netdata_log_error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d",
                              netdata_thread_tag(), ret);
        else {
            if(old != PTHREAD_CANCEL_DISABLE) {
                internal_fatal(true, "THREAD_CANCELABILITY: invalid old state cancelability");

                netdata_log_error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability "
                                  "on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!",
                                  netdata_thread_tag(), PTHREAD_CANCEL_DISABLE,
                                  (old == PTHREAD_CANCEL_ENABLE) ? "ENABLED" : "UNKNOWN",
                                  old);
            }
        }
    }

    netdata_thread_nested_disables--;
}
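
/*
 * Usage sketch (comments only, not compiled): the lock wrappers below call
 * netdata_thread_disable_cancelability() right before acquiring a lock and
 * netdata_thread_enable_cancelability() right after releasing it, so a thread
 * cannot be cancelled while it holds a lock. The calls nest; only the
 * outermost enable restores the cancelability state saved by the first disable.
 *
 *     netdata_thread_disable_cancelability();   // nesting level 1, old state saved
 *     netdata_thread_disable_cancelability();   // nesting level 2, no state change
 *     netdata_thread_enable_cancelability();    // back to level 1, still disabled
 *     netdata_thread_enable_cancelability();    // level 0, saved state restored
 */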
// ----------------------------------------------------------------------------
// mutex

int __netdata_mutex_init(netdata_mutex_t *mutex) {
    int ret = pthread_mutex_init(mutex, NULL);
    if(unlikely(ret != 0))
        netdata_log_error("MUTEX_LOCK: failed to initialize (code %d).", ret);
    return ret;
}

int __netdata_mutex_destroy(netdata_mutex_t *mutex) {
    int ret = pthread_mutex_destroy(mutex);
    if(unlikely(ret != 0))
        netdata_log_error("MUTEX_LOCK: failed to destroy (code %d).", ret);
    return ret;
}

int __netdata_mutex_lock(netdata_mutex_t *mutex) {
    netdata_thread_disable_cancelability();

    int ret = pthread_mutex_lock(mutex);
    if(unlikely(ret != 0)) {
        netdata_thread_enable_cancelability();
        netdata_log_error("MUTEX_LOCK: failed to get lock (code %d)", ret);
    }
    else
        netdata_locks_acquired_mutexes++;

    return ret;
}

int __netdata_mutex_trylock(netdata_mutex_t *mutex) {
    netdata_thread_disable_cancelability();

    int ret = pthread_mutex_trylock(mutex);
    if(ret != 0)
        netdata_thread_enable_cancelability();
    else
        netdata_locks_acquired_mutexes++;

    return ret;
}

int __netdata_mutex_unlock(netdata_mutex_t *mutex) {
    int ret = pthread_mutex_unlock(mutex);
    if(unlikely(ret != 0))
        netdata_log_error("MUTEX_LOCK: failed to unlock (code %d).", ret);
    else {
        netdata_locks_acquired_mutexes--;
        netdata_thread_enable_cancelability();
    }

    return ret;
}
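
/*
 * Usage sketch (comments only, not compiled): the double-underscore functions
 * above wrap the pthread mutex API, keep the per-thread
 * netdata_locks_acquired_mutexes counter, and pair cancelability
 * disable/enable around the time the lock is held. my_mutex is a hypothetical
 * caller-side variable.
 *
 *     static netdata_mutex_t my_mutex;
 *
 *     __netdata_mutex_init(&my_mutex);
 *
 *     if(__netdata_mutex_lock(&my_mutex) == 0) {
 *         // ... critical section ...
 *         __netdata_mutex_unlock(&my_mutex);
 *     }
 *
 *     __netdata_mutex_destroy(&my_mutex);
 */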
#ifdef NETDATA_TRACE_RWLOCKS

int netdata_mutex_init_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                             const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(%p) from %lu@%s, %s()", mutex, line, file, function);

    int ret = __netdata_mutex_init(mutex);

    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(%p) = %d, from %lu@%s, %s()", mutex, ret, line, file, function);

    return ret;
}

int netdata_mutex_destroy_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_destroy(%p) from %lu@%s, %s()", mutex, line, file, function);

    int ret = __netdata_mutex_destroy(mutex);

    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_destroy(%p) = %d, from %lu@%s, %s()", mutex, ret, line, file, function);

    return ret;
}

int netdata_mutex_lock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                             const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(%p) from %lu@%s, %s()", mutex, line, file, function);

    usec_t start_s = now_monotonic_high_precision_usec();
    int ret = __netdata_mutex_lock(mutex);
    usec_t end_s = now_monotonic_high_precision_usec();

    // remove compiler unused variables warning
    (void)start_s;
    (void)end_s;

    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, end_s - start_s, line, file, function);

    return ret;
}

int netdata_mutex_trylock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(%p) from %lu@%s, %s()", mutex, line, file, function);

    usec_t start_s = now_monotonic_high_precision_usec();
    int ret = __netdata_mutex_trylock(mutex);
    usec_t end_s = now_monotonic_high_precision_usec();

    // remove compiler unused variables warning
    (void)start_s;
    (void)end_s;

    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, end_s - start_s, line, file, function);

    return ret;
}

int netdata_mutex_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                               const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(%p) from %lu@%s, %s()", mutex, line, file, function);

    usec_t start_s = now_monotonic_high_precision_usec();
    int ret = __netdata_mutex_unlock(mutex);
    usec_t end_s = now_monotonic_high_precision_usec();

    // remove compiler unused variables warning
    (void)start_s;
    (void)end_s;

    netdata_log_debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, end_s - start_s, line, file, function);

    return ret;
}

#endif // NETDATA_TRACE_RWLOCKS
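
/*
 * Note (assumption, based on the file/function/line parameters above): when
 * NETDATA_TRACE_RWLOCKS is defined, the public netdata_mutex_*() entry points
 * are presumably macros in the companion header that expand to these *_debug
 * wrappers, passing the caller's location, along the lines of:
 *
 *     #define netdata_mutex_lock(mutex) \
 *         netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
 *
 * (hypothetical macro shown for illustration only).
 */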
// ----------------------------------------------------------------------------
// rwlock

int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) {
    int ret = pthread_rwlock_destroy(&rwlock->rwlock_t);
    if(unlikely(ret != 0))
        netdata_log_error("RW_LOCK: failed to destroy lock (code %d)", ret);
    return ret;
}

int __netdata_rwlock_init(netdata_rwlock_t *rwlock) {
    int ret = pthread_rwlock_init(&rwlock->rwlock_t, NULL);
    if(unlikely(ret != 0))
        netdata_log_error("RW_LOCK: failed to initialize lock (code %d)", ret);
    return ret;
}

int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) {
    netdata_thread_disable_cancelability();

    int ret = pthread_rwlock_rdlock(&rwlock->rwlock_t);
    if(unlikely(ret != 0)) {
        netdata_thread_enable_cancelability();
        netdata_log_error("RW_LOCK: failed to obtain read lock (code %d)", ret);
    }
    else
        netdata_locks_acquired_rwlocks++;

    return ret;
}

int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) {
    netdata_thread_disable_cancelability();

    int ret = pthread_rwlock_wrlock(&rwlock->rwlock_t);
    if(unlikely(ret != 0)) {
        netdata_log_error("RW_LOCK: failed to obtain write lock (code %d)", ret);
        netdata_thread_enable_cancelability();
    }
    else
        netdata_locks_acquired_rwlocks++;

    return ret;
}

int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) {
    int ret = pthread_rwlock_unlock(&rwlock->rwlock_t);
    if(unlikely(ret != 0))
        netdata_log_error("RW_LOCK: failed to release lock (code %d)", ret);
    else {
        netdata_thread_enable_cancelability();
        netdata_locks_acquired_rwlocks--;
    }

    return ret;
}

int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) {
    netdata_thread_disable_cancelability();

    int ret = pthread_rwlock_tryrdlock(&rwlock->rwlock_t);
    if(ret != 0)
        netdata_thread_enable_cancelability();
    else
        netdata_locks_acquired_rwlocks++;

    return ret;
}

int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
    netdata_thread_disable_cancelability();

    int ret = pthread_rwlock_trywrlock(&rwlock->rwlock_t);
    if(ret != 0)
        netdata_thread_enable_cancelability();
    else
        netdata_locks_acquired_rwlocks++;

    return ret;
}
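
/*
 * Usage sketch (comments only, not compiled): many readers may hold the lock
 * concurrently, a writer holds it exclusively. my_rwlock is a hypothetical
 * caller-side variable.
 *
 *     static netdata_rwlock_t my_rwlock;
 *
 *     __netdata_rwlock_init(&my_rwlock);
 *
 *     __netdata_rwlock_rdlock(&my_rwlock);     // shared access
 *     // ... read shared state ...
 *     __netdata_rwlock_unlock(&my_rwlock);
 *
 *     __netdata_rwlock_wrlock(&my_rwlock);     // exclusive access
 *     // ... modify shared state ...
 *     __netdata_rwlock_unlock(&my_rwlock);
 */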
// ----------------------------------------------------------------------------
// spinlock implementation
// https://www.youtube.com/watch?v=rmGJc9PXpuE&t=41s

void spinlock_init(SPINLOCK *spinlock) {
    memset(spinlock, 0, sizeof(SPINLOCK));
}

void spinlock_lock(SPINLOCK *spinlock) {
    static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };

#ifdef NETDATA_INTERNAL_CHECKS
    size_t spins = 0;
#endif

    netdata_thread_disable_cancelability();

    for(int i = 1;
        __atomic_load_n(&spinlock->locked, __ATOMIC_RELAXED) ||
        __atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE)
        ; i++
        ) {

#ifdef NETDATA_INTERNAL_CHECKS
        spins++;
#endif
        if(unlikely(i == 8)) {
            i = 0;
            nanosleep(&ns, NULL);
        }
    }

    // we have the lock

#ifdef NETDATA_INTERNAL_CHECKS
    spinlock->spins += spins;
    spinlock->locker_pid = gettid();
#endif
}

void spinlock_unlock(SPINLOCK *spinlock) {
#ifdef NETDATA_INTERNAL_CHECKS
    spinlock->locker_pid = 0;
#endif
    __atomic_clear(&spinlock->locked, __ATOMIC_RELEASE);
    netdata_thread_enable_cancelability();
}

bool spinlock_trylock(SPINLOCK *spinlock) {
    netdata_thread_disable_cancelability();

    if(!__atomic_load_n(&spinlock->locked, __ATOMIC_RELAXED) &&
        !__atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE))
        // we got the lock
        return true;

    // we didn't get the lock
    netdata_thread_enable_cancelability();
    return false;
}
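
/*
 * Design note and usage sketch (comments only, not compiled): spinlock_lock()
 * is a test-and-test-and-set loop - it polls the flag with a relaxed atomic
 * load and only attempts the acquiring __atomic_test_and_set() when the lock
 * looks free, which avoids hammering the cache line; every 8 failed iterations
 * it sleeps for 1 ns to yield the CPU. sp is a hypothetical caller-side lock.
 *
 *     static SPINLOCK sp;
 *
 *     spinlock_init(&sp);
 *
 *     spinlock_lock(&sp);                  // busy-wait until acquired
 *     // ... very short critical section ...
 *     spinlock_unlock(&sp);
 *
 *     if(spinlock_trylock(&sp)) {          // non-blocking attempt
 *         // ... critical section ...
 *         spinlock_unlock(&sp);
 *     }
 */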
// ----------------------------------------------------------------------------
// rw_spinlock implementation

void rw_spinlock_init(RW_SPINLOCK *rw_spinlock) {
    rw_spinlock->readers = 0;
    spinlock_init(&rw_spinlock->spinlock);
}

void rw_spinlock_read_lock(RW_SPINLOCK *rw_spinlock) {
    netdata_thread_disable_cancelability();

    spinlock_lock(&rw_spinlock->spinlock);
    __atomic_add_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
    spinlock_unlock(&rw_spinlock->spinlock);
}

void rw_spinlock_read_unlock(RW_SPINLOCK *rw_spinlock) {
#ifndef NETDATA_INTERNAL_CHECKS
    __atomic_sub_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
#else
    int32_t x = __atomic_sub_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
    if(x < 0)
        fatal("RW_SPINLOCK: readers is negative %d", x);
#endif

    netdata_thread_enable_cancelability();
}

void rw_spinlock_write_lock(RW_SPINLOCK *rw_spinlock) {
    static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };

    size_t spins = 0;
    while(1) {
        spins++;
        spinlock_lock(&rw_spinlock->spinlock);

        if(__atomic_load_n(&rw_spinlock->readers, __ATOMIC_RELAXED) == 0)
            break;

        // Busy wait until all readers have released their locks.
        spinlock_unlock(&rw_spinlock->spinlock);
        nanosleep(&ns, NULL);
    }

    (void)spins;
}

void rw_spinlock_write_unlock(RW_SPINLOCK *rw_spinlock) {
    spinlock_unlock(&rw_spinlock->spinlock);
}

bool rw_spinlock_tryread_lock(RW_SPINLOCK *rw_spinlock) {
    if(spinlock_trylock(&rw_spinlock->spinlock)) {
        __atomic_add_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
        spinlock_unlock(&rw_spinlock->spinlock);
        netdata_thread_disable_cancelability();
        return true;
    }

    return false;
}

bool rw_spinlock_trywrite_lock(RW_SPINLOCK *rw_spinlock) {
    if(spinlock_trylock(&rw_spinlock->spinlock)) {
        if (__atomic_load_n(&rw_spinlock->readers, __ATOMIC_RELAXED) == 0) {
            // No readers, we've successfully acquired the write lock
            return true;
        }
        else {
            // There are readers, unlock the spinlock and return false
            spinlock_unlock(&rw_spinlock->spinlock);
        }
    }

    return false;
}
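
/*
 * Design note and usage sketch (comments only, not compiled): readers take the
 * inner spinlock just long enough to bump the readers counter and release it,
 * so multiple readers can proceed in parallel; a writer keeps the inner
 * spinlock held and spins until the readers counter drops to zero, and
 * rw_spinlock_write_unlock() simply releases that spinlock. rw is a
 * hypothetical caller-side lock.
 *
 *     static RW_SPINLOCK rw;
 *
 *     rw_spinlock_init(&rw);
 *
 *     rw_spinlock_read_lock(&rw);
 *     // ... read shared state ...
 *     rw_spinlock_read_unlock(&rw);
 *
 *     rw_spinlock_write_lock(&rw);
 *     // ... modify shared state ...
 *     rw_spinlock_write_unlock(&rw);
 */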
#ifdef NETDATA_TRACE_RWLOCKS

// ----------------------------------------------------------------------------
// lockers list

static netdata_rwlock_locker *find_rwlock_locker(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    pid_t pid = gettid();
    netdata_rwlock_locker *locker = NULL;

    __netdata_mutex_lock(&rwlock->lockers_mutex);
    Pvoid_t *PValue = JudyLGet(rwlock->lockers_pid_JudyL, pid, PJE0);
    if(PValue && *PValue)
        locker = *PValue;
    __netdata_mutex_unlock(&rwlock->lockers_mutex);

    return locker;
}

static netdata_rwlock_locker *add_rwlock_locker(const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock, LOCKER_REQUEST lock_type) {
    netdata_rwlock_locker *locker;

    locker = find_rwlock_locker(file, function, line, rwlock);
    if(locker) {
        locker->lock |= lock_type;
        locker->refcount++;
    }
    else {
        locker = mallocz(sizeof(netdata_rwlock_locker));
        locker->pid = gettid();
        locker->tag = netdata_thread_tag();
        locker->refcount = 1;
        locker->lock = lock_type;
        locker->got_it = false;
        locker->file = file;
        locker->function = function;
        locker->line = line;

        __netdata_mutex_lock(&rwlock->lockers_mutex);
        DOUBLE_LINKED_LIST_APPEND_UNSAFE(rwlock->lockers, locker, prev, next);
        Pvoid_t *PValue = JudyLIns(&rwlock->lockers_pid_JudyL, locker->pid, PJE0);
        *PValue = locker;
        if (lock_type == RWLOCK_REQUEST_READ || lock_type == RWLOCK_REQUEST_TRYREAD) rwlock->readers++;
        if (lock_type == RWLOCK_REQUEST_WRITE || lock_type == RWLOCK_REQUEST_TRYWRITE) rwlock->writers++;
        __netdata_mutex_unlock(&rwlock->lockers_mutex);
    }

    return locker;
}

static void remove_rwlock_locker(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock, netdata_rwlock_locker *locker) {
    __netdata_mutex_lock(&rwlock->lockers_mutex);
    locker->refcount--;
    if(!locker->refcount) {
        DOUBLE_LINKED_LIST_REMOVE_UNSAFE(rwlock->lockers, locker, prev, next);
        JudyLDel(&rwlock->lockers_pid_JudyL, locker->pid, PJE0);

        if (locker->lock == RWLOCK_REQUEST_READ || locker->lock == RWLOCK_REQUEST_TRYREAD) rwlock->readers--;
        else if (locker->lock == RWLOCK_REQUEST_WRITE || locker->lock == RWLOCK_REQUEST_TRYWRITE) rwlock->writers--;

        freez(locker);
    }
    __netdata_mutex_unlock(&rwlock->lockers_mutex);
}

// ----------------------------------------------------------------------------
// debug versions of rwlock

int netdata_rwlock_destroy_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                 const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    int ret = __netdata_rwlock_destroy(rwlock);
    if(!ret) {
        while (rwlock->lockers)
            remove_rwlock_locker(file, function, line, rwlock, rwlock->lockers);
    }

    return ret;
}

int netdata_rwlock_init_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                              const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    int ret = __netdata_rwlock_init(rwlock);
    if(!ret) {
        __netdata_mutex_init(&rwlock->lockers_mutex);
        rwlock->lockers_pid_JudyL = NULL;
        rwlock->lockers = NULL;
        rwlock->readers = 0;
        rwlock->writers = 0;
    }

    return ret;
}

int netdata_rwlock_rdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_READ);

    int ret = __netdata_rwlock_rdlock(rwlock);

    if(!ret)
        locker->got_it = true;
    else
        remove_rwlock_locker(file, function, line, rwlock, locker);

    return ret;
}

int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_WRITE);

    int ret = __netdata_rwlock_wrlock(rwlock);

    if(!ret)
        locker->got_it = true;
    else
        remove_rwlock_locker(file, function, line, rwlock, locker);

    return ret;
}

int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock);
    if(unlikely(!locker))
        fatal("UNLOCK WITHOUT LOCK");

    int ret = __netdata_rwlock_unlock(rwlock);

    if(likely(!ret))
        remove_rwlock_locker(file, function, line, rwlock, locker);

    return ret;
}

int netdata_rwlock_tryrdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                   const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_TRYREAD);

    int ret = __netdata_rwlock_tryrdlock(rwlock);

    if(!ret)
        locker->got_it = true;
    else
        remove_rwlock_locker(file, function, line, rwlock, locker);

    return ret;
}

int netdata_rwlock_trywrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
                                   const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
    netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_TRYWRITE);

    int ret = __netdata_rwlock_trywrlock(rwlock);

    if(!ret)
        locker->got_it = true;
    else
        remove_rwlock_locker(file, function, line, rwlock, locker);

    return ret;
}

#endif // NETDATA_TRACE_RWLOCKS