//===-- stats.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STATS_H_
#define SCUDO_STATS_H_

#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <string.h>

namespace scudo {

// Memory allocator statistics
enum StatType { StatAllocated, StatFree, StatMapped, StatCount };

typedef uptr StatCounters[StatCount];
// Per-thread stats, live in per-thread cache. We use atomics so that the
// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
// lock, because those are expensive operations, and we only care for the stats
// to be "somewhat" correct: e.g. if we call GlobalStats::get while a thread is
// LocalStats::add'ing, this is OK, we will still get a meaningful number.
class LocalStats {
public:
  void init() {
    for (uptr I = 0; I < StatCount; I++)
      DCHECK_EQ(get(static_cast<StatType>(I)), 0U);
  }
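  // add()/sub() are a plain load followed by a store rather than an atomic
  // read-modify-write; as noted in the class comment, a racing update may be
  // lost, which is acceptable for these approximate counters.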
  void add(StatType I, uptr V) {
    V += atomic_load_relaxed(&StatsArray[I]);
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void sub(StatType I, uptr V) {
    V = atomic_load_relaxed(&StatsArray[I]) - V;
    atomic_store_relaxed(&StatsArray[I], V);
  }

  void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }

  uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
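  // Intrusive list links used by GlobalStats' DoublyLinkedList of registered
  // per-thread stats.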
  LocalStats *Next = nullptr;
  LocalStats *Prev = nullptr;

private:
  atomic_uptr StatsArray[StatCount] = {};
};
// Global stats, used for aggregation and querying.
class GlobalStats : public LocalStats {
public:
  void init() { LocalStats::init(); }
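  // Registers a thread's local stats so that get() includes them in the
  // aggregated totals.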
  void link(LocalStats *S) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    StatsList.push_back(S);
  }
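  // Folds the departing LocalStats' counters into the global ones before
  // removing it from the list, so its contribution is not lost.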
  void unlink(LocalStats *S) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    StatsList.remove(S);
    for (uptr I = 0; I < StatCount; I++)
      add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
  }
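  // Copies the aggregated counters (the global ones plus those of every linked
  // LocalStats) into S, which must have room for StatCount entries.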
  void get(uptr *S) const EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (uptr I = 0; I < StatCount; I++)
      S[I] = LocalStats::get(static_cast<StatType>(I));
    for (const auto &Stats : StatsList) {
      for (uptr I = 0; I < StatCount; I++)
        S[I] += Stats.get(static_cast<StatType>(I));
    }
    // All stats must be non-negative.
    for (uptr I = 0; I < StatCount; I++)
      S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
  }
  void lock() ACQUIRE(Mutex) { Mutex.lock(); }
  void unlock() RELEASE(Mutex) { Mutex.unlock(); }
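  // Aliases for lock()/unlock(), so the stats can be held locked while the
  // allocator as a whole is disabled.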
  void disable() ACQUIRE(Mutex) { lock(); }
  void enable() RELEASE(Mutex) { unlock(); }

private:
  mutable HybridMutex Mutex;
  DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
};

} // namespace scudo

#endif // SCUDO_STATS_H_
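The classes above depend on scudo-internal primitives (atomic_helpers.h, list.h, mutex.h) and are not usable outside the tree. For illustration only, here is a minimal, self-contained sketch of the same relaxed load/store counter pattern using std::atomic; it is not part of scudo, and the names ApproxStats and Counters are invented for the example. The point it demonstrates is the deliberate relaxed load followed by a relaxed store in place of an atomic fetch_add, which keeps updates cheap at the cost of occasionally losing a racing update.

// approx_stats_sketch.cpp -- illustrative only, not part of scudo.
#include <atomic>
#include <cstdint>
#include <cstdio>

enum StatType { StatAllocated, StatFree, StatMapped, StatCount };

class ApproxStats {
public:
  // Deliberately not fetch_add: a concurrent add() may be lost, which is
  // acceptable when the counters only need to be roughly right.
  void add(StatType I, uint64_t V) {
    const uint64_t Cur = Counters[I].load(std::memory_order_relaxed);
    Counters[I].store(Cur + V, std::memory_order_relaxed);
  }
  void sub(StatType I, uint64_t V) {
    const uint64_t Cur = Counters[I].load(std::memory_order_relaxed);
    Counters[I].store(Cur - V, std::memory_order_relaxed);
  }
  uint64_t get(StatType I) const {
    return Counters[I].load(std::memory_order_relaxed);
  }

private:
  std::atomic<uint64_t> Counters[StatCount] = {};
};

int main() {
  ApproxStats Stats;
  Stats.add(StatAllocated, 4096);
  Stats.add(StatMapped, 1 << 20);
  Stats.sub(StatAllocated, 1024);
  std::printf("allocated: %llu mapped: %llu\n",
              static_cast<unsigned long long>(Stats.get(StatAllocated)),
              static_cast<unsigned long long>(Stats.get(StatMapped)));
  return 0;
}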