//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ATOMIC_H_
#define SCUDO_ATOMIC_H_

#include "internal_defs.h"

namespace scudo {
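
// The enumerators mirror the compiler's __ATOMIC_* constants so that a
// memory_order can be passed straight through to the __atomic_* builtins; the
// static_asserts below enforce the correspondence.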
enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};

static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
static_assert(memory_order_release == __ATOMIC_RELEASE, "");
static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");

struct atomic_u8 {
  typedef u8 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u16 {
  typedef u16 Type;
  volatile Type ValDoNotUse;
};

struct atomic_s32 {
  typedef s32 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};

struct atomic_u64 {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes (e.g. the
  // i386 ABI only guarantees 4-byte alignment), so force it explicitly.
  alignas(8) volatile Type ValDoNotUse;
};

struct atomic_uptr {
  typedef uptr Type;
  volatile Type ValDoNotUse;
};
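
// The wrapped value is named ValDoNotUse to discourage direct access: all
// reads and writes should go through the atomic_* helpers below, which also
// DCHECK in debug builds that the object is naturally aligned.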

template <typename T>
inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type V;
  __atomic_load(&A->ValDoNotUse, &V, MO);
  return V;
}

template <typename T>
inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  __atomic_store(&A->ValDoNotUse, &V, MO);
}
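
// Note: the requested ordering is ignored; __sync_synchronize() always emits a
// full barrier, which is at least as strong as any memory_order.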
inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }

template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
}

template <typename T>
inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type R;
  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
  return R;
}
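
// The failure ordering of the compare-exchange below is hardcoded to relaxed;
// callers that need stronger guarantees on the failure path must synchronize
// explicitly.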
template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
                                           typename T::Type Xchg,
                                           memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
                                   __ATOMIC_RELAXED);
}

// Clutter-reducing helpers.

template <typename T>
inline typename T::Type atomic_load_relaxed(const volatile T *A) {
  return atomic_load(A, memory_order_relaxed);
}

template <typename T>
inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
  atomic_store(A, V, memory_order_relaxed);
}
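
// Returns the value the atomic held before the exchange attempt; the exchange
// succeeded iff the returned value equals the Cmp argument.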
template <typename T>
inline typename T::Type atomic_compare_exchange(volatile T *A,
                                                typename T::Type Cmp,
                                                typename T::Type Xchg) {
  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
  return Cmp;
}

} // namespace scudo

#endif // SCUDO_ATOMIC_H_
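
// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the upstream header): a minimal
// test-and-set spin lock built from the helpers above, as it might appear in a
// separate translation unit. `HypotheticalSpinLock` is a made-up name for
// demonstration.

#include "atomic_helpers.h"

namespace scudo {

struct HypotheticalSpinLock {
  atomic_u32 State = {}; // 0 = unlocked, 1 = locked.

  void lock() {
    u32 Unlocked = 0;
    // Acquire ordering on success makes the previous holder's writes visible
    // to this thread before it enters the critical section.
    while (!atomic_compare_exchange_strong(&State, &Unlocked, 1u,
                                           memory_order_acquire))
      Unlocked = 0; // Cmp was overwritten with the observed value; reset it.
  }

  void unlock() {
    // Release ordering publishes this thread's writes to the next holder.
    atomic_store(&State, 0u, memory_order_release);
  }
};

} // namespace scudo
// ----------------------------------------------------------------------------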