#include "aligned_page_pool.h"
#include "util/string/builder.h"

#include <util/generic/yexception.h>
#include <util/string/cast.h>
#include <util/system/align.h>
#include <util/system/compiler.h>
#include <util/system/info.h>
#include <util/system/error.h>
#include <util/thread/lfstack.h>

#if defined(_win_)
#include <util/system/winint.h>
#elif defined(_unix_)
#include <sys/types.h>
#include <sys/mman.h>
#endif

namespace NKikimr {

static ui64 SYS_PAGE_SIZE = NSystemInfo::GetPageSize();

constexpr ui32 MidLevels = 10;
constexpr ui32 MaxMidSize = (1u << MidLevels) * TAlignedPagePool::POOL_PAGE_SIZE;
static_assert(MaxMidSize == 64 * 1024 * 1024, "Upper memory block 64 Mb");
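
// Free memory is cached in MidLevels + 1 global size classes: level i holds blocks of
// POOL_PAGE_SIZE << i bytes, i.e. from a single 64 KB pool page at level 0 up to
// MaxMidSize (64 MB) at level MidLevels. Larger allocations bypass the cache entirely.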

namespace {

template<typename T, bool SysAlign>
class TGlobalPools;
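
// A lock-free cache of equally sized blocks for one size class. Pages are handed out and
// returned without touching the OS; only FreePage (and the destructor) actually unmaps.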
template<typename T, bool SysAlign>
class TGlobalPagePool {
    friend class TGlobalPools<T, SysAlign>;

public:
    TGlobalPagePool(size_t pageSize)
        : PageSize(pageSize)
    {}

    ~TGlobalPagePool() {
        void* addr = nullptr;
        while (Pages.Dequeue(&addr)) {
            FreePage(addr);
        }
    }

    void* GetPage() {
        void* page = nullptr;
        if (Pages.Dequeue(&page)) {
            --Count;
            return page;
        }

        return nullptr;
    }

    ui64 GetPageCount() const {
        return Count.load(std::memory_order_relaxed);
    }

    size_t GetPageSize() const {
        return PageSize;
    }

    size_t GetSize() const {
        return GetPageCount() * GetPageSize();
    }

private:
    size_t PushPage(void* addr) {
#ifdef PROFILE_MEMORY_ALLOCATIONS
        FreePage(addr);
        return GetPageSize();
#else
        ++Count;
        Pages.Enqueue(addr);
        return 0;
#endif
    }

    void FreePage(void* addr) {
        auto res = T::Munmap(addr, PageSize);
        Y_DEBUG_ABORT_UNLESS(0 == res, "Munmap failed: %s", LastSystemErrorText());
    }

private:
    const size_t PageSize;
    std::atomic<ui64> Count = 0;
    TLockFreeStack<void*> Pages;
};
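
// Singleton registry of the per-level page pools. Tracks the total number of bytes
// currently mmapped through it and can trim the cached free lists back to a target size.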
template<typename T, bool SysAlign>
class TGlobalPools {
public:
    static TGlobalPools<T, SysAlign>& Instance() {
        return *Singleton<TGlobalPools<T, SysAlign>>();
    }

    TGlobalPagePool<T, SysAlign>& Get(ui32 index) {
        return *Pools[index];
    }

    const TGlobalPagePool<T, SysAlign>& Get(ui32 index) const {
        return *Pools[index];
    }

    TGlobalPools()
    {
        Reset();
    }

    void* DoMmap(size_t size) {
        void* res = T::Mmap(size);
        TotalMmappedBytes += size;
        return res;
    }

    void DoCleanupFreeList(ui64 targetSize) {
        for (ui32 level = 0; level <= MidLevels; ++level) {
            auto& p = Get(level);
            const size_t pageSize = p.GetPageSize();
            while (p.GetSize() >= targetSize) {
                void* page = p.GetPage();
                if (!page)
                    break;
                p.FreePage(page);
                i64 prev = TotalMmappedBytes.fetch_sub(pageSize);
                Y_DEBUG_ABORT_UNLESS(prev >= 0);
            }
        }
    }

    void PushPage(size_t level, void* addr) {
        auto& pool = Get(level);
        size_t free = pool.PushPage(addr);
        if (Y_UNLIKELY(free > 0)) {
            i64 prev = TotalMmappedBytes.fetch_sub(free);
            Y_DEBUG_ABORT_UNLESS(prev >= 0);
        }
    }

    void DoMunmap(void* addr, size_t size) {
        if (Y_UNLIKELY(0 != T::Munmap(addr, size))) {
            ythrow yexception() << "Munmap(0x"
                << IntToString<16>(reinterpret_cast<uintptr_t>(addr))
                << ", " << size << ") failed: " << LastSystemErrorText();
        }

        i64 prev = TotalMmappedBytes.fetch_sub(size);
        Y_DEBUG_ABORT_UNLESS(prev >= 0);
    }

    i64 GetTotalMmappedBytes() const {
        return TotalMmappedBytes.load();
    }

    i64 GetTotalFreeListBytes() const {
        i64 bytes = 0;
        for (ui32 i = 0; i <= MidLevels; ++i) {
            bytes += Get(i).GetSize();
        }

        return bytes;
    }

    void Reset()
    {
        Pools.clear();
        Pools.reserve(MidLevels + 1);
        for (ui32 i = 0; i <= MidLevels; ++i) {
            Pools.emplace_back(MakeHolder<TGlobalPagePool<T, SysAlign>>(TAlignedPagePool::POOL_PAGE_SIZE << i));
        }
    }

private:
    TVector<THolder<TGlobalPagePool<T, SysAlign>>> Pools;
    std::atomic<i64> TotalMmappedBytes{0};
};

} // unnamed
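
// TSystemMmap: the real memory provider. On Windows it reserves and commits pages with
// VirtualAlloc/VirtualFree; elsewhere it uses anonymous private mmap/munmap.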
#ifdef _win_
#define MAP_FAILED (void*)(-1)
inline void* TSystemMmap::Mmap(size_t size)
{
    if (auto res = ::VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)) {
        return res;
    } else {
        return MAP_FAILED;
    }
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return !::VirtualFree(addr, size, MEM_DECOMMIT);
}
#else
inline void* TSystemMmap::Mmap(size_t size)
{
    // Pass fd == -1 for the anonymous mapping: some platforms require it with MAP_ANON.
    return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_DEBUG_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_DEBUG_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return ::munmap(addr, size);
}
#endif
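
// TFakeAlignedMmap / TFakeUnalignedMmap: fake providers (e.g. for unit tests) that never
// touch the OS. They return fixed aligned/deliberately misaligned addresses and invoke
// the optional OnMmap/OnMunmap hooks so callers can observe the requested sizes.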
std::function<void(size_t size)> TFakeAlignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeAlignedMmap::OnMunmap = {};

void* TFakeAlignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE);
}

int TFakeAlignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}

std::function<void(size_t size)> TFakeUnalignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeUnalignedMmap::OnMunmap = {};

void* TFakeUnalignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE + 1);
}

int TFakeUnalignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}

TAlignedPagePoolCounters::TAlignedPagePoolCounters(::NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
    if (!countersRoot || name.empty())
        return;

    ::NMonitoring::TDynamicCounterPtr subGroup = countersRoot->GetSubgroup("counters", "utils")->GetSubgroup("subsystem", "mkqlalloc");
    TotalBytesAllocatedCntr = subGroup->GetCounter(name + "/TotalBytesAllocated");
    AllocationsCntr = subGroup->GetCounter(name + "/Allocations", true);
    PoolsCntr = subGroup->GetCounter(name + "/Pools", true);
    LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
}
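
// On destruction the pool verifies that everything was returned (debug builds abort on
// leaks), releases any still-active blocks, pushes its pages back to the global level-0
// pool and rolls the monitoring counters back.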
template<typename T>
TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
    if (CheckLostMem && !UncaughtException()) {
        Y_DEBUG_ABORT_UNLESS(TotalAllocated == FreePages.size() * POOL_PAGE_SIZE,
            "memory leak; Expected %ld, actual %ld (%ld page(s), %ld offloaded); allocator created at: %s",
            TotalAllocated, FreePages.size() * POOL_PAGE_SIZE,
            FreePages.size(), OffloadedActiveBytes, GetDebugInfo().data());
        Y_DEBUG_ABORT_UNLESS(OffloadedActiveBytes == 0, "offloaded: %ld", OffloadedActiveBytes);
    }

    size_t activeBlocksSize = 0;
    for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
        activeBlocksSize += it->second;
#ifdef PROFILE_MEMORY_ALLOCATIONS
        ReturnBlock(it->first, it->second);
#else
        Free(it->first, it->second);
#endif
    }

    if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
        if (Counters.LostPagesBytesFreeCntr) {
            (*Counters.LostPagesBytesFreeCntr) += OffloadedActiveBytes + activeBlocksSize + (AllPages.size() - FreePages.size()) * POOL_PAGE_SIZE;
        }
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated == AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes,
        "Expected %ld, actual %ld (%ld page(s))", TotalAllocated,
        AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes, AllPages.size());

    for (auto& ptr : AllPages) {
        TGlobalPools<T, false>::Instance().PushPage(0, ptr);
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= TotalAllocated;
    }
    if (Counters.PoolsCntr) {
        --(*Counters.PoolsCntr);
    }
    TotalAllocated = 0;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReleaseFreePages() {
    TotalAllocated -= FreePages.size() * POOL_PAGE_SIZE;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= FreePages.size() * POOL_PAGE_SIZE;
    }

    for (; !FreePages.empty(); FreePages.pop()) {
        AllPages.erase(FreePages.top());
        TGlobalPools<T, false>::Instance().PushPage(0, FreePages.top());
    }
}
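
// Offloaded allocations are bytes that the pool only accounts for (limit checks, counters,
// peaks) without handing out memory from its own pages.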
template<typename T>
void TAlignedPagePoolImpl<T>::OffloadAlloc(ui64 size) {
    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    ++OffloadedAllocCount;
    OffloadedBytes += size;
    OffloadedActiveBytes += size;
    TotalAllocated += size;
    if (AllocNotifyCallback) {
        AllocNotifyCurrentBytes += size;
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) += size;
    }

    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }

    UpdatePeaks();
}

template<typename T>
void TAlignedPagePoolImpl<T>::OffloadFree(ui64 size) noexcept {
    TotalAllocated -= size;
    OffloadedActiveBytes -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}
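
// GetPage serves a single POOL_PAGE_SIZE page: first from the local free list, then from
// the global level-0 pool, and only then by allocating fresh memory.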
template<typename T>
void* TAlignedPagePoolImpl<T>::GetPage() {
    ++PageAllocCount;
    if (!FreePages.empty()) {
        ++PageHitCount;
        const auto res = FreePages.top();
        FreePages.pop();
        return res;
    }

    if (Limit && TotalAllocated + POOL_PAGE_SIZE > Limit && !TryIncreaseLimit(TotalAllocated + POOL_PAGE_SIZE)) {
        throw TMemoryLimitExceededException();
    }

#ifndef PROFILE_MEMORY_ALLOCATIONS
    if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
        TotalAllocated += POOL_PAGE_SIZE;
        if (AllocNotifyCallback) {
            AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
        }
        if (Counters.TotalBytesAllocatedCntr) {
            (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
        }
        ++PageGlobalHitCount;
        AllPages.emplace(ptr);

        UpdatePeaks();
        return ptr;
    }

    ++PageMissCount;
#endif

#ifdef PROFILE_MEMORY_ALLOCATIONS
    const auto res = GetBlock(POOL_PAGE_SIZE);
#else
    const auto res = Alloc(POOL_PAGE_SIZE);
    AllPages.emplace(res);
#endif

    return res;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
#ifdef PROFILE_MEMORY_ALLOCATIONS
    ReturnBlock(addr, POOL_PAGE_SIZE);
#else
    Y_DEBUG_ABORT_UNLESS(AllPages.find(addr) != AllPages.end());
    FreePages.emplace(addr);
#endif
}

template<typename T>
void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#ifdef PROFILE_MEMORY_ALLOCATIONS
    OffloadAlloc(size);
    auto ret = malloc(size);
    if (!ret) {
        throw TMemoryLimitExceededException();
    }

    return ret;
#else
    if (size == POOL_PAGE_SIZE) {
        return GetPage();
    } else {
        const auto ptr = Alloc(size);
        // Track the block outside of the assert so that bookkeeping is not compiled away
        // together with Y_DEBUG_ABORT_UNLESS in release builds.
        [[maybe_unused]] const bool inserted = ActiveBlocks.emplace(ptr, size).second;
        Y_DEBUG_ABORT_UNLESS(inserted);
        return ptr;
    }
#endif
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#ifdef PROFILE_MEMORY_ALLOCATIONS
    OffloadFree(size);
    free(ptr);
#else
    if (size == POOL_PAGE_SIZE) {
        ReturnPage(ptr);
    } else {
        Free(ptr, size);
        [[maybe_unused]] const bool erased = ActiveBlocks.erase(ptr) != 0;
        Y_DEBUG_ABORT_UNLESS(erased);
    }
#endif
    UpdateMemoryYellowZone();
}
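
// Alloc obtains a POOL_PAGE_SIZE-aligned block: mid-sized requests (up to MaxMidSize) are
// rounded up to a power of two and served from the matching global size class when
// possible; otherwise the block is mmapped with ALLOC_AHEAD_PAGES of slack so the result
// can be aligned, and the surplus whole pages are kept on the local free list.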
template<typename T>
void* TAlignedPagePoolImpl<T>::Alloc(size_t size) {
    void* res = nullptr;
    size = AlignUp(size, SYS_PAGE_SIZE);

    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    auto& globalPool = TGlobalPools<T, false>::Instance();

    if (size > POOL_PAGE_SIZE && size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        if ((res = globalPool.Get(level).GetPage())) {
            TotalAllocated += size;
            if (AllocNotifyCallback) {
                AllocNotifyCurrentBytes += size;
            }
            if (Counters.TotalBytesAllocatedCntr) {
                (*Counters.TotalBytesAllocatedCntr) += size;
            }
            ++PageGlobalHitCount;
        } else {
            ++PageMissCount;
        }
    }

    if (!res) {
        auto allocSize = size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE;
        void* mem = globalPool.DoMmap(allocSize);
        if (Y_UNLIKELY(MAP_FAILED == mem)) {
            ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: " << LastSystemErrorText();
        }

        res = AlignUp(mem, POOL_PAGE_SIZE);
        const size_t off = reinterpret_cast<intptr_t>(res) - reinterpret_cast<intptr_t>(mem);
        if (Y_UNLIKELY(off)) {
            // unmap prefix
            globalPool.DoMunmap(mem, off);
        }

        // Extra space is also page-aligned. Put it to the free page list
        auto alignedSize = AlignUp(size, POOL_PAGE_SIZE);
        ui64 extraPages = (allocSize - off - alignedSize) / POOL_PAGE_SIZE;
        ui64 tail = (allocSize - off - alignedSize) % POOL_PAGE_SIZE;
        auto extraPage = reinterpret_cast<ui8*>(res) + alignedSize;
        for (ui64 i = 0; i < extraPages; ++i) {
            AllPages.emplace(extraPage);
            FreePages.emplace(extraPage);
            extraPage += POOL_PAGE_SIZE;
        }

        if (size != alignedSize) {
            // unmap unaligned hole
            globalPool.DoMunmap(reinterpret_cast<ui8*>(res) + size, alignedSize - size);
        }

        if (tail) {
            // unmap suffix
            Y_DEBUG_ABORT_UNLESS(extraPage + tail <= reinterpret_cast<ui8*>(mem) + size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE);
            globalPool.DoMunmap(extraPage, tail);
        }

        auto extraSize = extraPages * POOL_PAGE_SIZE;
        auto totalSize = size + extraSize;
        TotalAllocated += totalSize;
        if (AllocNotifyCallback) {
            AllocNotifyCurrentBytes += totalSize;
        }
        if (Counters.TotalBytesAllocatedCntr) {
            (*Counters.TotalBytesAllocatedCntr) += totalSize;
        }
    }

    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }
    ++AllocCount;
    UpdatePeaks();
    return res;
}

template<typename T>
void TAlignedPagePoolImpl<T>::Free(void* ptr, size_t size) noexcept {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size <= MaxMidSize)
        size = FastClp2(size);
    if (size <= MaxMidSize) {
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        TGlobalPools<T, false>::Instance().PushPage(level, ptr);
    } else {
        TGlobalPools<T, false>::Instance().DoMunmap(ptr, size);
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated >= size);
    TotalAllocated -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}

template<typename T>
void TAlignedPagePoolImpl<T>::DoCleanupGlobalFreeList(ui64 targetSize) {
    TGlobalPools<T, true>::Instance().DoCleanupFreeList(targetSize);
    TGlobalPools<T, false>::Instance().DoCleanupFreeList(targetSize);
}
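
// The memory "yellow zone" flag is driven with hysteresis: it is raised once usage crosses
// EnableMemoryYellowZoneThreshold percent of the limit and cleared only after usage drops
// back to DisableMemoryYellowZoneThreshold percent or lower.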
template<typename T>
void TAlignedPagePoolImpl<T>::UpdateMemoryYellowZone() {
    if (Limit == 0) return;
    if (IsMemoryYellowZoneForcefullyChanged) return;
    if (IncreaseMemoryLimitCallback && !IsMaximumLimitValueReached) return;

    ui8 usedMemoryPercent = 100 * GetUsed() / Limit;
    if (usedMemoryPercent >= EnableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = true;
    } else if (usedMemoryPercent <= DisableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = false;
    }
}

template<typename T>
bool TAlignedPagePoolImpl<T>::TryIncreaseLimit(ui64 required) {
    if (!IncreaseMemoryLimitCallback) {
        return false;
    }
    IncreaseMemoryLimitCallback(Limit, required);
    return Limit >= required;
}

template<typename T>
ui64 TAlignedPagePoolImpl<T>::GetGlobalPagePoolSize() {
    ui64 size = 0;
    for (size_t level = 0; level <= MidLevels; ++level) {
        size += TGlobalPools<T, false>::Instance().Get(level).GetSize();
    }
    return size;
}

template<typename T>
void TAlignedPagePoolImpl<T>::PrintStat(size_t usedPages, IOutputStream& out) const {
    usedPages += GetFreePageCount();
    out << "Count of free pages: " << GetFreePageCount() << Endl;
    out << "Allocated for blocks: " << (GetAllocated() - usedPages * POOL_PAGE_SIZE) << Endl;
    out << "Total allocated by lists: " << GetAllocated() << Endl;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ResetGlobalsUT()
{
    TGlobalPools<T, false>::Instance().Reset();
}

template class TAlignedPagePoolImpl<>;
template class TAlignedPagePoolImpl<TFakeAlignedMmap>;
template class TAlignedPagePoolImpl<TFakeUnalignedMmap>;
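
// Standalone helpers that work directly with the system-aligned (SysAlign == true) global
// pools: GetAlignedPage/ReleaseAlignedPage allocate and recycle whole cached blocks, and
// GetTotalMmapedBytes/GetTotalFreeListBytes report usage across both pool families.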
template<typename TMmap>
void* GetAlignedPage(ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    auto& pool = TGlobalPools<TMmap, true>::Instance();

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        if (auto res = pool.Get(level).GetPage()) {
            return res;
        }
    }

    auto allocSize = Max<ui64>(MaxMidSize, size);
    void* mem = pool.DoMmap(allocSize);
    if (Y_UNLIKELY(MAP_FAILED == mem)) {
        ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: " << LastSystemErrorText();
    }

    if (size < MaxMidSize) {
        // push extra allocated pages to cache
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        ui8* ptr = (ui8*)mem + size;
        ui8* const end = (ui8*)mem + MaxMidSize;
        while (ptr < end) {
            pool.PushPage(level, ptr);
            ptr += size;
        }
    }

    return mem;
}

template<typename TMmap>
void ReleaseAlignedPage(void* mem, ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        TGlobalPools<TMmap, true>::Instance().PushPage(level, mem);
        return;
    }

    TGlobalPools<TMmap, true>::Instance().DoMunmap(mem, size);
}

template<typename TMmap>
i64 GetTotalMmapedBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalMmappedBytes()
        + TGlobalPools<TMmap, false>::Instance().GetTotalMmappedBytes();
}

template<typename TMmap>
i64 GetTotalFreeListBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalFreeListBytes()
        + TGlobalPools<TMmap, false>::Instance().GetTotalFreeListBytes();
}

template i64 GetTotalMmapedBytes<>();
template i64 GetTotalMmapedBytes<TFakeAlignedMmap>();
template i64 GetTotalMmapedBytes<TFakeUnalignedMmap>();

template i64 GetTotalFreeListBytes<>();
template i64 GetTotalFreeListBytes<TFakeAlignedMmap>();
template i64 GetTotalFreeListBytes<TFakeUnalignedMmap>();

template void* GetAlignedPage<>(ui64);
template void* GetAlignedPage<TFakeAlignedMmap>(ui64);
template void* GetAlignedPage<TFakeUnalignedMmap>(ui64);

template void ReleaseAlignedPage<>(void*, ui64);
template void ReleaseAlignedPage<TFakeAlignedMmap>(void*, ui64);
template void ReleaseAlignedPage<TFakeUnalignedMmap>(void*, ui64);

} // NKikimr