#include "aligned_page_pool.h"

#include <util/generic/yexception.h>
#include <util/stream/file.h>
#include <util/string/cast.h>
#include <util/string/strip.h>
#include <util/system/align.h>
#include <util/system/compiler.h>
#include <util/system/info.h>
#include <util/system/error.h>
#include <util/thread/lfstack.h>

#if defined(_win_)
#include <util/system/winint.h>
#elif defined(_unix_)
#include <sys/types.h>
#include <sys/mman.h>
#endif
namespace NKikimr {

static ui64 SYS_PAGE_SIZE = NSystemInfo::GetPageSize();

constexpr ui32 MidLevels = 10;
constexpr ui32 MaxMidSize = (1u << MidLevels) * TAlignedPagePool::POOL_PAGE_SIZE;
static_assert(MaxMidSize == 64 * 1024 * 1024, "Largest mid-level block must be 64 MB");
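
// The global page cache keeps MidLevels + 1 size classes: level i holds
// blocks of POOL_PAGE_SIZE << i bytes, so level 0 is a single pool page and
// level MidLevels is MaxMidSize. The static_assert above pins MaxMidSize to
// 64 MB, which implies POOL_PAGE_SIZE == 64 KB (64 MB >> 10).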
namespace {

ui64 GetMaxMemoryMaps() {
    ui64 maxMapCount = 0;
#if defined(_unix_)
    maxMapCount = FromString<ui64>(Strip(TFileInput("/proc/sys/vm/max_map_count").ReadAll()));
#endif
    return maxMapCount;
}

TString GetMemoryMapsString() {
    TStringStream ss;
    ss << " (maps: " << GetMemoryMapsCount() << " vs " << GetMaxMemoryMaps() << ")";
    return ss.Str();
}

template<typename T, bool SysAlign>
class TGlobalPools;
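
// A lock-free stack of cached pages of one fixed size (PageSize). Pages are
// parked here instead of being munmap'ed so later allocations of the same
// size class skip the mmap syscall. Under PROFILE_MEMORY_ALLOCATIONS the
// cache is disabled: PushPage unmaps immediately and returns the number of
// bytes given back to the system, which the caller subtracts from the
// process-wide mmap accounting.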
template<typename T, bool SysAlign>
class TGlobalPagePool {
    friend class TGlobalPools<T, SysAlign>;
public:
    TGlobalPagePool(size_t pageSize)
        : PageSize(pageSize)
    {}

    ~TGlobalPagePool() {
        void* addr = nullptr;
        while (Pages.Dequeue(&addr)) {
            FreePage(addr);
        }
    }

    void* GetPage() {
        void* page = nullptr;
        if (Pages.Dequeue(&page)) {
            --Count;
            return page;
        }
        return nullptr;
    }

    ui64 GetPageCount() const {
        return Count.load(std::memory_order_relaxed);
    }

    size_t GetPageSize() const {
        return PageSize;
    }

    size_t GetSize() const {
        return GetPageCount() * GetPageSize();
    }

private:
    size_t PushPage(void* addr) {
#ifdef PROFILE_MEMORY_ALLOCATIONS
        FreePage(addr);
        return GetPageSize();
#else
        ++Count;
        Pages.Enqueue(addr);
        return 0;
#endif
    }

    void FreePage(void* addr) {
        auto res = T::Munmap(addr, PageSize);
        Y_DEBUG_ABORT_UNLESS(0 == res, "Munmap failed: %s", LastSystemErrorText());
    }

private:
    const size_t PageSize;
    std::atomic<ui64> Count = 0;
    TLockFreeStack<void*> Pages;
};
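
// Singleton registry of the per-size-class page pools plus process-wide
// accounting of the bytes currently mmap'ed through it. Within this file the
// SysAlign flag only distinguishes the two singleton instances: the pools
// used internally by TAlignedPagePoolImpl (SysAlign = false) and the ones
// behind the standalone GetAlignedPage/ReleaseAlignedPage helpers
// (SysAlign = true).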
template<typename T, bool SysAlign>
class TGlobalPools {
public:
    static TGlobalPools<T, SysAlign>& Instance() {
        return *Singleton<TGlobalPools<T, SysAlign>>();
    }

    TGlobalPagePool<T, SysAlign>& Get(ui32 index) {
        return *Pools[index];
    }

    const TGlobalPagePool<T, SysAlign>& Get(ui32 index) const {
        return *Pools[index];
    }

    TGlobalPools()
    {
        Reset();
    }

    void* DoMmap(size_t size) {
        void* res = T::Mmap(size);
        TotalMmappedBytes += size;
        return res;
    }

    void DoCleanupFreeList(ui64 targetSize) {
        for (ui32 level = 0; level <= MidLevels; ++level) {
            auto& p = Get(level);
            const size_t pageSize = p.GetPageSize();
            while (p.GetSize() >= targetSize) {
                void* page = p.GetPage();
                if (!page)
                    break;
                p.FreePage(page);
                i64 prev = TotalMmappedBytes.fetch_sub(pageSize);
                Y_DEBUG_ABORT_UNLESS(prev >= 0);
            }
        }
    }

    void PushPage(size_t level, void* addr) {
        auto& pool = Get(level);
        size_t freed = pool.PushPage(addr);
        if (Y_UNLIKELY(freed > 0)) {
            i64 prev = TotalMmappedBytes.fetch_sub(freed);
            Y_DEBUG_ABORT_UNLESS(prev >= 0);
        }
    }

    void DoMunmap(void* addr, size_t size) {
        if (Y_UNLIKELY(0 != T::Munmap(addr, size))) {
            TStringStream mmaps;
            const auto lastError = LastSystemError();
            if (lastError == ENOMEM) {
                mmaps << GetMemoryMapsString();
            }
            ythrow yexception() << "Munmap(0x"
                << IntToString<16>(reinterpret_cast<uintptr_t>(addr))
                << ", " << size << ") failed: " << LastSystemErrorText(lastError) << mmaps.Str();
        }
        i64 prev = TotalMmappedBytes.fetch_sub(size);
        Y_DEBUG_ABORT_UNLESS(prev >= 0);
    }

    i64 GetTotalMmappedBytes() const {
        return TotalMmappedBytes.load();
    }

    i64 GetTotalFreeListBytes() const {
        i64 bytes = 0;
        for (ui32 i = 0; i <= MidLevels; ++i) {
            bytes += Get(i).GetSize();
        }
        return bytes;
    }

    void Reset()
    {
        Pools.clear();
        Pools.reserve(MidLevels + 1);
        for (ui32 i = 0; i <= MidLevels; ++i) {
            Pools.emplace_back(MakeHolder<TGlobalPagePool<T, SysAlign>>(TAlignedPagePool::POOL_PAGE_SIZE << i));
        }
    }

private:
    TVector<THolder<TGlobalPagePool<T, SysAlign>>> Pools;
    std::atomic<i64> TotalMmappedBytes{0};
};

} // unnamed namespace
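
// Platform backends: VirtualAlloc/VirtualFree on Windows, mmap/munmap
// elsewhere. Both Munmap implementations assert that the address and the
// size are SYS_PAGE_SIZE-aligned before releasing the range.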
#ifdef _win_
#define MAP_FAILED (void*)(-1)

inline void* TSystemMmap::Mmap(size_t size)
{
    if (auto res = ::VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)) {
        return res;
    } else {
        return MAP_FAILED;
    }
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return !::VirtualFree(addr, size, MEM_DECOMMIT);
}
#else
inline void* TSystemMmap::Mmap(size_t size)
{
    return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_DEBUG_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_DEBUG_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return ::munmap(addr, size);
}
#endif
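
// Test doubles: the fake backends never touch real memory. They report each
// call through the optional OnMmap/OnMunmap hooks and return a fixed pointer
// (pool-page-aligned for TFakeAlignedMmap, off by one byte for
// TFakeUnalignedMmap) so unit tests can exercise both alignment paths.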
std::function<void(size_t size)> TFakeAlignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeAlignedMmap::OnMunmap = {};

void* TFakeAlignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE);
}

int TFakeAlignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}

std::function<void(size_t size)> TFakeUnalignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeUnalignedMmap::OnMunmap = {};

void* TFakeUnalignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE + 1);
}

int TFakeUnalignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}
TAlignedPagePoolCounters::TAlignedPagePoolCounters(::NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
    if (!countersRoot || name.empty())
        return;
    ::NMonitoring::TDynamicCounterPtr subGroup = countersRoot->GetSubgroup("counters", "utils")->GetSubgroup("subsystem", "mkqlalloc");
    TotalBytesAllocatedCntr = subGroup->GetCounter(name + "/TotalBytesAllocated");
    AllocationsCntr = subGroup->GetCounter(name + "/Allocations", true);
    PoolsCntr = subGroup->GetCounter(name + "/Pools", true);
    LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
}
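
// The destructor doubles as a leak detector: in debug builds it asserts that
// everything accounted in TotalAllocated is either on the free list or
// tracked as offloaded, then drains any still-active blocks, bumps the
// "lost pages" counter for whatever was not returned, and hands every pool
// page back to the level-0 global free list.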
template<typename T>
TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
    if (CheckLostMem && !UncaughtException()) {
        Y_DEBUG_ABORT_UNLESS(TotalAllocated == FreePages.size() * POOL_PAGE_SIZE,
            "memory leak; Expected %ld, actual %ld (%ld page(s), %ld offloaded); allocator created at: %s",
            TotalAllocated, FreePages.size() * POOL_PAGE_SIZE,
            FreePages.size(), OffloadedActiveBytes, GetDebugInfo().data());
        Y_DEBUG_ABORT_UNLESS(OffloadedActiveBytes == 0, "offloaded: %ld", OffloadedActiveBytes);
    }

    size_t activeBlocksSize = 0;
    for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
        activeBlocksSize += it->second;
#ifdef PROFILE_MEMORY_ALLOCATIONS
        ReturnBlock(it->first, it->second);
#else
        Free(it->first, it->second);
#endif
    }

    if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
        if (Counters.LostPagesBytesFreeCntr) {
            (*Counters.LostPagesBytesFreeCntr) += OffloadedActiveBytes + activeBlocksSize + (AllPages.size() - FreePages.size()) * POOL_PAGE_SIZE;
        }
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated == AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes,
        "Expected %ld, actual %ld (%ld page(s))", TotalAllocated,
        AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes, AllPages.size());

    for (auto& ptr : AllPages) {
        TGlobalPools<T, false>::Instance().PushPage(0, ptr);
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= TotalAllocated;
    }
    if (Counters.PoolsCntr) {
        --(*Counters.PoolsCntr);
    }
    TotalAllocated = 0;
}
template<typename T>
void TAlignedPagePoolImpl<T>::ReleaseFreePages() {
    TotalAllocated -= FreePages.size() * POOL_PAGE_SIZE;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= FreePages.size() * POOL_PAGE_SIZE;
    }

    for (; !FreePages.empty(); FreePages.pop()) {
        AllPages.erase(FreePages.top());
        TGlobalPools<T, false>::Instance().PushPage(0, FreePages.top());
    }
}
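
// "Offloaded" allocations are bytes this pool accounts for without serving
// them from its own pages (in this file, the malloc-backed blocks used when
// PROFILE_MEMORY_ALLOCATIONS is defined). They participate in the limit check
// and the AllocNotify throttling exactly like pooled memory, but never enter
// AllPages/FreePages.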
template<typename T>
void TAlignedPagePoolImpl<T>::OffloadAlloc(ui64 size) {
    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    ++OffloadedAllocCount;
    OffloadedBytes += size;
    OffloadedActiveBytes += size;
    TotalAllocated += size;
    if (AllocNotifyCallback) {
        AllocNotifyCurrentBytes += size;
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) += size;
    }
    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }
    UpdatePeaks();
}

template<typename T>
void TAlignedPagePoolImpl<T>::OffloadFree(ui64 size) noexcept {
    TotalAllocated -= size;
    OffloadedActiveBytes -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}
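
// Page lookup order: the pool's own free list first (a hit), then the
// level-0 global cache (a global hit), and only then a fresh Alloc (a miss);
// PageHitCount/PageGlobalHitCount/PageMissCount record which path served
// each request.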
template<typename T>
void* TAlignedPagePoolImpl<T>::GetPage() {
    ++PageAllocCount;
    if (!FreePages.empty()) {
        ++PageHitCount;
        const auto res = FreePages.top();
        FreePages.pop();
        return res;
    }

    if (Limit && TotalAllocated + POOL_PAGE_SIZE > Limit && !TryIncreaseLimit(TotalAllocated + POOL_PAGE_SIZE)) {
        throw TMemoryLimitExceededException();
    }

#ifndef PROFILE_MEMORY_ALLOCATIONS
    if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
        TotalAllocated += POOL_PAGE_SIZE;
        if (AllocNotifyCallback) {
            AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
        }
        if (Counters.TotalBytesAllocatedCntr) {
            (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
        }
        ++PageGlobalHitCount;
        AllPages.emplace(ptr);

        UpdatePeaks();
        return ptr;
    }

    ++PageMissCount;
#endif

#ifdef PROFILE_MEMORY_ALLOCATIONS
    const auto res = GetBlock(POOL_PAGE_SIZE);
#else
    const auto res = Alloc(POOL_PAGE_SIZE);
    AllPages.emplace(res);
#endif

    return res;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
#ifdef PROFILE_MEMORY_ALLOCATIONS
    ReturnBlock(addr, POOL_PAGE_SIZE);
#else
    Y_DEBUG_ABORT_UNLESS(AllPages.find(addr) != AllPages.end());
    FreePages.emplace(addr);
#endif
}
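
// Blocks are the sized counterpart of pages: a request of exactly
// POOL_PAGE_SIZE bytes is served as a plain page, anything larger goes
// through Alloc/Free. Oversized blocks are recorded in ActiveBlocks (inside
// the debug assertions) so the destructor can reclaim and report anything
// still outstanding.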
template<typename T>
void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#ifdef PROFILE_MEMORY_ALLOCATIONS
    OffloadAlloc(size);
    auto ret = malloc(size);
    if (!ret) {
        throw TMemoryLimitExceededException();
    }
    return ret;
#else
    if (size == POOL_PAGE_SIZE) {
        return GetPage();
    } else {
        const auto ptr = Alloc(size);
        Y_DEBUG_ABORT_UNLESS(ActiveBlocks.emplace(ptr, size).second);
        return ptr;
    }
#endif
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#ifdef PROFILE_MEMORY_ALLOCATIONS
    OffloadFree(size);
    free(ptr);
#else
    if (size == POOL_PAGE_SIZE) {
        ReturnPage(ptr);
    } else {
        Free(ptr, size);
        Y_DEBUG_ABORT_UNLESS(ActiveBlocks.erase(ptr));
    }
#endif
    UpdateMemoryYellowZone();
}
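
// Alloc rounds mid-sized requests up to a power of two and tries the matching
// global pool level first. On a miss it mmaps ALLOC_AHEAD_PAGES extra pool
// pages beyond the request so a POOL_PAGE_SIZE-aligned block can always be
// carved out of the mapping: the unaligned prefix is unmapped, whole leftover
// pages are donated to this pool's free list, and the sub-page tail is
// unmapped as well.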
template<typename T>
void* TAlignedPagePoolImpl<T>::Alloc(size_t size) {
    void* res = nullptr;
    size = AlignUp(size, SYS_PAGE_SIZE);

    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    auto& globalPool = TGlobalPools<T, false>::Instance();

    if (size > POOL_PAGE_SIZE && size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        if ((res = globalPool.Get(level).GetPage())) {
            TotalAllocated += size;
            if (AllocNotifyCallback) {
                AllocNotifyCurrentBytes += size;
            }
            if (Counters.TotalBytesAllocatedCntr) {
                (*Counters.TotalBytesAllocatedCntr) += size;
            }
            ++PageGlobalHitCount;
        } else {
            ++PageMissCount;
        }
    }

    if (!res) {
        auto allocSize = size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE;
        void* mem = globalPool.DoMmap(allocSize);
        if (Y_UNLIKELY(MAP_FAILED == mem)) {
            TStringStream mmaps;
            const auto lastError = LastSystemError();
            if (lastError == ENOMEM) {
                mmaps << GetMemoryMapsString();
            }
            ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: "
                << LastSystemErrorText(lastError) << mmaps.Str();
        }

        res = AlignUp(mem, POOL_PAGE_SIZE);
        const size_t off = reinterpret_cast<intptr_t>(res) - reinterpret_cast<intptr_t>(mem);
        if (Y_UNLIKELY(off)) {
            // unmap the unaligned prefix
            globalPool.DoMunmap(mem, off);
        }

        // The extra space is also page-aligned; put it on the free page list.
        auto alignedSize = AlignUp(size, POOL_PAGE_SIZE);
        ui64 extraPages = (allocSize - off - alignedSize) / POOL_PAGE_SIZE;
        ui64 tail = (allocSize - off - alignedSize) % POOL_PAGE_SIZE;
        auto extraPage = reinterpret_cast<ui8*>(res) + alignedSize;
        for (ui64 i = 0; i < extraPages; ++i) {
            AllPages.emplace(extraPage);
            FreePages.emplace(extraPage);
            extraPage += POOL_PAGE_SIZE;
        }

        if (size != alignedSize) {
            // unmap the unaligned hole between the block and the first extra page
            globalPool.DoMunmap(reinterpret_cast<ui8*>(res) + size, alignedSize - size);
        }

        if (tail) {
            // unmap the sub-page suffix
            Y_DEBUG_ABORT_UNLESS(extraPage + tail <= reinterpret_cast<ui8*>(mem) + allocSize);
            globalPool.DoMunmap(extraPage, tail);
        }

        auto extraSize = extraPages * POOL_PAGE_SIZE;
        auto totalSize = size + extraSize;
        TotalAllocated += totalSize;
        if (AllocNotifyCallback) {
            AllocNotifyCurrentBytes += totalSize;
        }
        if (Counters.TotalBytesAllocatedCntr) {
            (*Counters.TotalBytesAllocatedCntr) += totalSize;
        }
    }

    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }
    ++AllocCount;
    UpdatePeaks();
    return res;
}
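
// Free is the inverse of Alloc: mid-sized blocks go back to the matching
// global pool level instead of being unmapped, larger ones are munmap'ed
// immediately. MaxMidSize is itself a power of two, so FastClp2 cannot round
// a mid-sized block past the limit.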
template<typename T>
void TAlignedPagePoolImpl<T>::Free(void* ptr, size_t size) noexcept {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        TGlobalPools<T, false>::Instance().PushPage(level, ptr);
    } else {
        TGlobalPools<T, false>::Instance().DoMunmap(ptr, size);
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated >= size);
    TotalAllocated -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}
template<typename T>
void TAlignedPagePoolImpl<T>::DoCleanupGlobalFreeList(ui64 targetSize) {
    TGlobalPools<T, true>::Instance().DoCleanupFreeList(targetSize);
    TGlobalPools<T, false>::Instance().DoCleanupFreeList(targetSize);
}
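
// The yellow zone flag implements hysteresis: it turns on once usage crosses
// EnableMemoryYellowZoneThreshold percent of the limit and only turns off
// again after usage falls to DisableMemoryYellowZoneThreshold percent, which
// keeps the flag from flapping near the boundary.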
template<typename T>
void TAlignedPagePoolImpl<T>::UpdateMemoryYellowZone() {
    if (Limit == 0) return;
    if (IsMemoryYellowZoneForcefullyChanged) return;
    if (IncreaseMemoryLimitCallback && !IsMaximumLimitValueReached) return;

    ui8 usedMemoryPercent = 100 * GetUsed() / Limit;
    if (usedMemoryPercent >= EnableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = true;
    } else if (usedMemoryPercent <= DisableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = false;
    }
}

template<typename T>
bool TAlignedPagePoolImpl<T>::TryIncreaseLimit(ui64 required) {
    if (!IncreaseMemoryLimitCallback) {
        return false;
    }
    IncreaseMemoryLimitCallback(Limit, required);
    return Limit >= required;
}

template<typename T>
ui64 TAlignedPagePoolImpl<T>::GetGlobalPagePoolSize() {
    ui64 size = 0;
    for (size_t level = 0; level <= MidLevels; ++level) {
        size += TGlobalPools<T, false>::Instance().Get(level).GetSize();
    }
    return size;
}

template<typename T>
void TAlignedPagePoolImpl<T>::PrintStat(size_t usedPages, IOutputStream& out) const {
    usedPages += GetFreePageCount();
    out << "Count of free pages: " << GetFreePageCount() << Endl;
    out << "Allocated for blocks: " << (GetAllocated() - usedPages * POOL_PAGE_SIZE) << Endl;
    out << "Total allocated by lists: " << GetAllocated() << Endl;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ResetGlobalsUT()
{
    TGlobalPools<T, false>::Instance().Reset();
}

template class TAlignedPagePoolImpl<>;
template class TAlignedPagePoolImpl<TFakeAlignedMmap>;
template class TAlignedPagePoolImpl<TFakeUnalignedMmap>;
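
// Standalone helpers that bypass any TAlignedPagePoolImpl instance and work
// directly with the SysAlign = true global pools. GetAlignedPage maps at
// least MaxMidSize on a cache miss and, for smaller requests, slices the
// rest of the mapping into same-sized pages that are pushed to the cache.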
template<typename TMmap>
void* GetAlignedPage(ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    auto& pool = TGlobalPools<TMmap, true>::Instance();

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        if (auto res = pool.Get(level).GetPage()) {
            return res;
        }
    }

    auto allocSize = Max<ui64>(MaxMidSize, size);
    void* mem = pool.DoMmap(allocSize);
    if (Y_UNLIKELY(MAP_FAILED == mem)) {
        TStringStream mmaps;
        const auto lastError = LastSystemError();
        if (lastError == ENOMEM) {
            mmaps << GetMemoryMapsString();
        }
        ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: " << LastSystemErrorText(lastError) << mmaps.Str();
    }

    if (size < MaxMidSize) {
        // Push the extra allocated pages to the cache.
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        ui8* ptr = (ui8*)mem + size;
        ui8* const end = (ui8*)mem + MaxMidSize;
        while (ptr < end) {
            pool.PushPage(level, ptr);
            ptr += size;
        }
    }

    return mem;
}
template<typename TMmap>
void ReleaseAlignedPage(void* mem, ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        TGlobalPools<TMmap, true>::Instance().PushPage(level, mem);
        return;
    }

    TGlobalPools<TMmap, true>::Instance().DoMunmap(mem, size);
}
template<typename TMmap>
i64 GetTotalMmapedBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalMmappedBytes() +
           TGlobalPools<TMmap, false>::Instance().GetTotalMmappedBytes();
}

template<typename TMmap>
i64 GetTotalFreeListBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalFreeListBytes() +
           TGlobalPools<TMmap, false>::Instance().GetTotalFreeListBytes();
}

template i64 GetTotalMmapedBytes<>();
template i64 GetTotalMmapedBytes<TFakeAlignedMmap>();
template i64 GetTotalMmapedBytes<TFakeUnalignedMmap>();

template i64 GetTotalFreeListBytes<>();
template i64 GetTotalFreeListBytes<TFakeAlignedMmap>();
template i64 GetTotalFreeListBytes<TFakeUnalignedMmap>();

template void* GetAlignedPage<>(ui64);
template void* GetAlignedPage<TFakeAlignedMmap>(ui64);
template void* GetAlignedPage<TFakeUnalignedMmap>(ui64);

template void ReleaseAlignedPage<>(void*, ui64);
template void ReleaseAlignedPage<TFakeAlignedMmap>(void*, ui64);
template void ReleaseAlignedPage<TFakeUnalignedMmap>(void*, ui64);
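
// Each line of /proc/self/maps describes one mapping, so the line count is
// the number of VMAs the process currently holds; the ENOMEM diagnostics
// above compare it against the vm.max_map_count ceiling. On non-unix
// platforms this returns 0.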
size_t GetMemoryMapsCount() {
    size_t lineCount = 0;
    TString line;
#if defined(_unix_)
    TFileInput file("/proc/self/maps");
    while (file.ReadLine(line)) {
        ++lineCount;
    }
#endif
    return lineCount;
}

} // namespace NKikimr