aligned_page_pool.cpp
  1. #include "aligned_page_pool.h"
  2. #include <util/generic/yexception.h>
  3. #include <util/stream/file.h>
  4. #include <util/string/cast.h>
  5. #include <util/string/strip.h>
  6. #include <util/system/align.h>
  7. #include <util/system/compiler.h>
  8. #include <util/system/error.h>
  9. #include <util/system/info.h>
  10. #include <util/thread/lfstack.h>
  11. #if defined(_win_)
  12. # include <util/system/winint.h>
  13. #elif defined(_unix_)
  14. # include <sys/types.h>
  15. # include <sys/mman.h>
  16. #endif
namespace NKikimr {

#if defined(ALLOW_DEFAULT_ALLOCATOR)
#   if defined(PROFILE_MEMORY_ALLOCATIONS)
static bool IsDefaultAllocator = true;
#   else
static bool IsDefaultAllocator = false;
#   endif

void UseDefaultAllocator() {
    // TODO: check that the MKQL allocator wasn't already used
    IsDefaultAllocator = true;
}
#endif

static ui64 SYS_PAGE_SIZE = NSystemInfo::GetPageSize();
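
// The global free lists are organized as power-of-two size levels: level i
// caches blocks of POOL_PAGE_SIZE << i bytes, for i in [0, MidLevels]. With the
// 64 KB pool page implied by the static_assert below, the largest cached
// ("mid-size") block is 64 MB; anything larger is mapped and unmapped directly.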
constexpr ui32 MidLevels = 10;
constexpr ui32 MaxMidSize = (1u << MidLevels) * TAlignedPagePool::POOL_PAGE_SIZE;
static_assert(MaxMidSize == 64 * 1024 * 1024, "Upper memory block 64 Mb");

namespace {

ui64 GetMaxMemoryMaps() {
    ui64 maxMapCount = 0;
#if defined(_unix_)
    maxMapCount = FromString<ui64>(Strip(TFileInput("/proc/sys/vm/max_map_count").ReadAll()));
#endif
    return maxMapCount;
}

TString GetMemoryMapsString() {
    TStringStream ss;
    ss << " (maps: " << GetMemoryMapsCount() << " vs " << GetMaxMemoryMaps() << ")";
    return ss.Str();
}

template<typename T, bool SysAlign>
class TGlobalPools;
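
// A process-wide, lock-free cache of equally sized memory regions. Pages pushed
// here stay mmap'ed and are handed out to other pools instead of being returned
// to the OS; they are only unmapped when the pool itself is destroyed or when
// the free list is explicitly trimmed via TGlobalPools::DoCleanupFreeList().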
template<typename T, bool SysAlign>
class TGlobalPagePool {
    friend class TGlobalPools<T, SysAlign>;
public:
    TGlobalPagePool(size_t pageSize)
        : PageSize(pageSize)
    {}

    ~TGlobalPagePool() {
        void* addr = nullptr;
        while (Pages.Dequeue(&addr)) {
            FreePage(addr);
        }
    }

    void* GetPage() {
        void* page = nullptr;
        if (Pages.Dequeue(&page)) {
            --Count;
            return page;
        }
        return nullptr;
    }

    ui64 GetPageCount() const {
        return Count.load(std::memory_order_relaxed);
    }

    size_t GetPageSize() const {
        return PageSize;
    }

    size_t GetSize() const {
        return GetPageCount() * GetPageSize();
    }

private:
    size_t PushPage(void* addr) {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        if (Y_UNLIKELY(IsDefaultAllocator)) {
            FreePage(addr);
            return GetPageSize();
        }
#endif
        ++Count;
        Pages.Enqueue(addr);
        return 0;
    }

    void FreePage(void* addr) {
        auto res = T::Munmap(addr, PageSize);
        Y_DEBUG_ABORT_UNLESS(0 == res, "Munmap failed: %s", LastSystemErrorText());
    }

private:
    const size_t PageSize;
    std::atomic<ui64> Count = 0;
    TLockFreeStack<void*> Pages;
};
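
// Singleton owning one TGlobalPagePool per size level (POOL_PAGE_SIZE << level,
// level in [0, MidLevels]) and tracking the total mmap'ed byte count. Within
// this file the SysAlign parameter only distinguishes the two singleton
// instances: the "false" instance backs TAlignedPagePoolImpl, the "true" one
// backs the standalone GetAlignedPage()/ReleaseAlignedPage() helpers.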
template<typename T, bool SysAlign>
class TGlobalPools {
public:
    static TGlobalPools<T, SysAlign>& Instance() {
        return *Singleton<TGlobalPools<T, SysAlign>>();
    }

    TGlobalPagePool<T, SysAlign>& Get(ui32 index) {
        return *Pools[index];
    }

    const TGlobalPagePool<T, SysAlign>& Get(ui32 index) const {
        return *Pools[index];
    }

    TGlobalPools()
    {
        Reset();
    }

    void* DoMmap(size_t size) {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        // No memory maps allowed while using default allocator
        Y_DEBUG_ABORT_UNLESS(!IsDefaultAllocator);
#endif
        void* res = T::Mmap(size);
        TotalMmappedBytes += size;
        return res;
    }
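
    // Trims every level's free list: pages are popped and unmapped until the
    // bytes cached at that level drop below targetSize (or the list is empty).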
    void DoCleanupFreeList(ui64 targetSize) {
        for (ui32 level = 0; level <= MidLevels; ++level) {
            auto& p = Get(level);
            const size_t pageSize = p.GetPageSize();
            while (p.GetSize() >= targetSize) {
                void* page = p.GetPage();
                if (!page)
                    break;
                p.FreePage(page);
                i64 prev = TotalMmappedBytes.fetch_sub(pageSize);
                Y_DEBUG_ABORT_UNLESS(prev >= 0);
            }
        }
    }

    void PushPage(size_t level, void* addr) {
        auto& pool = Get(level);
        size_t free = pool.PushPage(addr);
        if (Y_UNLIKELY(free > 0)) {
            i64 prev = TotalMmappedBytes.fetch_sub(free);
            Y_DEBUG_ABORT_UNLESS(prev >= 0);
        }
    }

    void DoMunmap(void* addr, size_t size) {
        if (Y_UNLIKELY(0 != T::Munmap(addr, size))) {
            TStringStream mmaps;
            const auto lastError = LastSystemError();
            if (lastError == ENOMEM) {
                mmaps << GetMemoryMapsString();
            }
            ythrow yexception() << "Munmap(0x"
                << IntToString<16>(reinterpret_cast<uintptr_t>(addr))
                << ", " << size << ") failed: " << LastSystemErrorText(lastError) << mmaps.Str();
        }
        i64 prev = TotalMmappedBytes.fetch_sub(size);
        Y_DEBUG_ABORT_UNLESS(prev >= 0);
    }

    i64 GetTotalMmappedBytes() const {
        return TotalMmappedBytes.load();
    }

    i64 GetTotalFreeListBytes() const {
        i64 bytes = 0;
        for (ui32 i = 0; i <= MidLevels; ++i) {
            bytes += Get(i).GetSize();
        }
        return bytes;
    }

    void Reset()
    {
        Pools.clear();
        Pools.reserve(MidLevels + 1);
        for (ui32 i = 0; i <= MidLevels; ++i) {
            Pools.emplace_back(MakeHolder<TGlobalPagePool<T, SysAlign>>(TAlignedPagePool::POOL_PAGE_SIZE << i));
        }
    }

private:
    TVector<THolder<TGlobalPagePool<T, SysAlign>>> Pools;
    std::atomic<i64> TotalMmappedBytes{0};
};

} // unnamed namespace
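
// Platform wrappers used as the default TMmap implementation: VirtualAlloc /
// VirtualFree on Windows, anonymous private mmap/munmap elsewhere. Both report
// failure the mmap way (MAP_FAILED from Mmap, non-zero from Munmap).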
#ifdef _win_
#define MAP_FAILED (void*)(-1)
inline void* TSystemMmap::Mmap(size_t size)
{
    if (auto res = ::VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)) {
        return res;
    } else {
        return MAP_FAILED;
    }
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return !::VirtualFree(addr, size, MEM_DECOMMIT);
}
#else
inline void* TSystemMmap::Mmap(size_t size)
{
    // Pass fd = -1: POSIX leaves the fd unspecified for anonymous mappings and
    // some platforms reject anything else.
    return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_DEBUG_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_DEBUG_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return ::munmap(addr, size);
}
#endif
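
// Test doubles that never touch the OS: TFakeAlignedMmap returns a
// POOL_PAGE_SIZE-aligned fake address, TFakeUnalignedMmap a misaligned one, so
// unit tests can exercise the alignment-fixup paths in Alloc(). The On* hooks
// let tests observe the requested sizes and addresses.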
std::function<void(size_t size)> TFakeAlignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeAlignedMmap::OnMunmap = {};

void* TFakeAlignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE);
}

int TFakeAlignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}

std::function<void(size_t size)> TFakeUnalignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeUnalignedMmap::OnMunmap = {};

void* TFakeUnalignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE + 1);
}

int TFakeUnalignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}

TAlignedPagePoolCounters::TAlignedPagePoolCounters(::NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
    if (!countersRoot || name.empty())
        return;
    ::NMonitoring::TDynamicCounterPtr subGroup = countersRoot->GetSubgroup("counters", "utils")->GetSubgroup("subsystem", "mkqlalloc");
    TotalBytesAllocatedCntr = subGroup->GetCounter(name + "/TotalBytesAllocated");
    AllocationsCntr = subGroup->GetCounter(name + "/Allocations", true);
    PoolsCntr = subGroup->GetCounter(name + "/Pools", true);
    LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
}
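
// TAlignedPagePoolImpl method definitions follow. A minimal usage sketch; the
// constructor signature lives in the header and is not shown here, hence the
// placeholder:
//
//     // TAlignedPagePool pool{/* counters, limit, ... */};
//     void* page = pool.GetPage();   // one POOL_PAGE_SIZE page
//     pool.ReturnPage(page);         // back to this pool's local free list
//     pool.ReleaseFreePages();       // optionally hand cached pages back to
//                                    // the global level-0 pool
//
// The destructor verifies that no memory leaked (unless an exception is
// unwinding), frees any still-active blocks, accounts lost pages, and returns
// all pool pages to the global level-0 pool instead of unmapping them.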
template<typename T>
TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
    if (CheckLostMem && !UncaughtException()) {
        Y_DEBUG_ABORT_UNLESS(TotalAllocated == FreePages.size() * POOL_PAGE_SIZE,
                             "memory leak; Expected %ld, actual %ld (%ld page(s), %ld offloaded); allocator created at: %s",
                             TotalAllocated, FreePages.size() * POOL_PAGE_SIZE,
                             FreePages.size(), OffloadedActiveBytes, GetDebugInfo().data());
        Y_DEBUG_ABORT_UNLESS(OffloadedActiveBytes == 0, "offloaded: %ld", OffloadedActiveBytes);
    }

    size_t activeBlocksSize = 0;
    for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
        activeBlocksSize += it->second;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        if (Y_UNLIKELY(IsDefaultAllocator)) {
            ReturnBlock(it->first, it->second);
            return;
        }
#endif
        Free(it->first, it->second);
    }

    if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
        if (Counters.LostPagesBytesFreeCntr) {
            (*Counters.LostPagesBytesFreeCntr) += OffloadedActiveBytes + activeBlocksSize + (AllPages.size() - FreePages.size()) * POOL_PAGE_SIZE;
        }
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated == AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes,
                         "Expected %ld, actual %ld (%ld page(s))", TotalAllocated,
                         AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes, AllPages.size());

    for (auto& ptr : AllPages) {
        TGlobalPools<T, false>::Instance().PushPage(0, ptr);
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= TotalAllocated;
    }
    if (Counters.PoolsCntr) {
        --(*Counters.PoolsCntr);
    }
    TotalAllocated = 0;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReleaseFreePages() {
    TotalAllocated -= FreePages.size() * POOL_PAGE_SIZE;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= FreePages.size() * POOL_PAGE_SIZE;
    }
    for (; !FreePages.empty(); FreePages.pop()) {
        AllPages.erase(FreePages.top());
        TGlobalPools<T, false>::Instance().PushPage(0, FreePages.top());
    }
}

template<typename T>
void TAlignedPagePoolImpl<T>::OffloadAlloc(ui64 size) {
    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    ++OffloadedAllocCount;
    OffloadedBytes += size;
    OffloadedActiveBytes += size;
    TotalAllocated += size;
    if (AllocNotifyCallback) {
        AllocNotifyCurrentBytes += size;
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) += size;
    }
    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }

    UpdatePeaks();
}

template<typename T>
void TAlignedPagePoolImpl<T>::OffloadFree(ui64 size) noexcept {
    TotalAllocated -= size;
    OffloadedActiveBytes -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}
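
// Page allocation fast path: take a page from the local free list if possible,
// then from the level-0 global pool, and only then fall back to a fresh
// Alloc() of exactly POOL_PAGE_SIZE (or GetBlock() when the default allocator
// is active).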
template<typename T>
void* TAlignedPagePoolImpl<T>::GetPage() {
    ++PageAllocCount;
    if (!FreePages.empty()) {
        ++PageHitCount;
        const auto res = FreePages.top();
        FreePages.pop();
        return res;
    }

    if (Limit && TotalAllocated + POOL_PAGE_SIZE > Limit && !TryIncreaseLimit(TotalAllocated + POOL_PAGE_SIZE)) {
        throw TMemoryLimitExceededException();
    }

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_LIKELY(!IsDefaultAllocator)) {
#endif
        if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
            TotalAllocated += POOL_PAGE_SIZE;
            if (AllocNotifyCallback) {
                AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
            }
            if (Counters.TotalBytesAllocatedCntr) {
                (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
            }
            ++PageGlobalHitCount;
            AllPages.emplace(ptr);

            UpdatePeaks();
            return ptr;
        }

        ++PageMissCount;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    }
#endif

    void* res;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        res = GetBlock(POOL_PAGE_SIZE);
    } else {
#endif
        res = Alloc(POOL_PAGE_SIZE);
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    }
#endif

    AllPages.emplace(res);
    return res;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        ReturnBlock(addr, POOL_PAGE_SIZE);
        return;
    }
#endif
    Y_DEBUG_ABORT_UNLESS(AllPages.find(addr) != AllPages.end());
    FreePages.emplace(addr);
}

template<typename T>
void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        OffloadAlloc(size);
        auto ret = malloc(size);
        if (!ret) {
            throw TMemoryLimitExceededException();
        }
        return ret;
    }
#endif

    if (size == POOL_PAGE_SIZE) {
        return GetPage();
    } else {
        const auto ptr = Alloc(size);
        Y_DEBUG_ABORT_UNLESS(ActiveBlocks.emplace(ptr, size).second);
        return ptr;
    }
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        OffloadFree(size);
        free(ptr);
        UpdateMemoryYellowZone();
        return;
    }
#endif

    if (size == POOL_PAGE_SIZE) {
        ReturnPage(ptr);
    } else {
        Free(ptr, size);
        Y_DEBUG_ABORT_UNLESS(ActiveBlocks.erase(ptr));
    }
    UpdateMemoryYellowZone();
}
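
// Slow-path allocation. The size is rounded up to the system page size;
// mid-size requests (<= MaxMidSize) are additionally rounded to a power of two
// and served from the matching global level pool when possible. Otherwise the
// pool mmaps ALLOC_AHEAD_PAGES extra pool pages, aligns the result up to
// POOL_PAGE_SIZE, unmaps the misaligned prefix and suffix, and stashes the
// whole leftover pages in the local free list to amortize future GetPage()
// calls.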
template<typename T>
void* TAlignedPagePoolImpl<T>::Alloc(size_t size) {
    void* res = nullptr;
    size = AlignUp(size, SYS_PAGE_SIZE);

    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    auto& globalPool = TGlobalPools<T, false>::Instance();

    if (size > POOL_PAGE_SIZE && size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        if ((res = globalPool.Get(level).GetPage())) {
            TotalAllocated += size;
            if (AllocNotifyCallback) {
                AllocNotifyCurrentBytes += size;
            }
            if (Counters.TotalBytesAllocatedCntr) {
                (*Counters.TotalBytesAllocatedCntr) += size;
            }
            ++PageGlobalHitCount;
        } else {
            ++PageMissCount;
        }
    }

    if (!res) {
        auto allocSize = size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE;
        void* mem = globalPool.DoMmap(allocSize);
        if (Y_UNLIKELY(MAP_FAILED == mem)) {
            TStringStream mmaps;
            const auto lastError = LastSystemError();
            if (lastError == ENOMEM) {
                mmaps << GetMemoryMapsString();
            }
            ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: "
                << LastSystemErrorText(lastError) << mmaps.Str();
        }

        res = AlignUp(mem, POOL_PAGE_SIZE);
        const size_t off = reinterpret_cast<intptr_t>(res) - reinterpret_cast<intptr_t>(mem);
        if (Y_UNLIKELY(off)) {
            // unmap the misaligned prefix
            globalPool.DoMunmap(mem, off);
        }

        // The extra space is also page-aligned; put it on the free page list
        auto alignedSize = AlignUp(size, POOL_PAGE_SIZE);
        ui64 extraPages = (allocSize - off - alignedSize) / POOL_PAGE_SIZE;
        ui64 tail = (allocSize - off - alignedSize) % POOL_PAGE_SIZE;
        auto extraPage = reinterpret_cast<ui8*>(res) + alignedSize;
        for (ui64 i = 0; i < extraPages; ++i) {
            AllPages.emplace(extraPage);
            FreePages.emplace(extraPage);
            extraPage += POOL_PAGE_SIZE;
        }
        if (size != alignedSize) {
            // unmap the unaligned hole between the block end and the next page
            globalPool.DoMunmap(reinterpret_cast<ui8*>(res) + size, alignedSize - size);
        }
        if (tail) {
            // unmap the suffix
            Y_DEBUG_ABORT_UNLESS(extraPage + tail <= reinterpret_cast<ui8*>(mem) + allocSize);
            globalPool.DoMunmap(extraPage, tail);
        }

        auto extraSize = extraPages * POOL_PAGE_SIZE;
        auto totalSize = size + extraSize;
        TotalAllocated += totalSize;
        if (AllocNotifyCallback) {
            AllocNotifyCurrentBytes += totalSize;
        }
        if (Counters.TotalBytesAllocatedCntr) {
            (*Counters.TotalBytesAllocatedCntr) += totalSize;
        }
    }

    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }
    ++AllocCount;
    UpdatePeaks();
    return res;
}
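
// Mirror of Alloc(): mid-size blocks (after the same rounding) go back to
// their global level pool; larger blocks are unmapped immediately.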
template<typename T>
void TAlignedPagePoolImpl<T>::Free(void* ptr, size_t size) noexcept {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size <= MaxMidSize)
        size = FastClp2(size);
    if (size <= MaxMidSize) {
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        TGlobalPools<T, false>::Instance().PushPage(level, ptr);
    } else {
        TGlobalPools<T, false>::Instance().DoMunmap(ptr, size);
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated >= size);
    TotalAllocated -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}

template<typename T>
void TAlignedPagePoolImpl<T>::DoCleanupGlobalFreeList(ui64 targetSize) {
    TGlobalPools<T, true>::Instance().DoCleanupFreeList(targetSize);
    TGlobalPools<T, false>::Instance().DoCleanupFreeList(targetSize);
}
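
// Recomputes the "yellow zone" flag with hysteresis: it turns on when usage
// reaches EnableMemoryYellowZoneThreshold percent of the limit and turns off
// only once usage falls to DisableMemoryYellowZoneThreshold percent, so the
// flag does not flap around a single boundary. A forced value is respected,
// and with a limit-increase callback installed the flag is only recomputed
// after the maximum limit value has been reached.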
template<typename T>
void TAlignedPagePoolImpl<T>::UpdateMemoryYellowZone() {
    if (Limit == 0) return;
    if (IsMemoryYellowZoneForcefullyChanged) return;
    if (IncreaseMemoryLimitCallback && !IsMaximumLimitValueReached) return;

    ui8 usedMemoryPercent = 100 * GetUsed() / Limit;
    if (usedMemoryPercent >= EnableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = true;
    } else if (usedMemoryPercent <= DisableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = false;
    }
}

template<typename T>
bool TAlignedPagePoolImpl<T>::TryIncreaseLimit(ui64 required) {
    if (!IncreaseMemoryLimitCallback) {
        return false;
    }
    IncreaseMemoryLimitCallback(Limit, required);
    return Limit >= required;
}

template<typename T>
ui64 TAlignedPagePoolImpl<T>::GetGlobalPagePoolSize() {
    ui64 size = 0;
    for (size_t level = 0; level <= MidLevels; ++level) {
        size += TGlobalPools<T, false>::Instance().Get(level).GetSize();
    }
    return size;
}

template<typename T>
void TAlignedPagePoolImpl<T>::PrintStat(size_t usedPages, IOutputStream& out) const {
    usedPages += GetFreePageCount();
    out << "Count of free pages: " << GetFreePageCount() << Endl;
    out << "Allocated for blocks: " << (GetAllocated() - usedPages * POOL_PAGE_SIZE) << Endl;
    out << "Total allocated by lists: " << GetAllocated() << Endl;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ResetGlobalsUT()
{
    TGlobalPools<T, false>::Instance().Reset();
}

#if defined(ALLOW_DEFAULT_ALLOCATOR)
// static
template<typename T>
bool TAlignedPagePoolImpl<T>::IsDefaultAllocatorUsed() {
    return IsDefaultAllocator;
}
#endif

template class TAlignedPagePoolImpl<>;
template class TAlignedPagePoolImpl<TFakeAlignedMmap>;
template class TAlignedPagePoolImpl<TFakeUnalignedMmap>;
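
// Standalone page allocation that bypasses any TAlignedPagePoolImpl instance
// and works directly with the SysAlign=true global pools. Mid-size requests
// are rounded to a power of two and served from the level cache; on a cache
// miss at least MaxMidSize bytes are mapped and the unused tail is pushed back
// to the cache, sliced into blocks of the rounded request size.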
template<typename TMmap>
void* GetAlignedPage(ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    auto& pool = TGlobalPools<TMmap, true>::Instance();

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        if (auto res = pool.Get(level).GetPage()) {
            return res;
        }
    }

    auto allocSize = Max<ui64>(MaxMidSize, size);
    void* mem = pool.DoMmap(allocSize);
    if (Y_UNLIKELY(MAP_FAILED == mem)) {
        TStringStream mmaps;
        const auto lastError = LastSystemError();
        if (lastError == ENOMEM) {
            mmaps << GetMemoryMapsString();
        }
        ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: " << LastSystemErrorText(lastError) << mmaps.Str();
    }

    if (size < MaxMidSize) {
        // push the extra allocated pages into the cache
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        ui8* ptr = (ui8*)mem + size;
        ui8* const end = (ui8*)mem + MaxMidSize;
        while (ptr < end) {
            pool.PushPage(level, ptr);
            ptr += size;
        }
    }

    return mem;
}

template<typename TMmap>
void ReleaseAlignedPage(void* mem, ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }
    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        TGlobalPools<TMmap, true>::Instance().PushPage(level, mem);
        return;
    }

    TGlobalPools<TMmap, true>::Instance().DoMunmap(mem, size);
}

template<typename TMmap>
i64 GetTotalMmapedBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalMmappedBytes() + TGlobalPools<TMmap, false>::Instance().GetTotalMmappedBytes();
}

template<typename TMmap>
i64 GetTotalFreeListBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalFreeListBytes() + TGlobalPools<TMmap, false>::Instance().GetTotalFreeListBytes();
}

template i64 GetTotalMmapedBytes<>();
template i64 GetTotalMmapedBytes<TFakeAlignedMmap>();
template i64 GetTotalMmapedBytes<TFakeUnalignedMmap>();

template i64 GetTotalFreeListBytes<>();
template i64 GetTotalFreeListBytes<TFakeAlignedMmap>();
template i64 GetTotalFreeListBytes<TFakeUnalignedMmap>();

template void* GetAlignedPage<>(ui64);
template void* GetAlignedPage<TFakeAlignedMmap>(ui64);
template void* GetAlignedPage<TFakeUnalignedMmap>(ui64);

template void ReleaseAlignedPage<>(void*, ui64);
template void ReleaseAlignedPage<TFakeAlignedMmap>(void*, ui64);
template void ReleaseAlignedPage<TFakeUnalignedMmap>(void*, ui64);

size_t GetMemoryMapsCount() {
    size_t lineCount = 0;
    TString line;
#if defined(_unix_)
    TFileInput file("/proc/self/maps");
    while (file.ReadLine(line)) ++lineCount;
#endif
    return lineCount;
}

} // namespace NKikimr