#include "aligned_page_pool.h"

#include <util/generic/yexception.h>
#include <util/stream/file.h>
#include <util/string/cast.h>
#include <util/string/strip.h>
#include <util/system/align.h>
#include <util/system/compiler.h>
#include <util/system/error.h>
#include <util/system/info.h>
#include <util/thread/lfstack.h>

#if defined(_win_)
#   include <util/system/winint.h>
#elif defined(_unix_)
#   include <sys/types.h>
#   include <sys/mman.h>
#endif
namespace NKikimr {

#if defined(ALLOW_DEFAULT_ALLOCATOR)
#   if defined(PROFILE_MEMORY_ALLOCATIONS)
static bool IsDefaultAllocator = true;
#   else
static bool IsDefaultAllocator = false;
#   endif

void UseDefaultAllocator() {
    // TODO: check that we haven't already used the MKQL allocator
    IsDefaultAllocator = true;
}
#endif

static ui64 SYS_PAGE_SIZE = NSystemInfo::GetPageSize();

constexpr ui32 MidLevels = 10;
constexpr ui32 MaxMidSize = (1u << MidLevels) * TAlignedPagePool::POOL_PAGE_SIZE;
static_assert(MaxMidSize == 64 * 1024 * 1024, "Upper memory block 64 Mb");
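
// Size classes: global pool level i caches blocks of POOL_PAGE_SIZE << i bytes,
// for i in [0, MidLevels]. The static_assert above implies POOL_PAGE_SIZE is
// 64 KB (64 KB << 10 == 64 MB), so e.g. a 1 MB block maps to level 4.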
namespace {

ui64 GetMaxMemoryMaps() {
    ui64 maxMapCount = 0;
#if defined(_unix_)
    maxMapCount = FromString<ui64>(Strip(TFileInput("/proc/sys/vm/max_map_count").ReadAll()));
#endif
    return maxMapCount;
}

TString GetMemoryMapsString() {
    TStringStream ss;
    ss << " (maps: " << GetMemoryMapsCount() << " vs " << GetMaxMemoryMaps() << ")";
    return ss.Str();
}
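
// A process-wide, lock-free cache of equally sized mmap'ed blocks. Pages are
// pushed and popped via a lock-free stack; Count tracks the number of cached
// pages so that GetSize() can be computed without draining the stack.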
template<typename T, bool SysAlign>
class TGlobalPools;

template<typename T, bool SysAlign>
class TGlobalPagePool {
    friend class TGlobalPools<T, SysAlign>;
public:
    TGlobalPagePool(size_t pageSize)
        : PageSize(pageSize)
    {}

    ~TGlobalPagePool() {
        void* addr = nullptr;
        while (Pages.Dequeue(&addr)) {
            FreePage(addr);
        }
    }

    void* GetPage() {
        void* page = nullptr;
        if (Pages.Dequeue(&page)) {
            --Count;
            return page;
        }

        return nullptr;
    }

    ui64 GetPageCount() const {
        return Count.load(std::memory_order_relaxed);
    }

    size_t GetPageSize() const {
        return PageSize;
    }

    size_t GetSize() const {
        return GetPageCount() * GetPageSize();
    }

private:
    size_t PushPage(void* addr) {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        if (Y_UNLIKELY(IsDefaultAllocator)) {
            FreePage(addr);
            return GetPageSize();
        }
#endif
        ++Count;
        Pages.Enqueue(addr);
        return 0;
    }

    void FreePage(void* addr) {
        auto res = T::Munmap(addr, PageSize);
        Y_DEBUG_ABORT_UNLESS(0 == res, "Munmap failed: %s", LastSystemErrorText());
    }

private:
    const size_t PageSize;
    std::atomic<ui64> Count = 0;
    TLockFreeStack<void*> Pages;
};
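
// Singleton holding one TGlobalPagePool per size class (MidLevels + 1 pools)
// plus process-wide mmap accounting. PushPage returns the number of bytes the
// pool released straight back to the OS (non-zero only on the
// default-allocator path), which is then subtracted from TotalMmappedBytes.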
template<typename T, bool SysAlign>
class TGlobalPools {
public:
    static TGlobalPools<T, SysAlign>& Instance() {
        return *Singleton<TGlobalPools<T, SysAlign>>();
    }

    TGlobalPagePool<T, SysAlign>& Get(ui32 index) {
        return *Pools[index];
    }

    const TGlobalPagePool<T, SysAlign>& Get(ui32 index) const {
        return *Pools[index];
    }

    TGlobalPools()
    {
        Reset();
    }

    void* DoMmap(size_t size) {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        // No memory maps allowed while using default allocator
        Y_DEBUG_ABORT_UNLESS(!IsDefaultAllocator);
#endif
        void* res = T::Mmap(size);
        TotalMmappedBytes += size;
        return res;
    }

    void DoCleanupFreeList(ui64 targetSize) {
        for (ui32 level = 0; level <= MidLevels; ++level) {
            auto& p = Get(level);
            const size_t pageSize = p.GetPageSize();
            while (p.GetSize() >= targetSize) {
                void* page = p.GetPage();
                if (!page)
                    break;
                p.FreePage(page);
                i64 prev = TotalMmappedBytes.fetch_sub(pageSize);
                Y_DEBUG_ABORT_UNLESS(prev >= 0);
            }
        }
    }

    void PushPage(size_t level, void* addr) {
        auto& pool = Get(level);
        size_t free = pool.PushPage(addr);
        if (Y_UNLIKELY(free > 0)) {
            i64 prev = TotalMmappedBytes.fetch_sub(free);
            Y_DEBUG_ABORT_UNLESS(prev >= 0);
        }
    }

    void DoMunmap(void* addr, size_t size) {
        if (Y_UNLIKELY(0 != T::Munmap(addr, size))) {
            TStringStream mmaps;
            const auto lastError = LastSystemError();
            if (lastError == ENOMEM) {
                mmaps << GetMemoryMapsString();
            }
            ythrow yexception() << "Munmap(0x"
                << IntToString<16>(reinterpret_cast<uintptr_t>(addr))
                << ", " << size << ") failed: " << LastSystemErrorText(lastError) << mmaps.Str();
        }
        i64 prev = TotalMmappedBytes.fetch_sub(size);
        Y_DEBUG_ABORT_UNLESS(prev >= 0);
    }

    i64 GetTotalMmappedBytes() const {
        return TotalMmappedBytes.load();
    }

    i64 GetTotalFreeListBytes() const {
        i64 bytes = 0;
        for (ui32 i = 0; i <= MidLevels; ++i) {
            bytes += Get(i).GetSize();
        }
        return bytes;
    }

    void Reset()
    {
        Pools.clear();
        Pools.reserve(MidLevels + 1);
        for (ui32 i = 0; i <= MidLevels; ++i) {
            Pools.emplace_back(MakeHolder<TGlobalPagePool<T, SysAlign>>(TAlignedPagePool::POOL_PAGE_SIZE << i));
        }
    }

private:
    TVector<THolder<TGlobalPagePool<T, SysAlign>>> Pools;
    std::atomic<i64> TotalMmappedBytes{0};
};

} // unnamed
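
// Platform wrappers: on Windows, Mmap/Munmap are emulated with
// VirtualAlloc/VirtualFree and MAP_FAILED is defined locally so that both
// branches share the same error convention; on POSIX they forward to
// mmap/munmap directly.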
#ifdef _win_
#define MAP_FAILED (void*)(-1)
inline void* TSystemMmap::Mmap(size_t size)
{
    if (auto res = ::VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE)) {
        return res;
    } else {
        return MAP_FAILED;
    }
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return !::VirtualFree(addr, size, MEM_DECOMMIT);
}
#else
inline void* TSystemMmap::Mmap(size_t size)
{
    // Pass fd = -1 for the anonymous mapping: some platforms (e.g. macOS)
    // require it, and Linux ignores the fd when MAP_ANON is set.
    return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
}

inline int TSystemMmap::Munmap(void* addr, size_t size)
{
    Y_DEBUG_ABORT_UNLESS(AlignUp(addr, SYS_PAGE_SIZE) == addr, "Got unaligned address");
    Y_DEBUG_ABORT_UNLESS(AlignUp(size, SYS_PAGE_SIZE) == size, "Got unaligned size");
    return ::munmap(addr, size);
}
#endif
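
// Test doubles: the fake Mmap implementations never touch the OS. They invoke
// optional observer callbacks and return a fixed address (POOL_PAGE_SIZE for
// the aligned fake, POOL_PAGE_SIZE + 1 for the deliberately misaligned one),
// which lets unit tests exercise the alignment-trimming logic in Alloc().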
std::function<void(size_t size)> TFakeAlignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeAlignedMmap::OnMunmap = {};

void* TFakeAlignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE);
}

int TFakeAlignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}

std::function<void(size_t size)> TFakeUnalignedMmap::OnMmap = {};
std::function<void(void* addr, size_t size)> TFakeUnalignedMmap::OnMunmap = {};

void* TFakeUnalignedMmap::Mmap(size_t size)
{
    if (OnMmap) {
        OnMmap(size);
    }
    return reinterpret_cast<void*>(TAlignedPagePool::POOL_PAGE_SIZE + 1);
}

int TFakeUnalignedMmap::Munmap(void* addr, size_t size)
{
    if (OnMunmap) {
        OnMunmap(addr, size);
    }
    return 0;
}
TAlignedPagePoolCounters::TAlignedPagePoolCounters(::NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
    if (!countersRoot || name.empty())
        return;
    ::NMonitoring::TDynamicCounterPtr subGroup = countersRoot->GetSubgroup("counters", "utils")->GetSubgroup("subsystem", "mkqlalloc");
    TotalBytesAllocatedCntr = subGroup->GetCounter(name + "/TotalBytesAllocated");
    AllocationsCntr = subGroup->GetCounter(name + "/Allocations", true);
    PoolsCntr = subGroup->GetCounter(name + "/Pools", true);
    LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
}
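
// The destructor verifies (in debug builds) that everything handed out by this
// pool came back, releases any still-active blocks, accounts pages lost by the
// owner, and returns all pages to the level-0 global pool.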
template<typename T>
TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
    if (CheckLostMem && !UncaughtException()) {
        Y_DEBUG_ABORT_UNLESS(TotalAllocated == FreePages.size() * POOL_PAGE_SIZE,
            "memory leak; Expected %ld, actual %ld (%ld page(s), %ld offloaded); allocator created at: %s",
            TotalAllocated, FreePages.size() * POOL_PAGE_SIZE,
            FreePages.size(), OffloadedActiveBytes, GetDebugInfo().data());
        Y_DEBUG_ABORT_UNLESS(OffloadedActiveBytes == 0, "offloaded: %ld", OffloadedActiveBytes);
    }

    size_t activeBlocksSize = 0;
    for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
        activeBlocksSize += it->second;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        if (Y_UNLIKELY(IsDefaultAllocator)) {
            // Keep draining the remaining blocks; returning from the destructor
            // here would skip them and the counter updates below.
            ReturnBlock(it->first, it->second);
            continue;
        }
#endif
        Free(it->first, it->second);
    }

    if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
        if (Counters.LostPagesBytesFreeCntr) {
            (*Counters.LostPagesBytesFreeCntr) += OffloadedActiveBytes + activeBlocksSize + (AllPages.size() - FreePages.size()) * POOL_PAGE_SIZE;
        }
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated == AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes,
        "Expected %ld, actual %ld (%ld page(s))", TotalAllocated,
        AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes, AllPages.size());

    for (auto& ptr : AllPages) {
        TGlobalPools<T, false>::Instance().PushPage(0, ptr);
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= TotalAllocated;
    }
    if (Counters.PoolsCntr) {
        --(*Counters.PoolsCntr);
    }
    TotalAllocated = 0;
}
template<typename T>
void TAlignedPagePoolImpl<T>::ReleaseFreePages() {
    TotalAllocated -= FreePages.size() * POOL_PAGE_SIZE;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= FreePages.size() * POOL_PAGE_SIZE;
    }

    for (; !FreePages.empty(); FreePages.pop()) {
        AllPages.erase(FreePages.top());
        TGlobalPools<T, false>::Instance().PushPage(0, FreePages.top());
    }
}
template<typename T>
void TAlignedPagePoolImpl<T>::OffloadAlloc(ui64 size) {
    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    ++OffloadedAllocCount;
    OffloadedBytes += size;
    OffloadedActiveBytes += size;
    TotalAllocated += size;
    if (AllocNotifyCallback) {
        AllocNotifyCurrentBytes += size;
    }

    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) += size;
    }

    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }

    UpdatePeaks();
}

template<typename T>
void TAlignedPagePoolImpl<T>::OffloadFree(ui64 size) noexcept {
    TotalAllocated -= size;
    OffloadedActiveBytes -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}
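
// Page lookup order: the pool's own free list first (cheapest, counted as a
// hit), then the level-0 global pool (a "global hit"), and only then a fresh
// allocation via Alloc(), which may mmap new memory.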
template<typename T>
void* TAlignedPagePoolImpl<T>::GetPage() {
    ++PageAllocCount;
    if (!FreePages.empty()) {
        ++PageHitCount;
        const auto res = FreePages.top();
        FreePages.pop();
        return res;
    }

    if (Limit && TotalAllocated + POOL_PAGE_SIZE > Limit && !TryIncreaseLimit(TotalAllocated + POOL_PAGE_SIZE)) {
        throw TMemoryLimitExceededException();
    }

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_LIKELY(!IsDefaultAllocator)) {
#endif
        if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
            TotalAllocated += POOL_PAGE_SIZE;
            if (AllocNotifyCallback) {
                AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
            }
            if (Counters.TotalBytesAllocatedCntr) {
                (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
            }
            ++PageGlobalHitCount;
            AllPages.emplace(ptr);

            UpdatePeaks();
            return ptr;
        }

        ++PageMissCount;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    }
#endif

    void* res;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        res = GetBlock(POOL_PAGE_SIZE);
    } else {
#endif
        res = Alloc(POOL_PAGE_SIZE);
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    }
#endif

    AllPages.emplace(res);
    return res;
}
template<typename T>
void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        ReturnBlock(addr, POOL_PAGE_SIZE);
        AllPages.erase(addr);
        return;
    }
#endif
    Y_DEBUG_ABORT_UNLESS(AllPages.find(addr) != AllPages.end());
    FreePages.emplace(addr);
}

template<typename T>
void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        OffloadAlloc(size);
        auto ret = malloc(size);
        if (!ret) {
            throw TMemoryLimitExceededException();
        }
        return ret;
    }
#endif

    if (size == POOL_PAGE_SIZE) {
        return GetPage();
    } else {
        const auto ptr = Alloc(size);
        Y_DEBUG_ABORT_UNLESS(ActiveBlocks.emplace(ptr, size).second);
        return ptr;
    }
}

template<typename T>
void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
    Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(IsDefaultAllocator)) {
        OffloadFree(size);
        free(ptr);
        UpdateMemoryYellowZone();
        return;
    }
#endif

    if (size == POOL_PAGE_SIZE) {
        ReturnPage(ptr);
    } else {
        Free(ptr, size);
        Y_DEBUG_ABORT_UNLESS(ActiveBlocks.erase(ptr));
    }
    UpdateMemoryYellowZone();
}
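
// Allocation strategy for blocks larger than one pool page: round the size up
// to a power of two and try the matching global pool level; on a miss, mmap
// the block with ALLOC_AHEAD_PAGES of slack so a POOL_PAGE_SIZE-aligned region
// can be carved out, then trim the unaligned prefix/suffix and keep any whole
// leftover pages on this pool's free list.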
template<typename T>
void* TAlignedPagePoolImpl<T>::Alloc(size_t size) {
    void* res = nullptr;
    size = AlignUp(size, SYS_PAGE_SIZE);

    if (Limit && TotalAllocated + size > Limit && !TryIncreaseLimit(TotalAllocated + size)) {
        throw TMemoryLimitExceededException();
    }

    if (AllocNotifyCallback) {
        if (AllocNotifyCurrentBytes > AllocNotifyBytes) {
            AllocNotifyCallback();
            AllocNotifyCurrentBytes = 0;
        }
    }

    auto& globalPool = TGlobalPools<T, false>::Instance();

    if (size > POOL_PAGE_SIZE && size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        if ((res = globalPool.Get(level).GetPage())) {
            TotalAllocated += size;
            if (AllocNotifyCallback) {
                AllocNotifyCurrentBytes += size;
            }
            if (Counters.TotalBytesAllocatedCntr) {
                (*Counters.TotalBytesAllocatedCntr) += size;
            }
            ++PageGlobalHitCount;
        } else {
            ++PageMissCount;
        }
    }

    if (!res) {
        auto allocSize = size + ALLOC_AHEAD_PAGES * POOL_PAGE_SIZE;
        void* mem = globalPool.DoMmap(allocSize);
        if (Y_UNLIKELY(MAP_FAILED == mem)) {
            TStringStream mmaps;
            const auto lastError = LastSystemError();
            if (lastError == ENOMEM) {
                mmaps << GetMemoryMapsString();
            }
            ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: "
                << LastSystemErrorText(lastError) << mmaps.Str();
        }

        res = AlignUp(mem, POOL_PAGE_SIZE);
        const size_t off = reinterpret_cast<intptr_t>(res) - reinterpret_cast<intptr_t>(mem);
        if (Y_UNLIKELY(off)) {
            // unmap unaligned prefix
            globalPool.DoMunmap(mem, off);
        }

        // Extra space is also page-aligned. Put it to the free page list
        auto alignedSize = AlignUp(size, POOL_PAGE_SIZE);
        ui64 extraPages = (allocSize - off - alignedSize) / POOL_PAGE_SIZE;
        ui64 tail = (allocSize - off - alignedSize) % POOL_PAGE_SIZE;
        auto extraPage = reinterpret_cast<ui8*>(res) + alignedSize;
        for (ui64 i = 0; i < extraPages; ++i) {
            AllPages.emplace(extraPage);
            FreePages.emplace(extraPage);
            extraPage += POOL_PAGE_SIZE;
        }

        if (size != alignedSize) {
            // unmap unaligned hole
            globalPool.DoMunmap(reinterpret_cast<ui8*>(res) + size, alignedSize - size);
        }

        if (tail) {
            // unmap suffix
            Y_DEBUG_ABORT_UNLESS(extraPage + tail <= reinterpret_cast<ui8*>(mem) + allocSize);
            globalPool.DoMunmap(extraPage, tail);
        }

        auto extraSize = extraPages * POOL_PAGE_SIZE;
        auto totalSize = size + extraSize;
        TotalAllocated += totalSize;
        if (AllocNotifyCallback) {
            AllocNotifyCurrentBytes += totalSize;
        }
        if (Counters.TotalBytesAllocatedCntr) {
            (*Counters.TotalBytesAllocatedCntr) += totalSize;
        }
    }

    if (Counters.AllocationsCntr) {
        ++(*Counters.AllocationsCntr);
    }
    ++AllocCount;
    UpdatePeaks();
    return res;
}
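
// Free mirrors Alloc: mid-size blocks were rounded up to a power of two on
// allocation, so the same rounding recovers the level they came from and the
// block is pushed back to that global pool; anything larger goes straight
// back to the OS.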
template<typename T>
void TAlignedPagePoolImpl<T>::Free(void* ptr, size_t size) noexcept {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size <= MaxMidSize)
        size = FastClp2(size);
    if (size <= MaxMidSize) {
        auto level = LeastSignificantBit(size) - LeastSignificantBit(POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level >= 1 && level <= MidLevels);
        TGlobalPools<T, false>::Instance().PushPage(level, ptr);
    } else {
        TGlobalPools<T, false>::Instance().DoMunmap(ptr, size);
    }

    Y_DEBUG_ABORT_UNLESS(TotalAllocated >= size);
    TotalAllocated -= size;
    if (Counters.TotalBytesAllocatedCntr) {
        (*Counters.TotalBytesAllocatedCntr) -= size;
    }
}

template<typename T>
void TAlignedPagePoolImpl<T>::DoCleanupGlobalFreeList(ui64 targetSize) {
    TGlobalPools<T, true>::Instance().DoCleanupFreeList(targetSize);
    TGlobalPools<T, false>::Instance().DoCleanupFreeList(targetSize);
}
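
// The yellow zone uses hysteresis: it turns on when usage crosses
// EnableMemoryYellowZoneThreshold and only turns off again below
// DisableMemoryYellowZoneThreshold, so a pool hovering around a single
// threshold does not flap between states.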
template<typename T>
void TAlignedPagePoolImpl<T>::UpdateMemoryYellowZone() {
    if (Limit == 0) return;
    if (IsMemoryYellowZoneForcefullyChanged) return;
    if (IncreaseMemoryLimitCallback && !IsMaximumLimitValueReached) return;

    ui8 usedMemoryPercent = 100 * GetUsed() / Limit;
    if (usedMemoryPercent >= EnableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = true;
    } else if (usedMemoryPercent <= DisableMemoryYellowZoneThreshold) {
        IsMemoryYellowZoneReached = false;
    }
}

template<typename T>
bool TAlignedPagePoolImpl<T>::TryIncreaseLimit(ui64 required) {
    if (!IncreaseMemoryLimitCallback) {
        return false;
    }
    IncreaseMemoryLimitCallback(Limit, required);
    return Limit >= required;
}
template<typename T>
ui64 TAlignedPagePoolImpl<T>::GetGlobalPagePoolSize() {
    ui64 size = 0;
    for (size_t level = 0; level <= MidLevels; ++level) {
        size += TGlobalPools<T, false>::Instance().Get(level).GetSize();
    }
    return size;
}

template<typename T>
void TAlignedPagePoolImpl<T>::PrintStat(size_t usedPages, IOutputStream& out) const {
    usedPages += GetFreePageCount();
    out << "Count of free pages: " << GetFreePageCount() << Endl;
    out << "Allocated for blocks: " << (GetAllocated() - usedPages * POOL_PAGE_SIZE) << Endl;
    out << "Total allocated by lists: " << GetAllocated() << Endl;
}

template<typename T>
void TAlignedPagePoolImpl<T>::ResetGlobalsUT()
{
    TGlobalPools<T, false>::Instance().Reset();
}

#if defined(ALLOW_DEFAULT_ALLOCATOR)
// static
template<typename T>
bool TAlignedPagePoolImpl<T>::IsDefaultAllocatorUsed() {
    return IsDefaultAllocator;
}
#endif

template class TAlignedPagePoolImpl<>;
template class TAlignedPagePoolImpl<TFakeAlignedMmap>;
template class TAlignedPagePoolImpl<TFakeUnalignedMmap>;
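
// The standalone helpers below bypass TAlignedPagePoolImpl and work directly
// with the SysAlign = true global pools. On a cache miss, GetAlignedPage maps
// at least MaxMidSize bytes and seeds the matching level with the unused
// remainder, so subsequent requests of the same size become cache hits.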
template<typename TMmap>
void* GetAlignedPage(ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    auto& pool = TGlobalPools<TMmap, true>::Instance();

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        if (auto res = pool.Get(level).GetPage()) {
            return res;
        }
    }

    auto allocSize = Max<ui64>(MaxMidSize, size);
    void* mem = pool.DoMmap(allocSize);
    if (Y_UNLIKELY(MAP_FAILED == mem)) {
        TStringStream mmaps;
        const auto lastError = LastSystemError();
        if (lastError == ENOMEM) {
            mmaps << GetMemoryMapsString();
        }
        ythrow yexception() << "Mmap failed to allocate " << allocSize << " bytes: " << LastSystemErrorText(lastError) << mmaps.Str();
    }

    if (size < MaxMidSize) {
        // push extra allocated pages to cache
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        ui8* ptr = (ui8*)mem + size;
        ui8* const end = (ui8*)mem + MaxMidSize;
        while (ptr < end) {
            pool.PushPage(level, ptr);
            ptr += size;
        }
    }

    return mem;
}
template<typename TMmap>
void ReleaseAlignedPage(void* mem, ui64 size) {
    size = AlignUp(size, SYS_PAGE_SIZE);
    if (size < TAlignedPagePool::POOL_PAGE_SIZE) {
        size = TAlignedPagePool::POOL_PAGE_SIZE;
    }

    if (size <= MaxMidSize) {
        size = FastClp2(size);
        auto level = LeastSignificantBit(size) - LeastSignificantBit(TAlignedPagePool::POOL_PAGE_SIZE);
        Y_DEBUG_ABORT_UNLESS(level <= MidLevels);
        TGlobalPools<TMmap, true>::Instance().PushPage(level, mem);
        return;
    }

    TGlobalPools<TMmap, true>::Instance().DoMunmap(mem, size);
}

template<typename TMmap>
i64 GetTotalMmapedBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalMmappedBytes() + TGlobalPools<TMmap, false>::Instance().GetTotalMmappedBytes();
}

template<typename TMmap>
i64 GetTotalFreeListBytes() {
    return TGlobalPools<TMmap, true>::Instance().GetTotalFreeListBytes() + TGlobalPools<TMmap, false>::Instance().GetTotalFreeListBytes();
}

template i64 GetTotalMmapedBytes<>();
template i64 GetTotalMmapedBytes<TFakeAlignedMmap>();
template i64 GetTotalMmapedBytes<TFakeUnalignedMmap>();

template i64 GetTotalFreeListBytes<>();
template i64 GetTotalFreeListBytes<TFakeAlignedMmap>();
template i64 GetTotalFreeListBytes<TFakeUnalignedMmap>();

template void* GetAlignedPage<>(ui64);
template void* GetAlignedPage<TFakeAlignedMmap>(ui64);
template void* GetAlignedPage<TFakeUnalignedMmap>(ui64);

template void ReleaseAlignedPage<>(void*, ui64);
template void ReleaseAlignedPage<TFakeAlignedMmap>(void*, ui64);
template void ReleaseAlignedPage<TFakeUnalignedMmap>(void*, ui64);
size_t GetMemoryMapsCount() {
    size_t lineCount = 0;
    TString line;
#if defined(_unix_)
    TFileInput file("/proc/self/maps");
    while (file.ReadLine(line)) {
        ++lineCount;
    }
#endif
    return lineCount;
}

} // NKikimr