// mkql_alloc.h

#pragma once

#include "aligned_page_pool.h"
#include "mkql_mem_info.h"

#include <yql/essentials/core/pg_settings/guc_settings.h>
#include <yql/essentials/parser/pg_wrapper/interface/context.h>
#include <yql/essentials/public/udf/udf_allocator.h>
#include <yql/essentials/public/udf/udf_value.h>

#include <util/string/builder.h>
#include <util/system/align.h>
#include <util/system/defaults.h>
#include <util/system/tls.h>

#include <array>
#include <atomic>
#include <memory>
#include <new>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace NKikimr {
namespace NMiniKQL {

const ui64 MKQL_ALIGNMENT = 16;

struct TAllocPageHeader {
    ui64 Capacity;
    ui64 Offset;
    ui64 UseCount;
    ui64 Deallocated;
    TAlignedPagePool* MyAlloc;
    TAllocPageHeader* Link;
};

using TMemorySubPoolIdx = ui32;
enum class EMemorySubPool: TMemorySubPoolIdx {
    Default = 0,
    Temporary = 1,
    Count
};

constexpr ui32 MaxPageUserData = TAlignedPagePool::POOL_PAGE_SIZE - sizeof(TAllocPageHeader);

static_assert(sizeof(TAllocPageHeader) % MKQL_ALIGNMENT == 0, "Incorrect size of header");

struct TAllocState : public TAlignedPagePool
{
    struct TListEntry {
        TListEntry *Left = nullptr;
        TListEntry *Right = nullptr;

        void Link(TListEntry* root) noexcept;
        void Unlink() noexcept;
        void InitLinks() noexcept { Left = Right = this; }
        void Clear() noexcept { Left = Right = nullptr; }
        bool IsUnlinked() const noexcept { return !Left && !Right; }
    };

#ifndef NDEBUG
    std::unordered_map<TMemoryUsageInfo*, TIntrusivePtr<TMemoryUsageInfo>> ActiveMemInfo;
#endif
    bool SupportsSizedAllocators = false;

    void* LargeAlloc(size_t size) {
        return Alloc(size);
    }

    void LargeFree(void* ptr, size_t size) noexcept {
        Free(ptr, size);
    }

    using TCurrentPages = std::array<TAllocPageHeader*, (TMemorySubPoolIdx)EMemorySubPool::Count>;

    static TAllocPageHeader EmptyPageHeader;
    static TCurrentPages EmptyCurrentPages;

    TCurrentPages CurrentPages = EmptyCurrentPages;
    TListEntry OffloadedBlocksRoot;
    TListEntry GlobalPAllocList;
    TListEntry* CurrentPAllocList;
    TListEntry ArrowBlocksRoot;
    std::unordered_set<const void*> ArrowBuffers;
    bool EnableArrowTracking = true;

    void* MainContext = nullptr;
    void* CurrentContext = nullptr;

    struct TLockInfo {
        i32 OriginalRefs;
        i32 Locks;
    };

    bool UseRefLocking = false;
    std::unordered_map<void*, TLockInfo> LockedObjectsRefs;

    ::NKikimr::NUdf::TBoxedValueLink Root;

    NKikimr::NUdf::TBoxedValueLink* GetRoot() noexcept {
        return &Root;
    }

    explicit TAllocState(const TSourceLocation& location, const TAlignedPagePoolCounters& counters, bool supportsSizedAllocators);
    void KillAllBoxed();
    void InvalidateMemInfo();
    size_t GetDeallocatedInPages() const;
    static void CleanupPAllocList(TListEntry* root);
    static void CleanupArrowList(TListEntry* root);

    void LockObject(::NKikimr::NUdf::TUnboxedValuePod value);
    void UnlockObject(::NKikimr::NUdf::TUnboxedValuePod value);
};

extern Y_POD_THREAD(TAllocState*) TlsAllocState;
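
// TlsAllocState points at the allocator state currently bound to this thread.
// A plausible reading of the conventions in this header (hedged, not stated in
// the original): it is set by TScopedAlloc::Acquire() and consulted by the
// MKQLAlloc*/MKQLFree* helpers below, so all MKQL allocations on a thread go
// through the acquired state.
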
class TPAllocScope {
public:
    TPAllocScope() {
        PAllocList.InitLinks();
        Attach();
    }

    ~TPAllocScope() {
        Cleanup();
        Detach();
    }

    void Attach() {
        Y_ABORT_UNLESS(!Prev);
        Prev = TlsAllocState->CurrentPAllocList;
        Y_ABORT_UNLESS(Prev);
        TlsAllocState->CurrentPAllocList = &PAllocList;
    }

    void Detach() {
        if (Prev) {
            Y_ABORT_UNLESS(TlsAllocState->CurrentPAllocList == &PAllocList);
            TlsAllocState->CurrentPAllocList = Prev;
            Prev = nullptr;
        }
    }

    void Cleanup() {
        TAllocState::CleanupPAllocList(&PAllocList);
    }

private:
    TAllocState::TListEntry PAllocList;
    TAllocState::TListEntry* Prev = nullptr;
};
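
// Usage sketch (illustrative, not part of the original header; assumes an
// acquired allocator whose CurrentPAllocList is set): TPAllocScope redirects
// palloc registrations into its own list for the scope's lifetime, so they are
// cleaned up as a group:
//
//   {
//       TPAllocScope scope;        // Attach(): CurrentPAllocList -> scope's list
//       // ... code whose pallocs link into the scope ...
//   }                              // ~TPAllocScope(): Cleanup() + Detach()
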
// TListEntry and IBoxedValue occupy the same storage, so their sizes must match
static_assert(sizeof(NUdf::IBoxedValue) == sizeof(TAllocState::TListEntry));

class TBoxedValueWithFree : public NUdf::TBoxedValueBase {
public:
    void operator delete(void *mem) noexcept;
};

struct TMkqlPAllocHeader {
    union {
        TAllocState::TListEntry Entry;
        TBoxedValueWithFree Boxed;
    } U;

    size_t Size;
    ui64 Self; // should be placed right before pointer to allocated area, see GetMemoryChunkContext
};

static_assert(sizeof(TMkqlPAllocHeader) ==
    sizeof(size_t) +
    sizeof(TAllocState::TListEntry) +
    sizeof(void*), "Padding is not allowed");

constexpr size_t ArrowAlignment = 64;

struct TMkqlArrowHeader {
    TAllocState::TListEntry Entry;
    ui64 Size;
    char Padding[ArrowAlignment - sizeof(TAllocState::TListEntry) - sizeof(ui64)];
};

static_assert(sizeof(TMkqlArrowHeader) == ArrowAlignment);

class TScopedAlloc {
public:
    explicit TScopedAlloc(const TSourceLocation& location,
        const TAlignedPagePoolCounters& counters = TAlignedPagePoolCounters(), bool supportsSizedAllocators = false, bool initiallyAcquired = true)
        : InitiallyAcquired_(initiallyAcquired)
        , MyState_(location, counters, supportsSizedAllocators)
    {
        MyState_.MainContext = PgInitializeMainContext();
        if (InitiallyAcquired_) {
            Acquire();
        }
    }

    ~TScopedAlloc()
    {
        if (!InitiallyAcquired_) {
            Acquire();
        }

        MyState_.KillAllBoxed();
        Release();
        PgDestroyMainContext(MyState_.MainContext);
    }

    TAllocState& Ref() {
        return MyState_;
    }

    void Acquire();
    void Release();

    size_t GetUsed() const { return MyState_.GetUsed(); }
    size_t GetPeakUsed() const { return MyState_.GetPeakUsed(); }
    size_t GetAllocated() const { return MyState_.GetAllocated(); }
    size_t GetPeakAllocated() const { return MyState_.GetPeakAllocated(); }
    size_t GetLimit() const { return MyState_.GetLimit(); }
    void SetLimit(size_t limit) { MyState_.SetLimit(limit); }
    void DisableStrictAllocationCheck() { MyState_.DisableStrictAllocationCheck(); }
    void ReleaseFreePages() { MyState_.ReleaseFreePages(); }
    void InvalidateMemInfo() { MyState_.InvalidateMemInfo(); }

    bool IsAttached() const { return AttachedCount_ > 0; }

    void SetGUCSettings(const TGUCSettings::TPtr& GUCSettings) {
        Acquire();
        PgSetGUCSettings(MyState_.MainContext, GUCSettings);
        Release();
    }

    void SetMaximumLimitValueReached(bool IsReached) {
        MyState_.SetMaximumLimitValueReached(IsReached);
    }

private:
    const bool InitiallyAcquired_;
    TAllocState MyState_;
    size_t AttachedCount_ = 0;
    TAllocState* PrevState_ = nullptr;
};
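
// Usage sketch (illustrative, not part of the original header): a TScopedAlloc
// owns a TAllocState and binds it to the current thread while acquired:
//
//   TScopedAlloc alloc(__LOCATION__);    // acquires by default
//   void* p = MKQLAllocWithSize(64, EMemorySubPool::Default);
//   MKQLFreeWithSize(p, 64, EMemorySubPool::Default);
//   // ~TScopedAlloc() kills any remaining boxed values and releases the state
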
class TPagedArena {
public:
    TPagedArena(TAlignedPagePool* pagePool) noexcept
        : PagePool_(pagePool)
        , CurrentPages_(TAllocState::EmptyCurrentPages)
    {}

    TPagedArena(const TPagedArena&) = delete;
    TPagedArena(TPagedArena&& other) noexcept
        : PagePool_(other.PagePool_)
        , CurrentPages_(other.CurrentPages_)
    {
        other.CurrentPages_ = TAllocState::EmptyCurrentPages;
    }

    void operator=(const TPagedArena&) = delete;

    void operator=(TPagedArena&& other) noexcept {
        Clear();
        PagePool_ = other.PagePool_;
        CurrentPages_ = other.CurrentPages_;
        other.CurrentPages_ = TAllocState::EmptyCurrentPages;
    }

    ~TPagedArena() noexcept {
        Clear();
    }

    void* Alloc(size_t sz, const EMemorySubPool pagePool = EMemorySubPool::Default) {
        auto& currentPage = CurrentPages_[(TMemorySubPoolIdx)pagePool];
        if (Y_LIKELY(currentPage->Offset + sz <= currentPage->Capacity)) {
            void* ret = (char*)currentPage + currentPage->Offset;
            currentPage->Offset = AlignUp(currentPage->Offset + sz, MKQL_ALIGNMENT);
            return ret;
        }

        return AllocSlow(sz, pagePool);
    }

    void Clear() noexcept;

private:
    void* AllocSlow(const size_t sz, const EMemorySubPool pagePool);

private:
    TAlignedPagePool* PagePool_;
    TAllocState::TCurrentPages CurrentPages_ = TAllocState::EmptyCurrentPages;
};
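
// TPagedArena::Alloc is a bump-pointer fast path: while a request fits into the
// current page it only advances Offset (aligned to MKQL_ALIGNMENT); AllocSlow()
// is called to fetch a fresh page. Minimal sketch (TAllocState derives from
// TAlignedPagePool, so an acquired TlsAllocState can serve as the pool):
//
//   TPagedArena arena(TlsAllocState);
//   void* mem = arena.Alloc(128);
//   arena.Clear();   // returns all pages at once; there is no per-object free
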
void* MKQLAllocSlow(size_t sz, TAllocState* state, const EMemorySubPool mPool);

inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
    Y_DEBUG_ABORT_UNLESS(state);

#ifdef PROFILE_MEMORY_ALLOCATIONS
    auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
    if (!ret) {
        throw TMemoryLimitExceededException();
    }

    ret->Link(&state->OffloadedBlocksRoot);
    return ret + 1;
#endif

    auto currPage = state->CurrentPages[(TMemorySubPoolIdx)mPool];
    if (Y_LIKELY(currPage->Offset + sz <= currPage->Capacity)) {
        void* ret = (char*)currPage + currPage->Offset;
        currPage->Offset = AlignUp(currPage->Offset + sz, MKQL_ALIGNMENT);
        ++currPage->UseCount;
        return ret;
    }

    return MKQLAllocSlow(sz, state, mPool);
}

inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
    Y_DEBUG_ABORT_UNLESS(state);

    bool useMemalloc = state->SupportsSizedAllocators && sz > MaxPageUserData;

#ifdef PROFILE_MEMORY_ALLOCATIONS
    useMemalloc = true;
#endif

    if (useMemalloc) {
        state->OffloadAlloc(sizeof(TAllocState::TListEntry) + sz);
        auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
        if (!ret) {
            throw TMemoryLimitExceededException();
        }

        ret->Link(&state->OffloadedBlocksRoot);
        return ret + 1;
    }

    auto currPage = state->CurrentPages[(TMemorySubPoolIdx)mPool];
    if (Y_LIKELY(currPage->Offset + sz <= currPage->Capacity)) {
        void* ret = (char*)currPage + currPage->Offset;
        currPage->Offset = AlignUp(currPage->Offset + sz, MKQL_ALIGNMENT);
        ++currPage->UseCount;
        return ret;
    }

    return MKQLAllocSlow(sz, state, mPool);
}

void MKQLFreeSlow(TAllocPageHeader* header, TAllocState *state, const EMemorySubPool mPool) noexcept;

inline void MKQLFreeDeprecated(const void* mem, const EMemorySubPool mPool) noexcept {
    if (!mem) {
        return;
    }

#ifdef PROFILE_MEMORY_ALLOCATIONS
    TAllocState *state = TlsAllocState;
    Y_DEBUG_ABORT_UNLESS(state);

    auto entry = (TAllocState::TListEntry*)(mem) - 1;
    entry->Unlink();
    free(entry);
    return;
#endif

    TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(mem);
    Y_DEBUG_ABORT_UNLESS(header->MyAlloc == TlsAllocState, "%s", (TStringBuilder() << "wrong allocator was used; "
        "allocated with: " << header->MyAlloc->GetDebugInfo() << " freed with: " << TlsAllocState->GetDebugInfo()).data());
    if (Y_LIKELY(--header->UseCount != 0)) {
        return;
    }

    MKQLFreeSlow(header, TlsAllocState, mPool);
}

inline void MKQLFreeFastWithSize(const void* mem, size_t sz, TAllocState* state, const EMemorySubPool mPool) noexcept {
    if (!mem) {
        return;
    }

    Y_DEBUG_ABORT_UNLESS(state);

    bool useFree = state->SupportsSizedAllocators && sz > MaxPageUserData;

#ifdef PROFILE_MEMORY_ALLOCATIONS
    useFree = true;
#endif

    if (useFree) {
        auto entry = (TAllocState::TListEntry*)(mem) - 1;
        entry->Unlink();
        free(entry);
        state->OffloadFree(sizeof(TAllocState::TListEntry) + sz);
        return;
    }

    TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(mem);
    Y_DEBUG_ABORT_UNLESS(header->MyAlloc == state, "%s", (TStringBuilder() << "wrong allocator was used; "
        "allocated with: " << header->MyAlloc->GetDebugInfo() << " freed with: " << TlsAllocState->GetDebugInfo()).data());
    if (Y_LIKELY(--header->UseCount != 0)) {
        header->Deallocated += sz;
        return;
    }

    MKQLFreeSlow(header, state, mPool);
}

inline void* MKQLAllocDeprecated(size_t sz, const EMemorySubPool mPool) {
    return MKQLAllocFastDeprecated(sz, TlsAllocState, mPool);
}

inline void* MKQLAllocWithSize(size_t sz, const EMemorySubPool mPool) {
    return MKQLAllocFastWithSize(sz, TlsAllocState, mPool);
}

inline void MKQLFreeWithSize(const void* mem, size_t sz, const EMemorySubPool mPool) noexcept {
    MKQLFreeFastWithSize(mem, sz, TlsAllocState, mPool);
}
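
// The WithSize variants rely on the caller passing back the exact size used at
// allocation: small blocks live inside pooled pages (the UseCount/Deallocated
// bookkeeping above), while blocks larger than MaxPageUserData are offloaded to
// malloc when sized allocators are supported. Minimal sketch of the required
// pairing:
//
//   void* p = MKQLAllocWithSize(sz, EMemorySubPool::Default);
//   // ...
//   MKQLFreeWithSize(p, sz, EMemorySubPool::Default);  // same sz, same pool
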
inline void MKQLRegisterObject(NUdf::TBoxedValue* value) noexcept {
    value->Link(TlsAllocState->GetRoot());
}

inline void MKQLUnregisterObject(NUdf::TBoxedValue* value) noexcept {
    value->Unlink();
}

void* MKQLArrowAllocate(ui64 size);
void* MKQLArrowReallocate(const void* mem, ui64 prevSize, ui64 size);
void MKQLArrowFree(const void* mem, ui64 size);
void MKQLArrowUntrack(const void* mem);
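
// Hedged sketch of the expected MKQLArrow* call pattern (sizes must match the
// live allocation; buffers are 64-byte aligned per TMkqlArrowHeader and, while
// EnableArrowTracking is set, appear to be registered with the current state's
// ArrowBuffers; MKQLArrowUntrack detaches a buffer from that tracking):
//
//   void* buf = MKQLArrowAllocate(len);
//   buf = MKQLArrowReallocate(buf, len, newLen);
//   MKQLArrowFree(buf, newLen);
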
template <const EMemorySubPool MemoryPoolExt = EMemorySubPool::Default>
struct TWithMiniKQLAlloc {
    static constexpr EMemorySubPool MemoryPool = MemoryPoolExt;

    static void FreeWithSize(const void* mem, const size_t sz) {
        NMiniKQL::MKQLFreeWithSize(mem, sz, MemoryPool);
    }

    static void* AllocWithSize(const size_t sz) {
        return NMiniKQL::MKQLAllocWithSize(sz, MemoryPool);
    }

    void* operator new(size_t sz) {
        return NMiniKQL::MKQLAllocWithSize(sz, MemoryPool);
    }

    void* operator new[](size_t sz) {
        return NMiniKQL::MKQLAllocWithSize(sz, MemoryPool);
    }

    void operator delete(void *mem, std::size_t sz) noexcept {
        NMiniKQL::MKQLFreeWithSize(mem, sz, MemoryPool);
    }

    void operator delete[](void *mem, std::size_t sz) noexcept {
        NMiniKQL::MKQLFreeWithSize(mem, sz, MemoryPool);
    }
};
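
// Deriving from TWithMiniKQLAlloc routes a class's new/delete through the MKQL
// allocator. Illustrative sketch (TMyNode is a hypothetical example type):
//
//   struct TMyNode: public TWithMiniKQLAlloc<EMemorySubPool::Default> {
//       ui64 Value = 0;
//   };
//   auto* node = new TMyNode();   // MKQLAllocWithSize under the hood
//   delete node;                  // sized delete -> MKQLFreeWithSize
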
template <typename T, typename... Args>
T* AllocateOn(TAllocState* state, Args&&... args)
{
    static_assert(std::is_base_of<TWithMiniKQLAlloc<T::MemoryPool>, T>::value, "Class must inherit TWithMiniKQLAlloc.");
    void* addr = MKQLAllocFastWithSize(sizeof(T), state, T::MemoryPool);
    return ::new(addr) T(std::forward<Args>(args)...);
}
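
// AllocateOn placement-constructs T in memory drawn from an explicit state
// rather than the thread-local one. Sketch, reusing the hypothetical TMyNode
// from the previous comment (scopedAlloc being a TScopedAlloc):
//
//   TMyNode* node = AllocateOn<TMyNode>(&scopedAlloc.Ref());
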
template <typename Type, EMemorySubPool MemoryPool = EMemorySubPool::Default>
struct TMKQLAllocator
{
    typedef Type value_type;
    typedef Type* pointer;
    typedef const Type* const_pointer;
    typedef Type& reference;
    typedef const Type& const_reference;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;

    TMKQLAllocator() noexcept = default;
    ~TMKQLAllocator() noexcept = default;

    template<typename U> TMKQLAllocator(const TMKQLAllocator<U, MemoryPool>&) noexcept {}
    template<typename U> struct rebind { typedef TMKQLAllocator<U, MemoryPool> other; };
    template<typename U> bool operator==(const TMKQLAllocator<U, MemoryPool>&) const { return true; }
    template<typename U> bool operator!=(const TMKQLAllocator<U, MemoryPool>&) const { return false; }

    static pointer allocate(size_type n, const void* = nullptr)
    {
        return static_cast<pointer>(MKQLAllocWithSize(n * sizeof(value_type), MemoryPool));
    }

    static void deallocate(const_pointer p, size_type n) noexcept
    {
        MKQLFreeWithSize(p, n * sizeof(value_type), MemoryPool);
    }
};

using TWithDefaultMiniKQLAlloc = TWithMiniKQLAlloc<EMemorySubPool::Default>;
using TWithTemporaryMiniKQLAlloc = TWithMiniKQLAlloc<EMemorySubPool::Temporary>;
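
// TMKQLAllocator satisfies the standard Allocator requirements, so STL
// containers can live inside the MKQL pools (requires an acquired TScopedAlloc
// on the current thread):
//
//   std::vector<ui64, TMKQLAllocator<ui64>> values;
//   values.push_back(42);
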
template <typename Type>
struct TMKQLHugeAllocator
{
    typedef Type value_type;
    typedef Type* pointer;
    typedef const Type* const_pointer;
    typedef Type& reference;
    typedef const Type& const_reference;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;

    TMKQLHugeAllocator() noexcept = default;
    ~TMKQLHugeAllocator() noexcept = default;

    template<typename U> TMKQLHugeAllocator(const TMKQLHugeAllocator<U>&) noexcept {}
    template<typename U> struct rebind { typedef TMKQLHugeAllocator<U> other; };
    template<typename U> bool operator==(const TMKQLHugeAllocator<U>&) const { return true; }
    template<typename U> bool operator!=(const TMKQLHugeAllocator<U>&) const { return false; }

    static pointer allocate(size_type n, const void* = nullptr)
    {
        size_t size = Max(n * sizeof(value_type), TAllocState::POOL_PAGE_SIZE);
        return static_cast<pointer>(TlsAllocState->GetBlock(size));
    }

    static void deallocate(const_pointer p, size_type n) noexcept
    {
        size_t size = Max(n * sizeof(value_type), TAllocState::POOL_PAGE_SIZE);
        TlsAllocState->ReturnBlock(const_cast<pointer>(p), size);
    }
};
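
// Unlike TMKQLAllocator, TMKQLHugeAllocator rounds every request up to at least
// POOL_PAGE_SIZE and takes whole blocks straight from the page pool, which
// suits containers that make one large allocation:
//
//   std::vector<ui64, TMKQLHugeAllocator<ui64>> bigBuffer(1u << 20);
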
template <typename T>
class TPagedList
{
public:
    static_assert(sizeof(T) <= TAlignedPagePool::POOL_PAGE_SIZE, "Too big object");
    static constexpr size_t OBJECTS_PER_PAGE = TAlignedPagePool::POOL_PAGE_SIZE / sizeof(T);

    class TIterator;
    class TConstIterator;

    TPagedList(TAlignedPagePool& pool)
        : Pool(pool)
        , IndexInLastPage(OBJECTS_PER_PAGE)
    {}

    TPagedList(const TPagedList&) = delete;
    TPagedList(TPagedList&&) = delete;

    ~TPagedList() {
        Clear();
    }

    void Add(T&& value) {
        if (IndexInLastPage < OBJECTS_PER_PAGE) {
            auto ptr = ObjectAt(Pages.back(), IndexInLastPage);
            new(ptr) T(std::move(value));
            ++IndexInLastPage;
            return;
        }

        auto ptr = Pool.GetPage();
        IndexInLastPage = 1;
        Pages.push_back(ptr);
        new(ptr) T(std::move(value));
    }

    void Clear() {
        for (ui32 i = 0; i + 1 < Pages.size(); ++i) {
            for (ui32 objIndex = 0; objIndex < OBJECTS_PER_PAGE; ++objIndex) {
                ObjectAt(Pages[i], objIndex)->~T();
            }

            Pool.ReturnPage(Pages[i]);
        }

        if (!Pages.empty()) {
            for (ui32 objIndex = 0; objIndex < IndexInLastPage; ++objIndex) {
                ObjectAt(Pages.back(), objIndex)->~T();
            }

            Pool.ReturnPage(Pages.back());
        }

        TPages().swap(Pages);
        IndexInLastPage = OBJECTS_PER_PAGE;
    }

    const T& operator[](size_t i) const {
        const auto table = i / OBJECTS_PER_PAGE;
        const auto index = i % OBJECTS_PER_PAGE;
        return *ObjectAt(Pages[table], index);
    }

    size_t Size() const {
        return Pages.empty() ? 0 : ((Pages.size() - 1) * OBJECTS_PER_PAGE + IndexInLastPage);
    }

    TConstIterator Begin() const {
        return TConstIterator(this, 0, 0);
    }

    TConstIterator begin() const {
        return Begin();
    }

    TConstIterator End() const {
        if (IndexInLastPage == OBJECTS_PER_PAGE) {
            return TConstIterator(this, Pages.size(), 0);
        }

        return TConstIterator(this, Pages.size() - 1, IndexInLastPage);
    }

    TConstIterator end() const {
        return End();
    }

    TIterator Begin() {
        return TIterator(this, 0, 0);
    }

    TIterator begin() {
        return Begin();
    }

    TIterator End() {
        if (IndexInLastPage == OBJECTS_PER_PAGE) {
            return TIterator(this, Pages.size(), 0);
        }

        return TIterator(this, Pages.size() - 1, IndexInLastPage);
    }

    TIterator end() {
        return End();
    }

    class TIterator
    {
    public:
        using TOwner = TPagedList<T>;

        TIterator()
            : Owner(nullptr)
            , PageNo(0)
            , PageIndex(0)
        {}

        TIterator(const TIterator&) = default;
        TIterator& operator=(const TIterator&) = default;

        TIterator(TOwner* owner, size_t pageNo, size_t pageIndex)
            : Owner(owner)
            , PageNo(pageNo)
            , PageIndex(pageIndex)
        {}

        T& operator*() {
            Y_DEBUG_ABORT_UNLESS(PageIndex < OBJECTS_PER_PAGE);
            Y_DEBUG_ABORT_UNLESS(PageNo < Owner->Pages.size());
            Y_DEBUG_ABORT_UNLESS(PageNo + 1 < Owner->Pages.size() || PageIndex < Owner->IndexInLastPage);

            return *Owner->ObjectAt(Owner->Pages[PageNo], PageIndex);
        }

        TIterator& operator++() {
            if (++PageIndex == OBJECTS_PER_PAGE) {
                ++PageNo;
                PageIndex = 0;
            }

            return *this;
        }

        bool operator==(const TIterator& other) const {
            return PageNo == other.PageNo && PageIndex == other.PageIndex;
        }

        bool operator!=(const TIterator& other) const {
            return !operator==(other);
        }

    private:
        TOwner* Owner;
        size_t PageNo;
        size_t PageIndex;
    };

    class TConstIterator
    {
    public:
        using TOwner = TPagedList<T>;

        TConstIterator()
            : Owner(nullptr)
            , PageNo(0)
            , PageIndex(0)
        {}

        TConstIterator(const TConstIterator&) = default;
        TConstIterator& operator=(const TConstIterator&) = default;

        TConstIterator(const TOwner* owner, size_t pageNo, size_t pageIndex)
            : Owner(owner)
            , PageNo(pageNo)
            , PageIndex(pageIndex)
        {}

        const T& operator*() {
            Y_DEBUG_ABORT_UNLESS(PageIndex < OBJECTS_PER_PAGE);
            Y_DEBUG_ABORT_UNLESS(PageNo < Owner->Pages.size());
            Y_DEBUG_ABORT_UNLESS(PageNo + 1 < Owner->Pages.size() || PageIndex < Owner->IndexInLastPage);

            return *Owner->ObjectAt(Owner->Pages[PageNo], PageIndex);
        }

        TConstIterator& operator++() {
            if (++PageIndex == OBJECTS_PER_PAGE) {
                ++PageNo;
                PageIndex = 0;
            }

            return *this;
        }

        bool operator==(const TConstIterator& other) const {
            return PageNo == other.PageNo && PageIndex == other.PageIndex;
        }

        bool operator!=(const TConstIterator& other) const {
            return !operator==(other);
        }

    private:
        const TOwner* Owner;
        size_t PageNo;
        size_t PageIndex;
    };

private:
    static const T* ObjectAt(const void* page, size_t objectIndex) {
        return reinterpret_cast<const T*>(static_cast<const char*>(page) + objectIndex * sizeof(T));
    }

    static T* ObjectAt(void* page, size_t objectIndex) {
        return reinterpret_cast<T*>(static_cast<char*>(page) + objectIndex * sizeof(T));
    }

    TAlignedPagePool& Pool;
    using TPages = std::vector<void*, TMKQLAllocator<void*>>;
    TPages Pages;
    size_t IndexInLastPage;
};
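
// TPagedList packs objects into pool pages and never relocates them, so
// pointers and references remain valid until Clear(). Minimal sketch:
//
//   TPagedList<ui64> list(*TlsAllocState);   // any TAlignedPagePool works
//   list.Add(1);
//   list.Add(2);
//   for (ui64 v : list) { Y_UNUSED(v); }
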
inline void TBoxedValueWithFree::operator delete(void *mem) noexcept {
    auto size = ((TMkqlPAllocHeader*)mem)->Size + sizeof(TMkqlPAllocHeader);
    MKQLFreeWithSize(mem, size, EMemorySubPool::Default);
}

} // NMiniKQL
} // NKikimr