mkql_alloc.h

#pragma once

#include "aligned_page_pool.h"
#include "mkql_mem_info.h"
#include <yql/essentials/core/pg_settings/guc_settings.h>
#include <yql/essentials/parser/pg_wrapper/interface/context.h>
#include <yql/essentials/public/udf/udf_allocator.h>
#include <yql/essentials/public/udf/udf_value.h>
#include <util/string/builder.h>
#include <util/system/align.h>
#include <util/system/defaults.h>
#include <util/system/tls.h>
#include <new>
#include <unordered_map>
#include <unordered_set> // needed for TAllocState::ArrowBuffers below
#include <atomic>
#include <memory>

namespace NKikimr {
namespace NMiniKQL {

const ui64 MKQL_ALIGNMENT = 16;
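// Editorial note: every arena page starts with this header; user allocations are
// carved out of the remaining POOL_PAGE_SIZE - sizeof(TAllocPageHeader) bytes
// (see MaxPageUserData below). UseCount tracks live allocations on the page and
// Deallocated accumulates freed bytes; Link points to another page header and is
// presumably maintained by the slow-path helpers (MKQLAllocSlow/MKQLFreeSlow).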
struct TAllocPageHeader {
    ui64 Capacity;
    ui64 Offset;
    ui64 UseCount;
    ui64 Deallocated;
    TAlignedPagePool* MyAlloc;
    TAllocPageHeader* Link;
};

using TMemorySubPoolIdx = ui32;
enum class EMemorySubPool: TMemorySubPoolIdx {
    Default = 0,
    Temporary = 1,

    Count
};

constexpr ui32 MaxPageUserData = TAlignedPagePool::POOL_PAGE_SIZE - sizeof(TAllocPageHeader);

static_assert(sizeof(TAllocPageHeader) % MKQL_ALIGNMENT == 0, "Incorrect size of header");
struct TAllocState : public TAlignedPagePool
{
    struct TListEntry {
        TListEntry *Left = nullptr;
        TListEntry *Right = nullptr;

        void Link(TListEntry* root) noexcept;
        void Unlink() noexcept;
        void InitLinks() noexcept { Left = Right = this; }
        void Clear() noexcept { Left = Right = nullptr; }
        bool IsUnlinked() const noexcept { return !Left && !Right; }
    };

#ifndef NDEBUG
    std::unordered_map<TMemoryUsageInfo*, TIntrusivePtr<TMemoryUsageInfo>> ActiveMemInfo;
#endif
    bool SupportsSizedAllocators = false;

    void* LargeAlloc(size_t size) {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
            return malloc(size);
        }
#endif
        return Alloc(size);
    }

    void LargeFree(void* ptr, size_t size) noexcept {
#if defined(ALLOW_DEFAULT_ALLOCATOR)
        if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
            free(ptr);
            return;
        }
#endif
        Free(ptr, size);
    }

    using TCurrentPages = std::array<TAllocPageHeader*, (TMemorySubPoolIdx)EMemorySubPool::Count>;

    static TAllocPageHeader EmptyPageHeader;
    static TCurrentPages EmptyCurrentPages;

    TCurrentPages CurrentPages = EmptyCurrentPages;
    TListEntry OffloadedBlocksRoot;
    TListEntry GlobalPAllocList;
    TListEntry* CurrentPAllocList;
    TListEntry ArrowBlocksRoot;
    std::unordered_set<const void*> ArrowBuffers;
    bool EnableArrowTracking = true;

    void* MainContext = nullptr;
    void* CurrentContext = nullptr;

    struct TLockInfo {
        i32 OriginalRefs;
        i32 Locks;
    };

    bool UseRefLocking = false;
    std::unordered_map<void*, TLockInfo> LockedObjectsRefs;

    ::NKikimr::NUdf::TBoxedValueLink Root;

    NKikimr::NUdf::TBoxedValueLink* GetRoot() noexcept {
        return &Root;
    }

    explicit TAllocState(const TSourceLocation& location, const TAlignedPagePoolCounters& counters, bool supportsSizedAllocators);
    void KillAllBoxed();
    void InvalidateMemInfo();
    size_t GetDeallocatedInPages() const;
    static void CleanupPAllocList(TListEntry* root);
    static void CleanupArrowList(TListEntry* root);

    void LockObject(::NKikimr::NUdf::TUnboxedValuePod value);
    void UnlockObject(::NKikimr::NUdf::TUnboxedValuePod value);
};

extern Y_POD_THREAD(TAllocState*) TlsAllocState;
class TPAllocScope {
public:
    TPAllocScope() {
        PAllocList.InitLinks();
        Attach();
    }

    ~TPAllocScope() {
        Cleanup();
        Detach();
    }

    void Attach() {
        Y_ABORT_UNLESS(!Prev);
        Prev = TlsAllocState->CurrentPAllocList;
        Y_ABORT_UNLESS(Prev);
        TlsAllocState->CurrentPAllocList = &PAllocList;
    }

    void Detach() {
        if (Prev) {
            Y_ABORT_UNLESS(TlsAllocState->CurrentPAllocList == &PAllocList);
            TlsAllocState->CurrentPAllocList = Prev;
            Prev = nullptr;
        }
    }

    void Cleanup() {
        TAllocState::CleanupPAllocList(&PAllocList);
    }

private:
    TAllocState::TListEntry PAllocList;
    TAllocState::TListEntry* Prev = nullptr;
};
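// Usage sketch (illustrative only): while a TPAllocScope is alive, palloc-style
// blocks tracked through TlsAllocState->CurrentPAllocList are linked into its
// private list and released by its destructor, so PG-wrapper allocations can be
// bounded to a region of code:
//
//     {
//         TPAllocScope pallocScope; // redirects TlsAllocState->CurrentPAllocList
//         ...                       // palloc'ed blocks land in pallocScope
//     }                             // Cleanup() frees them, previous list restored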
// TListEntry and IBoxedValue use the same place
static_assert(sizeof(NUdf::IBoxedValue) == sizeof(TAllocState::TListEntry));

class TBoxedValueWithFree : public NUdf::TBoxedValueBase {
public:
    void operator delete(void *mem) noexcept;
};

struct TMkqlPAllocHeader {
    union {
        TAllocState::TListEntry Entry;
        TBoxedValueWithFree Boxed;
    } U;

    size_t Size;
    ui64 Self; // should be placed right before pointer to allocated area, see GetMemoryChunkContext
};

static_assert(sizeof(TMkqlPAllocHeader) ==
    sizeof(size_t) +
    sizeof(TAllocState::TListEntry) +
    sizeof(void*), "Padding is not allowed");

constexpr size_t ArrowAlignment = 64;
struct TMkqlArrowHeader {
    TAllocState::TListEntry Entry;
    ui64 Size;
    char Padding[ArrowAlignment - sizeof(TAllocState::TListEntry) - sizeof(ui64)];
};
static_assert(sizeof(TMkqlArrowHeader) == ArrowAlignment);
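// Editorial note: the explicit Padding keeps this header exactly ArrowAlignment
// (64) bytes, so, presumably, MKQLArrowAllocate can place it immediately before
// the returned buffer without disturbing the buffer's own 64-byte alignment.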
class TScopedAlloc {
public:
    explicit TScopedAlloc(const TSourceLocation& location,
        const TAlignedPagePoolCounters& counters = TAlignedPagePoolCounters(), bool supportsSizedAllocators = false, bool initiallyAcquired = true)
        : InitiallyAcquired_(initiallyAcquired)
        , MyState_(location, counters, supportsSizedAllocators)
    {
        MyState_.MainContext = PgInitializeMainContext();
        if (InitiallyAcquired_) {
            Acquire();
        }
    }

    ~TScopedAlloc()
    {
        if (!InitiallyAcquired_) {
            Acquire();
        }

        MyState_.KillAllBoxed();
        Release();
        PgDestroyMainContext(MyState_.MainContext);
    }

    TAllocState& Ref() {
        return MyState_;
    }

    void Acquire();
    void Release();

    size_t GetUsed() const { return MyState_.GetUsed(); }
    size_t GetPeakUsed() const { return MyState_.GetPeakUsed(); }
    size_t GetAllocated() const { return MyState_.GetAllocated(); }
    size_t GetPeakAllocated() const { return MyState_.GetPeakAllocated(); }

    size_t GetLimit() const { return MyState_.GetLimit(); }
    void SetLimit(size_t limit) { MyState_.SetLimit(limit); }
    void DisableStrictAllocationCheck() { MyState_.DisableStrictAllocationCheck(); }
    void ReleaseFreePages() { MyState_.ReleaseFreePages(); }
    void InvalidateMemInfo() { MyState_.InvalidateMemInfo(); }

    bool IsAttached() const { return AttachedCount_ > 0; }

    void SetGUCSettings(const TGUCSettings::TPtr& GUCSettings) {
        Acquire();
        PgSetGUCSettings(MyState_.MainContext, GUCSettings);
        Release();
    }

    void SetMaximumLimitValueReached(bool IsReached) {
        MyState_.SetMaximumLimitValueReached(IsReached);
    }

private:
    const bool InitiallyAcquired_;
    TAllocState MyState_;
    size_t AttachedCount_ = 0;
    TAllocState* PrevState_ = nullptr;
};
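// Usage sketch (illustrative, not part of the original header): a TScopedAlloc
// owns one TAllocState and, while acquired, binds it to the current thread via
// TlsAllocState, so MKQL allocations on this thread are accounted against it:
//
//     TScopedAlloc alloc(__LOCATION__);          // acquired on construction
//     void* p = MKQLAllocWithSize(64, EMemorySubPool::Default);
//     MKQLFreeWithSize(p, 64, EMemorySubPool::Default);
//     // destructor kills remaining boxed values and releases the state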
class TPagedArena {
public:
    TPagedArena(TAlignedPagePool* pagePool) noexcept
        : PagePool_(pagePool)
        , CurrentPages_(TAllocState::EmptyCurrentPages)
    {}

    TPagedArena(const TPagedArena&) = delete;
    TPagedArena(TPagedArena&& other) noexcept
        : PagePool_(other.PagePool_)
        , CurrentPages_(other.CurrentPages_)
    {
        other.CurrentPages_ = TAllocState::EmptyCurrentPages;
    }

    void operator=(const TPagedArena&) = delete;

    void operator=(TPagedArena&& other) noexcept {
        Clear();
        PagePool_ = other.PagePool_;
        CurrentPages_ = other.CurrentPages_;
        other.CurrentPages_ = TAllocState::EmptyCurrentPages;
    }

    ~TPagedArena() noexcept {
        Clear();
    }

    void* Alloc(size_t sz, const EMemorySubPool pagePool = EMemorySubPool::Default) {
        auto& currentPage = CurrentPages_[(TMemorySubPoolIdx)pagePool];
        if (Y_LIKELY(currentPage->Offset + sz <= currentPage->Capacity)) {
            void* ret = (char*)currentPage + currentPage->Offset;
            currentPage->Offset = AlignUp(currentPage->Offset + sz, MKQL_ALIGNMENT);
            return ret;
        }

        return AllocSlow(sz, pagePool);
    }

    void Clear() noexcept;

private:
    void* AllocSlow(const size_t sz, const EMemorySubPool pagePool);

private:
    TAlignedPagePool* PagePool_;
    TAllocState::TCurrentPages CurrentPages_ = TAllocState::EmptyCurrentPages;
};
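// Usage sketch (illustrative): a bump-pointer arena over pool pages; there is
// no per-object free, memory is returned page-wise by Clear() or the destructor:
//
//     TPagedArena arena(&alloc.Ref());        // assumes a live TScopedAlloc `alloc`
//     void* a = arena.Alloc(24);              // bump-allocated, 16-byte aligned
//     void* b = arena.Alloc(100, EMemorySubPool::Temporary);
//     arena.Clear();                          // releases all pages at once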
void* MKQLAllocSlow(size_t sz, TAllocState* state, const EMemorySubPool mPool);

inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
    Y_DEBUG_ABORT_UNLESS(state);

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
        auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
        if (!ret) {
            throw TMemoryLimitExceededException();
        }

        ret->Link(&state->OffloadedBlocksRoot);
        return ret + 1;
    }
#endif

    auto currPage = state->CurrentPages[(TMemorySubPoolIdx)mPool];
    if (Y_LIKELY(currPage->Offset + sz <= currPage->Capacity)) {
        void* ret = (char*)currPage + currPage->Offset;
        currPage->Offset = AlignUp(currPage->Offset + sz, MKQL_ALIGNMENT);
        ++currPage->UseCount;
        return ret;
    }

    return MKQLAllocSlow(sz, state, mPool);
}

inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
    Y_DEBUG_ABORT_UNLESS(state);

    bool useMalloc = state->SupportsSizedAllocators && sz > MaxPageUserData;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    useMalloc = useMalloc || TAllocState::IsDefaultAllocatorUsed();
#endif

    if (Y_UNLIKELY(useMalloc)) {
        state->OffloadAlloc(sizeof(TAllocState::TListEntry) + sz);
        auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
        if (!ret) {
            throw TMemoryLimitExceededException();
        }

        ret->Link(&state->OffloadedBlocksRoot);
        return ret + 1;
    }

    auto currPage = state->CurrentPages[(TMemorySubPoolIdx)mPool];
    if (Y_LIKELY(currPage->Offset + sz <= currPage->Capacity)) {
        void* ret = (char*)currPage + currPage->Offset;
        currPage->Offset = AlignUp(currPage->Offset + sz, MKQL_ALIGNMENT);
        ++currPage->UseCount;
        return ret;
    }

    return MKQLAllocSlow(sz, state, mPool);
}

void MKQLFreeSlow(TAllocPageHeader* header, TAllocState *state, const EMemorySubPool mPool) noexcept;

inline void MKQLFreeDeprecated(const void* mem, const EMemorySubPool mPool) noexcept {
    if (!mem) {
        return;
    }

#if defined(ALLOW_DEFAULT_ALLOCATOR)
    if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
        TAllocState *state = TlsAllocState;
        Y_DEBUG_ABORT_UNLESS(state);

        auto entry = (TAllocState::TListEntry*)(mem) - 1;
        entry->Unlink();
        free(entry);
        return;
    }
#endif

    TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(mem);
    Y_DEBUG_ABORT_UNLESS(header->MyAlloc == TlsAllocState, "%s", (TStringBuilder() << "wrong allocator was used; "
        "allocated with: " << header->MyAlloc->GetDebugInfo() << " freed with: " << TlsAllocState->GetDebugInfo()).data());
    if (Y_LIKELY(--header->UseCount != 0)) {
        return;
    }

    MKQLFreeSlow(header, TlsAllocState, mPool);
}

inline void MKQLFreeFastWithSize(const void* mem, size_t sz, TAllocState* state, const EMemorySubPool mPool) noexcept {
    if (!mem) {
        return;
    }

    Y_DEBUG_ABORT_UNLESS(state);

    bool useFree = state->SupportsSizedAllocators && sz > MaxPageUserData;
#if defined(ALLOW_DEFAULT_ALLOCATOR)
    useFree = useFree || TAllocState::IsDefaultAllocatorUsed();
#endif

    if (Y_UNLIKELY(useFree)) {
        auto entry = (TAllocState::TListEntry*)(mem) - 1;
        entry->Unlink();
        free(entry);
        state->OffloadFree(sizeof(TAllocState::TListEntry) + sz);
        return;
    }

    TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(mem);
    Y_DEBUG_ABORT_UNLESS(header->MyAlloc == state, "%s", (TStringBuilder() << "wrong allocator was used; "
        "allocated with: " << header->MyAlloc->GetDebugInfo() << " freed with: " << TlsAllocState->GetDebugInfo()).data());
    if (Y_LIKELY(--header->UseCount != 0)) {
        header->Deallocated += sz;
        return;
    }

    MKQLFreeSlow(header, state, mPool);
}

inline void* MKQLAllocDeprecated(size_t sz, const EMemorySubPool mPool) {
    return MKQLAllocFastDeprecated(sz, TlsAllocState, mPool);
}

inline void* MKQLAllocWithSize(size_t sz, const EMemorySubPool mPool) {
    return MKQLAllocFastWithSize(sz, TlsAllocState, mPool);
}

inline void MKQLFreeWithSize(const void* mem, size_t sz, const EMemorySubPool mPool) noexcept {
    MKQLFreeFastWithSize(mem, sz, TlsAllocState, mPool);
}
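// Editorial note: MKQLAllocWithSize/MKQLFreeWithSize must be paired with the
// same size. Large allocations (sz > MaxPageUserData when the state supports
// sized allocators) are offloaded to malloc behind a TListEntry prefix, so the
// matching free must unlink exactly that prefix; page-backed allocations use
// the size only to account Deallocated bytes on the page header.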
inline void MKQLRegisterObject(NUdf::TBoxedValue* value) noexcept {
    value->Link(TlsAllocState->GetRoot());
}

inline void MKQLUnregisterObject(NUdf::TBoxedValue* value) noexcept {
    value->Unlink();
}

void* MKQLArrowAllocate(ui64 size);
void* MKQLArrowReallocate(const void* mem, ui64 prevSize, ui64 size);
void MKQLArrowFree(const void* mem, ui64 size);
void MKQLArrowUntrack(const void* mem);
template <const EMemorySubPool MemoryPoolExt = EMemorySubPool::Default>
struct TWithMiniKQLAlloc {
    static constexpr EMemorySubPool MemoryPool = MemoryPoolExt;

    static void FreeWithSize(const void* mem, const size_t sz) {
        NMiniKQL::MKQLFreeWithSize(mem, sz, MemoryPool);
    }

    static void* AllocWithSize(const size_t sz) {
        return NMiniKQL::MKQLAllocWithSize(sz, MemoryPool);
    }

    void* operator new(size_t sz) {
        return NMiniKQL::MKQLAllocWithSize(sz, MemoryPool);
    }

    void* operator new[](size_t sz) {
        return NMiniKQL::MKQLAllocWithSize(sz, MemoryPool);
    }

    void operator delete(void *mem, std::size_t sz) noexcept {
        NMiniKQL::MKQLFreeWithSize(mem, sz, MemoryPool);
    }

    void operator delete[](void *mem, std::size_t sz) noexcept {
        NMiniKQL::MKQLFreeWithSize(mem, sz, MemoryPool);
    }
};
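// Usage sketch (illustrative; TMyNode is a hypothetical type): deriving from
// TWithMiniKQLAlloc routes plain new/delete through the MKQL allocator of the
// chosen sub-pool, so objects must be created while an allocator is acquired:
//
//     struct TMyNode : public TWithDefaultMiniKQLAlloc {
//         ui64 Payload = 0;
//     };
//     auto* node = new TMyNode();   // sized operator new -> MKQLAllocWithSize
//     delete node;                  // sized operator delete -> MKQLFreeWithSize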
template <typename T, typename... Args>
T* AllocateOn(TAllocState* state, Args&&... args)
{
    static_assert(std::is_base_of<TWithMiniKQLAlloc<T::MemoryPool>, T>::value, "Class must inherit TWithMiniKQLAlloc.");
    void* addr = MKQLAllocFastWithSize(sizeof(T), state, T::MemoryPool);
    return ::new(addr) T(std::forward<Args>(args)...);
}
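// Usage sketch (illustrative, reusing the hypothetical TMyNode above):
//
//     TMyNode* node = AllocateOn<TMyNode>(&alloc.Ref());
//     // placement-new on memory charged to the explicit state, not to TLS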
template <typename Type, EMemorySubPool MemoryPool = EMemorySubPool::Default>
struct TMKQLAllocator
{
    typedef Type value_type;
    typedef Type* pointer;
    typedef const Type* const_pointer;
    typedef Type& reference;
    typedef const Type& const_reference;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;

    TMKQLAllocator() noexcept = default;
    ~TMKQLAllocator() noexcept = default;

    template<typename U> TMKQLAllocator(const TMKQLAllocator<U, MemoryPool>&) noexcept {}
    template<typename U> struct rebind { typedef TMKQLAllocator<U, MemoryPool> other; };
    template<typename U> bool operator==(const TMKQLAllocator<U, MemoryPool>&) const { return true; }
    template<typename U> bool operator!=(const TMKQLAllocator<U, MemoryPool>&) const { return false; }

    static pointer allocate(size_type n, const void* = nullptr)
    {
        return static_cast<pointer>(MKQLAllocWithSize(n * sizeof(value_type), MemoryPool));
    }

    static void deallocate(const_pointer p, size_type n) noexcept
    {
        MKQLFreeWithSize(p, n * sizeof(value_type), MemoryPool);
    }
};
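// Usage sketch (illustrative): a standard container parameterized with
// TMKQLAllocator keeps its storage inside the current thread's TAllocState:
//
//     std::vector<ui64, TMKQLAllocator<ui64>> values;
//     values.resize(1000); // backing array allocated via MKQLAllocWithSize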
using TWithDefaultMiniKQLAlloc = TWithMiniKQLAlloc<EMemorySubPool::Default>;
using TWithTemporaryMiniKQLAlloc = TWithMiniKQLAlloc<EMemorySubPool::Temporary>;
template <typename Type>
struct TMKQLHugeAllocator
{
    typedef Type value_type;
    typedef Type* pointer;
    typedef const Type* const_pointer;
    typedef Type& reference;
    typedef const Type& const_reference;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;

    TMKQLHugeAllocator() noexcept = default;
    ~TMKQLHugeAllocator() noexcept = default;

    template<typename U> TMKQLHugeAllocator(const TMKQLHugeAllocator<U>&) noexcept {}
    template<typename U> struct rebind { typedef TMKQLHugeAllocator<U> other; };
    template<typename U> bool operator==(const TMKQLHugeAllocator<U>&) const { return true; }
    template<typename U> bool operator!=(const TMKQLHugeAllocator<U>&) const { return false; }

    static pointer allocate(size_type n, const void* = nullptr)
    {
        size_t size = Max(n * sizeof(value_type), TAllocState::POOL_PAGE_SIZE);
        return static_cast<pointer>(TlsAllocState->GetBlock(size));
    }

    static void deallocate(const_pointer p, size_type n) noexcept
    {
        size_t size = Max(n * sizeof(value_type), TAllocState::POOL_PAGE_SIZE);
        TlsAllocState->ReturnBlock(const_cast<pointer>(p), size);
    }
};
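// Editorial note: unlike TMKQLAllocator, every allocation here is rounded up to
// at least one whole pool page and served via GetBlock/ReturnBlock, which suits
// containers holding a few large buffers; per-element use would waste most of
// each page.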
template <typename T>
class TPagedList
{
public:
    static_assert(sizeof(T) <= TAlignedPagePool::POOL_PAGE_SIZE, "Too big object");
    static constexpr size_t OBJECTS_PER_PAGE = TAlignedPagePool::POOL_PAGE_SIZE / sizeof(T);

    class TIterator;
    class TConstIterator;

    TPagedList(TAlignedPagePool& pool)
        : Pool(pool)
        , IndexInLastPage(OBJECTS_PER_PAGE)
    {}

    TPagedList(const TPagedList&) = delete;
    TPagedList(TPagedList&&) = delete;

    ~TPagedList() {
        Clear();
    }

    void Add(T&& value) {
        if (IndexInLastPage < OBJECTS_PER_PAGE) {
            auto ptr = ObjectAt(Pages.back(), IndexInLastPage);
            new(ptr) T(std::move(value));
            ++IndexInLastPage;
            return;
        }

        auto ptr = Pool.GetPage();
        IndexInLastPage = 1;
        Pages.push_back(ptr);
        new(ptr) T(std::move(value));
    }

    void Clear() {
        for (ui32 i = 0; i + 1 < Pages.size(); ++i) {
            for (ui32 objIndex = 0; objIndex < OBJECTS_PER_PAGE; ++objIndex) {
                ObjectAt(Pages[i], objIndex)->~T();
            }

            Pool.ReturnPage(Pages[i]);
        }

        if (!Pages.empty()) {
            for (ui32 objIndex = 0; objIndex < IndexInLastPage; ++objIndex) {
                ObjectAt(Pages.back(), objIndex)->~T();
            }

            Pool.ReturnPage(Pages.back());
        }

        TPages().swap(Pages);
        IndexInLastPage = OBJECTS_PER_PAGE;
    }

    const T& operator[](size_t i) const {
        const auto table = i / OBJECTS_PER_PAGE;
        const auto index = i % OBJECTS_PER_PAGE;
        return *ObjectAt(Pages[table], index);
    }

    size_t Size() const {
        return Pages.empty() ? 0 : ((Pages.size() - 1) * OBJECTS_PER_PAGE + IndexInLastPage);
    }

    TConstIterator Begin() const {
        return TConstIterator(this, 0, 0);
    }

    TConstIterator begin() const {
        return Begin();
    }

    TConstIterator End() const {
        if (IndexInLastPage == OBJECTS_PER_PAGE) {
            return TConstIterator(this, Pages.size(), 0);
        }

        return TConstIterator(this, Pages.size() - 1, IndexInLastPage);
    }

    TConstIterator end() const {
        return End();
    }

    TIterator Begin() {
        return TIterator(this, 0, 0);
    }

    TIterator begin() {
        return Begin();
    }

    TIterator End() {
        if (IndexInLastPage == OBJECTS_PER_PAGE) {
            return TIterator(this, Pages.size(), 0);
        }

        return TIterator(this, Pages.size() - 1, IndexInLastPage);
    }

    TIterator end() {
        return End();
    }

    class TIterator
    {
    public:
        using TOwner = TPagedList<T>;

        TIterator()
            : Owner(nullptr)
            , PageNo(0)
            , PageIndex(0)
        {}

        TIterator(const TIterator&) = default;
        TIterator& operator=(const TIterator&) = default;

        TIterator(TOwner* owner, size_t pageNo, size_t pageIndex)
            : Owner(owner)
            , PageNo(pageNo)
            , PageIndex(pageIndex)
        {}

        T& operator*() {
            Y_DEBUG_ABORT_UNLESS(PageIndex < OBJECTS_PER_PAGE);
            Y_DEBUG_ABORT_UNLESS(PageNo < Owner->Pages.size());
            Y_DEBUG_ABORT_UNLESS(PageNo + 1 < Owner->Pages.size() || PageIndex < Owner->IndexInLastPage);

            return *Owner->ObjectAt(Owner->Pages[PageNo], PageIndex);
        }

        TIterator& operator++() {
            if (++PageIndex == OBJECTS_PER_PAGE) {
                ++PageNo;
                PageIndex = 0;
            }

            return *this;
        }

        bool operator==(const TIterator& other) const {
            return PageNo == other.PageNo && PageIndex == other.PageIndex;
        }

        bool operator!=(const TIterator& other) const {
            return !operator==(other);
        }

    private:
        TOwner* Owner;
        size_t PageNo;
        size_t PageIndex;
    };

    class TConstIterator
    {
    public:
        using TOwner = TPagedList<T>;

        TConstIterator()
            : Owner(nullptr)
            , PageNo(0)
            , PageIndex(0)
        {}

        TConstIterator(const TConstIterator&) = default;
        TConstIterator& operator=(const TConstIterator&) = default;

        TConstIterator(const TOwner* owner, size_t pageNo, size_t pageIndex)
            : Owner(owner)
            , PageNo(pageNo)
            , PageIndex(pageIndex)
        {}

        const T& operator*() {
            Y_DEBUG_ABORT_UNLESS(PageIndex < OBJECTS_PER_PAGE);
            Y_DEBUG_ABORT_UNLESS(PageNo < Owner->Pages.size());
            Y_DEBUG_ABORT_UNLESS(PageNo + 1 < Owner->Pages.size() || PageIndex < Owner->IndexInLastPage);

            return *Owner->ObjectAt(Owner->Pages[PageNo], PageIndex);
        }

        TConstIterator& operator++() {
            if (++PageIndex == OBJECTS_PER_PAGE) {
                ++PageNo;
                PageIndex = 0;
            }

            return *this;
        }

        bool operator==(const TConstIterator& other) const {
            return PageNo == other.PageNo && PageIndex == other.PageIndex;
        }

        bool operator!=(const TConstIterator& other) const {
            return !operator==(other);
        }

    private:
        const TOwner* Owner;
        size_t PageNo;
        size_t PageIndex;
    };

private:
    static const T* ObjectAt(const void* page, size_t objectIndex) {
        return reinterpret_cast<const T*>(static_cast<const char*>(page) + objectIndex * sizeof(T));
    }

    static T* ObjectAt(void* page, size_t objectIndex) {
        return reinterpret_cast<T*>(static_cast<char*>(page) + objectIndex * sizeof(T));
    }

    TAlignedPagePool& Pool;
    using TPages = std::vector<void*, TMKQLAllocator<void*>>;
    TPages Pages;
    size_t IndexInLastPage;
};
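// Usage sketch (illustrative): TPagedList is an append-only container that
// packs objects OBJECTS_PER_PAGE to a pool page, so growth never relocates
// existing elements (stable addresses, no copy on expansion):
//
//     TPagedList<ui64> list(alloc.Ref());     // assumes a live TScopedAlloc `alloc`
//     list.Add(42);
//     for (ui64 v : list) { ... }             // Begin()/End() walk page by page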
inline void TBoxedValueWithFree::operator delete(void *mem) noexcept {
    auto size = ((TMkqlPAllocHeader*)mem)->Size + sizeof(TMkqlPAllocHeader);
    MKQLFreeWithSize(mem, size, EMemorySubPool::Default);
}

} // NMiniKQL
} // NKikimr