/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/common/allocator.h>
#include <aws/common/array_list.h>
#include <aws/common/assert.h>
#include <aws/common/mutex.h>

/*
 * Small Block Allocator
 * This is a fairly standard approach: always allocate aligned pages of memory, so that any address can be
 * rounded down to its page boundary to find the page's bookkeeping data. The goal is to reduce per-alloc
 * overhead and greatly improve runtime speed by doing as little actual allocation work as possible, preferring
 * instead to re-use (hopefully still cached) chunks in LIFO order, or to chunk up a page if there are no free
 * chunks. When all chunks in a page are freed, the page is returned to the OS.
 *
 * The allocator itself is simply an array of bins, each representing a power-of-2 size from 32 to N (512 tends
 * to be a good upper bound). Thread safety is guaranteed by a mutex per bin, and locks are only necessary
 * around the lowest-level alloc and free operations.
 *
 * Note: this allocator gets its internal memory for data structures from the parent allocator, but does not
 * use the parent to allocate pages. Pages are allocated directly from the OS-specific aligned malloc
 * implementation, which allows the OS to do address re-mapping for us instead of over-allocating to fulfill
 * alignment.
 */

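/*
 * Typical usage (a minimal sketch; the entry points below are implemented in this file and exposed through
 * aws/common/allocator.h):
 *
 *     struct aws_allocator *sba = aws_small_block_allocator_new(aws_default_allocator(), true);
 *
 *     void *small = aws_mem_acquire(sba, 64);    // served from the 64-byte bin
 *     void *large = aws_mem_acquire(sba, 4096);  // larger than the biggest bin, forwarded to the parent
 *
 *     aws_mem_release(sba, small);
 *     aws_mem_release(sba, large);
 *     aws_small_block_allocator_destroy(sba);
 */
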
#ifdef _WIN32
#    include <malloc.h>
#elif __linux__ || __APPLE__
#    include <stdlib.h>
#endif

#if !defined(AWS_SBA_PAGE_SIZE)
#    if defined(PAGE_SIZE)
#        define AWS_SBA_PAGE_SIZE ((uintptr_t)(PAGE_SIZE))
#    else
#        define AWS_SBA_PAGE_SIZE ((uintptr_t)(4096))
#    endif
#endif

#define AWS_SBA_PAGE_MASK ((uintptr_t) ~(AWS_SBA_PAGE_SIZE - 1))
#define AWS_SBA_TAG_VALUE 0x736f6d6570736575ULL

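/* With the default 4096-byte page, AWS_SBA_PAGE_MASK is ~(uintptr_t)0xFFF, i.e. it clears the low 12 bits of an
 * address. The tag value is presumably just an arbitrary marker; its bytes read "somepseu" in ASCII when taken
 * most-significant byte first. */
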
/* List of bin sizes: each must be a power of 2 and less than half of AWS_SBA_PAGE_SIZE */
#define AWS_SBA_BIN_COUNT 5
static const size_t s_bin_sizes[AWS_SBA_BIN_COUNT] = {32, 64, 128, 256, 512};
static const size_t s_max_bin_size = 512;

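/* Requests larger than s_max_bin_size bypass the bins entirely and are forwarded to the parent allocator
 * (see s_sba_alloc/s_sba_free below). */
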
struct sba_bin {
    size_t size;                        /* size of allocs in this bin */
    struct aws_mutex mutex;             /* lock protecting this bin */
    uint8_t *page_cursor;               /* pointer to working page, currently being chunked from */
    struct aws_array_list active_pages; /* all pages in use by this bin, could be optimized at scale by being a set */
    struct aws_array_list free_chunks;  /* free chunks available in this bin */
};

/* Header stored at the base of each page.
 * As long as this is under 32 bytes, all is well.
 * Above that, there's potentially more waste per page */
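/* (On a typical 64-bit target the header below packs to exactly 32 bytes: 8 + 8 + 4 + 4 bytes of padding + 8.) */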
struct page_header {
    uint64_t tag;         /* marker to identify/validate pages */
    struct sba_bin *bin;  /* bin this page belongs to */
    uint32_t alloc_count; /* number of outstanding allocs from this page */
    uint64_t tag2;
};

/* This is the impl for the aws_allocator */
struct small_block_allocator {
    struct aws_allocator *allocator; /* parent allocator, for large allocs */
    struct sba_bin bins[AWS_SBA_BIN_COUNT];
    int (*lock)(struct aws_mutex *);
    int (*unlock)(struct aws_mutex *);
};

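/* When the SBA is created single-threaded, the lock/unlock pointers are set to these no-op versions so the
 * hot path skips mutex overhead entirely (see s_sba_init). */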
static int s_null_lock(struct aws_mutex *mutex) {
    (void)mutex;
    /* NO OP */
    return 0;
}

static int s_null_unlock(struct aws_mutex *mutex) {
    (void)mutex;
    /* NO OP */
    return 0;
}

static int s_mutex_lock(struct aws_mutex *mutex) {
    return aws_mutex_lock(mutex);
}

static int s_mutex_unlock(struct aws_mutex *mutex) {
    return aws_mutex_unlock(mutex);
}

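/* For example, with 4 KiB pages s_page_base maps an address like 0x10234a8 down to 0x1023000, which is where
 * the page_header for that allocation lives. */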
static void *s_page_base(const void *addr) {
    /* mask off the address to round it down to page alignment */
    uint8_t *page_base = (uint8_t *)(((uintptr_t)addr) & AWS_SBA_PAGE_MASK);
    return page_base;
}

static void *s_page_bind(void *addr, struct sba_bin *bin) {
    /* insert the header at the base of the page and advance past it */
    struct page_header *page = (struct page_header *)addr;
    page->tag = page->tag2 = AWS_SBA_TAG_VALUE;
    page->bin = bin;
    page->alloc_count = 0;
    return (uint8_t *)addr + sizeof(struct page_header);
}

/* Wraps OS-specific aligned malloc implementation */
static void *s_aligned_alloc(size_t size, size_t align) {
#ifdef _WIN32
    return _aligned_malloc(size, align);
#else
    void *mem = NULL;
    int return_code = posix_memalign(&mem, align, size);
    if (return_code) {
        aws_raise_error(AWS_ERROR_OOM);
        return NULL;
    }
    return mem;
#endif
}

/* Wraps OS-specific aligned free implementation */
static void s_aligned_free(void *addr) {
#ifdef _WIN32
    _aligned_free(addr);
#else
    free(addr);
#endif
}

/* aws_allocator vtable template */
static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size);
static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr);
static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);

static struct aws_allocator s_sba_allocator = {
    .mem_acquire = s_sba_mem_acquire,
    .mem_release = s_sba_mem_release,
    .mem_realloc = s_sba_mem_realloc,
    .mem_calloc = s_sba_mem_calloc,
};

static int s_sba_init(struct small_block_allocator *sba, struct aws_allocator *allocator, bool multi_threaded) {
    sba->allocator = allocator;
    AWS_ZERO_ARRAY(sba->bins);
    sba->lock = multi_threaded ? s_mutex_lock : s_null_lock;
    sba->unlock = multi_threaded ? s_mutex_unlock : s_null_unlock;

    for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
        struct sba_bin *bin = &sba->bins[idx];
        bin->size = s_bin_sizes[idx];
        if (multi_threaded && aws_mutex_init(&bin->mutex)) {
            goto cleanup;
        }
        if (aws_array_list_init_dynamic(&bin->active_pages, sba->allocator, 16, sizeof(void *))) {
            goto cleanup;
        }
        /* start with enough chunks for 1 page */
        if (aws_array_list_init_dynamic(
                &bin->free_chunks, sba->allocator, aws_max_size(AWS_SBA_PAGE_SIZE / bin->size, 16), sizeof(void *))) {
            goto cleanup;
        }
    }

    return AWS_OP_SUCCESS;

cleanup:
    for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
        struct sba_bin *bin = &sba->bins[idx];
        aws_mutex_clean_up(&bin->mutex);
        aws_array_list_clean_up(&bin->active_pages);
        aws_array_list_clean_up(&bin->free_chunks);
    }
    return AWS_OP_ERR;
}

static void s_sba_clean_up(struct small_block_allocator *sba) {
    /* free all known pages, then free the working page */
    for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
        struct sba_bin *bin = &sba->bins[idx];
        for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
            void *page_addr = NULL;
            aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
            struct page_header *page = page_addr;
            AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (bin)");
            s_aligned_free(page);
        }
        if (bin->page_cursor) {
            void *page_addr = s_page_base(bin->page_cursor);
            struct page_header *page = page_addr;
            AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (page)");
            s_aligned_free(page);
        }

        aws_array_list_clean_up(&bin->active_pages);
        aws_array_list_clean_up(&bin->free_chunks);
        aws_mutex_clean_up(&bin->mutex);
    }
}

struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *allocator, bool multi_threaded) {
    struct small_block_allocator *sba = NULL;
    struct aws_allocator *sba_allocator = NULL;
    aws_mem_acquire_many(
        allocator, 2, &sba, sizeof(struct small_block_allocator), &sba_allocator, sizeof(struct aws_allocator));

    if (!sba || !sba_allocator) {
        return NULL;
    }

    AWS_ZERO_STRUCT(*sba);
    AWS_ZERO_STRUCT(*sba_allocator);

    /* copy the template vtable */
    *sba_allocator = s_sba_allocator;
    sba_allocator->impl = sba;

    if (s_sba_init(sba, allocator, multi_threaded)) {
        s_sba_clean_up(sba);
        aws_mem_release(allocator, sba);
        return NULL;
    }

    return sba_allocator;
}

void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator) {
    if (!sba_allocator) {
        return;
    }
    struct small_block_allocator *sba = sba_allocator->impl;
    if (!sba) {
        return;
    }

    struct aws_allocator *allocator = sba->allocator;
    s_sba_clean_up(sba);
    aws_mem_release(allocator, sba);
}

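/* Note: bytes_active is measured in whole bin-sized chunks (alloc_count * bin size), not the exact sizes
 * originally requested by callers; bytes_reserved counts whole pages. */
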
size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator) {
    AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_active requires a non-null allocator");
    struct small_block_allocator *sba = sba_allocator->impl;
    AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_active: supplied allocator has invalid SBA impl");

    size_t used = 0;
    for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
        struct sba_bin *bin = &sba->bins[idx];
        sba->lock(&bin->mutex);
        for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
            void *page_addr = NULL;
            aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
            struct page_header *page = page_addr;
            used += page->alloc_count * bin->size;
        }
        if (bin->page_cursor) {
            void *page_addr = s_page_base(bin->page_cursor);
            struct page_header *page = page_addr;
            used += page->alloc_count * bin->size;
        }
        sba->unlock(&bin->mutex);
    }

    return used;
}

size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator) {
    AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_reserved requires a non-null allocator");
    struct small_block_allocator *sba = sba_allocator->impl;
    AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_reserved: supplied allocator has invalid SBA impl");

    size_t used = 0;
    for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) {
        struct sba_bin *bin = &sba->bins[idx];
        sba->lock(&bin->mutex);
        used += (bin->active_pages.length + (bin->page_cursor != NULL)) * AWS_SBA_PAGE_SIZE;
        sba->unlock(&bin->mutex);
    }

    return used;
}

size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator) {
    (void)sba_allocator;
    return AWS_SBA_PAGE_SIZE;
}

size_t aws_small_block_allocator_page_size_available(struct aws_allocator *sba_allocator) {
    (void)sba_allocator;
    return AWS_SBA_PAGE_SIZE - sizeof(struct page_header);
}

/* NOTE: Expects the mutex to be held by the caller */
static void *s_sba_alloc_from_bin(struct sba_bin *bin) {
    /* check the free list, hand chunks out in LIFO order */
    if (bin->free_chunks.length > 0) {
        void *chunk = NULL;
        if (aws_array_list_back(&bin->free_chunks, &chunk)) {
            return NULL;
        }
        if (aws_array_list_pop_back(&bin->free_chunks)) {
            return NULL;
        }

        AWS_ASSERT(chunk);
        struct page_header *page = s_page_base(chunk);
        page->alloc_count++;
        return chunk;
    }

    /* If there is a working page to chunk from, use it */
    if (bin->page_cursor) {
        struct page_header *page = s_page_base(bin->page_cursor);
        AWS_ASSERT(page);
        size_t space_left = AWS_SBA_PAGE_SIZE - (bin->page_cursor - (uint8_t *)page);
        if (space_left >= bin->size) {
            void *chunk = bin->page_cursor;
            page->alloc_count++;
            bin->page_cursor += bin->size;
            space_left -= bin->size;
            if (space_left < bin->size) {
                aws_array_list_push_back(&bin->active_pages, &page);
                bin->page_cursor = NULL;
            }

            return chunk;
        }
    }

    /* Nothing free to use, allocate a page and restart */
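    /* (the recursive call below then carves the first chunk out of the fresh page via the page-cursor path above) */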
    uint8_t *new_page = s_aligned_alloc(AWS_SBA_PAGE_SIZE, AWS_SBA_PAGE_SIZE);
    if (!new_page) {
        return NULL;
    }
    new_page = s_page_bind(new_page, bin);
    bin->page_cursor = new_page;

    return s_sba_alloc_from_bin(bin);
}

/* NOTE: Expects the mutex to be held by the caller */
static void s_sba_free_to_bin(struct sba_bin *bin, void *addr) {
    AWS_PRECONDITION(addr);
    struct page_header *page = s_page_base(addr);
    AWS_ASSERT(page->bin == bin);
    page->alloc_count--;
    if (page->alloc_count == 0 && page != s_page_base(bin->page_cursor)) { /* empty page, free it */
        uint8_t *page_start = (uint8_t *)page + sizeof(struct page_header);
        uint8_t *page_end = page_start + AWS_SBA_PAGE_SIZE;

        /* Remove all chunks in the page from the free list */
        intptr_t chunk_idx = bin->free_chunks.length;
        for (; chunk_idx >= 0; --chunk_idx) {
            uint8_t *chunk = NULL;
            aws_array_list_get_at(&bin->free_chunks, &chunk, chunk_idx);
            if (chunk >= page_start && chunk < page_end) {
                aws_array_list_swap(&bin->free_chunks, chunk_idx, bin->free_chunks.length - 1);
                aws_array_list_pop_back(&bin->free_chunks);
            }
        }

        /* Find page in pages list and remove it */
        for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) {
            void *page_addr = NULL;
            aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx);
            if (page_addr == page) {
                aws_array_list_swap(&bin->active_pages, page_idx, bin->active_pages.length - 1);
                aws_array_list_pop_back(&bin->active_pages);
                break;
            }
        }

        /* ensure that the page tag is erased, in case nearby memory is re-used */
        page->tag = page->tag2 = 0;
        s_aligned_free(page);
        return;
    }

    aws_array_list_push_back(&bin->free_chunks, &addr);
}

/* No lock required for this function, it's all read-only access to constant data */
static struct sba_bin *s_sba_find_bin(struct small_block_allocator *sba, size_t size) {
    AWS_PRECONDITION(size <= s_max_bin_size);

    /* map bits 5 (32) to 9 (512) to indices 0-4 */
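    /* e.g. size 100: next power of two is 128 = 2^7, clz(128) over 32 bits is 24, so idx = (31 - 24) - 5 = 2,
     * which selects the 128-byte bin */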
    size_t next_pow2 = 0;
    aws_round_up_to_power_of_two(size, &next_pow2);
    size_t lz = aws_clz_i32((int32_t)next_pow2);
    size_t idx = aws_sub_size_saturating(31 - lz, 5);
    AWS_ASSERT(idx <= 4);
    struct sba_bin *bin = &sba->bins[idx];
    AWS_ASSERT(bin->size >= size);
    return bin;
}

static void *s_sba_alloc(struct small_block_allocator *sba, size_t size) {
    if (size <= s_max_bin_size) {
        struct sba_bin *bin = s_sba_find_bin(sba, size);
        AWS_FATAL_ASSERT(bin);
        /* BEGIN CRITICAL SECTION */
        sba->lock(&bin->mutex);
        void *mem = s_sba_alloc_from_bin(bin);
        sba->unlock(&bin->mutex);
        /* END CRITICAL SECTION */
        return mem;
    }

    return aws_mem_acquire(sba->allocator, size);
}

static void s_sba_free(struct small_block_allocator *sba, void *addr) {
    if (!addr) {
        return;
    }

    struct page_header *page = (struct page_header *)s_page_base(addr);
    /* Check whether this page is tagged by the SBA.
     * This check reads memory that we may not have allocated, but it will always be heap memory, so it should
     * not cause any issues. TSan may report it as a data race, but that is a false positive.
     */
    if (page->tag == AWS_SBA_TAG_VALUE && page->tag2 == AWS_SBA_TAG_VALUE) {
        struct sba_bin *bin = page->bin;
        /* BEGIN CRITICAL SECTION */
        sba->lock(&bin->mutex);
        s_sba_free_to_bin(bin, addr);
        sba->unlock(&bin->mutex);
        /* END CRITICAL SECTION */
        return;
    }

    /* large alloc, give it back to the underlying allocator */
    aws_mem_release(sba->allocator, addr);
}

static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size) {
    struct small_block_allocator *sba = allocator->impl;
    return s_sba_alloc(sba, size);
}

static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr) {
    struct small_block_allocator *sba = allocator->impl;
    s_sba_free(sba, ptr);
}

static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
    struct small_block_allocator *sba = allocator->impl;

    /* If both allocations come from the parent, let the parent do it */
    if (old_size > s_max_bin_size && new_size > s_max_bin_size) {
        void *ptr = old_ptr;
        if (aws_mem_realloc(sba->allocator, &ptr, old_size, new_size)) {
            return NULL;
        }
        return ptr;
    }

    if (new_size == 0) {
        s_sba_free(sba, old_ptr);
        return NULL;
    }

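    /* Shrinking: the existing allocation is already large enough, so hand it back unchanged */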
    if (old_size > new_size) {
        return old_ptr;
    }

    void *new_mem = s_sba_alloc(sba, new_size);
    if (!new_mem) {
        return NULL;
    }

    if (old_ptr && old_size) {
        memcpy(new_mem, old_ptr, old_size);
        s_sba_free(sba, old_ptr);
    }

    return new_mem;
}

static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
    struct small_block_allocator *sba = allocator->impl;
    void *mem = s_sba_alloc(sba, size * num);
    if (mem) {
        memset(mem, 0, size * num);
    }
    return mem;
}