segment.c

  1. /* ----------------------------------------------------------------------------
  2. Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
  3. This is free software; you can redistribute it and/or modify it under the
  4. terms of the MIT license. A copy of the license can be found in the file
  5. "LICENSE" at the root of this distribution.
  6. -----------------------------------------------------------------------------*/
  7. #include "mimalloc.h"
  8. #include "mimalloc-internal.h"
  9. #include "mimalloc-atomic.h"
  10. #include <string.h> // memset
  11. #include <stdio.h>
  12. #define MI_PAGE_HUGE_ALIGN (256*1024)
  13. static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);
  14. /* --------------------------------------------------------------------------------
  15. Segment allocation
  16. We allocate pages inside bigger "segments" (4mb on 64-bit). This is to avoid
  17. splitting VMAs on Linux and to reduce fragmentation on other OSes.
  18. Each thread owns its own segments.
  19. Currently we have:
  20. - small pages (64kb), 64 in one segment
  21. - medium pages (512kb), 8 in one segment
  22. - large pages (4mb), 1 in one segment
  23. - huge blocks > MI_LARGE_OBJ_SIZE_MAX become a large segment with 1 page
  24. In any case the memory for a segment is virtual and usually committed on demand.
  25. (i.e. we are careful to not touch the memory until we actually allocate a block there)
  26. If a thread ends, it "abandons" pages with used blocks
  27. and there is an abandoned segment list whose segments can
  28. be reclaimed by still running threads, much like work-stealing.
  29. -------------------------------------------------------------------------------- */
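// Worked example (editorial note, using the sizes listed above): a 4mb segment is
// filled exactly by 64 small pages of 64kb (64*64kb = 4mb), by 8 medium pages of
// 512kb (8*512kb = 4mb), or by a single 4mb large page.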
  30. /* -----------------------------------------------------------
  31. Queue of segments containing free pages
  32. ----------------------------------------------------------- */
  33. #if (MI_DEBUG>=3)
  34. static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, const mi_segment_t* segment) {
  35. mi_assert_internal(segment != NULL);
  36. mi_segment_t* list = queue->first;
  37. while (list != NULL) {
  38. if (list == segment) break;
  39. mi_assert_internal(list->next==NULL || list->next->prev == list);
  40. mi_assert_internal(list->prev==NULL || list->prev->next == list);
  41. list = list->next;
  42. }
  43. return (list == segment);
  44. }
  45. #endif
  46. static bool mi_segment_queue_is_empty(const mi_segment_queue_t* queue) {
  47. return (queue->first == NULL);
  48. }
  49. static void mi_segment_queue_remove(mi_segment_queue_t* queue, mi_segment_t* segment) {
  50. mi_assert_expensive(mi_segment_queue_contains(queue, segment));
  51. if (segment->prev != NULL) segment->prev->next = segment->next;
  52. if (segment->next != NULL) segment->next->prev = segment->prev;
  53. if (segment == queue->first) queue->first = segment->next;
  54. if (segment == queue->last) queue->last = segment->prev;
  55. segment->next = NULL;
  56. segment->prev = NULL;
  57. }
  58. static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment) {
  59. mi_assert_expensive(!mi_segment_queue_contains(queue, segment));
  60. segment->next = NULL;
  61. segment->prev = queue->last;
  62. if (queue->last != NULL) {
  63. mi_assert_internal(queue->last->next == NULL);
  64. queue->last->next = segment;
  65. queue->last = segment;
  66. }
  67. else {
  68. queue->last = queue->first = segment;
  69. }
  70. }
  71. static mi_segment_queue_t* mi_segment_free_queue_of_kind(mi_page_kind_t kind, mi_segments_tld_t* tld) {
  72. if (kind == MI_PAGE_SMALL) return &tld->small_free;
  73. else if (kind == MI_PAGE_MEDIUM) return &tld->medium_free;
  74. else return NULL;
  75. }
  76. static mi_segment_queue_t* mi_segment_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
  77. return mi_segment_free_queue_of_kind(segment->page_kind, tld);
  78. }
  79. // remove from free queue if it is in one
  80. static void mi_segment_remove_from_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  81. mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld); // may be NULL
  82. bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
  83. if (in_queue) {
  84. mi_segment_queue_remove(queue, segment);
  85. }
  86. }
  87. static void mi_segment_insert_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
  88. mi_segment_enqueue(mi_segment_free_queue(segment, tld), segment);
  89. }
  90. /* -----------------------------------------------------------
  91. Invariant checking
  92. ----------------------------------------------------------- */
  93. #if (MI_DEBUG>=2)
  94. static bool mi_segment_is_in_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
  95. mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld);
  96. bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
  97. if (in_queue) {
  98. mi_assert_expensive(mi_segment_queue_contains(queue, segment));
  99. }
  100. return in_queue;
  101. }
  102. #endif
  103. static size_t mi_segment_page_size(const mi_segment_t* segment) {
  104. if (segment->capacity > 1) {
  105. mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM);
  106. return ((size_t)1 << segment->page_shift);
  107. }
  108. else {
  109. mi_assert_internal(segment->page_kind >= MI_PAGE_LARGE);
  110. return segment->segment_size;
  111. }
  112. }
  113. #if (MI_DEBUG>=2)
  114. static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tld) {
  115. mi_page_t* p = tld->pages_reset.first;
  116. while (p != NULL) {
  117. if (p == page) return true;
  118. p = p->next;
  119. }
  120. return false;
  121. }
  122. #endif
  123. #if (MI_DEBUG>=3)
  124. static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* tld) {
  125. mi_assert_internal(segment != NULL);
  126. mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  127. mi_assert_internal(segment->used <= segment->capacity);
  128. mi_assert_internal(segment->abandoned <= segment->used);
  129. size_t nfree = 0;
  130. for (size_t i = 0; i < segment->capacity; i++) {
  131. const mi_page_t* const page = &segment->pages[i];
  132. if (!page->segment_in_use) {
  133. nfree++;
  134. }
  135. if (page->segment_in_use || page->is_reset) {
  136. mi_assert_expensive(!mi_pages_reset_contains(page, tld));
  137. }
  138. }
  139. mi_assert_internal(nfree + segment->used == segment->capacity);
  140. // mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
  141. mi_assert_internal(segment->page_kind == MI_PAGE_HUGE ||
  142. (mi_segment_page_size(segment) * segment->capacity == segment->segment_size));
  143. return true;
  144. }
  145. #endif
  146. static bool mi_page_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) {
  147. mi_assert_internal(page != NULL);
  148. if (page->next != NULL || page->prev != NULL) {
  149. mi_assert_internal(mi_pages_reset_contains(page, tld));
  150. return false;
  151. }
  152. else {
  153. // both next and prev are NULL, check for singleton list
  154. return (tld->pages_reset.first != page && tld->pages_reset.last != page);
  155. }
  156. }
  157. /* -----------------------------------------------------------
  158. Guard pages
  159. ----------------------------------------------------------- */
  160. static void mi_segment_protect_range(void* p, size_t size, bool protect) {
  161. if (protect) {
  162. _mi_mem_protect(p, size);
  163. }
  164. else {
  165. _mi_mem_unprotect(p, size);
  166. }
  167. }
  168. static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t* tld) {
  169. // add/remove guard pages
  170. if (MI_SECURE != 0) {
  171. // in secure mode, we set up a protected page in between the segment info and the page data
  172. const size_t os_psize = _mi_os_page_size();
  173. mi_assert_internal((segment->segment_info_size - os_psize) >= (sizeof(mi_segment_t) + ((segment->capacity - 1) * sizeof(mi_page_t))));
  174. mi_assert_internal(((uintptr_t)segment + segment->segment_info_size) % os_psize == 0);
  175. mi_segment_protect_range((uint8_t*)segment + segment->segment_info_size - os_psize, os_psize, protect);
  176. if (MI_SECURE <= 1 || segment->capacity == 1) {
  177. // and protect the last (or only) page too
  178. mi_assert_internal(MI_SECURE <= 1 || segment->page_kind >= MI_PAGE_LARGE);
  179. uint8_t* start = (uint8_t*)segment + segment->segment_size - os_psize;
  180. if (protect && !segment->mem_is_committed) {
  181. if (protect) {
  182. // ensure secure page is committed
  183. if (_mi_mem_commit(start, os_psize, NULL, tld)) { // if this fails that is ok (as it is an unaccessible page)
  184. mi_segment_protect_range(start, os_psize, protect);
  185. }
  186. }
  187. }
  188. else {
  189. mi_segment_protect_range(start, os_psize, protect);
  190. }
  191. }
  192. else {
  193. // or protect every page
  194. const size_t page_size = mi_segment_page_size(segment);
  195. for (size_t i = 0; i < segment->capacity; i++) {
  196. if (segment->pages[i].is_committed) {
  197. mi_segment_protect_range((uint8_t*)segment + (i+1)*page_size - os_psize, os_psize, protect);
  198. }
  199. }
  200. }
  201. }
  202. }
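// Layout sketch (editorial note, derived from mi_segment_protect above): in secure
// mode the last OS page of the segment-info area acts as a guard between the info
// and page 0, and the last OS page of the segment (MI_SECURE <= 1) or of every
// page (MI_SECURE > 1) is protected as well:
//
//   | segment info |G| page 0 | page 1 | ... | page N-1 |G|        (MI_SECURE == 1)
//   | segment info |G| page 0 |G| page 1 |G| ... | page N-1 |G|    (MI_SECURE  > 1)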
  203. /* -----------------------------------------------------------
  204. Page reset
  205. ----------------------------------------------------------- */
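// Editorial note: "resetting" a free page tells the OS (via _mi_mem_reset) that its
// contents are no longer needed; depending on mi_option_reset_decommits this may
// also decommit the range. mi_page_unreset reverses this before the page is reused,
// and because the memory may come back zeroed it updates the `is_zero_init` flag.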
  206. static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) {
  207. mi_assert_internal(page->is_committed);
  208. if (!mi_option_is_enabled(mi_option_page_reset)) return;
  209. if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
  210. size_t psize;
  211. void* start = mi_segment_raw_page_start(segment, page, &psize);
  212. page->is_reset = true;
  213. mi_assert_internal(size <= psize);
  214. size_t reset_size = ((size == 0 || size > psize) ? psize : size);
  215. if (reset_size > 0) _mi_mem_reset(start, reset_size, tld->os);
  216. }
  217. static bool mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld)
  218. {
  219. mi_assert_internal(page->is_reset);
  220. mi_assert_internal(page->is_committed);
  221. mi_assert_internal(!segment->mem_is_pinned);
  222. if (segment->mem_is_pinned || !page->is_committed || !page->is_reset) return true;
  223. page->is_reset = false;
  224. size_t psize;
  225. uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
  226. size_t unreset_size = (size == 0 || size > psize ? psize : size);
  227. bool is_zero = false;
  228. bool ok = true;
  229. if (unreset_size > 0) {
  230. ok = _mi_mem_unreset(start, unreset_size, &is_zero, tld->os);
  231. }
  232. if (is_zero) page->is_zero_init = true;
  233. return ok;
  234. }
  235. /* -----------------------------------------------------------
  236. The free page queue
  237. ----------------------------------------------------------- */
  238. // we re-use the `used` field for the expiration counter. Since this is a
  239. // 32-bit field while the clock is always 64-bit we need to guard
  240. // against overflow; we use subtraction to check for expiry, which works
  241. // as long as the reset delay is under (2^30 - 1) milliseconds (~12 days)
  242. static void mi_page_reset_set_expire(mi_page_t* page) {
  243. uint32_t expire = (uint32_t)_mi_clock_now() + mi_option_get(mi_option_reset_delay);
  244. page->used = expire;
  245. }
  246. static bool mi_page_reset_is_expired(mi_page_t* page, mi_msecs_t now) {
  247. int32_t expire = (int32_t)(page->used);
  248. return (((int32_t)now - expire) >= 0);
  249. }
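// Worked example (editorial note): say the 32-bit truncated clock reads 0xFFFFFFF0
// when a 100ms (0x64) delay is set; the stored expiration wraps to 0x00000054.
// Shortly before expiry, (int32_t)0xFFFFFFF8 - (int32_t)0x54 == -92 < 0, so the
// page is not yet expired; once the clock passes the wrapped value the signed
// difference becomes non-negative and the page expires as intended.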
  250. static void mi_pages_reset_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
  251. mi_assert_internal(!page->segment_in_use || !page->is_committed);
  252. mi_assert_internal(mi_page_not_in_queue(page,tld));
  253. mi_assert_expensive(!mi_pages_reset_contains(page, tld));
  254. mi_assert_internal(_mi_page_segment(page)==segment);
  255. if (!mi_option_is_enabled(mi_option_page_reset)) return;
  256. if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
  257. if (mi_option_get(mi_option_reset_delay) == 0) {
  258. // reset immediately?
  259. mi_page_reset(segment, page, 0, tld);
  260. }
  261. else {
  262. // otherwise push on the delayed page reset queue
  263. mi_page_queue_t* pq = &tld->pages_reset;
  264. // push on top
  265. mi_page_reset_set_expire(page);
  266. page->next = pq->first;
  267. page->prev = NULL;
  268. if (pq->first == NULL) {
  269. mi_assert_internal(pq->last == NULL);
  270. pq->first = pq->last = page;
  271. }
  272. else {
  273. pq->first->prev = page;
  274. pq->first = page;
  275. }
  276. }
  277. }
  278. static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) {
  279. if (mi_page_not_in_queue(page,tld)) return;
  280. mi_page_queue_t* pq = &tld->pages_reset;
  281. mi_assert_internal(pq!=NULL);
  282. mi_assert_internal(!page->segment_in_use);
  283. mi_assert_internal(mi_pages_reset_contains(page, tld));
  284. if (page->prev != NULL) page->prev->next = page->next;
  285. if (page->next != NULL) page->next->prev = page->prev;
  286. if (page == pq->last) pq->last = page->prev;
  287. if (page == pq->first) pq->first = page->next;
  288. page->next = page->prev = NULL;
  289. page->used = 0;
  290. }
  291. static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool force_reset, mi_segments_tld_t* tld) {
  292. if (segment->mem_is_pinned) return; // never reset in huge OS pages
  293. for (size_t i = 0; i < segment->capacity; i++) {
  294. mi_page_t* page = &segment->pages[i];
  295. if (!page->segment_in_use && page->is_committed && !page->is_reset) {
  296. mi_pages_reset_remove(page, tld);
  297. if (force_reset) {
  298. mi_page_reset(segment, page, 0, tld);
  299. }
  300. }
  301. else {
  302. mi_assert_internal(mi_page_not_in_queue(page,tld));
  303. }
  304. }
  305. }
  306. static void mi_reset_delayed(mi_segments_tld_t* tld) {
  307. if (!mi_option_is_enabled(mi_option_page_reset)) return;
  308. mi_msecs_t now = _mi_clock_now();
  309. mi_page_queue_t* pq = &tld->pages_reset;
  310. // from oldest up to the first that has not expired yet
  311. mi_page_t* page = pq->last;
  312. while (page != NULL && mi_page_reset_is_expired(page,now)) {
  313. mi_page_t* const prev = page->prev; // save previous field
  314. mi_page_reset(_mi_page_segment(page), page, 0, tld);
  315. page->used = 0;
  316. page->prev = page->next = NULL;
  317. page = prev;
  318. }
  319. // discard the reset pages from the queue
  320. pq->last = page;
  321. if (page != NULL){
  322. page->next = NULL;
  323. }
  324. else {
  325. pq->first = NULL;
  326. }
  327. }
  328. /* -----------------------------------------------------------
  329. Segment size calculations
  330. ----------------------------------------------------------- */
  331. // Raw start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
  332. // The raw start does not take aligned block allocation into consideration.
  333. static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
  334. size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
  335. uint8_t* p = (uint8_t*)segment + page->segment_idx * psize;
  336. if (page->segment_idx == 0) {
  337. // the first page starts after the segment info (and possible guard page)
  338. p += segment->segment_info_size;
  339. psize -= segment->segment_info_size;
  340. }
  341. #if (MI_SECURE > 1) // every page has an os guard page
  342. psize -= _mi_os_page_size();
  343. #elif (MI_SECURE==1) // the last page has an os guard page at the end
  344. if (page->segment_idx == segment->capacity - 1) {
  345. psize -= _mi_os_page_size();
  346. }
  347. #endif
  348. if (page_size != NULL) *page_size = psize;
  349. mi_assert_internal(page->xblock_size == 0 || _mi_ptr_page(p) == page);
  350. mi_assert_internal(_mi_ptr_segment(p) == segment);
  351. return p;
  352. }
  353. // Start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
  354. uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size)
  355. {
  356. size_t psize;
  357. uint8_t* p = mi_segment_raw_page_start(segment, page, &psize);
  358. if (pre_size != NULL) *pre_size = 0;
  359. if (page->segment_idx == 0 && block_size > 0 && segment->page_kind <= MI_PAGE_MEDIUM) {
  360. // for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
  361. size_t adjust = block_size - ((uintptr_t)p % block_size);
  362. if (adjust < block_size) {
  363. p += adjust;
  364. psize -= adjust;
  365. if (pre_size != NULL) *pre_size = adjust;
  366. }
  367. mi_assert_internal((uintptr_t)p % block_size == 0);
  368. }
  369. if (page_size != NULL) *page_size = psize;
  370. mi_assert_internal(page->xblock_size==0 || _mi_ptr_page(p) == page);
  371. mi_assert_internal(_mi_ptr_segment(p) == segment);
  372. return p;
  373. }
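// Example (editorial note, hypothetical numbers): if the raw start of page 0 lands
// at an address with `p % block_size == 0x280` for a block_size of 0x400, then
// adjust == 0x180, so `p` moves forward by 0x180 bytes to the next block_size
// boundary, `psize` shrinks by the same amount, and `pre_size` reports the skipped
// prefix to the caller.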
  374. static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size, size_t* info_size)
  375. {
  376. const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
  377. size_t guardsize = 0;
  378. size_t isize = 0;
  379. if (MI_SECURE == 0) {
  380. // normally no guard pages
  381. isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
  382. }
  383. else {
  384. // in secure mode, we set up a protected page in between the segment info
  385. // and the page data (and one at the end of the segment)
  386. const size_t page_size = _mi_os_page_size();
  387. isize = _mi_align_up(minsize, page_size);
  388. guardsize = page_size;
  389. required = _mi_align_up(required, page_size);
  390. }
  391. if (info_size != NULL) *info_size = isize;
  392. if (pre_size != NULL) *pre_size = isize + guardsize;
  393. return (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + 2*guardsize, MI_PAGE_HUGE_ALIGN) );
  394. }
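// Editorial note: for regular (non-huge) segments `required` is 0, so this simply
// returns MI_SEGMENT_SIZE; `info_size` is the segment header rounded up (to the OS
// page size in secure mode), while `pre_size` adds the guard page in secure mode
// and is the offset at which page 0's usable memory begins.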
  395. /* ----------------------------------------------------------------------------
  396. Segment caches
  397. We keep a small segment cache per thread to increase local
  398. reuse and avoid setting/clearing guard pages in secure mode.
  399. ------------------------------------------------------------------------------- */
  400. static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
  401. if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
  402. else _mi_stat_decrease(&tld->stats->segments,1);
  403. tld->count += (segment_size >= 0 ? 1 : -1);
  404. if (tld->count > tld->peak_count) tld->peak_count = tld->count;
  405. tld->current_size += segment_size;
  406. if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
  407. }
  408. static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
  409. segment->thread_id = 0;
  410. mi_segments_track_size(-((long)segment_size),tld);
  411. if (MI_SECURE != 0) {
  412. mi_assert_internal(!segment->mem_is_pinned);
  413. mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
  414. }
  415. bool any_reset = false;
  416. bool fully_committed = true;
  417. for (size_t i = 0; i < segment->capacity; i++) {
  418. mi_page_t* page = &segment->pages[i];
  419. if (!page->is_committed) { fully_committed = false; }
  420. if (page->is_reset) { any_reset = true; }
  421. }
  422. if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
  423. fully_committed = false;
  424. }
  425. _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
  426. }
  427. // The thread-local segment cache is limited to at most 1/8 of the peak size of the segments in use.
  428. #define MI_SEGMENT_CACHE_FRACTION (8)
  429. // note: returned segment may be partially reset
  430. static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t* tld) {
  431. if (segment_size != 0 && segment_size != MI_SEGMENT_SIZE) return NULL;
  432. mi_segment_t* segment = tld->cache;
  433. if (segment == NULL) return NULL;
  434. tld->cache_count--;
  435. tld->cache = segment->next;
  436. segment->next = NULL;
  437. mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
  438. _mi_stat_decrease(&tld->stats->segments_cache, 1);
  439. return segment;
  440. }
  441. static bool mi_segment_cache_full(mi_segments_tld_t* tld)
  442. {
  443. // if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
  444. size_t max_cache = mi_option_get(mi_option_segment_cache);
  445. if (tld->cache_count < max_cache
  446. && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
  447. ) {
  448. return false;
  449. }
  450. // take the opportunity to reduce the segment cache if it is too large (now)
  451. // TODO: this never happens as we check against peak usage, should we use current usage instead?
  452. while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
  453. mi_segment_t* segment = mi_segment_cache_pop(0,tld);
  454. mi_assert_internal(segment != NULL);
  455. if (segment != NULL) mi_segment_os_free(segment, segment->segment_size, tld);
  456. }
  457. return true;
  458. }
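// Example (editorial note): with a peak of 32 live segments the fraction bound
// allows 1 + 32/8 == 5 cached segments; a segment is only accepted while the
// cache count is below both this bound and mi_option_segment_cache.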
  459. static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
  460. mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
  461. mi_assert_internal(segment->next == NULL);
  462. if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) {
  463. return false;
  464. }
  465. mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
  466. segment->next = tld->cache;
  467. tld->cache = segment;
  468. tld->cache_count++;
  469. _mi_stat_increase(&tld->stats->segments_cache,1);
  470. return true;
  471. }
  472. // called by threads that are terminating to free cached segments
  473. void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
  474. mi_segment_t* segment;
  475. while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
  476. mi_segment_os_free(segment, segment->segment_size, tld);
  477. }
  478. mi_assert_internal(tld->cache_count == 0);
  479. mi_assert_internal(tld->cache == NULL);
  480. #if MI_DEBUG>=2
  481. if (!_mi_is_main_thread()) {
  482. mi_assert_internal(tld->pages_reset.first == NULL);
  483. mi_assert_internal(tld->pages_reset.last == NULL);
  484. }
  485. #endif
  486. }
  487. /* -----------------------------------------------------------
  488. Segment allocation
  489. ----------------------------------------------------------- */
  490. // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
  491. static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
  492. {
  493. // the segment parameter is non-null if it came from our cache
  494. mi_assert_internal(segment==NULL || (required==0 && page_kind <= MI_PAGE_LARGE));
  495. // calculate needed sizes first
  496. size_t capacity;
  497. if (page_kind == MI_PAGE_HUGE) {
  498. mi_assert_internal(page_shift == MI_SEGMENT_SHIFT && required > 0);
  499. capacity = 1;
  500. }
  501. else {
  502. mi_assert_internal(required == 0);
  503. size_t page_size = (size_t)1 << page_shift;
  504. capacity = MI_SEGMENT_SIZE / page_size;
  505. mi_assert_internal(MI_SEGMENT_SIZE % page_size == 0);
  506. mi_assert_internal(capacity >= 1 && capacity <= MI_SMALL_PAGES_PER_SEGMENT);
  507. }
  508. size_t info_size;
  509. size_t pre_size;
  510. size_t segment_size = mi_segment_size(capacity, required, &pre_size, &info_size);
  511. mi_assert_internal(segment_size >= required);
  512. // Initialize parameters
  513. const bool eager_delayed = (page_kind <= MI_PAGE_MEDIUM && tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
  514. const bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
  515. bool commit = eager; // || (page_kind >= MI_PAGE_LARGE);
  516. bool pages_still_good = false;
  517. bool is_zero = false;
  518. // Try to get it from our thread local cache first
  519. if (segment != NULL) {
  520. // came from cache
  521. mi_assert_internal(segment->segment_size == segment_size);
  522. if (page_kind <= MI_PAGE_MEDIUM && segment->page_kind == page_kind && segment->segment_size == segment_size) {
  523. pages_still_good = true;
  524. }
  525. else
  526. {
  527. if (MI_SECURE!=0) {
  528. mi_assert_internal(!segment->mem_is_pinned);
  529. mi_segment_protect(segment, false, tld->os); // reset protection if the page kind differs
  530. }
  531. // different page kinds; unreset any reset pages, and unprotect
  532. // TODO: optimize cache pop to return fitting pages if possible?
  533. for (size_t i = 0; i < segment->capacity; i++) {
  534. mi_page_t* page = &segment->pages[i];
  535. if (page->is_reset) {
  536. if (!commit && mi_option_is_enabled(mi_option_reset_decommits)) {
  537. page->is_reset = false;
  538. }
  539. else {
  540. mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset? (instead of the full page)
  541. }
  542. }
  543. }
  544. // ensure the initial info is committed
  545. if (segment->capacity < capacity) {
  546. bool commit_zero = false;
  547. bool ok = _mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
  548. if (commit_zero) is_zero = true;
  549. if (!ok) {
  550. return NULL;
  551. }
  552. }
  553. }
  554. }
  555. else {
  556. // Allocate the segment from the OS
  557. size_t memid;
  558. bool mem_large = (!eager_delayed && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
  559. bool is_pinned = false;
  560. segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
  561. if (segment == NULL) return NULL; // failed to allocate
  562. if (!commit) {
  563. // ensure the initial info is committed
  564. mi_assert_internal(!mem_large && !is_pinned);
  565. bool commit_zero = false;
  566. bool ok = _mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
  567. if (commit_zero) is_zero = true;
  568. if (!ok) {
  569. // commit failed; we cannot touch the memory: free the segment directly and return `NULL`
  570. _mi_mem_free(segment, MI_SEGMENT_SIZE, memid, false, false, os_tld);
  571. return NULL;
  572. }
  573. }
  574. segment->memid = memid;
  575. segment->mem_is_pinned = (mem_large || is_pinned);
  576. segment->mem_is_committed = commit;
  577. mi_segments_track_size((long)segment_size, tld);
  578. }
  579. mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
  580. mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
  581. mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
  582. if (!pages_still_good) {
  583. // zero the segment info (but not the `mem` fields)
  584. ptrdiff_t ofs = offsetof(mi_segment_t, next);
  585. memset((uint8_t*)segment + ofs, 0, info_size - ofs);
  586. // initialize pages info
  587. for (uint8_t i = 0; i < capacity; i++) {
  588. segment->pages[i].segment_idx = i;
  589. segment->pages[i].is_reset = false;
  590. segment->pages[i].is_committed = commit;
  591. segment->pages[i].is_zero_init = is_zero;
  592. }
  593. }
  594. else {
  595. // zero the segment info but not the pages info (and mem fields)
  596. ptrdiff_t ofs = offsetof(mi_segment_t, next);
  597. memset((uint8_t*)segment + ofs, 0, offsetof(mi_segment_t,pages) - ofs);
  598. }
  599. // initialize
  600. segment->page_kind = page_kind;
  601. segment->capacity = capacity;
  602. segment->page_shift = page_shift;
  603. segment->segment_size = segment_size;
  604. segment->segment_info_size = pre_size;
  605. segment->thread_id = _mi_thread_id();
  606. segment->cookie = _mi_ptr_cookie(segment);
  607. // _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
  608. // set protection
  609. mi_segment_protect(segment, true, tld->os);
  610. // insert in free lists for small and medium pages
  611. if (page_kind <= MI_PAGE_MEDIUM) {
  612. mi_segment_insert_in_free_queue(segment, tld);
  613. }
  614. //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
  615. return segment;
  616. }
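// Editorial summary of mi_segment_init: a non-NULL `segment` came from the
// thread-local cache; if its page kind differs from what is requested, guard pages
// are removed and reset pages are unreset (or merely flagged as unreset when
// reset_decommits applies) before the header is re-initialized. Otherwise a fresh
// MI_SEGMENT_SIZE-aligned block is allocated from the OS and at least the info
// area (`pre_size`) is committed.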
  617. static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  618. return mi_segment_init(NULL, required, page_kind, page_shift, tld, os_tld);
  619. }
  620. static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
  621. UNUSED(force);
  622. mi_assert(segment != NULL);
  623. // note: don't reset pages even on abandon as the whole segment is freed? (and ready for reuse)
  624. bool force_reset = (force && mi_option_is_enabled(mi_option_abandoned_page_reset));
  625. mi_pages_reset_remove_all_in_segment(segment, force_reset, tld);
  626. mi_segment_remove_from_free_queue(segment,tld);
  627. mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
  628. mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment));
  629. mi_assert(segment->next == NULL);
  630. mi_assert(segment->prev == NULL);
  631. _mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);
  632. if (!force && mi_segment_cache_push(segment, tld)) {
  633. // it is put in our cache
  634. }
  635. else {
  636. // otherwise return it to the OS
  637. mi_segment_os_free(segment, segment->segment_size, tld);
  638. }
  639. }
  640. /* -----------------------------------------------------------
  641. Free page management inside a segment
  642. ----------------------------------------------------------- */
  643. static bool mi_segment_has_free(const mi_segment_t* segment) {
  644. return (segment->used < segment->capacity);
  645. }
  646. static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
  647. mi_assert_internal(_mi_page_segment(page) == segment);
  648. mi_assert_internal(!page->segment_in_use);
  649. mi_pages_reset_remove(page, tld);
  650. // check commit
  651. if (!page->is_committed) {
  652. mi_assert_internal(!segment->mem_is_pinned);
  653. mi_assert_internal(!page->is_reset);
  654. size_t psize;
  655. uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
  656. bool is_zero = false;
  657. const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
  658. bool ok = _mi_mem_commit(start, psize + gsize, &is_zero, tld->os);
  659. if (!ok) return false; // failed to commit!
  660. if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
  661. if (is_zero) { page->is_zero_init = true; }
  662. page->is_committed = true;
  663. }
  664. // set in-use before doing unreset to prevent delayed reset
  665. page->segment_in_use = true;
  666. segment->used++;
  667. // check reset
  668. if (page->is_reset) {
  669. mi_assert_internal(!segment->mem_is_pinned);
  670. bool ok = mi_page_unreset(segment, page, 0, tld);
  671. if (!ok) {
  672. page->segment_in_use = false;
  673. segment->used--;
  674. return false;
  675. }
  676. }
  677. mi_assert_internal(page->segment_in_use);
  678. mi_assert_internal(segment->used <= segment->capacity);
  679. if (segment->used == segment->capacity && segment->page_kind <= MI_PAGE_MEDIUM) {
  680. // if no more free pages, remove from the queue
  681. mi_assert_internal(!mi_segment_has_free(segment));
  682. mi_segment_remove_from_free_queue(segment, tld);
  683. }
  684. return true;
  685. }
  686. /* -----------------------------------------------------------
  687. Free
  688. ----------------------------------------------------------- */
  689. static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
  690. // clear page data; can be called on abandoned segments
  691. static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool allow_reset, mi_segments_tld_t* tld)
  692. {
  693. mi_assert_internal(page->segment_in_use);
  694. mi_assert_internal(mi_page_all_free(page));
  695. mi_assert_internal(page->is_committed);
  696. mi_assert_internal(mi_page_not_in_queue(page, tld));
  697. size_t inuse = page->capacity * mi_page_block_size(page);
  698. _mi_stat_decrease(&tld->stats->page_committed, inuse);
  699. _mi_stat_decrease(&tld->stats->pages, 1);
  700. // calculate the used size from the raw (non-aligned) start of the page
  701. //size_t pre_size;
  702. //_mi_segment_page_start(segment, page, page->block_size, NULL, &pre_size);
  703. //size_t used_size = pre_size + (page->capacity * page->block_size);
  704. page->is_zero_init = false;
  705. page->segment_in_use = false;
  706. // reset the page memory to reduce memory pressure?
  707. // note: must come after setting `segment_in_use` to false but before block_size becomes 0
  708. //mi_page_reset(segment, page, 0 /*used_size*/, tld);
  709. // zero the page data, but keep the segment fields, the capacity/reserved, and the block_size (used for page size calculations)
  710. uint32_t block_size = page->xblock_size;
  711. uint16_t capacity = page->capacity;
  712. uint16_t reserved = page->reserved;
  713. ptrdiff_t ofs = offsetof(mi_page_t,capacity);
  714. memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
  715. page->capacity = capacity;
  716. page->reserved = reserved;
  717. page->xblock_size = block_size;
  718. segment->used--;
  719. // add to the free page list for reuse/reset
  720. if (allow_reset) {
  721. mi_pages_reset_add(segment, page, tld);
  722. }
  723. page->capacity = 0; // after reset these can be zero'd now
  724. page->reserved = 0;
  725. }
  726. void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
  727. {
  728. mi_assert(page != NULL);
  729. mi_segment_t* segment = _mi_page_segment(page);
  730. mi_assert_expensive(mi_segment_is_valid(segment,tld));
  731. mi_reset_delayed(tld);
  732. // mark it as free now
  733. mi_segment_page_clear(segment, page, true, tld);
  734. if (segment->used == 0) {
  735. // no more used pages; remove from the free list and free the segment
  736. mi_segment_free(segment, force, tld);
  737. }
  738. else {
  739. if (segment->used == segment->abandoned) {
  740. // only abandoned pages; remove from free list and abandon
  741. mi_segment_abandon(segment,tld);
  742. }
  743. else if (segment->used + 1 == segment->capacity) {
  744. mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // for now we only support small and medium pages
  745. // move back to segments free list
  746. mi_segment_insert_in_free_queue(segment,tld);
  747. }
  748. }
  749. }
  750. /* -----------------------------------------------------------
  751. Abandonment
  752. When threads terminate, they can leave segments with
  753. live blocks (reached through other threads). Such segments
  754. are "abandoned" and will be reclaimed by other threads to
  755. reuse their pages and/or free them eventually
  756. We maintain a global list of abandoned segments that are
  757. reclaimed on demand. Since this is shared among threads
  758. the implementation needs to avoid the A-B-A problem on
  759. popping abandoned segments: <https://en.wikipedia.org/wiki/ABA_problem>
  760. We use tagged pointers to avoid accidentally identifying
  761. reused segments, much like stamped references in Java.
  762. Secondly, we maintain a reader counter to avoid resetting
  763. or decommitting segments that have a pending read operation.
  764. Note: the current implementation is one possible design;
  765. another way might be to keep track of abandoned segments
  766. in the regions. This would have the advantage of keeping
  767. all concurrent code in one place and not needing to deal
  768. with ABA issues. The drawback is that it is unclear how to
  769. scan abandoned segments efficiently in that case as they
  770. would be spread among all other segments in the regions.
  771. ----------------------------------------------------------- */
  772. // Use the bottom 20-bits (on 64-bit) of the aligned segment pointers
  773. // to put in a tag that increments on update to avoid the A-B-A problem.
  774. #define MI_TAGGED_MASK MI_SEGMENT_MASK
  775. typedef uintptr_t mi_tagged_segment_t;
  776. static mi_segment_t* mi_tagged_segment_ptr(mi_tagged_segment_t ts) {
  777. return (mi_segment_t*)(ts & ~MI_TAGGED_MASK);
  778. }
  779. static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_segment_t ts) {
  780. mi_assert_internal(((uintptr_t)segment & MI_TAGGED_MASK) == 0);
  781. uintptr_t tag = ((ts & MI_TAGGED_MASK) + 1) & MI_TAGGED_MASK;
  782. return ((uintptr_t)segment | tag);
  783. }
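// Example (editorial note): segments are MI_SEGMENT_SIZE aligned, so the low bits
// of a segment pointer are always zero and can carry the tag. If a segment at,
// say, 0x7f0000400000 is popped and later pushed back, the new head value is the
// same pointer combined with an incremented tag, so a compare-and-swap that still
// holds the old tagged value fails even though the raw pointer matches (the A-B-A
// case).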
  784. // This is a list of visited abandoned pages that were full at the time.
  785. // This list migrates to `abandoned` when that becomes NULL. The use of
  786. // this list reduces contention and the rate at which segments are visited.
  787. static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL
  788. // The abandoned page list (tagged as it supports pop)
  789. static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL
  790. // Maintain these for debug purposes (these counts may be a bit off)
  791. static mi_decl_cache_align _Atomic(uintptr_t) abandoned_count;
  792. static mi_decl_cache_align _Atomic(uintptr_t) abandoned_visited_count;
  793. // We also maintain a count of current readers of the abandoned list
  794. // in order to prevent resetting/decommitting segment memory if it might
  795. // still be read.
  796. static mi_decl_cache_align _Atomic(uintptr_t) abandoned_readers; // = 0
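// Editorial note: mi_abandoned_pop increments `abandoned_readers` before it follows
// a segment's `abandoned_next` link and decrements it afterwards, while
// _mi_abandoned_await_readers spins until the count reaches zero; this keeps a
// popped segment's memory from being reset or decommitted while another thread may
// still be reading that link.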
  797. // Push on the visited list
  798. static void mi_abandoned_visited_push(mi_segment_t* segment) {
  799. mi_assert_internal(segment->thread_id == 0);
  800. mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t,&segment->abandoned_next) == NULL);
  801. mi_assert_internal(segment->next == NULL && segment->prev == NULL);
  802. mi_assert_internal(segment->used > 0);
  803. mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited);
  804. do {
  805. mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, anext);
  806. } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &abandoned_visited, &anext, segment));
  807. mi_atomic_increment_relaxed(&abandoned_visited_count);
  808. }
  809. // Move the visited list to the abandoned list.
  810. static bool mi_abandoned_visited_revisit(void)
  811. {
  812. // quick check if the visited list is empty
  813. if (mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited) == NULL) return false;
  814. // grab the whole visited list
  815. mi_segment_t* first = mi_atomic_exchange_ptr_acq_rel(mi_segment_t, &abandoned_visited, NULL);
  816. if (first == NULL) return false;
  817. // first try to swap directly if the abandoned list happens to be NULL
  818. mi_tagged_segment_t afirst;
  819. mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
  820. if (mi_tagged_segment_ptr(ts)==NULL) {
  821. uintptr_t count = mi_atomic_load_relaxed(&abandoned_visited_count);
  822. afirst = mi_tagged_segment(first, ts);
  823. if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) {
  824. mi_atomic_add_relaxed(&abandoned_count, count);
  825. mi_atomic_sub_relaxed(&abandoned_visited_count, count);
  826. return true;
  827. }
  828. }
  829. // find the last element of the visited list: O(n)
  830. mi_segment_t* last = first;
  831. mi_segment_t* next;
  832. while ((next = mi_atomic_load_ptr_relaxed(mi_segment_t, &last->abandoned_next)) != NULL) {
  833. last = next;
  834. }
  835. // and atomically prepend to the abandoned list
  836. // (no need to increase the readers as we don't access the abandoned segments)
  837. mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned);
  838. uintptr_t count;
  839. do {
  840. count = mi_atomic_load_relaxed(&abandoned_visited_count);
  841. mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext));
  842. afirst = mi_tagged_segment(first, anext);
  843. } while (!mi_atomic_cas_weak_release(&abandoned, &anext, afirst));
  844. mi_atomic_add_relaxed(&abandoned_count, count);
  845. mi_atomic_sub_relaxed(&abandoned_visited_count, count);
  846. return true;
  847. }
  848. // Push on the abandoned list.
  849. static void mi_abandoned_push(mi_segment_t* segment) {
  850. mi_assert_internal(segment->thread_id == 0);
  851. mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
  852. mi_assert_internal(segment->next == NULL && segment->prev == NULL);
  853. mi_assert_internal(segment->used > 0);
  854. mi_tagged_segment_t next;
  855. mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
  856. do {
  857. mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, mi_tagged_segment_ptr(ts));
  858. next = mi_tagged_segment(segment, ts);
  859. } while (!mi_atomic_cas_weak_release(&abandoned, &ts, next));
  860. mi_atomic_increment_relaxed(&abandoned_count);
  861. }
  862. // Wait until there are no more pending reads on segments that used to be in the abandoned list
  863. void _mi_abandoned_await_readers(void) {
  864. uintptr_t n;
  865. do {
  866. n = mi_atomic_load_acquire(&abandoned_readers);
  867. if (n != 0) mi_atomic_yield();
  868. } while (n != 0);
  869. }
  870. // Pop from the abandoned list
  871. static mi_segment_t* mi_abandoned_pop(void) {
  872. mi_segment_t* segment;
  873. // Check efficiently if it is empty (or if the visited list needs to be moved)
  874. mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
  875. segment = mi_tagged_segment_ptr(ts);
  876. if (mi_likely(segment == NULL)) {
  877. if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL
  878. return NULL;
  879. }
  880. }
  881. // Do a pop. We use a reader count to prevent
  882. // a segment from being decommitted while a read is still pending,
  883. // and a tagged pointer to prevent A-B-A link corruption.
  884. // (this is called from `region.c:_mi_mem_free` for example)
  885. mi_atomic_increment_relaxed(&abandoned_readers); // ensure no segment gets decommitted
  886. mi_tagged_segment_t next = 0;
  887. ts = mi_atomic_load_acquire(&abandoned);
  888. do {
  889. segment = mi_tagged_segment_ptr(ts);
  890. if (segment != NULL) {
  891. mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next);
  892. next = mi_tagged_segment(anext, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted
  893. }
  894. } while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&abandoned, &ts, next));
  895. mi_atomic_decrement_relaxed(&abandoned_readers); // release reader lock
  896. if (segment != NULL) {
  897. mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
  898. mi_atomic_decrement_relaxed(&abandoned_count);
  899. }
  900. return segment;
  901. }
  902. /* -----------------------------------------------------------
  903. Abandon segment/page
  904. ----------------------------------------------------------- */
  905. static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
  906. mi_assert_internal(segment->used == segment->abandoned);
  907. mi_assert_internal(segment->used > 0);
  908. mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
  909. mi_assert_expensive(mi_segment_is_valid(segment, tld));
  910. // remove the segment from the free page queue if needed
  911. mi_reset_delayed(tld);
  912. mi_pages_reset_remove_all_in_segment(segment, mi_option_is_enabled(mi_option_abandoned_page_reset), tld);
  913. mi_segment_remove_from_free_queue(segment, tld);
  914. mi_assert_internal(segment->next == NULL && segment->prev == NULL);
  915. // all pages in the segment are abandoned; add it to the abandoned list
  916. _mi_stat_increase(&tld->stats->segments_abandoned, 1);
  917. mi_segments_track_size(-((long)segment->segment_size), tld);
  918. segment->thread_id = 0;
  919. segment->abandoned_visits = 0;
  920. mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
  921. mi_abandoned_push(segment);
  922. }
  923. void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
  924. mi_assert(page != NULL);
  925. mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
  926. mi_assert_internal(mi_page_heap(page) == NULL);
  927. mi_segment_t* segment = _mi_page_segment(page);
  928. mi_assert_expensive(!mi_pages_reset_contains(page, tld));
  929. mi_assert_expensive(mi_segment_is_valid(segment, tld));
  930. segment->abandoned++;
  931. _mi_stat_increase(&tld->stats->pages_abandoned, 1);
  932. mi_assert_internal(segment->abandoned <= segment->used);
  933. if (segment->used == segment->abandoned) {
  934. // all pages are abandoned, abandon the entire segment
  935. mi_segment_abandon(segment, tld);
  936. }
  937. }
  938. /* -----------------------------------------------------------
  939. Reclaim abandoned pages
  940. ----------------------------------------------------------- */
  941. // Possibly clear pages and check if free space is available
  942. static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size, bool* all_pages_free)
  943. {
  944. mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
  945. bool has_page = false;
  946. size_t pages_used = 0;
  947. size_t pages_used_empty = 0;
  948. for (size_t i = 0; i < segment->capacity; i++) {
  949. mi_page_t* page = &segment->pages[i];
  950. if (page->segment_in_use) {
  951. pages_used++;
  952. // ensure used count is up to date and collect potential concurrent frees
  953. _mi_page_free_collect(page, false);
  954. if (mi_page_all_free(page)) {
  955. // if everything free already, page can be reused for some block size
  956. // note: don't clear the page yet as we can only OS reset it once it is reclaimed
  957. pages_used_empty++;
  958. has_page = true;
  959. }
  960. else if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
  961. // a page has available free blocks of the right size
  962. has_page = true;
  963. }
  964. }
  965. else {
  966. // whole empty page
  967. has_page = true;
  968. }
  969. }
  970. mi_assert_internal(pages_used == segment->used && pages_used >= pages_used_empty);
  971. if (all_pages_free != NULL) {
  972. *all_pages_free = ((pages_used - pages_used_empty) == 0);
  973. }
  974. return has_page;
  975. }
  976. // Reclaim a segment; returns NULL if the segment was freed
  977. // set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
  978. static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
  979. mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
  980. if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
  981. segment->thread_id = _mi_thread_id();
  982. segment->abandoned_visits = 0;
  983. mi_segments_track_size((long)segment->segment_size, tld);
  984. mi_assert_internal(segment->next == NULL && segment->prev == NULL);
  985. mi_assert_expensive(mi_segment_is_valid(segment, tld));
  986. _mi_stat_decrease(&tld->stats->segments_abandoned, 1);
  987. for (size_t i = 0; i < segment->capacity; i++) {
  988. mi_page_t* page = &segment->pages[i];
  989. if (page->segment_in_use) {
  990. mi_assert_internal(!page->is_reset);
  991. mi_assert_internal(page->is_committed);
  992. mi_assert_internal(mi_page_not_in_queue(page, tld));
  993. mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
  994. mi_assert_internal(mi_page_heap(page) == NULL);
  995. segment->abandoned--;
  996. mi_assert(page->next == NULL);
  997. _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
  998. // set the heap again and allow heap thread delayed free again.
  999. mi_page_set_heap(page, heap);
  1000. _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
  1001. // TODO: should we not collect again given that we just collected in `check_free`?
  1002. _mi_page_free_collect(page, false); // ensure used count is up to date
  1003. if (mi_page_all_free(page)) {
  1004. // if everything free already, clear the page directly
  1005. mi_segment_page_clear(segment, page, true, tld); // reset is ok now
  1006. }
  1007. else {
  1008. // otherwise reclaim it into the heap
  1009. _mi_page_reclaim(heap, page);
  1010. if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) {
  1011. if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
  1012. }
  1013. }
  1014. }
  1015. else if (page->is_committed && !page->is_reset) { // not in-use, and not reset yet
  1016. // note: do not reset as this includes pages that were not touched before
  1017. // mi_pages_reset_add(segment, page, tld);
  1018. }
  1019. }
  1020. mi_assert_internal(segment->abandoned == 0);
  1021. if (segment->used == 0) {
  1022. mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed));
  1023. mi_segment_free(segment, false, tld);
  1024. return NULL;
  1025. }
  1026. else {
  1027. if (segment->page_kind <= MI_PAGE_MEDIUM && mi_segment_has_free(segment)) {
  1028. mi_segment_insert_in_free_queue(segment, tld);
  1029. }
  1030. return segment;
  1031. }
  1032. }
  1033. void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
  1034. mi_segment_t* segment;
  1035. while ((segment = mi_abandoned_pop()) != NULL) {
  1036. mi_segment_reclaim(segment, heap, 0, NULL, tld);
  1037. }
  1038. }
  1039. static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, bool* reclaimed, mi_segments_tld_t* tld)
  1040. {
  1041. *reclaimed = false;
  1042. mi_segment_t* segment;
  1043. int max_tries = 8; // limit the work to bound allocation times
  1044. while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
  1045. segment->abandoned_visits++;
  1046. bool all_pages_free;
  1047. bool has_page = mi_segment_check_free(segment,block_size,&all_pages_free); // try to free up pages (due to concurrent frees)
  1048. if (all_pages_free) {
  1049. // free the segment (by forced reclaim) to make it available to other threads.
  1050. // note1: we prefer to free a segment as that might lead to reclaiming another
  1051. // segment that is still partially used.
  1052. // note2: we could in principle optimize this by skipping reclaim and directly
  1053. // freeing, but that would violate some invariants temporarily
  1054. mi_segment_reclaim(segment, heap, 0, NULL, tld);
  1055. }
  1056. else if (has_page && segment->page_kind == page_kind) {
  1057. // found a free page of the right kind, or page of the right block_size with free space
  1058. // we return the result of reclaim (which is usually `segment`) as it might free
  1059. // the segment due to concurrent frees (in which case `NULL` is returned).
  1060. return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
  1061. }
  1062. else if (segment->abandoned_visits >= 3) {
  1063. // always reclaim on 3rd visit to limit the list length.
  1064. mi_segment_reclaim(segment, heap, 0, NULL, tld);
  1065. }
  1066. else {
  1067. // otherwise, push it on the visited list so it is not looked at again too soon
  1068. mi_abandoned_visited_push(segment);
  1069. }
  1070. }
  1071. return NULL;
  1072. }

/* -----------------------------------------------------------
   Reclaim or allocate
----------------------------------------------------------- */
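
// Obtain a segment to allocate a fresh page in: first try the thread-local
// segment cache, then try to reclaim an abandoned segment, and only then
// allocate a new segment. Note that a NULL result means either out-of-memory
// or that a suitable page was already reclaimed directly into the heap's
// page queues.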
static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_assert_internal(page_kind <= MI_PAGE_LARGE);
  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);

  // 1. try to get a segment from our cache
  mi_segment_t* segment = mi_segment_cache_pop(MI_SEGMENT_SIZE, tld);
  if (segment != NULL) {
    mi_segment_init(segment, 0, page_kind, page_shift, tld, os_tld);
    return segment;
  }
  // 2. try to reclaim an abandoned segment
  bool reclaimed;
  segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
  if (reclaimed) {
    // reclaimed a page of the requested block size directly into the heap
    mi_assert_internal(segment != NULL && segment->page_kind == page_kind && page_kind <= MI_PAGE_LARGE);
    return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
  }
  else if (segment != NULL) {
    // reclaimed a segment with empty pages (of `page_kind`) in it
    return segment;
  }
  // 3. otherwise allocate a fresh segment
  return mi_segment_alloc(0, page_kind, page_shift, tld, os_tld);
}

/* -----------------------------------------------------------
   Small page allocation
----------------------------------------------------------- */
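
// Find a page slot in the segment that is not yet in use and claim it.
// The segment must have at least one free page; the search is a linear scan
// over the page slots (see the TODO below about using a bitmap instead).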
static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(mi_segment_has_free(segment));
  mi_assert_expensive(mi_segment_is_valid(segment, tld));
  for (size_t i = 0; i < segment->capacity; i++) {  // TODO: use a bitmap instead of search?
    mi_page_t* page = &segment->pages[i];
    if (!page->segment_in_use) {
      bool ok = mi_segment_page_claim(segment, page, tld);
      if (ok) return page;
    }
  }
  mi_assert(false);
  return NULL;
}

// Allocate a page inside a segment. Requires that the segment has a free page available.
static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
  mi_assert_internal(mi_segment_has_free(segment));
  return mi_segment_find_free(segment, tld);
}
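
// Allocate a page of `kind` for blocks of `block_size`: use the first segment
// in the free queue for that kind, allocating or reclaiming a segment first if
// the queue is empty.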
static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  // find an available segment in the segment free queue
  mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld);
  if (mi_segment_queue_is_empty(free_queue)) {
    // possibly allocate or reclaim a fresh segment
    mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld);
    if (segment == NULL) return NULL;  // return NULL if out-of-memory (or reclaimed)
    mi_assert_internal(free_queue->first == segment);
    mi_assert_internal(segment->page_kind == kind);
    mi_assert_internal(segment->used < segment->capacity);
  }
  mi_assert_internal(free_queue->first != NULL);
  mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld);
  mi_assert_internal(page != NULL);
#if MI_DEBUG>=2
  // verify it is committed
  _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
#endif
  return page;
}

static mi_page_t* mi_segment_small_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  return mi_segment_page_alloc(heap, block_size, MI_PAGE_SMALL, MI_SMALL_PAGE_SHIFT, tld, os_tld);
}

static mi_page_t* mi_segment_medium_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  return mi_segment_page_alloc(heap, block_size, MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld, os_tld);
}

/* -----------------------------------------------------------
   Large page allocation
----------------------------------------------------------- */
static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap, block_size, MI_PAGE_LARGE, MI_LARGE_PAGE_SHIFT, tld, os_tld);
  if (segment == NULL) return NULL;
  mi_page_t* page = mi_segment_find_free(segment, tld);
  mi_assert_internal(page != NULL);
#if MI_DEBUG>=2
  _mi_segment_page_start(segment, page, sizeof(void*), NULL, NULL)[0] = 0;
#endif
  return page;
}
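
// A huge page gets a segment of its own, sized to fit the requested block.
// The segment's thread_id is set to 0 (abandoned) right away so that any
// thread can free the block later on (see `_mi_segment_huge_page_free`).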
static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT, tld, os_tld);
  if (segment == NULL) return NULL;
  mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
  segment->thread_id = 0; // huge pages are immediately abandoned
  mi_segments_track_size(-(long)segment->segment_size, tld);
  mi_page_t* page = mi_segment_find_free(segment, tld);
  mi_assert_internal(page != NULL);
  return page;
}

// free huge block from another thread
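// The freeing thread claims ownership with a CAS on the segment's thread_id
// (from 0 to its own id); since this is the last reference to the block, the
// CAS is expected to always succeed (asserted in debug builds).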
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
  // huge page segments are always abandoned and can be freed immediately by any thread
  mi_assert_internal(segment->page_kind == MI_PAGE_HUGE);
  mi_assert_internal(segment == _mi_page_segment(page));
  mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0);

  // claim it and free
  mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
  // paranoia: if this is the last reference, the cas should always succeed
  uintptr_t expected_tid = 0;
  if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
    mi_block_set_next(page, block, page->free);
    page->free = block;
    page->used--;
    page->is_zero = false;
    mi_assert(page->used == 0);
    mi_tld_t* tld = heap->tld;
    mi_segments_track_size((long)segment->segment_size, &tld->segments);
    _mi_segment_page_free(page, true, &tld->segments);
  }
#if (MI_DEBUG!=0)
  else {
    mi_assert_internal(false);
  }
#endif
}

/* -----------------------------------------------------------
   Page allocation
----------------------------------------------------------- */
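
// Allocate a page for blocks of `block_size`, dispatching to the small,
// medium, large, or huge page allocator based on the size class.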
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
  mi_page_t* page;
  if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
    page = mi_segment_small_page_alloc(heap, block_size, tld, os_tld);
  }
  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
    page = mi_segment_medium_page_alloc(heap, block_size, tld, os_tld);
  }
  else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
    page = mi_segment_large_page_alloc(heap, block_size, tld, os_tld);
  }
  else {
    page = mi_segment_huge_page_alloc(block_size, tld, os_tld);
  }
  mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page), tld));
  mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
  mi_reset_delayed(tld);
  mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
  return page;
}