extent.c

  1. #include "jemalloc/internal/jemalloc_preamble.h"
  2. #include "jemalloc/internal/jemalloc_internal_includes.h"
  3. #include "jemalloc/internal/assert.h"
  4. #include "jemalloc/internal/emap.h"
  5. #include "jemalloc/internal/extent_dss.h"
  6. #include "jemalloc/internal/extent_mmap.h"
  7. #include "jemalloc/internal/ph.h"
  8. #include "jemalloc/internal/mutex.h"
  9. /******************************************************************************/
  10. /* Data. */
  11. size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
  12. static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  13. size_t offset, size_t length, bool growing_retained);
  14. static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
  15. edata_t *edata, size_t offset, size_t length, bool growing_retained);
  16. static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
  17. edata_t *edata, size_t offset, size_t length, bool growing_retained);
  18. static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  19. edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
  20. static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  21. edata_t *a, edata_t *b, bool holding_core_locks);
  22. /* Used exclusively for gdump triggering. */
  23. static atomic_zu_t curpages;
  24. static atomic_zu_t highpages;
  25. /******************************************************************************/
  26. /*
  27. * Function prototypes for static functions that are referenced prior to
  28. * definition.
  29. */
  30. static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
  31. static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  32. ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
  33. bool zero, bool *commit, bool growing_retained, bool guarded);
  34. static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  35. ecache_t *ecache, edata_t *edata, bool *coalesced);
  36. static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
  37. ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
  38. bool zero, bool *commit, bool guarded);
  39. /******************************************************************************/
  40. size_t
  41. extent_sn_next(pac_t *pac) {
  42. return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
  43. }
  44. static inline bool
  45. extent_may_force_decay(pac_t *pac) {
  46. return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
  47. || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
  48. }
  49. static bool
  50. extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  51. ecache_t *ecache, edata_t *edata) {
  52. emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
  53. bool coalesced;
  54. edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
  55. edata, &coalesced);
  56. emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
  57. if (!coalesced) {
  58. return true;
  59. }
  60. eset_insert(&ecache->eset, edata);
  61. return false;
  62. }
  63. edata_t *
  64. ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  65. edata_t *expand_edata, size_t size, size_t alignment, bool zero,
  66. bool guarded) {
  67. assert(size != 0);
  68. assert(alignment != 0);
  69. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  70. WITNESS_RANK_CORE, 0);
  71. bool commit = true;
  72. edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
  73. size, alignment, zero, &commit, false, guarded);
  74. assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
  75. assert(edata == NULL || edata_guarded_get(edata) == guarded);
  76. return edata;
  77. }
  78. edata_t *
  79. ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  80. edata_t *expand_edata, size_t size, size_t alignment, bool zero,
  81. bool guarded) {
  82. assert(size != 0);
  83. assert(alignment != 0);
  84. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  85. WITNESS_RANK_CORE, 0);
  86. bool commit = true;
  87. edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
  88. size, alignment, zero, &commit, guarded);
  89. if (edata == NULL) {
  90. if (opt_retain && expand_edata != NULL) {
  91. /*
92. * When retain is enabled and we are trying to expand, do not
93. * attempt extent_alloc_wrapper, which mmaps new memory and is
94. * very unlikely to succeed at the requested address (unless it
95. * happens to be at the end of the existing mapping).
  96. */
  97. return NULL;
  98. }
  99. if (guarded) {
  100. /*
  101. * Means no cached guarded extents available (and no
  102. * grow_retained was attempted). The pac_alloc flow
  103. * will alloc regular extents to make new guarded ones.
  104. */
  105. return NULL;
  106. }
  107. void *new_addr = (expand_edata == NULL) ? NULL :
  108. edata_past_get(expand_edata);
  109. edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
  110. size, alignment, zero, &commit,
  111. /* growing_retained */ false);
  112. }
  113. assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
  114. return edata;
  115. }
  116. void
  117. ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  118. edata_t *edata) {
  119. assert(edata_base_get(edata) != NULL);
  120. assert(edata_size_get(edata) != 0);
  121. assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
  122. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  123. WITNESS_RANK_CORE, 0);
  124. edata_addr_set(edata, edata_base_get(edata));
  125. edata_zeroed_set(edata, false);
  126. extent_record(tsdn, pac, ehooks, ecache, edata);
  127. }
  128. edata_t *
  129. ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  130. ecache_t *ecache, size_t npages_min) {
  131. malloc_mutex_lock(tsdn, &ecache->mtx);
  132. /*
  133. * Get the LRU coalesced extent, if any. If coalescing was delayed,
  134. * the loop will iterate until the LRU extent is fully coalesced.
  135. */
  136. edata_t *edata;
  137. while (true) {
  138. /* Get the LRU extent, if any. */
  139. eset_t *eset = &ecache->eset;
  140. edata = edata_list_inactive_first(&eset->lru);
  141. if (edata == NULL) {
  142. /*
  143. * Next check if there are guarded extents. They are
  144. * more expensive to purge (since they are not
145. * mergeable), so we prefer to cache them longer.
  146. */
  147. eset = &ecache->guarded_eset;
  148. edata = edata_list_inactive_first(&eset->lru);
  149. if (edata == NULL) {
  150. goto label_return;
  151. }
  152. }
  153. /* Check the eviction limit. */
  154. size_t extents_npages = ecache_npages_get(ecache);
  155. if (extents_npages <= npages_min) {
  156. edata = NULL;
  157. goto label_return;
  158. }
  159. eset_remove(eset, edata);
  160. if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
  161. break;
  162. }
  163. /* Try to coalesce. */
  164. if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
  165. edata)) {
  166. break;
  167. }
  168. /*
  169. * The LRU extent was just coalesced and the result placed in
  170. * the LRU at its neighbor's position. Start over.
  171. */
  172. }
  173. /*
  174. * Either mark the extent active or deregister it to protect against
  175. * concurrent operations.
  176. */
  177. switch (ecache->state) {
  178. case extent_state_active:
  179. not_reached();
  180. case extent_state_dirty:
  181. case extent_state_muzzy:
  182. emap_update_edata_state(tsdn, pac->emap, edata,
  183. extent_state_active);
  184. break;
  185. case extent_state_retained:
  186. extent_deregister(tsdn, pac, edata);
  187. break;
  188. default:
  189. not_reached();
  190. }
  191. label_return:
  192. malloc_mutex_unlock(tsdn, &ecache->mtx);
  193. return edata;
  194. }
  195. /*
  196. * This can only happen when we fail to allocate a new extent struct (which
  197. * indicates OOM), e.g. when trying to split an existing extent.
  198. */
  199. static void
  200. extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  201. edata_t *edata, bool growing_retained) {
  202. size_t sz = edata_size_get(edata);
  203. if (config_stats) {
  204. atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
  205. ATOMIC_RELAXED);
  206. }
  207. /*
  208. * Leak extent after making sure its pages have already been purged, so
  209. * that this is only a virtual memory leak.
  210. */
  211. if (ecache->state == extent_state_dirty) {
  212. if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
  213. growing_retained)) {
  214. extent_purge_forced_impl(tsdn, ehooks, edata, 0,
  215. edata_size_get(edata), growing_retained);
  216. }
  217. }
  218. edata_cache_put(tsdn, pac->edata_cache, edata);
  219. }
  220. static void
  221. extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
  222. edata_t *edata) {
  223. malloc_mutex_assert_owner(tsdn, &ecache->mtx);
  224. assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
  225. emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
  226. eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
  227. &ecache->eset;
  228. eset_insert(eset, edata);
  229. }
  230. static void
  231. extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
  232. edata_t *edata) {
  233. assert(edata_state_get(edata) == extent_state_active);
  234. extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
  235. }
  236. static void
  237. extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
  238. edata_t *edata, extent_state_t expected_state) {
  239. assert(edata_state_get(edata) == expected_state);
  240. extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
  241. }
  242. static void
  243. extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
  244. edata_t *edata) {
  245. assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
  246. assert(edata_state_get(edata) == ecache->state ||
  247. edata_state_get(edata) == extent_state_merging);
  248. eset_remove(eset, edata);
  249. emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
  250. }
  251. void
  252. extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
  253. cassert(config_prof);
  254. /* prof_gdump() requirement. */
  255. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  256. WITNESS_RANK_CORE, 0);
  257. if (opt_prof && edata_state_get(edata) == extent_state_active) {
  258. size_t nadd = edata_size_get(edata) >> LG_PAGE;
  259. size_t cur = atomic_fetch_add_zu(&curpages, nadd,
  260. ATOMIC_RELAXED) + nadd;
  261. size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
  262. while (cur > high && !atomic_compare_exchange_weak_zu(
  263. &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
  264. /*
  265. * Don't refresh cur, because it may have decreased
  266. * since this thread lost the highpages update race.
  267. * Note that high is updated in case of CAS failure.
  268. */
  269. }
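/*
 * Illustrative example (not in the original source): suppose this
 * thread's fetch_add brought cur to 100 pages while another thread
 * concurrently raised highpages to 120. The failed CAS refreshes
 * 'high' to 120, the loop condition cur > high becomes false, and the
 * higher watermark is preserved; the gdump check below then sees
 * cur <= high and does not trigger.
 */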
  270. if (cur > high && prof_gdump_get_unlocked()) {
  271. prof_gdump(tsdn);
  272. }
  273. }
  274. }
  275. static void
  276. extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
  277. cassert(config_prof);
  278. if (opt_prof && edata_state_get(edata) == extent_state_active) {
  279. size_t nsub = edata_size_get(edata) >> LG_PAGE;
  280. assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
  281. atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
  282. }
  283. }
  284. static bool
  285. extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
  286. assert(edata_state_get(edata) == extent_state_active);
  287. /*
  288. * No locking needed, as the edata must be in active state, which
  289. * prevents other threads from accessing the edata.
  290. */
  291. if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
  292. /* slab */ false)) {
  293. return true;
  294. }
  295. if (config_prof && gdump_add) {
  296. extent_gdump_add(tsdn, edata);
  297. }
  298. return false;
  299. }
  300. static bool
  301. extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
  302. return extent_register_impl(tsdn, pac, edata, true);
  303. }
  304. static bool
  305. extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
  306. return extent_register_impl(tsdn, pac, edata, false);
  307. }
  308. static void
  309. extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
  310. bool err = extent_register(tsdn, pac, edata);
  311. assert(!err);
  312. }
  313. /*
  314. * Removes all pointers to the given extent from the global rtree.
  315. */
  316. static void
  317. extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
  318. bool gdump) {
  319. emap_deregister_boundary(tsdn, pac->emap, edata);
  320. if (config_prof && gdump) {
  321. extent_gdump_sub(tsdn, edata);
  322. }
  323. }
  324. static void
  325. extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
  326. extent_deregister_impl(tsdn, pac, edata, true);
  327. }
  328. static void
  329. extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
  330. edata_t *edata) {
  331. extent_deregister_impl(tsdn, pac, edata, false);
  332. }
  333. /*
  334. * Tries to find and remove an extent from ecache that can be used for the
  335. * given allocation request.
  336. */
  337. static edata_t *
  338. extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  339. ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
  340. bool guarded) {
  341. malloc_mutex_assert_owner(tsdn, &ecache->mtx);
  342. assert(alignment > 0);
  343. if (config_debug && expand_edata != NULL) {
  344. /*
  345. * Non-NULL expand_edata indicates in-place expanding realloc.
  346. * new_addr must either refer to a non-existing extent, or to
  347. * the base of an extant extent, since only active slabs support
  348. * interior lookups (which of course cannot be recycled).
  349. */
  350. void *new_addr = edata_past_get(expand_edata);
  351. assert(PAGE_ADDR2BASE(new_addr) == new_addr);
  352. assert(alignment <= PAGE);
  353. }
  354. edata_t *edata;
  355. eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
  356. if (expand_edata != NULL) {
  357. edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
  358. expand_edata, EXTENT_PAI_PAC, ecache->state);
  359. if (edata != NULL) {
  360. extent_assert_can_expand(expand_edata, edata);
  361. if (edata_size_get(edata) < size) {
  362. emap_release_edata(tsdn, pac->emap, edata,
  363. ecache->state);
  364. edata = NULL;
  365. }
  366. }
  367. } else {
  368. /*
  369. * A large extent might be broken up from its original size to
  370. * some small size to satisfy a small request. When that small
  371. * request is freed, though, it won't merge back with the larger
  372. * extent if delayed coalescing is on. The large extent can
373. * then no longer satisfy a request for its original size. To
  374. * limit this effect, when delayed coalescing is enabled, we
  375. * put a cap on how big an extent we can split for a request.
  376. */
  377. unsigned lg_max_fit = ecache->delay_coalesce
  378. ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
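/*
 * Illustrative note (not in the original source; the default value is
 * an assumption): with opt_lg_extent_max_active_fit at its default of
 * 6, a request is only carved out of a cached extent at most 2^6 = 64
 * times its size, e.g. a 4 KiB request will not split a cached extent
 * larger than 256 KiB while delayed coalescing is enabled.
 */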
  379. /*
  380. * If split and merge are not allowed (Windows w/o retain), try
  381. * exact fit only.
  382. *
  383. * For simplicity purposes, splitting guarded extents is not
  384. * supported. Hence, we do only exact fit for guarded
  385. * allocations.
  386. */
  387. bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
  388. edata = eset_fit(eset, size, alignment, exact_only,
  389. lg_max_fit);
  390. }
  391. if (edata == NULL) {
  392. return NULL;
  393. }
  394. assert(!guarded || edata_guarded_get(edata));
  395. extent_activate_locked(tsdn, pac, ecache, eset, edata);
  396. return edata;
  397. }
  398. /*
  399. * Given an allocation request and an extent guaranteed to be able to satisfy
  400. * it, this splits off lead and trail extents, leaving edata pointing to an
  401. * extent satisfying the allocation.
  402. * This function doesn't put lead or trail into any ecache; it's the caller's
  403. * job to ensure that they can be reused.
  404. */
  405. typedef enum {
  406. /*
407. * Split successfully. lead, edata, and trail are modified to extents
  408. * describing the ranges before, in, and after the given allocation.
  409. */
  410. extent_split_interior_ok,
  411. /*
  412. * The extent can't satisfy the given allocation request. None of the
  413. * input edata_t *s are touched.
  414. */
  415. extent_split_interior_cant_alloc,
  416. /*
  417. * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
  418. * and salvage what's still salvageable (if *to_salvage is non-NULL).
  419. * None of lead, edata, or trail are valid.
  420. */
  421. extent_split_interior_error
  422. } extent_split_interior_result_t;
  423. static extent_split_interior_result_t
  424. extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  425. /* The result of splitting, in case of success. */
  426. edata_t **edata, edata_t **lead, edata_t **trail,
  427. /* The mess to clean up, in case of error. */
  428. edata_t **to_leak, edata_t **to_salvage,
  429. edata_t *expand_edata, size_t size, size_t alignment) {
  430. size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
  431. PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
  432. assert(expand_edata == NULL || leadsize == 0);
  433. if (edata_size_get(*edata) < leadsize + size) {
  434. return extent_split_interior_cant_alloc;
  435. }
  436. size_t trailsize = edata_size_get(*edata) - leadsize - size;
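/*
 * Worked example (illustrative, not in the original source): recycling
 * a 256 KiB extent whose base is 4 KiB past a 64 KiB boundary for a
 * 64 KiB request with 64 KiB alignment gives leadsize = 60 KiB and
 * trailsize = 256 - 60 - 64 = 132 KiB; both pieces are split off below
 * and handed back to the caller via *lead and *trail.
 */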
  437. *lead = NULL;
  438. *trail = NULL;
  439. *to_leak = NULL;
  440. *to_salvage = NULL;
  441. /* Split the lead. */
  442. if (leadsize != 0) {
  443. assert(!edata_guarded_get(*edata));
  444. *lead = *edata;
  445. *edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
  446. size + trailsize, /* holding_core_locks*/ true);
  447. if (*edata == NULL) {
  448. *to_leak = *lead;
  449. *lead = NULL;
  450. return extent_split_interior_error;
  451. }
  452. }
  453. /* Split the trail. */
  454. if (trailsize != 0) {
  455. assert(!edata_guarded_get(*edata));
  456. *trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
  457. trailsize, /* holding_core_locks */ true);
  458. if (*trail == NULL) {
  459. *to_leak = *edata;
  460. *to_salvage = *lead;
  461. *lead = NULL;
  462. *edata = NULL;
  463. return extent_split_interior_error;
  464. }
  465. }
  466. return extent_split_interior_ok;
  467. }
  468. /*
  469. * This fulfills the indicated allocation request out of the given extent (which
  470. * the caller should have ensured was big enough). If there's any unused space
  471. * before or after the resulting allocation, that space is given its own extent
  472. * and put back into ecache.
  473. */
  474. static edata_t *
  475. extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  476. ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
  477. edata_t *edata, bool growing_retained) {
  478. assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
  479. malloc_mutex_assert_owner(tsdn, &ecache->mtx);
  480. edata_t *lead;
  481. edata_t *trail;
  482. edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
  483. edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
  484. extent_split_interior_result_t result = extent_split_interior(
  485. tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
  486. expand_edata, size, alignment);
  487. if (!maps_coalesce && result != extent_split_interior_ok
  488. && !opt_retain) {
  489. /*
  490. * Split isn't supported (implies Windows w/o retain). Avoid
  491. * leaking the extent.
  492. */
  493. assert(to_leak != NULL && lead == NULL && trail == NULL);
  494. extent_deactivate_locked(tsdn, pac, ecache, to_leak);
  495. return NULL;
  496. }
  497. if (result == extent_split_interior_ok) {
  498. if (lead != NULL) {
  499. extent_deactivate_locked(tsdn, pac, ecache, lead);
  500. }
  501. if (trail != NULL) {
  502. extent_deactivate_locked(tsdn, pac, ecache, trail);
  503. }
  504. return edata;
  505. } else {
  506. /*
  507. * We should have picked an extent that was large enough to
  508. * fulfill our allocation request.
  509. */
  510. assert(result == extent_split_interior_error);
  511. if (to_salvage != NULL) {
  512. extent_deregister(tsdn, pac, to_salvage);
  513. }
  514. if (to_leak != NULL) {
  515. extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
  516. /*
517. * May go down the purge path (which assumes no ecache
518. * locks are held). Only happens with OOM-caused split failures.
  519. */
  520. malloc_mutex_unlock(tsdn, &ecache->mtx);
  521. extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
  522. growing_retained);
  523. malloc_mutex_lock(tsdn, &ecache->mtx);
  524. }
  525. return NULL;
  526. }
  527. unreachable();
  528. }
  529. /*
  530. * Tries to satisfy the given allocation request by reusing one of the extents
  531. * in the given ecache_t.
  532. */
  533. static edata_t *
  534. extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  535. edata_t *expand_edata, size_t size, size_t alignment, bool zero,
  536. bool *commit, bool growing_retained, bool guarded) {
  537. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  538. WITNESS_RANK_CORE, growing_retained ? 1 : 0);
  539. assert(!guarded || expand_edata == NULL);
  540. assert(!guarded || alignment <= PAGE);
  541. malloc_mutex_lock(tsdn, &ecache->mtx);
  542. edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
  543. expand_edata, size, alignment, guarded);
  544. if (edata == NULL) {
  545. malloc_mutex_unlock(tsdn, &ecache->mtx);
  546. return NULL;
  547. }
  548. edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
  549. size, alignment, edata, growing_retained);
  550. malloc_mutex_unlock(tsdn, &ecache->mtx);
  551. if (edata == NULL) {
  552. return NULL;
  553. }
  554. assert(edata_state_get(edata) == extent_state_active);
  555. if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
  556. growing_retained)) {
  557. extent_record(tsdn, pac, ehooks, ecache, edata);
  558. return NULL;
  559. }
  560. if (edata_committed_get(edata)) {
  561. /*
  562. * This reverses the purpose of this variable - previously it
  563. * was treated as an input parameter, now it turns into an
  564. * output parameter, reporting if the edata has actually been
  565. * committed.
  566. */
  567. *commit = true;
  568. }
  569. return edata;
  570. }
  571. /*
  572. * If virtual memory is retained, create increasingly larger extents from which
  573. * to split requested extents in order to limit the total number of disjoint
  574. * virtual memory ranges retained by each shard.
  575. */
  576. static edata_t *
  577. extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  578. size_t size, size_t alignment, bool zero, bool *commit) {
  579. malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
  580. size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
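/*
 * Worked example (illustrative, not in the original source): with
 * 4 KiB pages, a request for size = 80 KiB with alignment = 64 KiB
 * yields alloc_size_min = 80 + 64 - 4 = 140 KiB, the worst case needed
 * to find a 64 KiB-aligned range of 80 KiB inside a page-aligned
 * mapping.
 */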
  581. /* Beware size_t wrap-around. */
  582. if (alloc_size_min < size) {
  583. goto label_err;
  584. }
  585. /*
  586. * Find the next extent size in the series that would be large enough to
  587. * satisfy this request.
  588. */
  589. size_t alloc_size;
  590. pszind_t exp_grow_skip;
  591. bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
  592. &alloc_size, &exp_grow_skip);
  593. if (err) {
  594. goto label_err;
  595. }
  596. edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
  597. if (edata == NULL) {
  598. goto label_err;
  599. }
  600. bool zeroed = false;
  601. bool committed = false;
  602. void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
  603. &committed);
  604. if (ptr == NULL) {
  605. edata_cache_put(tsdn, pac->edata_cache, edata);
  606. goto label_err;
  607. }
  608. edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
  609. alloc_size, false, SC_NSIZES, extent_sn_next(pac),
  610. extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
  611. EXTENT_IS_HEAD);
  612. if (extent_register_no_gdump_add(tsdn, pac, edata)) {
  613. edata_cache_put(tsdn, pac->edata_cache, edata);
  614. goto label_err;
  615. }
  616. if (edata_committed_get(edata)) {
  617. *commit = true;
  618. }
  619. edata_t *lead;
  620. edata_t *trail;
  621. edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
  622. edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
  623. extent_split_interior_result_t result = extent_split_interior(tsdn,
  624. pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
  625. size, alignment);
  626. if (result == extent_split_interior_ok) {
  627. if (lead != NULL) {
  628. extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
  629. lead);
  630. }
  631. if (trail != NULL) {
  632. extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
  633. trail);
  634. }
  635. } else {
  636. /*
  637. * We should have allocated a sufficiently large extent; the
  638. * cant_alloc case should not occur.
  639. */
  640. assert(result == extent_split_interior_error);
  641. if (to_salvage != NULL) {
  642. if (config_prof) {
  643. extent_gdump_add(tsdn, to_salvage);
  644. }
  645. extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
  646. to_salvage);
  647. }
  648. if (to_leak != NULL) {
  649. extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
  650. extents_abandon_vm(tsdn, pac, ehooks,
  651. &pac->ecache_retained, to_leak, true);
  652. }
  653. goto label_err;
  654. }
  655. if (*commit && !edata_committed_get(edata)) {
  656. if (extent_commit_impl(tsdn, ehooks, edata, 0,
  657. edata_size_get(edata), true)) {
  658. extent_record(tsdn, pac, ehooks,
  659. &pac->ecache_retained, edata);
  660. goto label_err;
  661. }
  662. /* A successful commit should return zeroed memory. */
  663. if (config_debug) {
  664. void *addr = edata_addr_get(edata);
  665. size_t *p = (size_t *)(uintptr_t)addr;
  666. /* Check the first page only. */
  667. for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
  668. assert(p[i] == 0);
  669. }
  670. }
  671. }
  672. /*
  673. * Increment extent_grow_next if doing so wouldn't exceed the allowed
  674. * range.
  675. */
  676. /* All opportunities for failure are past. */
  677. exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
  678. malloc_mutex_unlock(tsdn, &pac->grow_mtx);
  679. if (config_prof) {
  680. /* Adjust gdump stats now that extent is final size. */
  681. extent_gdump_add(tsdn, edata);
  682. }
  683. if (zero && !edata_zeroed_get(edata)) {
  684. ehooks_zero(tsdn, ehooks, edata_base_get(edata),
  685. edata_size_get(edata));
  686. }
  687. return edata;
  688. label_err:
  689. malloc_mutex_unlock(tsdn, &pac->grow_mtx);
  690. return NULL;
  691. }
  692. static edata_t *
  693. extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  694. edata_t *expand_edata, size_t size, size_t alignment, bool zero,
  695. bool *commit, bool guarded) {
  696. assert(size != 0);
  697. assert(alignment != 0);
  698. malloc_mutex_lock(tsdn, &pac->grow_mtx);
  699. edata_t *edata = extent_recycle(tsdn, pac, ehooks,
  700. &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
  701. /* growing_retained */ true, guarded);
  702. if (edata != NULL) {
  703. malloc_mutex_unlock(tsdn, &pac->grow_mtx);
  704. if (config_prof) {
  705. extent_gdump_add(tsdn, edata);
  706. }
  707. } else if (opt_retain && expand_edata == NULL && !guarded) {
  708. edata = extent_grow_retained(tsdn, pac, ehooks, size,
  709. alignment, zero, commit);
  710. /* extent_grow_retained() always releases pac->grow_mtx. */
  711. } else {
  712. malloc_mutex_unlock(tsdn, &pac->grow_mtx);
  713. }
  714. malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
  715. return edata;
  716. }
  717. static bool
  718. extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  719. edata_t *inner, edata_t *outer, bool forward) {
  720. extent_assert_can_coalesce(inner, outer);
  721. eset_remove(&ecache->eset, outer);
  722. bool err = extent_merge_impl(tsdn, pac, ehooks,
  723. forward ? inner : outer, forward ? outer : inner,
  724. /* holding_core_locks */ true);
  725. if (err) {
  726. extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
  727. extent_state_merging);
  728. }
  729. return err;
  730. }
  731. static edata_t *
  732. extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  733. ecache_t *ecache, edata_t *edata, bool *coalesced) {
  734. assert(!edata_guarded_get(edata));
  735. /*
  736. * We avoid checking / locking inactive neighbors for large size
  737. * classes, since they are eagerly coalesced on deallocation which can
  738. * cause lock contention.
  739. */
  740. /*
  741. * Continue attempting to coalesce until failure, to protect against
  742. * races with other threads that are thwarted by this one.
  743. */
  744. bool again;
  745. do {
  746. again = false;
  747. /* Try to coalesce forward. */
  748. edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
  749. edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
  750. if (next != NULL) {
  751. if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
  752. next, true)) {
  753. if (ecache->delay_coalesce) {
  754. /* Do minimal coalescing. */
  755. *coalesced = true;
  756. return edata;
  757. }
  758. again = true;
  759. }
  760. }
  761. /* Try to coalesce backward. */
  762. edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
  763. edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
  764. if (prev != NULL) {
  765. if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
  766. prev, false)) {
  767. edata = prev;
  768. if (ecache->delay_coalesce) {
  769. /* Do minimal coalescing. */
  770. *coalesced = true;
  771. return edata;
  772. }
  773. again = true;
  774. }
  775. }
  776. } while (again);
  777. if (ecache->delay_coalesce) {
  778. *coalesced = false;
  779. }
  780. return edata;
  781. }
  782. static edata_t *
  783. extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  784. ecache_t *ecache, edata_t *edata, bool *coalesced) {
  785. return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
  786. coalesced);
  787. }
  788. static edata_t *
  789. extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  790. ecache_t *ecache, edata_t *edata, bool *coalesced) {
  791. return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
  792. coalesced);
  793. }
  794. /* Purge a single extent to retained / unmapped directly. */
  795. static void
  796. extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  797. edata_t *edata) {
  798. size_t extent_size = edata_size_get(edata);
  799. extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
  800. if (config_stats) {
  801. /* Update stats accordingly. */
  802. LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
  803. locked_inc_u64(tsdn,
  804. LOCKEDINT_MTX(*pac->stats_mtx),
  805. &pac->stats->decay_dirty.nmadvise, 1);
  806. locked_inc_u64(tsdn,
  807. LOCKEDINT_MTX(*pac->stats_mtx),
  808. &pac->stats->decay_dirty.purged,
  809. extent_size >> LG_PAGE);
  810. LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
  811. atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
  812. ATOMIC_RELAXED);
  813. }
  814. }
  815. /*
  816. * Does the metadata management portions of putting an unused extent into the
  817. * given ecache_t (coalesces and inserts into the eset).
  818. */
  819. void
  820. extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
  821. edata_t *edata) {
  822. assert((ecache->state != extent_state_dirty &&
  823. ecache->state != extent_state_muzzy) ||
  824. !edata_zeroed_get(edata));
  825. malloc_mutex_lock(tsdn, &ecache->mtx);
  826. emap_assert_mapped(tsdn, pac->emap, edata);
  827. if (edata_guarded_get(edata)) {
  828. goto label_skip_coalesce;
  829. }
  830. if (!ecache->delay_coalesce) {
  831. edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
  832. NULL);
  833. } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
  834. assert(ecache == &pac->ecache_dirty);
  835. /* Always coalesce large extents eagerly. */
  836. bool coalesced;
  837. do {
  838. assert(edata_state_get(edata) == extent_state_active);
  839. edata = extent_try_coalesce_large(tsdn, pac, ehooks,
  840. ecache, edata, &coalesced);
  841. } while (coalesced);
  842. if (edata_size_get(edata) >=
  843. atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
  844. && extent_may_force_decay(pac)) {
  845. /* Shortcut to purge the oversize extent eagerly. */
  846. malloc_mutex_unlock(tsdn, &ecache->mtx);
  847. extent_maximally_purge(tsdn, pac, ehooks, edata);
  848. return;
  849. }
  850. }
  851. label_skip_coalesce:
  852. extent_deactivate_locked(tsdn, pac, ecache, edata);
  853. malloc_mutex_unlock(tsdn, &ecache->mtx);
  854. }
  855. void
  856. extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  857. edata_t *edata) {
  858. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  859. WITNESS_RANK_CORE, 0);
  860. if (extent_register(tsdn, pac, edata)) {
  861. edata_cache_put(tsdn, pac->edata_cache, edata);
  862. return;
  863. }
  864. extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
  865. }
  866. static bool
  867. extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  868. edata_t *edata) {
  869. bool err;
  870. assert(edata_base_get(edata) != NULL);
  871. assert(edata_size_get(edata) != 0);
  872. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  873. WITNESS_RANK_CORE, 0);
  874. edata_addr_set(edata, edata_base_get(edata));
  875. /* Try to deallocate. */
  876. err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
  877. edata_size_get(edata), edata_committed_get(edata));
  878. if (!err) {
  879. edata_cache_put(tsdn, pac->edata_cache, edata);
  880. }
  881. return err;
  882. }
  883. edata_t *
  884. extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  885. void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
  886. bool growing_retained) {
  887. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  888. WITNESS_RANK_CORE, growing_retained ? 1 : 0);
  889. edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
  890. if (edata == NULL) {
  891. return NULL;
  892. }
  893. size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
  894. void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
  895. &zero, commit);
  896. if (addr == NULL) {
  897. edata_cache_put(tsdn, pac->edata_cache, edata);
  898. return NULL;
  899. }
  900. edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
  901. size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
  902. extent_state_active, zero, *commit, EXTENT_PAI_PAC,
  903. opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
  904. /*
905. * Retained memory is not counted towards gdump. gdump is only updated
906. * when an extent is allocated as a separate mapping, i.e. when
907. * growing_retained is false.
  908. */
  909. bool gdump_add = !growing_retained;
  910. if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
  911. edata_cache_put(tsdn, pac->edata_cache, edata);
  912. return NULL;
  913. }
  914. return edata;
  915. }
  916. void
  917. extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  918. edata_t *edata) {
  919. assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
  920. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  921. WITNESS_RANK_CORE, 0);
922. /* Avoid calling the default extent_dalloc unless we have to. */
  923. if (!ehooks_dalloc_will_fail(ehooks)) {
  924. /* Remove guard pages for dalloc / unmap. */
  925. if (edata_guarded_get(edata)) {
  926. assert(ehooks_are_default(ehooks));
  927. san_unguard_pages_two_sided(tsdn, ehooks, edata,
  928. pac->emap);
  929. }
  930. /*
  931. * Deregister first to avoid a race with other allocating
  932. * threads, and reregister if deallocation fails.
  933. */
  934. extent_deregister(tsdn, pac, edata);
  935. if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
  936. return;
  937. }
  938. extent_reregister(tsdn, pac, edata);
  939. }
  940. /* Try to decommit; purge if that fails. */
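/*
 * Note (illustrative, not in the original source): the cascade below
 * records whether the pages are known to read back as zero. Decommit
 * and forced purge (e.g. MADV_DONTNEED on Linux) guarantee zero-filled
 * pages on next use, so zeroed is set to true; lazy purge (e.g.
 * MADV_FREE) gives no such guarantee, so zeroed stays false.
 */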
  941. bool zeroed;
  942. if (!edata_committed_get(edata)) {
  943. zeroed = true;
  944. } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
  945. edata_size_get(edata))) {
  946. zeroed = true;
  947. } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
  948. edata_size_get(edata), 0, edata_size_get(edata))) {
  949. zeroed = true;
  950. } else if (edata_state_get(edata) == extent_state_muzzy ||
  951. !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
  952. edata_size_get(edata), 0, edata_size_get(edata))) {
  953. zeroed = false;
  954. } else {
  955. zeroed = false;
  956. }
  957. edata_zeroed_set(edata, zeroed);
  958. if (config_prof) {
  959. extent_gdump_sub(tsdn, edata);
  960. }
  961. extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
  962. }
  963. void
  964. extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  965. edata_t *edata) {
  966. assert(edata_base_get(edata) != NULL);
  967. assert(edata_size_get(edata) != 0);
  968. extent_state_t state = edata_state_get(edata);
  969. assert(state == extent_state_retained || state == extent_state_active);
  970. assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
  971. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  972. WITNESS_RANK_CORE, 0);
  973. if (edata_guarded_get(edata)) {
  974. assert(opt_retain);
  975. san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
  976. }
  977. edata_addr_set(edata, edata_base_get(edata));
  978. /* Try to destroy; silently fail otherwise. */
  979. ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
  980. edata_size_get(edata), edata_committed_get(edata));
  981. edata_cache_put(tsdn, pac->edata_cache, edata);
  982. }
  983. static bool
  984. extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  985. size_t offset, size_t length, bool growing_retained) {
  986. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  987. WITNESS_RANK_CORE, growing_retained ? 1 : 0);
  988. bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
  989. edata_size_get(edata), offset, length);
  990. edata_committed_set(edata, edata_committed_get(edata) || !err);
  991. return err;
  992. }
  993. bool
  994. extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  995. size_t offset, size_t length) {
  996. return extent_commit_impl(tsdn, ehooks, edata, offset, length,
  997. /* growing_retained */ false);
  998. }
  999. bool
  1000. extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  1001. size_t offset, size_t length) {
  1002. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  1003. WITNESS_RANK_CORE, 0);
  1004. bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
  1005. edata_size_get(edata), offset, length);
  1006. edata_committed_set(edata, edata_committed_get(edata) && err);
  1007. return err;
  1008. }
  1009. static bool
  1010. extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  1011. size_t offset, size_t length, bool growing_retained) {
  1012. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  1013. WITNESS_RANK_CORE, growing_retained ? 1 : 0);
  1014. bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
  1015. edata_size_get(edata), offset, length);
  1016. return err;
  1017. }
  1018. bool
  1019. extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  1020. size_t offset, size_t length) {
  1021. return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
  1022. length, false);
  1023. }
  1024. static bool
  1025. extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  1026. size_t offset, size_t length, bool growing_retained) {
  1027. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  1028. WITNESS_RANK_CORE, growing_retained ? 1 : 0);
  1029. bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
  1030. edata_size_get(edata), offset, length);
  1031. return err;
  1032. }
  1033. bool
  1034. extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  1035. size_t offset, size_t length) {
  1036. return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
  1037. false);
  1038. }
  1039. /*
  1040. * Accepts the extent to split, and the characteristics of each side of the
  1041. * split. The 'a' parameters go with the 'lead' of the resulting pair of
  1042. * extents (the lower addressed portion of the split), and the 'b' parameters go
  1043. * with the trail (the higher addressed portion). This makes 'extent' the lead,
  1044. * and returns the trail (except in case of error).
  1045. */
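/*
 * Worked example (illustrative, not in the original source): splitting
 * a 2 MiB extent with size_a = 1536 KiB and size_b = 512 KiB shrinks
 * edata to its first 1536 KiB and returns a new trail edata_t covering
 * the final 512 KiB; on failure NULL is returned and edata is left
 * unmodified.
 */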
  1046. static edata_t *
  1047. extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  1048. edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
  1049. assert(edata_size_get(edata) == size_a + size_b);
  1050. /* Only the shrink path may split w/o holding core locks. */
  1051. if (holding_core_locks) {
  1052. witness_assert_positive_depth_to_rank(
  1053. tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
  1054. } else {
  1055. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  1056. WITNESS_RANK_CORE, 0);
  1057. }
  1058. if (ehooks_split_will_fail(ehooks)) {
  1059. return NULL;
  1060. }
  1061. edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
  1062. if (trail == NULL) {
  1063. goto label_error_a;
  1064. }
  1065. edata_init(trail, edata_arena_ind_get(edata),
  1066. (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
  1067. /* slab */ false, SC_NSIZES, edata_sn_get(edata),
  1068. edata_state_get(edata), edata_zeroed_get(edata),
  1069. edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
  1070. emap_prepare_t prepare;
  1071. bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
  1072. size_a, trail, size_b);
  1073. if (err) {
  1074. goto label_error_b;
  1075. }
  1076. /*
  1077. * No need to acquire trail or edata, because: 1) trail was new (just
  1078. * allocated); and 2) edata is either an active allocation (the shrink
  1079. * path), or in an acquired state (extracted from the ecache on the
  1080. * extent_recycle_split path).
  1081. */
  1082. assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
  1083. assert(emap_edata_is_acquired(tsdn, pac->emap, trail));
  1084. err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
  1085. size_a, size_b, edata_committed_get(edata));
  1086. if (err) {
  1087. goto label_error_b;
  1088. }
  1089. edata_size_set(edata, size_a);
  1090. emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
  1091. size_b);
  1092. return trail;
  1093. label_error_b:
  1094. edata_cache_put(tsdn, pac->edata_cache, trail);
  1095. label_error_a:
  1096. return NULL;
  1097. }
  1098. edata_t *
  1099. extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
  1100. size_t size_a, size_t size_b, bool holding_core_locks) {
  1101. return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
  1102. holding_core_locks);
  1103. }
  1104. static bool
  1105. extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
  1106. edata_t *b, bool holding_core_locks) {
  1107. /* Only the expanding path may merge w/o holding ecache locks. */
  1108. if (holding_core_locks) {
  1109. witness_assert_positive_depth_to_rank(
  1110. tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
  1111. } else {
  1112. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  1113. WITNESS_RANK_CORE, 0);
  1114. }
  1115. assert(edata_base_get(a) < edata_base_get(b));
  1116. assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
  1117. assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
  1118. emap_assert_mapped(tsdn, pac->emap, a);
  1119. emap_assert_mapped(tsdn, pac->emap, b);
  1120. bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
  1121. edata_size_get(a), edata_base_get(b), edata_size_get(b),
  1122. edata_committed_get(a));
  1123. if (err) {
  1124. return true;
  1125. }
  1126. /*
  1127. * The rtree writes must happen while all the relevant elements are
  1128. * owned, so the following code uses decomposed helper functions rather
  1129. * than extent_{,de}register() to do things in the right order.
  1130. */
  1131. emap_prepare_t prepare;
  1132. emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);
  1133. assert(edata_state_get(a) == extent_state_active ||
  1134. edata_state_get(a) == extent_state_merging);
  1135. edata_state_set(a, extent_state_active);
  1136. edata_size_set(a, edata_size_get(a) + edata_size_get(b));
  1137. edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
  1138. edata_sn_get(a) : edata_sn_get(b));
  1139. edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
  1140. emap_merge_commit(tsdn, pac->emap, &prepare, a, b);
  1141. edata_cache_put(tsdn, pac->edata_cache, b);
  1142. return false;
  1143. }
  1144. bool
  1145. extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  1146. edata_t *a, edata_t *b) {
  1147. return extent_merge_impl(tsdn, pac, ehooks, a, b,
  1148. /* holding_core_locks */ false);
  1149. }
  1150. bool
  1151. extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
  1152. bool commit, bool zero, bool growing_retained) {
  1153. witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
  1154. WITNESS_RANK_CORE, growing_retained ? 1 : 0);
  1155. if (commit && !edata_committed_get(edata)) {
  1156. if (extent_commit_impl(tsdn, ehooks, edata, 0,
  1157. edata_size_get(edata), growing_retained)) {
  1158. return true;
  1159. }
  1160. }
  1161. if (zero && !edata_zeroed_get(edata)) {
  1162. void *addr = edata_base_get(edata);
  1163. size_t size = edata_size_get(edata);
  1164. ehooks_zero(tsdn, ehooks, addr, size);
  1165. }
  1166. return false;
  1167. }
  1168. bool
  1169. extent_boot(void) {
  1170. assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));
  1171. if (have_dss) {
  1172. extent_dss_boot();
  1173. }
  1174. return false;
  1175. }