- /*
- * kmp_alloc.cpp -- private/shared dynamic memory allocation and management
- */
- //===----------------------------------------------------------------------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- #include "kmp.h"
- #include "kmp_io.h"
- #include "kmp_wrapper_malloc.h"
- #ifdef __clang__
- #if __has_feature(address_sanitizer)
- extern "C" { // sanitizers API
- void __lsan_ignore_object(const void* p);
- }
- #else
- #define __lsan_ignore_object(p)
- #endif
- #else
- #define __lsan_ignore_object(p)
- #endif
- // Disable bget when it is not used
- #if KMP_USE_BGET
- /* Thread private buffer management code */
- typedef int (*bget_compact_t)(size_t, int);
- typedef void *(*bget_acquire_t)(size_t);
- typedef void (*bget_release_t)(void *);
- /* NOTE: bufsize must be a signed datatype */
- #if KMP_OS_WINDOWS
- #if KMP_ARCH_X86 || KMP_ARCH_ARM
- typedef kmp_int32 bufsize;
- #else
- typedef kmp_int64 bufsize;
- #endif
- #else
- typedef ssize_t bufsize;
- #endif // KMP_OS_WINDOWS
- /* The three modes of operation are fifo search, lifo search, and best fit */
- typedef enum bget_mode {
- bget_mode_fifo = 0,
- bget_mode_lifo = 1,
- bget_mode_best = 2
- } bget_mode_t;
- static void bpool(kmp_info_t *th, void *buffer, bufsize len);
- static void *bget(kmp_info_t *th, bufsize size);
- static void *bgetz(kmp_info_t *th, bufsize size);
- static void *bgetr(kmp_info_t *th, void *buffer, bufsize newsize);
- static void brel(kmp_info_t *th, void *buf);
- static void bectl(kmp_info_t *th, bget_compact_t compact,
- bget_acquire_t acquire, bget_release_t release,
- bufsize pool_incr);
- /* BGET CONFIGURATION */
- /* Buffer allocation size quantum: all buffers allocated are a
- multiple of this size. This MUST be a power of two. */
- /* On IA-32 architecture with Linux* OS, malloc() does not
- ensure 16 byte alignment */
- #if KMP_ARCH_X86 || !KMP_HAVE_QUAD
- #define SizeQuant 8
- #define AlignType double
- #else
- #define SizeQuant 16
- #define AlignType _Quad
- #endif
- // Define this symbol to enable the bstats() function which calculates the
- // total free space in the buffer pool, the largest available buffer, and the
- // total space currently allocated.
- #define BufStats 1
- #ifdef KMP_DEBUG
- // Define this symbol to enable the bpoold() function which dumps the buffers
- // in a buffer pool.
- #define BufDump 1
- // Define this symbol to enable the bpoolv() function for validating a buffer
- // pool.
- #define BufValid 1
- // Define this symbol to enable the bufdump() function which allows dumping the
- // contents of an allocated or free buffer.
- #define DumpData 1
- #ifdef NOT_USED_NOW
- // Wipe free buffers to a guaranteed pattern of garbage to trip up miscreants
- // who attempt to use pointers into released buffers.
- #define FreeWipe 1
- // Use a best fit algorithm when searching for space for an allocation request.
- // This uses memory more efficiently, but allocation will be much slower.
- #define BestFit 1
- #endif /* NOT_USED_NOW */
- #endif /* KMP_DEBUG */
- static bufsize bget_bin_size[] = {
- 0,
- // 1 << 6, /* .5 Cache line */
- 1 << 7, /* 1 Cache line, new */
- 1 << 8, /* 2 Cache lines */
- 1 << 9, /* 4 Cache lines, new */
- 1 << 10, /* 8 Cache lines */
- 1 << 11, /* 16 Cache lines, new */
- 1 << 12, 1 << 13, /* new */
- 1 << 14, 1 << 15, /* new */
- 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, /* 1MB */
- 1 << 21, /* 2MB */
- 1 << 22, /* 4MB */
- 1 << 23, /* 8MB */
- 1 << 24, /* 16MB */
- 1 << 25, /* 32MB */
- };
- #define MAX_BGET_BINS (int)(sizeof(bget_bin_size) / sizeof(bufsize))
- struct bfhead;
- // Declare the interface, including the requested buffer size type, bufsize.
- /* Queue links */
- typedef struct qlinks {
- struct bfhead *flink; /* Forward link */
- struct bfhead *blink; /* Backward link */
- } qlinks_t;
- /* Header in allocated and free buffers */
- typedef struct bhead2 {
- kmp_info_t *bthr; /* The thread which owns the buffer pool */
- bufsize prevfree; /* Relative link back to previous free buffer in memory or
- 0 if previous buffer is allocated. */
- bufsize bsize; /* Buffer size: positive if free, negative if allocated. */
- } bhead2_t;
- /* Make sure the bhead structure is a multiple of SizeQuant in size. */
- typedef union bhead {
- KMP_ALIGN(SizeQuant)
- AlignType b_align;
- char b_pad[sizeof(bhead2_t) + (SizeQuant - (sizeof(bhead2_t) % SizeQuant))];
- bhead2_t bb;
- } bhead_t;
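- /* Worked example (illustrative, assuming 64-bit pointers and
-    SizeQuant == 16): sizeof(bhead2_t) == 8 + 8 + 8 == 24, so
-    sizeof(b_pad) == 24 + (16 - 24 % 16) == 32, rounding the header up to
-    the next quantum boundary. Note the formula adds a full extra quantum
-    when sizeof(bhead2_t) is already a multiple of SizeQuant; the union is
-    only ever padded, so sizeof(bhead_t) % SizeQuant == 0 still holds. */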
- #define BH(p) ((bhead_t *)(p))
- /* Header in directly allocated buffers (by acqfcn) */
- typedef struct bdhead {
- bufsize tsize; /* Total size, including overhead */
- bhead_t bh; /* Common header */
- } bdhead_t;
- #define BDH(p) ((bdhead_t *)(p))
- /* Header in free buffers */
- typedef struct bfhead {
- bhead_t bh; /* Common allocated/free header */
- qlinks_t ql; /* Links on free list */
- } bfhead_t;
- #define BFH(p) ((bfhead_t *)(p))
- typedef struct thr_data {
- bfhead_t freelist[MAX_BGET_BINS];
- #if BufStats
- size_t totalloc; /* Total space currently allocated */
- long numget, numrel; /* Number of bget() and brel() calls */
- long numpblk; /* Number of pool blocks */
- long numpget, numprel; /* Number of block gets and rels */
- long numdget, numdrel; /* Number of direct gets and rels */
- #endif /* BufStats */
- /* Automatic expansion block management functions */
- bget_compact_t compfcn;
- bget_acquire_t acqfcn;
- bget_release_t relfcn;
- bget_mode_t mode; /* what allocation mode to use? */
- bufsize exp_incr; /* Expansion block size */
- bufsize pool_len; /* 0: no bpool calls have been made
- -1: not all pool blocks are the same size
- >0: (common) block size for all bpool calls made so far
- */
- bfhead_t *last_pool; /* Last pool owned by this thread (delay deallocation) */
- } thr_data_t;
- /* Minimum allocation quantum: */
- #define QLSize (sizeof(qlinks_t))
- #define SizeQ ((SizeQuant > QLSize) ? SizeQuant : QLSize)
- #define MaxSize \
- (bufsize)( \
- ~(((bufsize)(1) << (sizeof(bufsize) * CHAR_BIT - 1)) | (SizeQuant - 1)))
- // Maximum for the requested size.
- /* End sentinel: value placed in bsize field of dummy block delimiting
- end of pool block. The most negative number which will fit in a
- bufsize, defined in a way that the compiler will accept. */
- #define ESent \
- ((bufsize)(-(((((bufsize)1) << ((int)sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2))
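- /* Worked arithmetic (illustrative, assuming a 64-bit bufsize and
-    SizeQuant == 16):
-      MaxSize = ~((1 << 63) | 15) = 0x7FFFFFFFFFFFFFF0,
-    i.e. the largest positive bufsize that is a multiple of SizeQuant;
-      ESent   = -((2^62 - 1) * 2) - 2 = -(2^63 - 2) - 2 = -2^63,
-    the most negative bufsize, built step by step so that no intermediate
-    constant overflows -- per the comment above, this is the form the
-    compiler will accept. */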
- /* Thread Data management routines */
- static int bget_get_bin(bufsize size) {
- // binary chop bins
- int lo = 0, hi = MAX_BGET_BINS - 1;
- KMP_DEBUG_ASSERT(size > 0);
- while ((hi - lo) > 1) {
- int mid = (lo + hi) >> 1;
- if (size < bget_bin_size[mid])
- hi = mid - 1;
- else
- lo = mid;
- }
- KMP_DEBUG_ASSERT((lo >= 0) && (lo < MAX_BGET_BINS));
- return lo;
- }
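- /* Illustrative trace (not part of the algorithm): for size == 3000 with
-    the table above, the chop narrows (lo, hi) as (0,19) -> (0,8) ->
-    (4,8) -> (4,5) and returns bin 4 (lower bound 1 << 10). Because
-    hi = mid - 1 can step past the tightest bin, the result may be one bin
-    lower than the largest bound <= size; this is harmless since the
-    mapping is monotonic, insertion and search use the same function, and
-    bget() scans from the returned bin upward. */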
- static void set_thr_data(kmp_info_t *th) {
- int i;
- thr_data_t *data;
- data = (thr_data_t *)((!th->th.th_local.bget_data)
- ? __kmp_allocate(sizeof(*data))
- : th->th.th_local.bget_data);
- memset(data, '\0', sizeof(*data));
- for (i = 0; i < MAX_BGET_BINS; ++i) {
- data->freelist[i].ql.flink = &data->freelist[i];
- data->freelist[i].ql.blink = &data->freelist[i];
- }
- th->th.th_local.bget_data = data;
- th->th.th_local.bget_list = 0;
- #if !USE_CMP_XCHG_FOR_BGET
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- __kmp_init_lock(&th->th.th_local.bget_lock);
- #else
- __kmp_init_bootstrap_lock(&th->th.th_local.bget_lock);
- #endif /* USE_QUEUING_LOCK_FOR_BGET */
- #endif /* ! USE_CMP_XCHG_FOR_BGET */
- }
- static thr_data_t *get_thr_data(kmp_info_t *th) {
- thr_data_t *data;
- data = (thr_data_t *)th->th.th_local.bget_data;
- KMP_DEBUG_ASSERT(data != 0);
- return data;
- }
- /* Walk the free list and release the enqueued buffers */
- static void __kmp_bget_dequeue(kmp_info_t *th) {
- void *p = TCR_SYNC_PTR(th->th.th_local.bget_list);
- if (p != 0) {
- #if USE_CMP_XCHG_FOR_BGET
- {
- volatile void *old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
- while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
- CCAST(void *, old_value), nullptr)) {
- KMP_CPU_PAUSE();
- old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
- }
- p = CCAST(void *, old_value);
- }
- #else /* ! USE_CMP_XCHG_FOR_BGET */
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- __kmp_acquire_lock(&th->th.th_local.bget_lock, __kmp_gtid_from_thread(th));
- #else
- __kmp_acquire_bootstrap_lock(&th->th.th_local.bget_lock);
- #endif /* USE_QUEUING_LOCK_FOR_BGET */
- p = (void *)th->th.th_local.bget_list;
- th->th.th_local.bget_list = 0;
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- __kmp_release_lock(&th->th.th_local.bget_lock, __kmp_gtid_from_thread(th));
- #else
- __kmp_release_bootstrap_lock(&th->th.th_local.bget_lock);
- #endif
- #endif /* USE_CMP_XCHG_FOR_BGET */
- /* Check again to make sure the list is not empty */
- while (p != 0) {
- void *buf = p;
- bfhead_t *b = BFH(((char *)p) - sizeof(bhead_t));
- KMP_DEBUG_ASSERT(b->bh.bb.bsize != 0);
- KMP_DEBUG_ASSERT(((kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1) ==
- (kmp_uintptr_t)th); // clear possible mark
- KMP_DEBUG_ASSERT(b->ql.blink == 0);
- p = (void *)b->ql.flink;
- brel(th, buf);
- }
- }
- }
- /* Chain together the free buffers by using the thread owner field */
- static void __kmp_bget_enqueue(kmp_info_t *th, void *buf
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- ,
- kmp_int32 rel_gtid
- #endif
- ) {
- bfhead_t *b = BFH(((char *)buf) - sizeof(bhead_t));
- KMP_DEBUG_ASSERT(b->bh.bb.bsize != 0);
- KMP_DEBUG_ASSERT(((kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1) ==
- (kmp_uintptr_t)th); // clear possible mark
- b->ql.blink = 0;
- KC_TRACE(10, ("__kmp_bget_enqueue: moving buffer to T#%d list\n",
- __kmp_gtid_from_thread(th)));
- #if USE_CMP_XCHG_FOR_BGET
- {
- volatile void *old_value = TCR_PTR(th->th.th_local.bget_list);
- /* the next pointer must be set before setting bget_list to buf to avoid
- exposing a broken list to other threads, even for an instant. */
- b->ql.flink = BFH(CCAST(void *, old_value));
- while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
- CCAST(void *, old_value), buf)) {
- KMP_CPU_PAUSE();
- old_value = TCR_PTR(th->th.th_local.bget_list);
- /* the next pointer must be set before setting bget_list to buf to avoid
- exposing a broken list to other threads, even for an instant. */
- b->ql.flink = BFH(CCAST(void *, old_value));
- }
- }
- #else /* ! USE_CMP_XCHG_FOR_BGET */
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- __kmp_acquire_lock(&th->th.th_local.bget_lock, rel_gtid);
- #else
- __kmp_acquire_bootstrap_lock(&th->th.th_local.bget_lock);
- #endif
- b->ql.flink = BFH(th->th.th_local.bget_list);
- th->th.th_local.bget_list = (void *)buf;
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- __kmp_release_lock(&th->th.th_local.bget_lock, rel_gtid);
- #else
- __kmp_release_bootstrap_lock(&th->th.th_local.bget_lock);
- #endif
- #endif /* USE_CMP_XCHG_FOR_BGET */
- }
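- /* Sketch of the cross-thread free protocol (descriptive only): brel()
-    on a non-owner thread pushes the buffer onto the owning thread's
-    bget_list with the CAS loop above -- a classic Treiber-stack push --
-    and the owner later drains the entire list with a single swap-to-NULL
-    in __kmp_bget_dequeue() before it allocates or adds a pool. Only the
-    owner ever pops, so the usual pop-side ABA hazard of lock-free stacks
-    does not arise here. */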
- /* insert buffer back onto a new freelist */
- static void __kmp_bget_insert_into_freelist(thr_data_t *thr, bfhead_t *b) {
- int bin;
- KMP_DEBUG_ASSERT(((size_t)b) % SizeQuant == 0);
- KMP_DEBUG_ASSERT(b->bh.bb.bsize % SizeQuant == 0);
- bin = bget_get_bin(b->bh.bb.bsize);
- KMP_DEBUG_ASSERT(thr->freelist[bin].ql.blink->ql.flink ==
- &thr->freelist[bin]);
- KMP_DEBUG_ASSERT(thr->freelist[bin].ql.flink->ql.blink ==
- &thr->freelist[bin]);
- b->ql.flink = &thr->freelist[bin];
- b->ql.blink = thr->freelist[bin].ql.blink;
- thr->freelist[bin].ql.blink = b;
- b->ql.blink->ql.flink = b;
- }
- /* unlink the buffer from the old freelist */
- static void __kmp_bget_remove_from_freelist(bfhead_t *b) {
- KMP_DEBUG_ASSERT(b->ql.blink->ql.flink == b);
- KMP_DEBUG_ASSERT(b->ql.flink->ql.blink == b);
- b->ql.blink->ql.flink = b->ql.flink;
- b->ql.flink->ql.blink = b->ql.blink;
- }
- /* GET STATS -- check info on free list */
- static void bcheck(kmp_info_t *th, bufsize *max_free, bufsize *total_free) {
- thr_data_t *thr = get_thr_data(th);
- int bin;
- *total_free = *max_free = 0;
- for (bin = 0; bin < MAX_BGET_BINS; ++bin) {
- bfhead_t *b, *best;
- best = &thr->freelist[bin];
- b = best->ql.flink;
- while (b != &thr->freelist[bin]) {
- *total_free += (b->bh.bb.bsize - sizeof(bhead_t));
- if ((best == &thr->freelist[bin]) || (b->bh.bb.bsize < best->bh.bb.bsize))
- best = b;
- /* Link to next buffer */
- b = b->ql.flink;
- }
- if (*max_free < best->bh.bb.bsize)
- *max_free = best->bh.bb.bsize;
- }
- if (*max_free > (bufsize)sizeof(bhead_t))
- *max_free -= sizeof(bhead_t);
- }
- /* BGET -- Allocate a buffer. */
- static void *bget(kmp_info_t *th, bufsize requested_size) {
- thr_data_t *thr = get_thr_data(th);
- bufsize size = requested_size;
- bfhead_t *b;
- void *buf;
- int compactseq = 0;
- int use_blink = 0;
- /* For BestFit */
- bfhead_t *best;
- if (size < 0 || size + sizeof(bhead_t) > MaxSize) {
- return NULL;
- }
- __kmp_bget_dequeue(th); /* Release any queued buffers */
- if (size < (bufsize)SizeQ) { // Need at least room for the queue links.
- size = SizeQ;
- }
- #if defined(SizeQuant) && (SizeQuant > 1)
- size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1));
- #endif
- size += sizeof(bhead_t); // Add overhead in allocated buffer to size required.
- KMP_DEBUG_ASSERT(size >= 0);
- KMP_DEBUG_ASSERT(size % SizeQuant == 0);
- use_blink = (thr->mode == bget_mode_lifo);
- /* If a compact function was provided in the call to bectl(), wrap
- a loop around the allocation process to allow compaction to
- intervene in case we don't find a suitable buffer in the chain. */
- for (;;) {
- int bin;
- for (bin = bget_get_bin(size); bin < MAX_BGET_BINS; ++bin) {
- /* Link to next buffer */
- b = (use_blink ? thr->freelist[bin].ql.blink
- : thr->freelist[bin].ql.flink);
- if (thr->mode == bget_mode_best) {
- best = &thr->freelist[bin];
- /* Scan the free list searching for the first buffer big enough
- to hold the requested size buffer. */
- while (b != &thr->freelist[bin]) {
- if (b->bh.bb.bsize >= (bufsize)size) {
- if ((best == &thr->freelist[bin]) ||
- (b->bh.bb.bsize < best->bh.bb.bsize)) {
- best = b;
- }
- }
- /* Link to next buffer */
- b = (use_blink ? b->ql.blink : b->ql.flink);
- }
- b = best;
- }
- while (b != &thr->freelist[bin]) {
- if ((bufsize)b->bh.bb.bsize >= (bufsize)size) {
- // Buffer is big enough to satisfy the request. Allocate it to the
- // caller. We must decide whether the buffer is large enough to split
- // into the part given to the caller and a free buffer that remains
- // on the free list, or whether the entire buffer should be removed
- // from the free list and given to the caller in its entirety. We
- // only split the buffer if enough room remains for a header plus the
- // minimum quantum of allocation.
- if ((b->bh.bb.bsize - (bufsize)size) >
- (bufsize)(SizeQ + (sizeof(bhead_t)))) {
- bhead_t *ba, *bn;
- ba = BH(((char *)b) + (b->bh.bb.bsize - (bufsize)size));
- bn = BH(((char *)ba) + size);
- KMP_DEBUG_ASSERT(bn->bb.prevfree == b->bh.bb.bsize);
- /* Subtract size from length of free block. */
- b->bh.bb.bsize -= (bufsize)size;
- /* Link allocated buffer to the previous free buffer. */
- ba->bb.prevfree = b->bh.bb.bsize;
- /* Plug negative size into user buffer. */
- ba->bb.bsize = -size;
- /* Mark this buffer as owned by this thread. */
- TCW_PTR(ba->bb.bthr,
- th); // not an allocated address (do not mark it)
- /* Mark buffer after this one not preceded by free block. */
- bn->bb.prevfree = 0;
- // unlink buffer from old freelist, and reinsert into new freelist
- __kmp_bget_remove_from_freelist(b);
- __kmp_bget_insert_into_freelist(thr, b);
- #if BufStats
- thr->totalloc += (size_t)size;
- thr->numget++; /* Increment number of bget() calls */
- #endif
- buf = (void *)((((char *)ba) + sizeof(bhead_t)));
- KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
- return buf;
- } else {
- bhead_t *ba;
- ba = BH(((char *)b) + b->bh.bb.bsize);
- KMP_DEBUG_ASSERT(ba->bb.prevfree == b->bh.bb.bsize);
- /* The buffer isn't big enough to split. Give the whole
- shebang to the caller and remove it from the free list. */
- __kmp_bget_remove_from_freelist(b);
- #if BufStats
- thr->totalloc += (size_t)b->bh.bb.bsize;
- thr->numget++; /* Increment number of bget() calls */
- #endif
- /* Negate size to mark buffer allocated. */
- b->bh.bb.bsize = -(b->bh.bb.bsize);
- /* Mark this buffer as owned by this thread. */
- TCW_PTR(ba->bb.bthr, th); // not an allocated address (do not mark)
- /* Zero the back pointer in the next buffer in memory
- to indicate that this buffer is allocated. */
- ba->bb.prevfree = 0;
- /* Give user buffer starting at queue links. */
- buf = (void *)&(b->ql);
- KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
- return buf;
- }
- }
- /* Link to next buffer */
- b = (use_blink ? b->ql.blink : b->ql.flink);
- }
- }
- /* We failed to find a buffer. If there's a compact function defined,
- notify it of the size requested. If it returns TRUE, try the allocation
- again. */
- if ((thr->compfcn == 0) || (!(*thr->compfcn)(size, ++compactseq))) {
- break;
- }
- }
- /* No buffer available with requested size free. */
- /* Don't give up yet -- look in the reserve supply. */
- if (thr->acqfcn != 0) {
- if (size > (bufsize)(thr->exp_incr - sizeof(bhead_t))) {
- /* Request is too large to fit in a single expansion block.
- Try to satisfy it by a direct buffer acquisition. */
- bdhead_t *bdh;
- size += sizeof(bdhead_t) - sizeof(bhead_t);
- KE_TRACE(10, ("%%%%%% MALLOC( %d )\n", (int)size));
- /* richryan */
- bdh = BDH((*thr->acqfcn)((bufsize)size));
- if (bdh != NULL) {
- // Mark the buffer special by setting size field of its header to zero.
- bdh->bh.bb.bsize = 0;
- /* Mark this buffer as owned by this thread. */
- TCW_PTR(bdh->bh.bb.bthr, th); // don't mark buffer as allocated,
- // because direct buffer never goes to free list
- bdh->bh.bb.prevfree = 0;
- bdh->tsize = size;
- #if BufStats
- thr->totalloc += (size_t)size;
- thr->numget++; /* Increment number of bget() calls */
- thr->numdget++; /* Direct bget() call count */
- #endif
- buf = (void *)(bdh + 1);
- KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
- return buf;
- }
- } else {
- /* Try to obtain a new expansion block */
- void *newpool;
- KE_TRACE(10, ("%%%%%% MALLOCB( %d )\n", (int)thr->exp_incr));
- /* richryan */
- newpool = (*thr->acqfcn)((bufsize)thr->exp_incr);
- KMP_DEBUG_ASSERT(((size_t)newpool) % SizeQuant == 0);
- if (newpool != NULL) {
- bpool(th, newpool, thr->exp_incr);
- buf = bget(
- th, requested_size); /* This can't, I say, can't get into a loop. */
- return buf;
- }
- }
- }
- /* Still no buffer available */
- return NULL;
- }
- /* BGETZ -- Allocate a buffer and clear its contents to zero. We clear
- the entire contents of the buffer to zero, not just the
- region requested by the caller. */
- static void *bgetz(kmp_info_t *th, bufsize size) {
- char *buf = (char *)bget(th, size);
- if (buf != NULL) {
- bhead_t *b;
- bufsize rsize;
- b = BH(buf - sizeof(bhead_t));
- rsize = -(b->bb.bsize);
- if (rsize == 0) {
- bdhead_t *bd;
- bd = BDH(buf - sizeof(bdhead_t));
- rsize = bd->tsize - (bufsize)sizeof(bdhead_t);
- } else {
- rsize -= sizeof(bhead_t);
- }
- KMP_DEBUG_ASSERT(rsize >= size);
- (void)memset(buf, 0, (bufsize)rsize);
- }
- return ((void *)buf);
- }
- /* BGETR -- Reallocate a buffer. This is a minimal implementation,
- simply in terms of brel() and bget(). It could be
- enhanced to allow the buffer to grow into adjacent free
- blocks and to avoid moving data unnecessarily. */
- static void *bgetr(kmp_info_t *th, void *buf, bufsize size) {
- void *nbuf;
- bufsize osize; /* Old size of buffer */
- bhead_t *b;
- nbuf = bget(th, size);
- if (nbuf == NULL) { /* Acquire new buffer */
- return NULL;
- }
- if (buf == NULL) {
- return nbuf;
- }
- b = BH(((char *)buf) - sizeof(bhead_t));
- osize = -b->bb.bsize;
- if (osize == 0) {
- /* Buffer acquired directly through acqfcn. */
- bdhead_t *bd;
- bd = BDH(((char *)buf) - sizeof(bdhead_t));
- osize = bd->tsize - (bufsize)sizeof(bdhead_t);
- } else {
- osize -= sizeof(bhead_t);
- }
- KMP_DEBUG_ASSERT(osize > 0);
- (void)KMP_MEMCPY((char *)nbuf, (char *)buf, /* Copy the data */
- (size_t)((size < osize) ? size : osize));
- brel(th, buf);
- return nbuf;
- }
- /* BREL -- Release a buffer. */
- static void brel(kmp_info_t *th, void *buf) {
- thr_data_t *thr = get_thr_data(th);
- bfhead_t *b, *bn;
- kmp_info_t *bth;
- KMP_DEBUG_ASSERT(buf != NULL);
- KMP_DEBUG_ASSERT(((size_t)buf) % SizeQuant == 0);
- b = BFH(((char *)buf) - sizeof(bhead_t));
- if (b->bh.bb.bsize == 0) { /* Directly-acquired buffer? */
- bdhead_t *bdh;
- bdh = BDH(((char *)buf) - sizeof(bdhead_t));
- KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0);
- #if BufStats
- thr->totalloc -= (size_t)bdh->tsize;
- thr->numdrel++; /* Number of direct releases */
- thr->numrel++; /* Increment number of brel() calls */
- #endif /* BufStats */
- #ifdef FreeWipe
- (void)memset((char *)buf, 0x55, (size_t)(bdh->tsize - sizeof(bdhead_t)));
- #endif /* FreeWipe */
- KE_TRACE(10, ("%%%%%% FREE( %p )\n", (void *)bdh));
- KMP_DEBUG_ASSERT(thr->relfcn != 0);
- (*thr->relfcn)((void *)bdh); /* Release it directly. */
- return;
- }
- bth = (kmp_info_t *)((kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) &
- ~1); // clear possible mark before comparison
- if (bth != th) {
- /* Add this buffer to be released by the owning thread later */
- __kmp_bget_enqueue(bth, buf
- #ifdef USE_QUEUING_LOCK_FOR_BGET
- ,
- __kmp_gtid_from_thread(th)
- #endif
- );
- return;
- }
- /* Buffer size must be negative, indicating that the buffer is allocated. */
- if (b->bh.bb.bsize >= 0) {
- bn = NULL;
- }
- KMP_DEBUG_ASSERT(b->bh.bb.bsize < 0);
- /* Back pointer in next buffer must be zero, indicating the same thing: */
- KMP_DEBUG_ASSERT(BH((char *)b - b->bh.bb.bsize)->bb.prevfree == 0);
- #if BufStats
- thr->numrel++; /* Increment number of brel() calls */
- thr->totalloc += (size_t)b->bh.bb.bsize;
- #endif
- /* If the back link is nonzero, the previous buffer is free. */
- if (b->bh.bb.prevfree != 0) {
- /* The previous buffer is free. Consolidate this buffer with it by adding
- the length of this buffer to the previous free buffer. Note that we
- subtract the size in the buffer being released, since it's negative to
- indicate that the buffer is allocated. */
- bufsize size = b->bh.bb.bsize;
- /* Make the previous buffer the one we're working on. */
- KMP_DEBUG_ASSERT(BH((char *)b - b->bh.bb.prevfree)->bb.bsize ==
- b->bh.bb.prevfree);
- b = BFH(((char *)b) - b->bh.bb.prevfree);
- b->bh.bb.bsize -= size;
- /* unlink the buffer from the old freelist */
- __kmp_bget_remove_from_freelist(b);
- } else {
- /* The previous buffer is allocated. Mark this buffer size as positive
-    (i.e. free) and fall through to place the buffer on the free list as an
-    isolated free block. */
- b->bh.bb.bsize = -b->bh.bb.bsize;
- }
- /* insert buffer back onto a new freelist */
- __kmp_bget_insert_into_freelist(thr, b);
- /* Now we look at the next buffer in memory, located by advancing from
- the start of this buffer by its size, to see if that buffer is
- free. If it is, we combine this buffer with the next one in
- memory, dechaining the second buffer from the free list. */
- bn = BFH(((char *)b) + b->bh.bb.bsize);
- if (bn->bh.bb.bsize > 0) {
- /* The buffer is free. Remove it from the free list and add
- its size to that of our buffer. */
- KMP_DEBUG_ASSERT(BH((char *)bn + bn->bh.bb.bsize)->bb.prevfree ==
- bn->bh.bb.bsize);
- __kmp_bget_remove_from_freelist(bn);
- b->bh.bb.bsize += bn->bh.bb.bsize;
- /* unlink the buffer from the old freelist, and reinsert it into the new
- * freelist */
- __kmp_bget_remove_from_freelist(b);
- __kmp_bget_insert_into_freelist(thr, b);
- /* Finally, advance to the buffer that follows the newly
- consolidated free block. We must set its backpointer to the
- head of the consolidated free block. We know the next block
- must be an allocated block because the process of recombination
- guarantees that two free blocks will never be contiguous in
- memory. */
- bn = BFH(((char *)b) + b->bh.bb.bsize);
- }
- #ifdef FreeWipe
- (void)memset(((char *)b) + sizeof(bfhead_t), 0x55,
- (size_t)(b->bh.bb.bsize - sizeof(bfhead_t)));
- #endif
- KMP_DEBUG_ASSERT(bn->bh.bb.bsize < 0);
- /* The next buffer is allocated. Set the backpointer in it to point
- to this buffer; the previous free buffer in memory. */
- bn->bh.bb.prevfree = b->bh.bb.bsize;
- /* If a block-release function is defined, and this free buffer
- constitutes the entire block, release it. Note that pool_len
- is defined in such a way that the test will fail unless all
- pool blocks are the same size. */
- if (thr->relfcn != 0 &&
- b->bh.bb.bsize == (bufsize)(thr->pool_len - sizeof(bhead_t))) {
- #if BufStats
- if (thr->numpblk !=
- 1) { /* Do not release the last buffer until finalization time */
- #endif
- KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0);
- KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.bsize == ESent);
- KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.prevfree ==
- b->bh.bb.bsize);
- /* Unlink the buffer from the free list */
- __kmp_bget_remove_from_freelist(b);
- KE_TRACE(10, ("%%%%%% FREE( %p )\n", (void *)b));
- (*thr->relfcn)(b);
- #if BufStats
- thr->numprel++; /* Nr of expansion block releases */
- thr->numpblk--; /* Total number of blocks */
- KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel);
- // avoid leaving stale last_pool pointer around if it is being dealloced
- if (thr->last_pool == b)
- thr->last_pool = 0;
- } else {
- thr->last_pool = b;
- }
- #endif /* BufStats */
- }
- }
- /* BECTL -- Establish automatic pool expansion control */
- static void bectl(kmp_info_t *th, bget_compact_t compact,
- bget_acquire_t acquire, bget_release_t release,
- bufsize pool_incr) {
- thr_data_t *thr = get_thr_data(th);
- thr->compfcn = compact;
- thr->acqfcn = acquire;
- thr->relfcn = release;
- thr->exp_incr = pool_incr;
- }
- /* BPOOL -- Add a region of memory to the buffer pool. */
- static void bpool(kmp_info_t *th, void *buf, bufsize len) {
- /* int bin = 0; */
- thr_data_t *thr = get_thr_data(th);
- bfhead_t *b = BFH(buf);
- bhead_t *bn;
- __kmp_bget_dequeue(th); /* Release any queued buffers */
- #ifdef SizeQuant
- len &= ~((bufsize)(SizeQuant - 1));
- #endif
- if (thr->pool_len == 0) {
- thr->pool_len = len;
- } else if (len != thr->pool_len) {
- thr->pool_len = -1;
- }
- #if BufStats
- thr->numpget++; /* Number of block acquisitions */
- thr->numpblk++; /* Number of blocks total */
- KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel);
- #endif /* BufStats */
- /* Since the block is initially occupied by a single free buffer,
- it had better not be (much) larger than the largest buffer
- whose size we can store in bhead.bb.bsize. */
- KMP_DEBUG_ASSERT(len - sizeof(bhead_t) <= -((bufsize)ESent + 1));
- /* Clear the backpointer at the start of the block to indicate that
- there is no free block prior to this one. That blocks
- recombination when the first block in memory is released. */
- b->bh.bb.prevfree = 0;
- /* Create a dummy allocated buffer at the end of the pool. This dummy
- buffer is seen when a buffer at the end of the pool is released and
- blocks recombination of the last buffer with the dummy buffer at
- the end. The length in the dummy buffer is set to the largest
- negative number to denote the end of the pool for diagnostic
- routines (this specific value is not counted on by the actual
- allocation and release functions). */
- len -= sizeof(bhead_t);
- b->bh.bb.bsize = (bufsize)len;
- /* Set the owner of this buffer */
- TCW_PTR(b->bh.bb.bthr,
- (kmp_info_t *)((kmp_uintptr_t)th |
- 1)); // mark the buffer as allocated address
- /* Chain the new block to the free list. */
- __kmp_bget_insert_into_freelist(thr, b);
- #ifdef FreeWipe
- (void)memset(((char *)b) + sizeof(bfhead_t), 0x55,
- (size_t)(len - sizeof(bfhead_t)));
- #endif
- bn = BH(((char *)b) + len);
- bn->bb.prevfree = (bufsize)len;
- /* Definition of ESent assumes two's complement! */
- KMP_DEBUG_ASSERT((~0) == -1 && (bn != 0));
- bn->bb.bsize = ESent;
- }
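- /* Resulting pool layout (illustrative): the block becomes one free
-    buffer at b with bsize = len - sizeof(bhead_t) and prevfree = 0,
-    followed at b + bsize by a dummy allocated header with bsize = ESent
-    and prevfree = bsize. The sentinel stops brel() from coalescing past
-    the end of the block, and prevfree = 0 at the front blocks coalescing
-    before its start. */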
- /* BFREED -- Dump the free lists for this thread. */
- static void bfreed(kmp_info_t *th) {
- int bin = 0, count = 0;
- int gtid = __kmp_gtid_from_thread(th);
- thr_data_t *thr = get_thr_data(th);
- #if BufStats
- __kmp_printf_no_lock("__kmp_printpool: T#%d total=%" KMP_UINT64_SPEC
- " get=%" KMP_INT64_SPEC " rel=%" KMP_INT64_SPEC
- " pblk=%" KMP_INT64_SPEC " pget=%" KMP_INT64_SPEC
- " prel=%" KMP_INT64_SPEC " dget=%" KMP_INT64_SPEC
- " drel=%" KMP_INT64_SPEC "\n",
- gtid, (kmp_uint64)thr->totalloc, (kmp_int64)thr->numget,
- (kmp_int64)thr->numrel, (kmp_int64)thr->numpblk,
- (kmp_int64)thr->numpget, (kmp_int64)thr->numprel,
- (kmp_int64)thr->numdget, (kmp_int64)thr->numdrel);
- #endif
- for (bin = 0; bin < MAX_BGET_BINS; ++bin) {
- bfhead_t *b;
- for (b = thr->freelist[bin].ql.flink; b != &thr->freelist[bin];
- b = b->ql.flink) {
- bufsize bs = b->bh.bb.bsize;
- KMP_DEBUG_ASSERT(b->ql.blink->ql.flink == b);
- KMP_DEBUG_ASSERT(b->ql.flink->ql.blink == b);
- KMP_DEBUG_ASSERT(bs > 0);
- count += 1;
- __kmp_printf_no_lock(
- "__kmp_printpool: T#%d Free block: 0x%p size %6ld bytes.\n", gtid, b,
- (long)bs);
- #ifdef FreeWipe
- {
- char *lerr = ((char *)b) + sizeof(bfhead_t);
- if ((bs > sizeof(bfhead_t)) &&
- ((*lerr != 0x55) ||
- (memcmp(lerr, lerr + 1, (size_t)(bs - (sizeof(bfhead_t) + 1))) !=
- 0))) {
- __kmp_printf_no_lock("__kmp_printpool: T#%d (Contents of above "
- "free block have been overstored.)\n",
- gtid);
- }
- }
- #endif
- }
- }
- if (count == 0)
- __kmp_printf_no_lock("__kmp_printpool: T#%d No free blocks\n", gtid);
- }
- void __kmp_initialize_bget(kmp_info_t *th) {
- KMP_DEBUG_ASSERT(SizeQuant >= sizeof(void *) && (th != 0));
- set_thr_data(th);
- bectl(th, (bget_compact_t)0, (bget_acquire_t)malloc, (bget_release_t)free,
- (bufsize)__kmp_malloc_pool_incr);
- }
- void __kmp_finalize_bget(kmp_info_t *th) {
- thr_data_t *thr;
- bfhead_t *b;
- KMP_DEBUG_ASSERT(th != 0);
- #if BufStats
- thr = (thr_data_t *)th->th.th_local.bget_data;
- KMP_DEBUG_ASSERT(thr != NULL);
- b = thr->last_pool;
- /* If a block-release function is defined, and this free buffer constitutes
- the entire block, release it. Note that pool_len is defined in such a way
- that the test will fail unless all pool blocks are the same size. */
- // Deallocate the last pool if one exists because we no longer do it in brel()
- if (thr->relfcn != 0 && b != 0 && thr->numpblk != 0 &&
- b->bh.bb.bsize == (bufsize)(thr->pool_len - sizeof(bhead_t))) {
- KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0);
- KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.bsize == ESent);
- KMP_DEBUG_ASSERT(BH((char *)b + b->bh.bb.bsize)->bb.prevfree ==
- b->bh.bb.bsize);
- /* Unlink the buffer from the free list */
- __kmp_bget_remove_from_freelist(b);
- KE_TRACE(10, ("%%%%%% FREE( %p )\n", (void *)b));
- (*thr->relfcn)(b);
- thr->numprel++; /* Nr of expansion block releases */
- thr->numpblk--; /* Total number of blocks */
- KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel);
- }
- #endif /* BufStats */
- /* Deallocate bget_data */
- if (th->th.th_local.bget_data != NULL) {
- __kmp_free(th->th.th_local.bget_data);
- th->th.th_local.bget_data = NULL;
- }
- }
- void kmpc_set_poolsize(size_t size) {
- bectl(__kmp_get_thread(), (bget_compact_t)0, (bget_acquire_t)malloc,
- (bget_release_t)free, (bufsize)size);
- }
- size_t kmpc_get_poolsize(void) {
- thr_data_t *p;
- p = get_thr_data(__kmp_get_thread());
- return p->exp_incr;
- }
- void kmpc_set_poolmode(int mode) {
- thr_data_t *p;
- if (mode == bget_mode_fifo || mode == bget_mode_lifo ||
- mode == bget_mode_best) {
- p = get_thr_data(__kmp_get_thread());
- p->mode = (bget_mode_t)mode;
- }
- }
- int kmpc_get_poolmode(void) {
- thr_data_t *p;
- p = get_thr_data(__kmp_get_thread());
- return p->mode;
- }
- void kmpc_get_poolstat(size_t *maxmem, size_t *allmem) {
- kmp_info_t *th = __kmp_get_thread();
- bufsize a, b;
- __kmp_bget_dequeue(th); /* Release any queued buffers */
- bcheck(th, &a, &b);
- *maxmem = a;
- *allmem = b;
- }
- void kmpc_poolprint(void) {
- kmp_info_t *th = __kmp_get_thread();
- __kmp_bget_dequeue(th); /* Release any queued buffers */
- bfreed(th);
- }
- #endif // #if KMP_USE_BGET
- void *kmpc_malloc(size_t size) {
- void *ptr;
- ptr = bget(__kmp_entry_thread(), (bufsize)(size + sizeof(ptr)));
- if (ptr != NULL) {
- // save allocated pointer just before one returned to user
- *(void **)ptr = ptr;
- ptr = (void **)ptr + 1;
- }
- return ptr;
- }
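- // Layout produced above (illustrative): bget() returns p; the word at p
- // stores p itself and the caller sees p + sizeof(void *):
- //   p: [ saved pointer p ][ user data ... ]
- // kmpc_free()/kmpc_realloc() read the saved word at ((void **)ptr - 1)
- // to recover the original allocation.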
- #define IS_POWER_OF_TWO(n) (((n) & ((n)-1)) == 0)
- void *kmpc_aligned_malloc(size_t size, size_t alignment) {
- void *ptr;
- void *ptr_allocated;
- KMP_DEBUG_ASSERT(alignment < 32 * 1024); // Alignment should not be too big
- if (!IS_POWER_OF_TWO(alignment)) {
- // AC: do we need to issue a warning here?
- errno = EINVAL;
- return NULL;
- }
- size = size + sizeof(void *) + alignment;
- ptr_allocated = bget(__kmp_entry_thread(), (bufsize)size);
- if (ptr_allocated != NULL) {
- // save allocated pointer just before one returned to user
- ptr = (void *)(((kmp_uintptr_t)ptr_allocated + sizeof(void *) + alignment) &
- ~(alignment - 1));
- *((void **)ptr - 1) = ptr_allocated;
- } else {
- ptr = NULL;
- }
- return ptr;
- }
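- // Worked example (illustrative, assuming alignment == 64 and 64-bit
- // pointers): if bget() returns ptr_allocated == 0x1008, then
- //   ptr = (0x1008 + 8 + 64) & ~63 = 0x1050 & ~0x3F = 0x1040,
- // which is 64-byte aligned and leaves 0x1040 - 0x1008 = 56 bytes below
- // it -- room to stash ptr_allocated at ((void **)ptr - 1) == 0x1038.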
- void *kmpc_calloc(size_t nelem, size_t elsize) {
- void *ptr;
- ptr = bgetz(__kmp_entry_thread(), (bufsize)(nelem * elsize + sizeof(ptr)));
- if (ptr != NULL) {
- // save allocated pointer just before one returned to user
- *(void **)ptr = ptr;
- ptr = (void **)ptr + 1;
- }
- return ptr;
- }
- void *kmpc_realloc(void *ptr, size_t size) {
- void *result = NULL;
- if (ptr == NULL) {
- // If pointer is NULL, realloc behaves like malloc.
- result = bget(__kmp_entry_thread(), (bufsize)(size + sizeof(ptr)));
- // save allocated pointer just before one returned to user
- if (result != NULL) {
- *(void **)result = result;
- result = (void **)result + 1;
- }
- } else if (size == 0) {
- // If size is 0, realloc behaves like free.
- // The thread must be registered by the call to kmpc_malloc() or
- // kmpc_calloc() before.
- // So it should be safe to call __kmp_get_thread(), not
- // __kmp_entry_thread().
- KMP_ASSERT(*((void **)ptr - 1));
- brel(__kmp_get_thread(), *((void **)ptr - 1));
- } else {
- result = bgetr(__kmp_entry_thread(), *((void **)ptr - 1),
- (bufsize)(size + sizeof(ptr)));
- if (result != NULL) {
- *(void **)result = result;
- result = (void **)result + 1;
- }
- }
- return result;
- }
- // NOTE: the library must have already been initialized by a previous allocate
- void kmpc_free(void *ptr) {
- if (!__kmp_init_serial) {
- return;
- }
- if (ptr != NULL) {
- kmp_info_t *th = __kmp_get_thread();
- __kmp_bget_dequeue(th); /* Release any queued buffers */
- // extract allocated pointer and free it
- KMP_ASSERT(*((void **)ptr - 1));
- brel(th, *((void **)ptr - 1));
- }
- }
- void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL) {
- void *ptr;
- KE_TRACE(30, ("-> __kmp_thread_malloc( %p, %d ) called from %s:%d\n", th,
- (int)size KMP_SRC_LOC_PARM));
- ptr = bget(th, (bufsize)size);
- KE_TRACE(30, ("<- __kmp_thread_malloc() returns %p\n", ptr));
- return ptr;
- }
- void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
- size_t elsize KMP_SRC_LOC_DECL) {
- void *ptr;
- KE_TRACE(30, ("-> __kmp_thread_calloc( %p, %d, %d ) called from %s:%d\n", th,
- (int)nelem, (int)elsize KMP_SRC_LOC_PARM));
- ptr = bgetz(th, (bufsize)(nelem * elsize));
- KE_TRACE(30, ("<- __kmp_thread_calloc() returns %p\n", ptr));
- return ptr;
- }
- void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
- size_t size KMP_SRC_LOC_DECL) {
- KE_TRACE(30, ("-> __kmp_thread_realloc( %p, %p, %d ) called from %s:%d\n", th,
- ptr, (int)size KMP_SRC_LOC_PARM));
- ptr = bgetr(th, ptr, (bufsize)size);
- KE_TRACE(30, ("<- __kmp_thread_realloc() returns %p\n", ptr));
- return ptr;
- }
- void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL) {
- KE_TRACE(30, ("-> __kmp_thread_free( %p, %p ) called from %s:%d\n", th,
- ptr KMP_SRC_LOC_PARM));
- if (ptr != NULL) {
- __kmp_bget_dequeue(th); /* Release any queued buffers */
- brel(th, ptr);
- }
- KE_TRACE(30, ("<- __kmp_thread_free()\n"));
- }
- /* OMP 5.0 Memory Management support */
- static const char *kmp_mk_lib_name;
- static void *h_memkind;
- /* memkind experimental API: */
- // memkind_alloc
- static void *(*kmp_mk_alloc)(void *k, size_t sz);
- // memkind_free
- static void (*kmp_mk_free)(void *kind, void *ptr);
- // memkind_check_available
- static int (*kmp_mk_check)(void *kind);
- // kinds we are going to use
- static void **mk_default;
- static void **mk_interleave;
- static void **mk_hbw;
- static void **mk_hbw_interleave;
- static void **mk_hbw_preferred;
- static void **mk_hugetlb;
- static void **mk_hbw_hugetlb;
- static void **mk_hbw_preferred_hugetlb;
- static void **mk_dax_kmem;
- static void **mk_dax_kmem_all;
- static void **mk_dax_kmem_preferred;
- static void *(*kmp_target_alloc_host)(size_t size, int device);
- static void *(*kmp_target_alloc_shared)(size_t size, int device);
- static void *(*kmp_target_alloc_device)(size_t size, int device);
- static void *(*kmp_target_free)(void *ptr, int device);
- static bool __kmp_target_mem_available;
- #define KMP_IS_TARGET_MEM_SPACE(MS) \
- (MS == llvm_omp_target_host_mem_space || \
- MS == llvm_omp_target_shared_mem_space || \
- MS == llvm_omp_target_device_mem_space)
- #define KMP_IS_TARGET_MEM_ALLOC(MA) \
- (MA == llvm_omp_target_host_mem_alloc || \
- MA == llvm_omp_target_shared_mem_alloc || \
- MA == llvm_omp_target_device_mem_alloc)
- #if KMP_OS_UNIX && KMP_DYNAMIC_LIB && !KMP_OS_DARWIN
- static inline void chk_kind(void ***pkind) {
- KMP_DEBUG_ASSERT(pkind);
- if (*pkind) // symbol found
- if (kmp_mk_check(**pkind)) // kind not available or error
- *pkind = NULL;
- }
- #endif
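- // Note on the double indirection above (descriptive): dlsym() on a
- // MEMKIND_* symbol yields the address of the library's kind variable
- // (a void **), so chk_kind() passes **pkind -- the kind itself -- to
- // memkind_check_available() and nulls the cached address on failure.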
- void __kmp_init_memkind() {
- // as of 2018-07-31 memkind does not support Windows*, exclude it for now
- #if KMP_OS_UNIX && KMP_DYNAMIC_LIB && !KMP_OS_DARWIN
- // use of statically linked memkind is problematic, as it depends on libnuma
- kmp_mk_lib_name = "libmemkind.so";
- h_memkind = dlopen(kmp_mk_lib_name, RTLD_LAZY);
- if (h_memkind) {
- kmp_mk_check = (int (*)(void *))dlsym(h_memkind, "memkind_check_available");
- kmp_mk_alloc =
- (void *(*)(void *, size_t))dlsym(h_memkind, "memkind_malloc");
- kmp_mk_free = (void (*)(void *, void *))dlsym(h_memkind, "memkind_free");
- mk_default = (void **)dlsym(h_memkind, "MEMKIND_DEFAULT");
- if (kmp_mk_check && kmp_mk_alloc && kmp_mk_free && mk_default &&
- !kmp_mk_check(*mk_default)) {
- __kmp_memkind_available = 1;
- mk_interleave = (void **)dlsym(h_memkind, "MEMKIND_INTERLEAVE");
- chk_kind(&mk_interleave);
- mk_hbw = (void **)dlsym(h_memkind, "MEMKIND_HBW");
- chk_kind(&mk_hbw);
- mk_hbw_interleave = (void **)dlsym(h_memkind, "MEMKIND_HBW_INTERLEAVE");
- chk_kind(&mk_hbw_interleave);
- mk_hbw_preferred = (void **)dlsym(h_memkind, "MEMKIND_HBW_PREFERRED");
- chk_kind(&mk_hbw_preferred);
- mk_hugetlb = (void **)dlsym(h_memkind, "MEMKIND_HUGETLB");
- chk_kind(&mk_hugetlb);
- mk_hbw_hugetlb = (void **)dlsym(h_memkind, "MEMKIND_HBW_HUGETLB");
- chk_kind(&mk_hbw_hugetlb);
- mk_hbw_preferred_hugetlb =
- (void **)dlsym(h_memkind, "MEMKIND_HBW_PREFERRED_HUGETLB");
- chk_kind(&mk_hbw_preferred_hugetlb);
- mk_dax_kmem = (void **)dlsym(h_memkind, "MEMKIND_DAX_KMEM");
- chk_kind(&mk_dax_kmem);
- mk_dax_kmem_all = (void **)dlsym(h_memkind, "MEMKIND_DAX_KMEM_ALL");
- chk_kind(&mk_dax_kmem_all);
- mk_dax_kmem_preferred =
- (void **)dlsym(h_memkind, "MEMKIND_DAX_KMEM_PREFERRED");
- chk_kind(&mk_dax_kmem_preferred);
- KE_TRACE(25, ("__kmp_init_memkind: memkind library initialized\n"));
- return; // success
- }
- dlclose(h_memkind); // failure
- }
- #else // !(KMP_OS_UNIX && KMP_DYNAMIC_LIB && !KMP_OS_DARWIN)
- kmp_mk_lib_name = "";
- #endif // !(KMP_OS_UNIX && KMP_DYNAMIC_LIB && !KMP_OS_DARWIN)
- h_memkind = NULL;
- kmp_mk_check = NULL;
- kmp_mk_alloc = NULL;
- kmp_mk_free = NULL;
- mk_default = NULL;
- mk_interleave = NULL;
- mk_hbw = NULL;
- mk_hbw_interleave = NULL;
- mk_hbw_preferred = NULL;
- mk_hugetlb = NULL;
- mk_hbw_hugetlb = NULL;
- mk_hbw_preferred_hugetlb = NULL;
- mk_dax_kmem = NULL;
- mk_dax_kmem_all = NULL;
- mk_dax_kmem_preferred = NULL;
- }
- void __kmp_fini_memkind() {
- #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
- if (__kmp_memkind_available)
- KE_TRACE(25, ("__kmp_fini_memkind: finalize memkind library\n"));
- if (h_memkind) {
- dlclose(h_memkind);
- h_memkind = NULL;
- }
- kmp_mk_check = NULL;
- kmp_mk_alloc = NULL;
- kmp_mk_free = NULL;
- mk_default = NULL;
- mk_interleave = NULL;
- mk_hbw = NULL;
- mk_hbw_interleave = NULL;
- mk_hbw_preferred = NULL;
- mk_hugetlb = NULL;
- mk_hbw_hugetlb = NULL;
- mk_hbw_preferred_hugetlb = NULL;
- mk_dax_kmem = NULL;
- mk_dax_kmem_all = NULL;
- mk_dax_kmem_preferred = NULL;
- #endif
- }
- void __kmp_init_target_mem() {
- *(void **)(&kmp_target_alloc_host) = KMP_DLSYM("llvm_omp_target_alloc_host");
- *(void **)(&kmp_target_alloc_shared) =
- KMP_DLSYM("llvm_omp_target_alloc_shared");
- *(void **)(&kmp_target_alloc_device) =
- KMP_DLSYM("llvm_omp_target_alloc_device");
- *(void **)(&kmp_target_free) = KMP_DLSYM("omp_target_free");
- __kmp_target_mem_available = kmp_target_alloc_host &&
- kmp_target_alloc_shared &&
- kmp_target_alloc_device && kmp_target_free;
- }
- omp_allocator_handle_t __kmpc_init_allocator(int gtid, omp_memspace_handle_t ms,
- int ntraits,
- omp_alloctrait_t traits[]) {
- // OpenMP 5.0 only allows predefined memspaces
- KMP_DEBUG_ASSERT(ms == omp_default_mem_space || ms == omp_low_lat_mem_space ||
- ms == omp_large_cap_mem_space || ms == omp_const_mem_space ||
- ms == omp_high_bw_mem_space || KMP_IS_TARGET_MEM_SPACE(ms));
- kmp_allocator_t *al;
- int i;
- al = (kmp_allocator_t *)__kmp_allocate(sizeof(kmp_allocator_t)); // zeroed
- al->memspace = ms; // not used currently
- for (i = 0; i < ntraits; ++i) {
- switch (traits[i].key) {
- case omp_atk_sync_hint:
- case omp_atk_access:
- case omp_atk_pinned:
- break;
- case omp_atk_alignment:
- __kmp_type_convert(traits[i].value, &(al->alignment));
- KMP_ASSERT(IS_POWER_OF_TWO(al->alignment));
- break;
- case omp_atk_pool_size:
- al->pool_size = traits[i].value;
- break;
- case omp_atk_fallback:
- al->fb = (omp_alloctrait_value_t)traits[i].value;
- KMP_DEBUG_ASSERT(
- al->fb == omp_atv_default_mem_fb || al->fb == omp_atv_null_fb ||
- al->fb == omp_atv_abort_fb || al->fb == omp_atv_allocator_fb);
- break;
- case omp_atk_fb_data:
- al->fb_data = RCAST(kmp_allocator_t *, traits[i].value);
- break;
- case omp_atk_partition:
- al->memkind = RCAST(void **, traits[i].value);
- break;
- default:
- KMP_ASSERT2(0, "Unexpected allocator trait");
- }
- }
- if (al->fb == 0) {
- // set default allocator
- al->fb = omp_atv_default_mem_fb;
- al->fb_data = (kmp_allocator_t *)omp_default_mem_alloc;
- } else if (al->fb == omp_atv_allocator_fb) {
- KMP_ASSERT(al->fb_data != NULL);
- } else if (al->fb == omp_atv_default_mem_fb) {
- al->fb_data = (kmp_allocator_t *)omp_default_mem_alloc;
- }
- if (__kmp_memkind_available) {
- // Let's use memkind library if available
- if (ms == omp_high_bw_mem_space) {
- if (al->memkind == (void *)omp_atv_interleaved && mk_hbw_interleave) {
- al->memkind = mk_hbw_interleave;
- } else if (mk_hbw_preferred) {
- // AC: do not try to use MEMKIND_HBW for now, because memkind library
- // cannot reliably detect exhaustion of HBW memory.
- // It could be possible using hbw_verify_memory_region() but memkind
- // manual says: "Using this function in production code may result in
- // serious performance penalty".
- al->memkind = mk_hbw_preferred;
- } else {
- // HBW is requested but not available --> return NULL allocator
- __kmp_free(al);
- return omp_null_allocator;
- }
- } else if (ms == omp_large_cap_mem_space) {
- if (mk_dax_kmem_all) {
- // All pmem nodes are visited
- al->memkind = mk_dax_kmem_all;
- } else if (mk_dax_kmem) {
- // Only closest pmem node is visited
- al->memkind = mk_dax_kmem;
- } else {
- __kmp_free(al);
- return omp_null_allocator;
- }
- } else {
- if (al->memkind == (void *)omp_atv_interleaved && mk_interleave) {
- al->memkind = mk_interleave;
- } else {
- al->memkind = mk_default;
- }
- }
- } else if (KMP_IS_TARGET_MEM_SPACE(ms) && !__kmp_target_mem_available) {
- __kmp_free(al);
- return omp_null_allocator;
- } else {
- if (ms == omp_high_bw_mem_space) {
- // cannot detect HBW memory presence without memkind library
- __kmp_free(al);
- return omp_null_allocator;
- }
- }
- return (omp_allocator_handle_t)al;
- }
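- /* Hedged usage sketch: user code reaches this routine through the
-    standard OpenMP 5.0 API (the compiler lowers omp_init_allocator() to
-    __kmpc_init_allocator()); the names below are the standard omp.h ones,
-    shown for illustration only:
-
-      omp_alloctrait_t traits[] = {{omp_atk_alignment, 64},
-                                   {omp_atk_fallback, omp_atv_null_fb}};
-      omp_allocator_handle_t a =
-          omp_init_allocator(omp_high_bw_mem_space, 2, traits);
-      if (a != omp_null_allocator) { // NULL when HBW memory is unavailable
-        double *v = (double *)omp_alloc(1024 * sizeof(double), a);
-        // ... use v; omp_atv_null_fb means v may be NULL on exhaustion
-        omp_free(v, a);
-        omp_destroy_allocator(a);
-      }
- */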
- void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t allocator) {
- if (allocator > kmp_max_mem_alloc)
- __kmp_free(allocator);
- }
- void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t allocator) {
- if (allocator == omp_null_allocator)
- allocator = omp_default_mem_alloc;
- __kmp_threads[gtid]->th.th_def_allocator = allocator;
- }
- omp_allocator_handle_t __kmpc_get_default_allocator(int gtid) {
- return __kmp_threads[gtid]->th.th_def_allocator;
- }
- typedef struct kmp_mem_desc { // Memory block descriptor
- void *ptr_alloc; // Pointer returned by allocator
- size_t size_a; // Size of allocated memory block (initial+descriptor+align)
- size_t size_orig; // Original size requested
- void *ptr_align; // Pointer to aligned memory, returned
- kmp_allocator_t *allocator; // allocator
- } kmp_mem_desc_t;
- static int alignment = sizeof(void *); // align to pointer size by default
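- // Intended layout (an assumption based on the fields above and on
- // size_a = size + sizeof(kmp_mem_desc_t) + align computed in
- // __kmp_alloc() below): the descriptor is expected to sit in the bytes
- // immediately below ptr_align, so a free can step back from the user
- // pointer to recover ptr_alloc, size_a, and the owning allocator:
- //   ptr_alloc: [ pad ][ kmp_mem_desc_t ][ user data (ptr_align) ... ]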
- // external interfaces are wrappers over internal implementation
- void *__kmpc_alloc(int gtid, size_t size, omp_allocator_handle_t allocator) {
- KE_TRACE(25, ("__kmpc_alloc: T#%d (%d, %p)\n", gtid, (int)size, allocator));
- void *ptr = __kmp_alloc(gtid, 0, size, allocator);
- KE_TRACE(25, ("__kmpc_alloc returns %p, T#%d\n", ptr, gtid));
- return ptr;
- }
- void *__kmpc_aligned_alloc(int gtid, size_t algn, size_t size,
- omp_allocator_handle_t allocator) {
- KE_TRACE(25, ("__kmpc_aligned_alloc: T#%d (%d, %d, %p)\n", gtid, (int)algn,
- (int)size, allocator));
- void *ptr = __kmp_alloc(gtid, algn, size, allocator);
- KE_TRACE(25, ("__kmpc_aligned_alloc returns %p, T#%d\n", ptr, gtid));
- return ptr;
- }
- void *__kmpc_calloc(int gtid, size_t nmemb, size_t size,
- omp_allocator_handle_t allocator) {
- KE_TRACE(25, ("__kmpc_calloc: T#%d (%d, %d, %p)\n", gtid, (int)nmemb,
- (int)size, allocator));
- void *ptr = __kmp_calloc(gtid, 0, nmemb, size, allocator);
- KE_TRACE(25, ("__kmpc_calloc returns %p, T#%d\n", ptr, gtid));
- return ptr;
- }
- void *__kmpc_realloc(int gtid, void *ptr, size_t size,
- omp_allocator_handle_t allocator,
- omp_allocator_handle_t free_allocator) {
- KE_TRACE(25, ("__kmpc_realloc: T#%d (%p, %d, %p, %p)\n", gtid, ptr, (int)size,
- allocator, free_allocator));
- void *nptr = __kmp_realloc(gtid, ptr, size, allocator, free_allocator);
- KE_TRACE(25, ("__kmpc_realloc returns %p, T#%d\n", nptr, gtid));
- return nptr;
- }
- void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t allocator) {
- KE_TRACE(25, ("__kmpc_free: T#%d free(%p,%p)\n", gtid, ptr, allocator));
- ___kmpc_free(gtid, ptr, allocator);
- KE_TRACE(10, ("__kmpc_free: T#%d freed %p (%p)\n", gtid, ptr, allocator));
- return;
- }
- // internal implementation, called from inside the library
- void *__kmp_alloc(int gtid, size_t algn, size_t size,
- omp_allocator_handle_t allocator) {
- void *ptr = NULL;
- kmp_allocator_t *al;
- KMP_DEBUG_ASSERT(__kmp_init_serial);
- if (size == 0)
- return NULL;
- if (allocator == omp_null_allocator)
- allocator = __kmp_threads[gtid]->th.th_def_allocator;
- al = RCAST(kmp_allocator_t *, allocator);
- int sz_desc = sizeof(kmp_mem_desc_t);
- kmp_mem_desc_t desc;
- kmp_uintptr_t addr; // address returned by allocator
- kmp_uintptr_t addr_align; // address to return to caller
- kmp_uintptr_t addr_descr; // address of memory block descriptor
- size_t align = alignment; // default alignment
- if (allocator > kmp_max_mem_alloc && al->alignment > align)
- align = al->alignment; // alignment required by allocator trait
- if (align < algn)
- align = algn; // max of allocator trait, parameter and sizeof(void*)
- desc.size_orig = size;
- desc.size_a = size + sz_desc + align;
- if (__kmp_memkind_available) {
- if (allocator < kmp_max_mem_alloc) {
- // pre-defined allocator
- if (allocator == omp_high_bw_mem_alloc && mk_hbw_preferred) {
- ptr = kmp_mk_alloc(*mk_hbw_preferred, desc.size_a);
- } else if (allocator == omp_large_cap_mem_alloc && mk_dax_kmem_all) {
- ptr = kmp_mk_alloc(*mk_dax_kmem_all, desc.size_a);
- } else {
- ptr = kmp_mk_alloc(*mk_default, desc.size_a);
- }
- } else if (al->pool_size > 0) {
- // custom allocator with pool size requested
- kmp_uint64 used =
- KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a);
- if (used + desc.size_a > al->pool_size) {
- // not enough space, need to go fallback path
- KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
- if (al->fb == omp_atv_default_mem_fb) {
- al = (kmp_allocator_t *)omp_default_mem_alloc;
- ptr = kmp_mk_alloc(*mk_default, desc.size_a);
- } else if (al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0); // abort fallback requested
- } else if (al->fb == omp_atv_allocator_fb) {
- KMP_ASSERT(al != al->fb_data);
- al = al->fb_data;
- return __kmp_alloc(gtid, algn, size, (omp_allocator_handle_t)al);
- } // else ptr == NULL;
- } else {
- // pool has enough space
- ptr = kmp_mk_alloc(*al->memkind, desc.size_a);
- if (ptr == NULL) {
- if (al->fb == omp_atv_default_mem_fb) {
- al = (kmp_allocator_t *)omp_default_mem_alloc;
- ptr = kmp_mk_alloc(*mk_default, desc.size_a);
- } else if (al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0); // abort fallback requested
- } else if (al->fb == omp_atv_allocator_fb) {
- KMP_ASSERT(al != al->fb_data);
- al = al->fb_data;
- return __kmp_alloc(gtid, algn, size, (omp_allocator_handle_t)al);
- }
- }
- }
- } else {
- // custom allocator, pool size not requested
- ptr = kmp_mk_alloc(*al->memkind, desc.size_a);
- if (ptr == NULL) {
- if (al->fb == omp_atv_default_mem_fb) {
- al = (kmp_allocator_t *)omp_default_mem_alloc;
- ptr = kmp_mk_alloc(*mk_default, desc.size_a);
- } else if (al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0); // abort fallback requested
- } else if (al->fb == omp_atv_allocator_fb) {
- KMP_ASSERT(al != al->fb_data);
- al = al->fb_data;
- return __kmp_alloc(gtid, algn, size, (omp_allocator_handle_t)al);
- }
- }
- }
- } else if (allocator < kmp_max_mem_alloc) {
- if (KMP_IS_TARGET_MEM_ALLOC(allocator)) {
- // Use size input directly as the memory may not be accessible on host.
- // Use default device for now.
- if (__kmp_target_mem_available) {
- kmp_int32 device =
- __kmp_threads[gtid]->th.th_current_task->td_icvs.default_device;
- if (allocator == llvm_omp_target_host_mem_alloc)
- ptr = kmp_target_alloc_host(size, device);
- else if (allocator == llvm_omp_target_shared_mem_alloc)
- ptr = kmp_target_alloc_shared(size, device);
- else // allocator == llvm_omp_target_device_mem_alloc
- ptr = kmp_target_alloc_device(size, device);
- }
- return ptr;
- }
- // pre-defined allocator
- if (allocator == omp_high_bw_mem_alloc) {
- // high bandwidth memory is unavailable without memkind; ptr stays NULL
- } else if (allocator == omp_large_cap_mem_alloc) {
- // large capacity memory is unavailable without memkind; ptr stays NULL
- // (should this emit a warning?)
- } else {
- ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
- }
- } else if (KMP_IS_TARGET_MEM_SPACE(al->memspace)) {
- if (__kmp_target_mem_available) {
- kmp_int32 device =
- __kmp_threads[gtid]->th.th_current_task->td_icvs.default_device;
- if (al->memspace == llvm_omp_target_host_mem_space)
- ptr = kmp_target_alloc_host(size, device);
- else if (al->memspace == llvm_omp_target_shared_mem_space)
- ptr = kmp_target_alloc_shared(size, device);
- else // al->memspace == llvm_omp_target_device_mem_space
- ptr = kmp_target_alloc_device(size, device);
- }
- return ptr;
- } else if (al->pool_size > 0) {
- // custom allocator with pool size requested
- kmp_uint64 used =
- KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a);
- if (used + desc.size_a > al->pool_size) {
- // not enough space, need to go fallback path
- KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
- if (al->fb == omp_atv_default_mem_fb) {
- al = (kmp_allocator_t *)omp_default_mem_alloc;
- ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
- } else if (al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0); // abort fallback requested
- } else if (al->fb == omp_atv_allocator_fb) {
- KMP_ASSERT(al != al->fb_data);
- al = al->fb_data;
- return __kmp_alloc(gtid, algn, size, (omp_allocator_handle_t)al);
- } // else ptr == NULL;
- } else {
- // pool has enough space
- ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
- if (ptr == NULL && al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0); // abort fallback requested
- } // no point trying another fallback: it would reuse the same internal alloc
- }
- } else {
- // custom allocator, pool size not requested
- ptr = __kmp_thread_malloc(__kmp_thread_from_gtid(gtid), desc.size_a);
- if (ptr == NULL && al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0); // abort fallback requested
- } // no point trying another fallback: it would reuse the same internal alloc
- }
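- /* The pool bookkeeping above is a speculative reserve/rollback pattern:
-    optimistically fetch-and-add the request size to pool_used, then subtract
-    it back if the addition overshot pool_size. A minimal stand-alone sketch
-    of the same idea (hypothetical names; C11 atomics stand in for the
-    KMP_TEST_THEN_ADD64 wrapper, which likewise returns the previous value):
-      #include <stdatomic.h>
-      static _Atomic unsigned long long pool_used;
-      static unsigned long long pool_size;
-      int try_reserve(unsigned long long sz) {
-        unsigned long long prev = atomic_fetch_add(&pool_used, sz);
-        if (prev + sz > pool_size) {    // overshot: roll the addition back
-          atomic_fetch_sub(&pool_used, sz);
-          return 0;                     // caller takes the fallback path
-        }
-        return 1;                       // reservation holds
-      } */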
- KE_TRACE(10, ("__kmp_alloc: T#%d %p=alloc(%d)\n", gtid, ptr, desc.size_a));
- if (ptr == NULL)
- return NULL;
- addr = (kmp_uintptr_t)ptr;
- addr_align = (addr + sz_desc + align - 1) & ~(align - 1);
- addr_descr = addr_align - sz_desc;
- desc.ptr_alloc = ptr;
- desc.ptr_align = (void *)addr_align;
- desc.allocator = al;
- *((kmp_mem_desc_t *)addr_descr) = desc; // save descriptor contents
- KMP_MB();
- return desc.ptr_align;
- }
- void *__kmp_calloc(int gtid, size_t algn, size_t nmemb, size_t size,
- omp_allocator_handle_t allocator) {
- void *ptr = NULL;
- kmp_allocator_t *al;
- KMP_DEBUG_ASSERT(__kmp_init_serial);
- if (allocator == omp_null_allocator)
- allocator = __kmp_threads[gtid]->th.th_def_allocator;
- al = RCAST(kmp_allocator_t *, allocator);
- if (nmemb == 0 || size == 0)
- return ptr;
- if ((SIZE_MAX - sizeof(kmp_mem_desc_t)) / size < nmemb) {
- if (al->fb == omp_atv_abort_fb) {
- KMP_ASSERT(0);
- }
- return ptr;
- }
- ptr = __kmp_alloc(gtid, algn, nmemb * size, allocator);
- if (ptr) {
- memset(ptr, 0x00, nmemb * size);
- }
- return ptr;
- }
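- /* The division-based guard above rejects nmemb * size products that would
-    overflow size_t once the hidden descriptor is added, without performing
-    the multiplication. Illustrative case on a 64-bit target: nmemb ==
-    1ULL << 32 and size == 1ULL << 32 would wrap nmemb * size to 0, but
-    (SIZE_MAX - sizeof(kmp_mem_desc_t)) / size evaluates to 2^32 - 1, which
-    is < nmemb, so the call returns NULL (or aborts under omp_atv_abort_fb)
-    before the bogus multiply can happen. */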
- void *__kmp_realloc(int gtid, void *ptr, size_t size,
- omp_allocator_handle_t allocator,
- omp_allocator_handle_t free_allocator) {
- void *nptr = NULL;
- KMP_DEBUG_ASSERT(__kmp_init_serial);
- if (size == 0) {
- if (ptr != NULL)
- ___kmpc_free(gtid, ptr, free_allocator);
- return nptr;
- }
- nptr = __kmp_alloc(gtid, 0, size, allocator);
- if (nptr != NULL && ptr != NULL) {
- kmp_mem_desc_t desc;
- kmp_uintptr_t addr_align; // address to return to caller
- kmp_uintptr_t addr_descr; // address of memory block descriptor
- addr_align = (kmp_uintptr_t)ptr;
- addr_descr = addr_align - sizeof(kmp_mem_desc_t);
- desc = *((kmp_mem_desc_t *)addr_descr); // read descriptor
- KMP_DEBUG_ASSERT(desc.ptr_align == ptr);
- KMP_DEBUG_ASSERT(desc.size_orig > 0);
- KMP_DEBUG_ASSERT(desc.size_orig < desc.size_a);
- KMP_MEMCPY((char *)nptr, (char *)ptr,
- (size_t)((size < desc.size_orig) ? size : desc.size_orig));
- }
- if (nptr != NULL) {
- ___kmpc_free(gtid, ptr, free_allocator);
- }
- return nptr;
- }
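- /* Note the copy length above: min(size, desc.size_orig), so growing a block
-    copies only the old contents and shrinking copies only what still fits,
-    mirroring C realloc() semantics. Hedged example (values invented):
-      void *p = __kmp_alloc(gtid, 0, 100, a);  // 100-byte block
-      p = __kmp_realloc(gtid, p, 10, a, a);    // only 10 bytes are copied */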
- void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t allocator) {
- if (ptr == NULL)
- return;
- kmp_allocator_t *al;
- omp_allocator_handle_t oal;
- al = RCAST(kmp_allocator_t *, CCAST(omp_allocator_handle_t, allocator));
- kmp_mem_desc_t desc;
- kmp_uintptr_t addr_align; // address to return to caller
- kmp_uintptr_t addr_descr; // address of memory block descriptor
- if (KMP_IS_TARGET_MEM_ALLOC(allocator) ||
- (allocator > kmp_max_mem_alloc &&
- KMP_IS_TARGET_MEM_SPACE(al->memspace))) {
- KMP_DEBUG_ASSERT(kmp_target_free);
- kmp_int32 device =
- __kmp_threads[gtid]->th.th_current_task->td_icvs.default_device;
- kmp_target_free(ptr, device);
- return;
- }
- addr_align = (kmp_uintptr_t)ptr;
- addr_descr = addr_align - sizeof(kmp_mem_desc_t);
- desc = *((kmp_mem_desc_t *)addr_descr); // read descriptor
- KMP_DEBUG_ASSERT(desc.ptr_align == ptr);
- if (allocator) {
- KMP_DEBUG_ASSERT(desc.allocator == al || desc.allocator == al->fb_data);
- }
- al = desc.allocator;
- oal = (omp_allocator_handle_t)al; // cast to void* for comparisons
- KMP_DEBUG_ASSERT(al);
- if (__kmp_memkind_available) {
- if (oal < kmp_max_mem_alloc) {
- // pre-defined allocator
- if (oal == omp_high_bw_mem_alloc && mk_hbw_preferred) {
- kmp_mk_free(*mk_hbw_preferred, desc.ptr_alloc);
- } else if (oal == omp_large_cap_mem_alloc && mk_dax_kmem_all) {
- kmp_mk_free(*mk_dax_kmem_all, desc.ptr_alloc);
- } else {
- kmp_mk_free(*mk_default, desc.ptr_alloc);
- }
- } else {
- if (al->pool_size > 0) { // custom allocator with pool size requested
- kmp_uint64 used =
- KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
- (void)used; // to suppress compiler warning
- KMP_DEBUG_ASSERT(used >= desc.size_a);
- }
- kmp_mk_free(*al->memkind, desc.ptr_alloc);
- }
- } else {
- if (oal > kmp_max_mem_alloc && al->pool_size > 0) {
- kmp_uint64 used =
- KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);
- (void)used; // to suppress compiler warning
- KMP_DEBUG_ASSERT(used >= desc.size_a);
- }
- __kmp_thread_free(__kmp_thread_from_gtid(gtid), desc.ptr_alloc);
- }
- }
- /* If LEAK_MEMORY is defined, __kmp_free() will *not* free memory. It causes
- memory leaks, but it may be useful for debugging memory corruption, use of
- freed pointers, etc. */
- /* #define LEAK_MEMORY */
- struct kmp_mem_descr { // Memory block descriptor.
- void *ptr_allocated; // Pointer returned by malloc(), subject for free().
- size_t size_allocated; // Size of allocated memory block.
- void *ptr_aligned; // Pointer to aligned memory, to be used by client code.
- size_t size_aligned; // Size of aligned memory block.
- };
- typedef struct kmp_mem_descr kmp_mem_descr_t;
- /* Allocate memory on requested boundary, fill allocated memory with 0x00.
- NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
- error. Must use __kmp_free when freeing memory allocated by this routine! */
- static void *___kmp_allocate_align(size_t size,
- size_t alignment KMP_SRC_LOC_DECL) {
- /* __kmp_allocate() allocates (by call to malloc()) bigger memory block than
- requested to return properly aligned pointer. Original pointer returned
- by malloc() and size of allocated block is saved in descriptor just
- before the aligned pointer. This information used by __kmp_free() -- it
- has to pass to free() original pointer, not aligned one.
- +---------+------------+-----------------------------------+---------+
- | padding | descriptor | aligned block | padding |
- +---------+------------+-----------------------------------+---------+
- ^ ^
- | |
- | +- Aligned pointer returned to caller
- +- Pointer returned by malloc()
- Aligned block is filled with zeros, paddings are filled with 0xEF. */
- kmp_mem_descr_t descr;
- kmp_uintptr_t addr_allocated; // Address returned by malloc().
- kmp_uintptr_t addr_aligned; // Aligned address to return to caller.
- kmp_uintptr_t addr_descr; // Address of memory block descriptor.
- KE_TRACE(25, ("-> ___kmp_allocate_align( %d, %d ) called from %s:%d\n",
- (int)size, (int)alignment KMP_SRC_LOC_PARM));
- KMP_DEBUG_ASSERT(alignment < 32 * 1024); // Alignment should not be too big.
- KMP_DEBUG_ASSERT(sizeof(void *) <= sizeof(kmp_uintptr_t));
- // Make sure kmp_uintptr_t is enough to store addresses.
- descr.size_aligned = size;
- descr.size_allocated =
- descr.size_aligned + sizeof(kmp_mem_descr_t) + alignment;
- #if KMP_DEBUG
- descr.ptr_allocated = _malloc_src_loc(descr.size_allocated, _file_, _line_);
- #else
- descr.ptr_allocated = malloc_src_loc(descr.size_allocated KMP_SRC_LOC_PARM);
- #endif
- __lsan_ignore_object(descr.ptr_allocated); // asan considers descr.ptr_allocated
- // leaked because of the address alignment arithmetic below
- KE_TRACE(10, (" malloc( %d ) returned %p\n", (int)descr.size_allocated,
- descr.ptr_allocated));
- if (descr.ptr_allocated == NULL) {
- KMP_FATAL(OutOfHeapMemory);
- }
- addr_allocated = (kmp_uintptr_t)descr.ptr_allocated;
- addr_aligned =
- (addr_allocated + sizeof(kmp_mem_descr_t) + alignment) & ~(alignment - 1);
- addr_descr = addr_aligned - sizeof(kmp_mem_descr_t);
- descr.ptr_aligned = (void *)addr_aligned;
- KE_TRACE(26, (" ___kmp_allocate_align: "
- "ptr_allocated=%p, size_allocated=%d, "
- "ptr_aligned=%p, size_aligned=%d\n",
- descr.ptr_allocated, (int)descr.size_allocated,
- descr.ptr_aligned, (int)descr.size_aligned));
- KMP_DEBUG_ASSERT(addr_allocated <= addr_descr);
- KMP_DEBUG_ASSERT(addr_descr + sizeof(kmp_mem_descr_t) == addr_aligned);
- KMP_DEBUG_ASSERT(addr_aligned + descr.size_aligned <=
- addr_allocated + descr.size_allocated);
- KMP_DEBUG_ASSERT(addr_aligned % alignment == 0);
- #ifdef KMP_DEBUG
- memset(descr.ptr_allocated, 0xEF, descr.size_allocated);
- // Fill allocated memory block with 0xEF.
- #endif
- memset(descr.ptr_aligned, 0x00, descr.size_aligned);
- // Fill the aligned memory block (which is intended for use by the caller)
- // with 0x00. Do not put this filling under a KMP_DEBUG condition! Many
- // callers expect zeroed memory. (Padding bytes remain filled with 0xEF in
- // the debug library.)
- *((kmp_mem_descr_t *)addr_descr) = descr;
- KMP_MB();
- KE_TRACE(25, ("<- ___kmp_allocate_align() returns %p\n", descr.ptr_aligned));
- return descr.ptr_aligned;
- } // func ___kmp_allocate_align
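- /* Worked example (illustrative numbers): suppose malloc() returns 0x1008,
-    alignment == 64 and sizeof(kmp_mem_descr_t) == 32. Then
-      addr_aligned = (0x1008 + 32 + 64) & ~63 = 0x1068 & ~63 = 0x1040
-      addr_descr   = 0x1040 - 32 = 0x1020
-    so the descriptor sits immediately below the aligned pointer and both fit
-    inside the over-allocated block. Adding the full alignment (rather than
-    alignment - 1 as __kmp_alloc does) guarantees at least one byte of 0xEF
-    padding in front of the descriptor in debug builds. */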
- /* Allocate memory on cache line boundary, fill allocated memory with 0x00.
- Do not call this func directly! Use __kmp_allocate macro instead.
- NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
- error. Must use __kmp_free when freeing memory allocated by this routine! */
- void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL) {
- void *ptr;
- KE_TRACE(25, ("-> __kmp_allocate( %d ) called from %s:%d\n",
- (int)size KMP_SRC_LOC_PARM));
- ptr = ___kmp_allocate_align(size, __kmp_align_alloc KMP_SRC_LOC_PARM);
- KE_TRACE(25, ("<- __kmp_allocate() returns %p\n", ptr));
- return ptr;
- } // func ___kmp_allocate
- /* Allocate memory on page boundary, fill allocated memory with 0x00.
- Do not call this func directly! Use __kmp_page_allocate macro instead.
- NULL is NEVER returned, __kmp_abort() is called in case of memory allocation
- error. Must use __kmp_free when freeing memory allocated by this routine! */
- void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL) {
- int page_size = 8 * 1024;
- void *ptr;
- KE_TRACE(25, ("-> __kmp_page_allocate( %d ) called from %s:%d\n",
- (int)size KMP_SRC_LOC_PARM));
- ptr = ___kmp_allocate_align(size, page_size KMP_SRC_LOC_PARM);
- KE_TRACE(25, ("<- __kmp_page_allocate( %d ) returns %p\n", (int)size, ptr));
- return ptr;
- } // ___kmp_page_allocate
- /* Free memory allocated by __kmp_allocate() and __kmp_page_allocate().
- In debug mode, fill the memory block with 0xEF before call to free(). */
- void ___kmp_free(void *ptr KMP_SRC_LOC_DECL) {
- kmp_mem_descr_t descr;
- #if KMP_DEBUG
- kmp_uintptr_t addr_allocated; // Address returned by malloc().
- kmp_uintptr_t addr_aligned; // Aligned address passed by caller.
- #endif
- KE_TRACE(25,
- ("-> __kmp_free( %p ) called from %s:%d\n", ptr KMP_SRC_LOC_PARM));
- KMP_ASSERT(ptr != NULL);
- descr = *(kmp_mem_descr_t *)((kmp_uintptr_t)ptr - sizeof(kmp_mem_descr_t));
- KE_TRACE(26, (" __kmp_free: "
- "ptr_allocated=%p, size_allocated=%d, "
- "ptr_aligned=%p, size_aligned=%d\n",
- descr.ptr_allocated, (int)descr.size_allocated,
- descr.ptr_aligned, (int)descr.size_aligned));
- #if KMP_DEBUG
- addr_allocated = (kmp_uintptr_t)descr.ptr_allocated;
- addr_aligned = (kmp_uintptr_t)descr.ptr_aligned;
- KMP_DEBUG_ASSERT(addr_aligned % CACHE_LINE == 0);
- KMP_DEBUG_ASSERT(descr.ptr_aligned == ptr);
- KMP_DEBUG_ASSERT(addr_allocated + sizeof(kmp_mem_descr_t) <= addr_aligned);
- KMP_DEBUG_ASSERT(descr.size_aligned < descr.size_allocated);
- KMP_DEBUG_ASSERT(addr_aligned + descr.size_aligned <=
- addr_allocated + descr.size_allocated);
- memset(descr.ptr_allocated, 0xEF, descr.size_allocated);
- // Fill memory block with 0xEF, it helps catch using freed memory.
- #endif
- #ifndef LEAK_MEMORY
- KE_TRACE(10, (" free( %p )\n", descr.ptr_allocated));
- #ifdef KMP_DEBUG
- _free_src_loc(descr.ptr_allocated, _file_, _line_);
- #else
- free_src_loc(descr.ptr_allocated KMP_SRC_LOC_PARM);
- #endif
- #endif
- KMP_MB();
- KE_TRACE(25, ("<- __kmp_free() returns\n"));
- } // func ___kmp_free
- #if USE_FAST_MEMORY == 3
- // Allocate fast memory by first scanning the thread's free lists
- // If a chunk the right size exists, grab it off the free list.
- // Otherwise allocate normally using kmp_thread_malloc.
- // AC: How to choose the limit? Just get 16 for now...
- #define KMP_FREE_LIST_LIMIT 16
- // Always use 128 bytes for determining buckets for caching memory blocks
- #define DCACHE_LINE 128
- void *___kmp_fast_allocate(kmp_info_t *this_thr, size_t size KMP_SRC_LOC_DECL) {
- void *ptr;
- size_t num_lines, idx;
- int index;
- void *alloc_ptr;
- size_t alloc_size;
- kmp_mem_descr_t *descr;
- KE_TRACE(25, ("-> __kmp_fast_allocate( T#%d, %d ) called from %s:%d\n",
- __kmp_gtid_from_thread(this_thr), (int)size KMP_SRC_LOC_PARM));
- num_lines = (size + DCACHE_LINE - 1) / DCACHE_LINE;
- idx = num_lines - 1;
- KMP_DEBUG_ASSERT(idx >= 0);
- if (idx < 2) {
- index = 0; // idx is [ 0, 1 ], use first free list
- num_lines = 2; // 1, 2 cache lines or less than cache line
- } else if ((idx >>= 2) == 0) {
- index = 1; // idx is [ 2, 3 ], use second free list
- num_lines = 4; // 3, 4 cache lines
- } else if ((idx >>= 2) == 0) {
- index = 2; // idx is [ 4, 15 ], use third free list
- num_lines = 16; // 5, 6, ..., 16 cache lines
- } else if ((idx >>= 2) == 0) {
- index = 3; // idx is [ 16, 63 ], use fourth free list
- num_lines = 64; // 17, 18, ..., 64 cache lines
- } else {
- goto alloc_call; // 65 or more cache lines ( > 8KB ), don't use free lists
- }
- ptr = this_thr->th.th_free_lists[index].th_free_list_self;
- if (ptr != NULL) {
- // pop the head of no-sync free list
- this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr);
- KMP_DEBUG_ASSERT(this_thr == ((kmp_mem_descr_t *)((kmp_uintptr_t)ptr -
- sizeof(kmp_mem_descr_t)))
- ->ptr_aligned);
- goto end;
- }
- ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
- if (ptr != NULL) {
- // no-sync free list is empty, use sync free list (filled in by other
- // threads only)
- // pop the head of the sync free list, push NULL instead
- while (!KMP_COMPARE_AND_STORE_PTR(
- &this_thr->th.th_free_lists[index].th_free_list_sync, ptr, nullptr)) {
- KMP_CPU_PAUSE();
- ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
- }
- // push the rest of the chain onto the no-sync free list (may be NULL if
- // there was only one block)
- this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr);
- KMP_DEBUG_ASSERT(this_thr == ((kmp_mem_descr_t *)((kmp_uintptr_t)ptr -
- sizeof(kmp_mem_descr_t)))
- ->ptr_aligned);
- goto end;
- }
- alloc_call:
- // haven't found block in the free lists, thus allocate it
- size = num_lines * DCACHE_LINE;
- alloc_size = size + sizeof(kmp_mem_descr_t) + DCACHE_LINE;
- KE_TRACE(25, ("__kmp_fast_allocate: T#%d Calling __kmp_thread_malloc with "
- "alloc_size %d\n",
- __kmp_gtid_from_thread(this_thr), alloc_size));
- alloc_ptr = bget(this_thr, (bufsize)alloc_size);
- // align ptr to DCACHE_LINE
- ptr = (void *)((((kmp_uintptr_t)alloc_ptr) + sizeof(kmp_mem_descr_t) +
- DCACHE_LINE) &
- ~(DCACHE_LINE - 1));
- descr = (kmp_mem_descr_t *)(((kmp_uintptr_t)ptr) - sizeof(kmp_mem_descr_t));
- descr->ptr_allocated = alloc_ptr; // remember allocated pointer
- // we don't need size_allocated
- descr->ptr_aligned = (void *)this_thr; // remember allocating thread
- // (it is already saved in the bget buffer, but we may want to use another
- // allocator in the future)
- descr->size_aligned = size;
- end:
- KE_TRACE(25, ("<- __kmp_fast_allocate( T#%d ) returns %p\n",
- __kmp_gtid_from_thread(this_thr), ptr));
- return ptr;
- } // func __kmp_fast_allocate
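- /* Bucket selection above, summarized (DCACHE_LINE == 128 bytes;
-    illustrative table, derived from the shifts in the code):
-      requested size     index   block handed out
-      1 ... 256          0       256 B (2 lines)
-      257 ... 512        1       512 B (4 lines)
-      513 ... 2048       2       2 KB  (16 lines)
-      2049 ... 8192      3       8 KB  (64 lines)
-      > 8192             none    exact bget() allocation, no free list
-    Rounding every request up to one of four fixed block sizes is what lets
-    ___kmp_fast_free() recover the bucket index from size_aligned alone. */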
- // Free fast memory and place it on the thread's free list if it is of
- // the correct size.
- void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL) {
- kmp_mem_descr_t *descr;
- kmp_info_t *alloc_thr;
- size_t size;
- size_t idx;
- int index;
- KE_TRACE(25, ("-> __kmp_fast_free( T#%d, %p ) called from %s:%d\n",
- __kmp_gtid_from_thread(this_thr), ptr KMP_SRC_LOC_PARM));
- KMP_ASSERT(ptr != NULL);
- descr = (kmp_mem_descr_t *)(((kmp_uintptr_t)ptr) - sizeof(kmp_mem_descr_t));
- KE_TRACE(26, (" __kmp_fast_free: size_aligned=%d\n",
- (int)descr->size_aligned));
- size = descr->size_aligned; // 2, 4, 16, 64, 65, 66, ... cache lines
- idx = DCACHE_LINE * 2; // 2 cache lines is minimal size of block
- if (idx == size) {
- index = 0; // 2 cache lines
- } else if ((idx <<= 1) == size) {
- index = 1; // 4 cache lines
- } else if ((idx <<= 2) == size) {
- index = 2; // 16 cache lines
- } else if ((idx <<= 2) == size) {
- index = 3; // 64 cache lines
- } else {
- KMP_DEBUG_ASSERT(size > DCACHE_LINE * 64);
- goto free_call; // 65 or more cache lines ( > 8KB )
- }
- alloc_thr = (kmp_info_t *)descr->ptr_aligned; // get thread owning the block
- if (alloc_thr == this_thr) {
- // push block to self no-sync free list, linking previous head (LIFO)
- *((void **)ptr) = this_thr->th.th_free_lists[index].th_free_list_self;
- this_thr->th.th_free_lists[index].th_free_list_self = ptr;
- } else {
- void *head = this_thr->th.th_free_lists[index].th_free_list_other;
- if (head == NULL) {
- // Create new free list
- this_thr->th.th_free_lists[index].th_free_list_other = ptr;
- *((void **)ptr) = NULL; // mark the tail of the list
- descr->size_allocated = (size_t)1; // head of the list keeps its length
- } else {
- // check the existing "other" list's owning thread and the queue size
- kmp_mem_descr_t *dsc =
- (kmp_mem_descr_t *)((char *)head - sizeof(kmp_mem_descr_t));
- // allocating thread, same for all queue nodes
- kmp_info_t *q_th = (kmp_info_t *)(dsc->ptr_aligned);
- size_t q_sz =
- dsc->size_allocated + 1; // new queue size if we add the current block
- if (q_th == alloc_thr && q_sz <= KMP_FREE_LIST_LIMIT) {
- // we can add the current block to the "other" list, no sync needed
- *((void **)ptr) = head;
- descr->size_allocated = q_sz;
- this_thr->th.th_free_lists[index].th_free_list_other = ptr;
- } else {
- // either the queued blocks' owner differs from this block's owner or the
- // size limit was exceeded; return the old queue to its allocating thread
- // (q_th) synchronously and start a new list for alloc_thr's blocks
- void *old_ptr;
- void *tail = head;
- void *next = *((void **)head);
- while (next != NULL) {
- KMP_DEBUG_ASSERT(
- // queue size should decrease by 1 each step through the list
- ((kmp_mem_descr_t *)((char *)next - sizeof(kmp_mem_descr_t)))
- ->size_allocated +
- 1 ==
- ((kmp_mem_descr_t *)((char *)tail - sizeof(kmp_mem_descr_t)))
- ->size_allocated);
- tail = next; // remember tail node
- next = *((void **)next);
- }
- KMP_DEBUG_ASSERT(q_th != NULL);
- // push block to owner's sync free list
- old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
- /* the next pointer must be set before setting free_list to ptr to avoid
- exposing a broken list to other threads, even for an instant. */
- *((void **)tail) = old_ptr;
- while (!KMP_COMPARE_AND_STORE_PTR(
- &q_th->th.th_free_lists[index].th_free_list_sync, old_ptr, head)) {
- KMP_CPU_PAUSE();
- old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
- *((void **)tail) = old_ptr;
- }
- // start a new list of not-self blocks
- this_thr->th.th_free_lists[index].th_free_list_other = ptr;
- *((void **)ptr) = NULL;
- descr->size_allocated = (size_t)1; // head of queue keeps its length
- }
- }
- }
- goto end;
- free_call:
- KE_TRACE(25, ("__kmp_fast_free: T#%d Calling __kmp_thread_free for size %d\n",
- __kmp_gtid_from_thread(this_thr), size));
- __kmp_bget_dequeue(this_thr); /* Release any queued buffers */
- brel(this_thr, descr->ptr_allocated);
- end:
- KE_TRACE(25, ("<- __kmp_fast_free() returns\n"));
- } // func __kmp_fast_free
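- /* The cross-thread hand-off above is a lock-free LIFO push: link the
-    sublist's tail to the current head, then CAS the head pointer; on
-    failure, re-read and re-link. A minimal stand-alone sketch of the push
-    side (hypothetical names; C11 atomics stand in for TCR_PTR and
-    KMP_COMPARE_AND_STORE_PTR):
-      #include <stdatomic.h>
-      typedef struct node { struct node *next; } node_t;
-      static _Atomic(node_t *) free_list_sync;
-      void push_chain(node_t *head, node_t *tail) {
-        node_t *old = atomic_load(&free_list_sync);
-        do { // link before publishing so no thread ever sees a broken list
-          tail->next = old;
-        } while (!atomic_compare_exchange_weak(&free_list_sync, &old, head));
-      }
-    ABA is harmless here because the consuming (owning) thread detaches the
-    whole list at once in ___kmp_fast_allocate() rather than popping nodes
-    one at a time. */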
- // Initialize the thread free lists related to fast memory
- // Only do this when a thread is initially created.
- void __kmp_initialize_fast_memory(kmp_info_t *this_thr) {
- KE_TRACE(10, ("__kmp_initialize_fast_memory: Called from th %p\n", this_thr));
- memset(this_thr->th.th_free_lists, 0, NUM_LISTS * sizeof(kmp_free_list_t));
- }
- // Free the memory in the thread free lists related to fast memory
- // Only do this when a thread is being reaped (destroyed).
- void __kmp_free_fast_memory(kmp_info_t *th) {
- // Suppose we use BGET underlying allocator, walk through its structures...
- int bin;
- thr_data_t *thr = get_thr_data(th);
- void **lst = NULL;
- KE_TRACE(
- 5, ("__kmp_free_fast_memory: Called T#%d\n", __kmp_gtid_from_thread(th)));
- __kmp_bget_dequeue(th); // Release any queued buffers
- // Dig through free lists and extract all allocated blocks
- for (bin = 0; bin < MAX_BGET_BINS; ++bin) {
- bfhead_t *b = thr->freelist[bin].ql.flink;
- while (b != &thr->freelist[bin]) {
- if ((kmp_uintptr_t)b->bh.bb.bthr & 1) { // low bit set: buffer is allocated
- *((void **)b) =
- lst; // link the list (overwrites bthr but keeps flink intact)
- lst = (void **)b; // push b onto lst
- }
- b = b->ql.flink; // get next buffer
- }
- }
- while (lst != NULL) {
- void *next = *lst;
- KE_TRACE(10, ("__kmp_free_fast_memory: freeing %p, next=%p th %p (%d)\n",
- lst, next, th, __kmp_gtid_from_thread(th)));
- (*thr->relfcn)(lst);
- #if BufStats
- // count blocks to prevent problems in __kmp_finalize_bget()
- thr->numprel++; /* Nr of expansion block releases */
- thr->numpblk--; /* Total number of blocks */
- #endif
- lst = (void **)next;
- }
- KE_TRACE(
- 5, ("__kmp_free_fast_memory: Freed T#%d\n", __kmp_gtid_from_thread(th)));
- }
- #endif // USE_FAST_MEMORY