- /* List object implementation */
- #include "Python.h"
- #include "pycore_abstract.h" // _PyIndex_Check()
- #include "pycore_interp.h" // PyInterpreterState.list
- #include "pycore_list.h" // struct _Py_list_state, _PyListIterObject
- #include "pycore_long.h" // _PyLong_DigitCount
- #include "pycore_object.h" // _PyObject_GC_TRACK()
- #include "pycore_tuple.h" // _PyTuple_FromArray()
- #include <stddef.h>
- /*[clinic input]
- class list "PyListObject *" "&PyList_Type"
- [clinic start generated code]*/
- /*[clinic end generated code: output=da39a3ee5e6b4b0d input=f9b222678f9f71e0]*/
- #include "clinic/listobject.c.h"
- _Py_DECLARE_STR(list_err, "list index out of range");
- #if PyList_MAXFREELIST > 0
- static struct _Py_list_state *
- get_list_state(void)
- {
- PyInterpreterState *interp = _PyInterpreterState_GET();
- return &interp->list;
- }
- #endif
- /* Ensure ob_item has room for at least newsize elements, and set
- * ob_size to newsize. If newsize > ob_size on entry, the content
- * of the new slots at exit is undefined heap trash; it's the caller's
- * responsibility to overwrite them with sane values.
- * The number of allocated elements may grow, shrink, or stay the same.
- * Failure is impossible if newsize <= self.allocated on entry, although
- * that partly relies on an assumption that the system realloc() never
- * fails when passed a number of bytes <= the number of bytes last
- * allocated (the C standard doesn't guarantee this, but it's hard to
- * imagine a realloc implementation where it wouldn't be true).
- * Note that self->ob_item may change, even if newsize is less
- * than ob_size on entry.
- */
- static int
- list_resize(PyListObject *self, Py_ssize_t newsize)
- {
- PyObject **items;
- size_t new_allocated, num_allocated_bytes;
- Py_ssize_t allocated = self->allocated;
- /* Bypass realloc() when a previous overallocation is large enough
- to accommodate the newsize. If the newsize falls lower than half
- the allocated size, then proceed with the realloc() to shrink the list.
- */
- if (allocated >= newsize && newsize >= (allocated >> 1)) {
- assert(self->ob_item != NULL || newsize == 0);
- Py_SET_SIZE(self, newsize);
- return 0;
- }
- /* This over-allocates proportional to the list size, making room
- * for additional growth. The over-allocation is mild, but is
- * enough to give linear-time amortized behavior over a long
- * sequence of appends() in the presence of a poorly-performing
- * system realloc().
- * Add padding to make the allocated size multiple of 4.
- * The growth pattern is: 0, 4, 8, 16, 24, 32, 40, 52, 64, 76, ...
- * Note: new_allocated won't overflow because the largest possible value
- * is PY_SSIZE_T_MAX * (9 / 8) + 6 which always fits in a size_t.
- */
- new_allocated = ((size_t)newsize + (newsize >> 3) + 6) & ~(size_t)3;
- /* Do not overallocate if the new size is closer to overallocated size
- * than to the old size.
- */
- if (newsize - Py_SIZE(self) > (Py_ssize_t)(new_allocated - newsize))
- new_allocated = ((size_t)newsize + 3) & ~(size_t)3;
- if (newsize == 0)
- new_allocated = 0;
- if (new_allocated <= (size_t)PY_SSIZE_T_MAX / sizeof(PyObject *)) {
- num_allocated_bytes = new_allocated * sizeof(PyObject *);
- items = (PyObject **)PyMem_Realloc(self->ob_item, num_allocated_bytes);
- }
- else {
- // integer overflow
- items = NULL;
- }
- if (items == NULL) {
- PyErr_NoMemory();
- return -1;
- }
- self->ob_item = items;
- Py_SET_SIZE(self, newsize);
- self->allocated = new_allocated;
- return 0;
- }
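- /* Example (hypothetical, illustration only): a standalone copy of the
-  * over-allocation formula above, so the growth pattern can be traced in
-  * isolation. The helper name is made up for this sketch.
-  */
- static size_t
- example_list_growth(size_t newsize)
- {
-     /* Same arithmetic as list_resize(): mild proportional over-allocation,
-      * padded up to a multiple of 4. */
-     return (newsize + (newsize >> 3) + 6) & ~(size_t)3;
- }
- /* Tracing successive appends: example_list_growth(1) == 4,
-  * example_list_growth(5) == 8, example_list_growth(9) == 16,
-  * example_list_growth(17) == 24, matching the pattern 0, 4, 8, 16, 24, ... */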
- static int
- list_preallocate_exact(PyListObject *self, Py_ssize_t size)
- {
- assert(self->ob_item == NULL);
- assert(size > 0);
- /* Since the Python memory allocator has granularity of 16 bytes on 64-bit
- * platforms (8 on 32-bit), there is no benefit to allocating space for
- * an odd number of items, and there is no drawback to rounding the
- * allocated size up to the nearest even number.
- */
- size = (size + 1) & ~(size_t)1;
- PyObject **items = PyMem_New(PyObject*, size);
- if (items == NULL) {
- PyErr_NoMemory();
- return -1;
- }
- self->ob_item = items;
- self->allocated = size;
- return 0;
- }
- void
- _PyList_ClearFreeList(PyInterpreterState *interp)
- {
- #if PyList_MAXFREELIST > 0
- struct _Py_list_state *state = &interp->list;
- while (state->numfree) {
- PyListObject *op = state->free_list[--state->numfree];
- assert(PyList_CheckExact(op));
- PyObject_GC_Del(op);
- }
- #endif
- }
- void
- _PyList_Fini(PyInterpreterState *interp)
- {
- _PyList_ClearFreeList(interp);
- #if defined(Py_DEBUG) && PyList_MAXFREELIST > 0
- struct _Py_list_state *state = &interp->list;
- state->numfree = -1;
- #endif
- }
- /* Print summary info about the state of the optimized allocator */
- void
- _PyList_DebugMallocStats(FILE *out)
- {
- #if PyList_MAXFREELIST > 0
- struct _Py_list_state *state = get_list_state();
- _PyDebugAllocatorStats(out,
- "free PyListObject",
- state->numfree, sizeof(PyListObject));
- #endif
- }
- PyObject *
- PyList_New(Py_ssize_t size)
- {
- PyListObject *op;
- if (size < 0) {
- PyErr_BadInternalCall();
- return NULL;
- }
- #if PyList_MAXFREELIST > 0
- struct _Py_list_state *state = get_list_state();
- #ifdef Py_DEBUG
- // PyList_New() must not be called after _PyList_Fini()
- assert(state->numfree != -1);
- #endif
- if (PyList_MAXFREELIST && state->numfree) {
- state->numfree--;
- op = state->free_list[state->numfree];
- OBJECT_STAT_INC(from_freelist);
- _Py_NewReference((PyObject *)op);
- }
- else
- #endif
- {
- op = PyObject_GC_New(PyListObject, &PyList_Type);
- if (op == NULL) {
- return NULL;
- }
- }
- if (size <= 0) {
- op->ob_item = NULL;
- }
- else {
- op->ob_item = (PyObject **) PyMem_Calloc(size, sizeof(PyObject *));
- if (op->ob_item == NULL) {
- Py_DECREF(op);
- return PyErr_NoMemory();
- }
- }
- Py_SET_SIZE(op, size);
- op->allocated = size;
- _PyObject_GC_TRACK(op);
- return (PyObject *) op;
- }
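- /* Example (hypothetical, illustration only): PyList_New(n) returns a list
-  * whose n slots are NULL and must be filled before the list is handed to
-  * other code. PyList_SET_ITEM() steals the reference it is given, so the
-  * stored items need no extra Py_DECREF.
-  */
- static PyObject *
- example_build_list(void)
- {
-     PyObject *list = PyList_New(3);
-     if (list == NULL) {
-         return NULL;
-     }
-     for (Py_ssize_t i = 0; i < 3; i++) {
-         PyObject *num = PyLong_FromSsize_t(i);
-         if (num == NULL) {
-             Py_DECREF(list);
-             return NULL;
-         }
-         PyList_SET_ITEM(list, i, num);  /* steals the reference to num */
-     }
-     return list;
- }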
- static PyObject *
- list_new_prealloc(Py_ssize_t size)
- {
- assert(size > 0);
- PyListObject *op = (PyListObject *) PyList_New(0);
- if (op == NULL) {
- return NULL;
- }
- assert(op->ob_item == NULL);
- op->ob_item = PyMem_New(PyObject *, size);
- if (op->ob_item == NULL) {
- Py_DECREF(op);
- return PyErr_NoMemory();
- }
- op->allocated = size;
- return (PyObject *) op;
- }
- Py_ssize_t
- PyList_Size(PyObject *op)
- {
- if (!PyList_Check(op)) {
- PyErr_BadInternalCall();
- return -1;
- }
- else
- return Py_SIZE(op);
- }
- static inline int
- valid_index(Py_ssize_t i, Py_ssize_t limit)
- {
- /* The cast to size_t lets us use just a single comparison
- to check whether i is in the range: 0 <= i < limit.
- See: Section 14.2 "Bounds Checking" in the Agner Fog
- optimization manual found at:
- https://www.agner.org/optimize/optimizing_cpp.pdf
- */
- return (size_t) i < (size_t) limit;
- }
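- /* Example (hypothetical, illustration only): the single unsigned comparison
-  * rejects both negative and too-large indices, because a negative Py_ssize_t
-  * wraps around to a huge size_t value.
-  */
- static void
- example_valid_index_checks(void)
- {
-     assert(valid_index(3, 10));    /* in range */
-     assert(!valid_index(12, 10));  /* too large */
-     assert(!valid_index(-1, 10));  /* negative wraps to SIZE_MAX */
- }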
- PyObject *
- PyList_GetItem(PyObject *op, Py_ssize_t i)
- {
- if (!PyList_Check(op)) {
- PyErr_BadInternalCall();
- return NULL;
- }
- if (!valid_index(i, Py_SIZE(op))) {
- _Py_DECLARE_STR(list_err, "list index out of range");
- PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
- return NULL;
- }
- return ((PyListObject *)op) -> ob_item[i];
- }
- int
- PyList_SetItem(PyObject *op, Py_ssize_t i,
- PyObject *newitem)
- {
- PyObject **p;
- if (!PyList_Check(op)) {
- Py_XDECREF(newitem);
- PyErr_BadInternalCall();
- return -1;
- }
- if (!valid_index(i, Py_SIZE(op))) {
- Py_XDECREF(newitem);
- PyErr_SetString(PyExc_IndexError,
- "list assignment index out of range");
- return -1;
- }
- p = ((PyListObject *)op) -> ob_item + i;
- Py_XSETREF(*p, newitem);
- return 0;
- }
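- /* Example (hypothetical, illustration only): PyList_GetItem() returns a
-  * borrowed reference, while PyList_SetItem() steals the reference it is
-  * given and releases the old item itself.
-  */
- static int
- example_replace_first(PyObject *list)
- {
-     PyObject *first = PyList_GetItem(list, 0);  /* borrowed; do not DECREF */
-     if (first == NULL) {
-         return -1;
-     }
-     PyObject *zero = PyLong_FromLong(0);
-     if (zero == NULL) {
-         return -1;
-     }
-     return PyList_SetItem(list, 0, zero);  /* steals 'zero' */
- }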
- static int
- ins1(PyListObject *self, Py_ssize_t where, PyObject *v)
- {
- Py_ssize_t i, n = Py_SIZE(self);
- PyObject **items;
- if (v == NULL) {
- PyErr_BadInternalCall();
- return -1;
- }
- assert((size_t)n + 1 < PY_SSIZE_T_MAX);
- if (list_resize(self, n+1) < 0)
- return -1;
- if (where < 0) {
- where += n;
- if (where < 0)
- where = 0;
- }
- if (where > n)
- where = n;
- items = self->ob_item;
- for (i = n; --i >= where; )
- items[i+1] = items[i];
- items[where] = Py_NewRef(v);
- return 0;
- }
- int
- PyList_Insert(PyObject *op, Py_ssize_t where, PyObject *newitem)
- {
- if (!PyList_Check(op)) {
- PyErr_BadInternalCall();
- return -1;
- }
- return ins1((PyListObject *)op, where, newitem);
- }
- /* internal, used by _PyList_AppendTakeRef */
- int
- _PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem)
- {
- Py_ssize_t len = PyList_GET_SIZE(self);
- assert(self->allocated == -1 || self->allocated == len);
- if (list_resize(self, len + 1) < 0) {
- Py_DECREF(newitem);
- return -1;
- }
- PyList_SET_ITEM(self, len, newitem);
- return 0;
- }
- int
- PyList_Append(PyObject *op, PyObject *newitem)
- {
- if (PyList_Check(op) && (newitem != NULL)) {
- return _PyList_AppendTakeRef((PyListObject *)op, Py_NewRef(newitem));
- }
- PyErr_BadInternalCall();
- return -1;
- }
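- /* Example (hypothetical, illustration only): unlike PyList_SetItem(),
-  * PyList_Append() does not steal its argument; the caller keeps its own
-  * reference and must release it.
-  */
- static int
- example_append_long(PyObject *list, long value)
- {
-     PyObject *obj = PyLong_FromLong(value);
-     if (obj == NULL) {
-         return -1;
-     }
-     int rc = PyList_Append(list, obj);  /* the list adds its own reference */
-     Py_DECREF(obj);                     /* drop ours either way */
-     return rc;
- }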
- /* Methods */
- static void
- list_dealloc(PyListObject *op)
- {
- Py_ssize_t i;
- PyObject_GC_UnTrack(op);
- Py_TRASHCAN_BEGIN(op, list_dealloc)
- if (op->ob_item != NULL) {
- /* Do it backwards, for Christian Tismer.
- There's a simple test case where somehow this reduces
- thrashing when a *very* large list is created and
- immediately deleted. */
- i = Py_SIZE(op);
- while (--i >= 0) {
- Py_XDECREF(op->ob_item[i]);
- }
- PyMem_Free(op->ob_item);
- }
- #if PyList_MAXFREELIST > 0
- struct _Py_list_state *state = get_list_state();
- #ifdef Py_DEBUG
- // list_dealloc() must not be called after _PyList_Fini()
- assert(state->numfree != -1);
- #endif
- if (state->numfree < PyList_MAXFREELIST && PyList_CheckExact(op)) {
- state->free_list[state->numfree++] = op;
- OBJECT_STAT_INC(to_freelist);
- }
- else
- #endif
- {
- Py_TYPE(op)->tp_free((PyObject *)op);
- }
- Py_TRASHCAN_END
- }
- static PyObject *
- list_repr(PyListObject *v)
- {
- Py_ssize_t i;
- PyObject *s;
- _PyUnicodeWriter writer;
- if (Py_SIZE(v) == 0) {
- return PyUnicode_FromString("[]");
- }
- i = Py_ReprEnter((PyObject*)v);
- if (i != 0) {
- return i > 0 ? PyUnicode_FromString("[...]") : NULL;
- }
- _PyUnicodeWriter_Init(&writer);
- writer.overallocate = 1;
- /* "[" + "1" + ", 2" * (len - 1) + "]" */
- writer.min_length = 1 + 1 + (2 + 1) * (Py_SIZE(v) - 1) + 1;
- if (_PyUnicodeWriter_WriteChar(&writer, '[') < 0)
- goto error;
- /* Do repr() on each element. Note that this may mutate the list,
- so must refetch the list size on each iteration. */
- for (i = 0; i < Py_SIZE(v); ++i) {
- if (i > 0) {
- if (_PyUnicodeWriter_WriteASCIIString(&writer, ", ", 2) < 0)
- goto error;
- }
- s = PyObject_Repr(v->ob_item[i]);
- if (s == NULL)
- goto error;
- if (_PyUnicodeWriter_WriteStr(&writer, s) < 0) {
- Py_DECREF(s);
- goto error;
- }
- Py_DECREF(s);
- }
- writer.overallocate = 0;
- if (_PyUnicodeWriter_WriteChar(&writer, ']') < 0)
- goto error;
- Py_ReprLeave((PyObject *)v);
- return _PyUnicodeWriter_Finish(&writer);
- error:
- _PyUnicodeWriter_Dealloc(&writer);
- Py_ReprLeave((PyObject *)v);
- return NULL;
- }
- static Py_ssize_t
- list_length(PyListObject *a)
- {
- return Py_SIZE(a);
- }
- static int
- list_contains(PyListObject *a, PyObject *el)
- {
- PyObject *item;
- Py_ssize_t i;
- int cmp;
- for (i = 0, cmp = 0 ; cmp == 0 && i < Py_SIZE(a); ++i) {
- item = PyList_GET_ITEM(a, i);
- Py_INCREF(item);
- cmp = PyObject_RichCompareBool(item, el, Py_EQ);
- Py_DECREF(item);
- }
- return cmp;
- }
- static PyObject *
- list_item(PyListObject *a, Py_ssize_t i)
- {
- if (!valid_index(i, Py_SIZE(a))) {
- PyErr_SetObject(PyExc_IndexError, &_Py_STR(list_err));
- return NULL;
- }
- return Py_NewRef(a->ob_item[i]);
- }
- static PyObject *
- list_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
- {
- PyListObject *np;
- PyObject **src, **dest;
- Py_ssize_t i, len;
- len = ihigh - ilow;
- if (len <= 0) {
- return PyList_New(0);
- }
- np = (PyListObject *) list_new_prealloc(len);
- if (np == NULL)
- return NULL;
- src = a->ob_item + ilow;
- dest = np->ob_item;
- for (i = 0; i < len; i++) {
- PyObject *v = src[i];
- dest[i] = Py_NewRef(v);
- }
- Py_SET_SIZE(np, len);
- return (PyObject *)np;
- }
- PyObject *
- PyList_GetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh)
- {
- if (!PyList_Check(a)) {
- PyErr_BadInternalCall();
- return NULL;
- }
- if (ilow < 0) {
- ilow = 0;
- }
- else if (ilow > Py_SIZE(a)) {
- ilow = Py_SIZE(a);
- }
- if (ihigh < ilow) {
- ihigh = ilow;
- }
- else if (ihigh > Py_SIZE(a)) {
- ihigh = Py_SIZE(a);
- }
- return list_slice((PyListObject *)a, ilow, ihigh);
- }
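- /* Example (hypothetical, illustration only): PyList_GetSlice() clips
-  * out-of-range bounds instead of raising, mirroring Python-level slicing,
-  * so PY_SSIZE_T_MAX can safely mean "to the end".
-  */
- static PyObject *
- example_copy_list(PyObject *list)
- {
-     return PyList_GetSlice(list, 0, PY_SSIZE_T_MAX);  /* shallow copy */
- }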
- static PyObject *
- list_concat(PyListObject *a, PyObject *bb)
- {
- Py_ssize_t size;
- Py_ssize_t i;
- PyObject **src, **dest;
- PyListObject *np;
- if (!PyList_Check(bb)) {
- PyErr_Format(PyExc_TypeError,
- "can only concatenate list (not \"%.200s\") to list",
- Py_TYPE(bb)->tp_name);
- return NULL;
- }
- #define b ((PyListObject *)bb)
- assert((size_t)Py_SIZE(a) + (size_t)Py_SIZE(b) < PY_SSIZE_T_MAX);
- size = Py_SIZE(a) + Py_SIZE(b);
- if (size == 0) {
- return PyList_New(0);
- }
- np = (PyListObject *) list_new_prealloc(size);
- if (np == NULL) {
- return NULL;
- }
- src = a->ob_item;
- dest = np->ob_item;
- for (i = 0; i < Py_SIZE(a); i++) {
- PyObject *v = src[i];
- dest[i] = Py_NewRef(v);
- }
- src = b->ob_item;
- dest = np->ob_item + Py_SIZE(a);
- for (i = 0; i < Py_SIZE(b); i++) {
- PyObject *v = src[i];
- dest[i] = Py_NewRef(v);
- }
- Py_SET_SIZE(np, size);
- return (PyObject *)np;
- #undef b
- }
- static PyObject *
- list_repeat(PyListObject *a, Py_ssize_t n)
- {
- const Py_ssize_t input_size = Py_SIZE(a);
- if (input_size == 0 || n <= 0)
- return PyList_New(0);
- assert(n > 0);
- if (input_size > PY_SSIZE_T_MAX / n)
- return PyErr_NoMemory();
- Py_ssize_t output_size = input_size * n;
- PyListObject *np = (PyListObject *) list_new_prealloc(output_size);
- if (np == NULL)
- return NULL;
- PyObject **dest = np->ob_item;
- if (input_size == 1) {
- PyObject *elem = a->ob_item[0];
- _Py_RefcntAdd(elem, n);
- PyObject **dest_end = dest + output_size;
- while (dest < dest_end) {
- *dest++ = elem;
- }
- }
- else {
- PyObject **src = a->ob_item;
- PyObject **src_end = src + input_size;
- while (src < src_end) {
- _Py_RefcntAdd(*src, n);
- *dest++ = *src++;
- }
- _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
- sizeof(PyObject *)*input_size);
- }
- Py_SET_SIZE(np, output_size);
- return (PyObject *) np;
- }
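- /* Example (hypothetical, illustration only): _Py_memory_repeat() fills a
-  * buffer by repeatedly doubling an already-written prefix, so the number of
-  * memcpy() calls grows logarithmically with the repeat count. This is a
-  * standalone sketch of that idea.
-  */
- static void
- example_memory_repeat(char *dest, Py_ssize_t len_dest, Py_ssize_t len_src)
- {
-     /* Precondition: the first len_src bytes of dest are already filled. */
-     Py_ssize_t copied = len_src;
-     while (copied < len_dest) {
-         Py_ssize_t bytes_to_copy = Py_MIN(copied, len_dest - copied);
-         memcpy(dest + copied, dest, bytes_to_copy);
-         copied += bytes_to_copy;
-     }
- }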
- static int
- _list_clear(PyListObject *a)
- {
- Py_ssize_t i;
- PyObject **item = a->ob_item;
- if (item != NULL) {
- /* Because XDECREF can recursively invoke operations on
- this list, we make it empty first. */
- i = Py_SIZE(a);
- Py_SET_SIZE(a, 0);
- a->ob_item = NULL;
- a->allocated = 0;
- while (--i >= 0) {
- Py_XDECREF(item[i]);
- }
- PyMem_Free(item);
- }
- /* Never fails; the return value can be ignored.
- Note that there is no guarantee that the list is actually empty
- at this point, because XDECREF may have populated it again! */
- return 0;
- }
- /* a[ilow:ihigh] = v if v != NULL.
- * del a[ilow:ihigh] if v == NULL.
- *
- * Special speed gimmick: when v is NULL and ihigh - ilow <= 8, it's
- * guaranteed the call cannot fail.
- */
- static int
- list_ass_slice(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
- {
- /* Because [X]DECREF can recursively invoke list operations on
- this list, we must postpone all [X]DECREF activity until
- after the list is back in its canonical shape. Therefore
- we must allocate an additional array, 'recycle', into which
- we temporarily copy the items that are deleted from the
- list. :-( */
- PyObject *recycle_on_stack[8];
- PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
- PyObject **item;
- PyObject **vitem = NULL;
- PyObject *v_as_SF = NULL; /* PySequence_Fast(v) */
- Py_ssize_t n; /* # of elements in replacement list */
- Py_ssize_t norig; /* # of elements in list getting replaced */
- Py_ssize_t d; /* Change in size */
- Py_ssize_t k;
- size_t s;
- int result = -1; /* guilty until proved innocent */
- #define b ((PyListObject *)v)
- if (v == NULL)
- n = 0;
- else {
- if (a == b) {
- /* Special case "a[i:j] = a" -- copy b first */
- v = list_slice(b, 0, Py_SIZE(b));
- if (v == NULL)
- return result;
- result = list_ass_slice(a, ilow, ihigh, v);
- Py_DECREF(v);
- return result;
- }
- v_as_SF = PySequence_Fast(v, "can only assign an iterable");
- if(v_as_SF == NULL)
- goto Error;
- n = PySequence_Fast_GET_SIZE(v_as_SF);
- vitem = PySequence_Fast_ITEMS(v_as_SF);
- }
- if (ilow < 0)
- ilow = 0;
- else if (ilow > Py_SIZE(a))
- ilow = Py_SIZE(a);
- if (ihigh < ilow)
- ihigh = ilow;
- else if (ihigh > Py_SIZE(a))
- ihigh = Py_SIZE(a);
- norig = ihigh - ilow;
- assert(norig >= 0);
- d = n - norig;
- if (Py_SIZE(a) + d == 0) {
- Py_XDECREF(v_as_SF);
- return _list_clear(a);
- }
- item = a->ob_item;
- /* recycle the items that we are about to remove */
- s = norig * sizeof(PyObject *);
- /* If norig == 0, item might be NULL, in which case we may not memcpy from it. */
- if (s) {
- if (s > sizeof(recycle_on_stack)) {
- recycle = (PyObject **)PyMem_Malloc(s);
- if (recycle == NULL) {
- PyErr_NoMemory();
- goto Error;
- }
- }
- memcpy(recycle, &item[ilow], s);
- }
- if (d < 0) { /* Delete -d items */
- Py_ssize_t tail;
- tail = (Py_SIZE(a) - ihigh) * sizeof(PyObject *);
- memmove(&item[ihigh+d], &item[ihigh], tail);
- if (list_resize(a, Py_SIZE(a) + d) < 0) {
- memmove(&item[ihigh], &item[ihigh+d], tail);
- memcpy(&item[ilow], recycle, s);
- goto Error;
- }
- item = a->ob_item;
- }
- else if (d > 0) { /* Insert d items */
- k = Py_SIZE(a);
- if (list_resize(a, k+d) < 0)
- goto Error;
- item = a->ob_item;
- memmove(&item[ihigh+d], &item[ihigh],
- (k - ihigh)*sizeof(PyObject *));
- }
- for (k = 0; k < n; k++, ilow++) {
- PyObject *w = vitem[k];
- item[ilow] = Py_XNewRef(w);
- }
- for (k = norig - 1; k >= 0; --k)
- Py_XDECREF(recycle[k]);
- result = 0;
- Error:
- if (recycle != recycle_on_stack)
- PyMem_Free(recycle);
- Py_XDECREF(v_as_SF);
- return result;
- #undef b
- }
- int
- PyList_SetSlice(PyObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *v)
- {
- if (!PyList_Check(a)) {
- PyErr_BadInternalCall();
- return -1;
- }
- return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
- }
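- /* Example (hypothetical, illustration only): PyList_SetSlice() both assigns
-  * and deletes; passing v == NULL deletes a[ilow:ihigh].
-  */
- static int
- example_drop_prefix(PyObject *list, Py_ssize_t n)
- {
-     return PyList_SetSlice(list, 0, n, NULL);  /* like "del list[0:n]" */
- }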
- static PyObject *
- list_inplace_repeat(PyListObject *self, Py_ssize_t n)
- {
- Py_ssize_t input_size = PyList_GET_SIZE(self);
- if (input_size == 0 || n == 1) {
- return Py_NewRef(self);
- }
- if (n < 1) {
- (void)_list_clear(self);
- return Py_NewRef(self);
- }
- if (input_size > PY_SSIZE_T_MAX / n) {
- return PyErr_NoMemory();
- }
- Py_ssize_t output_size = input_size * n;
- if (list_resize(self, output_size) < 0)
- return NULL;
- PyObject **items = self->ob_item;
- for (Py_ssize_t j = 0; j < input_size; j++) {
- _Py_RefcntAdd(items[j], n-1);
- }
- _Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size,
- sizeof(PyObject *)*input_size);
- return Py_NewRef(self);
- }
- static int
- list_ass_item(PyListObject *a, Py_ssize_t i, PyObject *v)
- {
- if (!valid_index(i, Py_SIZE(a))) {
- PyErr_SetString(PyExc_IndexError,
- "list assignment index out of range");
- return -1;
- }
- if (v == NULL)
- return list_ass_slice(a, i, i+1, v);
- Py_SETREF(a->ob_item[i], Py_NewRef(v));
- return 0;
- }
- /*[clinic input]
- list.insert
- index: Py_ssize_t
- object: object
- /
- Insert object before index.
- [clinic start generated code]*/
- static PyObject *
- list_insert_impl(PyListObject *self, Py_ssize_t index, PyObject *object)
- /*[clinic end generated code: output=7f35e32f60c8cb78 input=858514cf894c7eab]*/
- {
- if (ins1(self, index, object) == 0)
- Py_RETURN_NONE;
- return NULL;
- }
- /*[clinic input]
- list.clear
- Remove all items from list.
- [clinic start generated code]*/
- static PyObject *
- list_clear_impl(PyListObject *self)
- /*[clinic end generated code: output=67a1896c01f74362 input=ca3c1646856742f6]*/
- {
- _list_clear(self);
- Py_RETURN_NONE;
- }
- /*[clinic input]
- list.copy
- Return a shallow copy of the list.
- [clinic start generated code]*/
- static PyObject *
- list_copy_impl(PyListObject *self)
- /*[clinic end generated code: output=ec6b72d6209d418e input=6453ab159e84771f]*/
- {
- return list_slice(self, 0, Py_SIZE(self));
- }
- /*[clinic input]
- list.append
- object: object
- /
- Append object to the end of the list.
- [clinic start generated code]*/
- static PyObject *
- list_append(PyListObject *self, PyObject *object)
- /*[clinic end generated code: output=7c096003a29c0eae input=43a3fe48a7066e91]*/
- {
- if (_PyList_AppendTakeRef(self, Py_NewRef(object)) < 0) {
- return NULL;
- }
- Py_RETURN_NONE;
- }
- /*[clinic input]
- list.extend
- iterable: object
- /
- Extend list by appending elements from the iterable.
- [clinic start generated code]*/
- static PyObject *
- list_extend(PyListObject *self, PyObject *iterable)
- /*[clinic end generated code: output=630fb3bca0c8e789 input=9ec5ba3a81be3a4d]*/
- {
- PyObject *it; /* iter(v) */
- Py_ssize_t m; /* size of self */
- Py_ssize_t n; /* guess for size of iterable */
- Py_ssize_t i;
- PyObject *(*iternext)(PyObject *);
- /* Special cases:
- 1) lists and tuples which can use PySequence_Fast ops
- 2) extending self to self requires making a copy first
- */
- if (PyList_CheckExact(iterable) || PyTuple_CheckExact(iterable) ||
- (PyObject *)self == iterable) {
- PyObject **src, **dest;
- iterable = PySequence_Fast(iterable, "argument must be iterable");
- if (!iterable)
- return NULL;
- n = PySequence_Fast_GET_SIZE(iterable);
- if (n == 0) {
- /* short circuit when iterable is empty */
- Py_DECREF(iterable);
- Py_RETURN_NONE;
- }
- m = Py_SIZE(self);
- /* It should not be possible to allocate a list large enough to cause
- an overflow on any relevant platform */
- assert(m < PY_SSIZE_T_MAX - n);
- if (self->ob_item == NULL) {
- if (list_preallocate_exact(self, n) < 0) {
- return NULL;
- }
- Py_SET_SIZE(self, n);
- }
- else if (list_resize(self, m + n) < 0) {
- Py_DECREF(iterable);
- return NULL;
- }
- /* note that we may still have self == iterable here for the
- * situation a.extend(a), but the following code works
- * in that case too. Just make sure to resize self
- * before calling PySequence_Fast_ITEMS.
- */
- /* populate the end of self with iterable's items */
- src = PySequence_Fast_ITEMS(iterable);
- dest = self->ob_item + m;
- for (i = 0; i < n; i++) {
- PyObject *o = src[i];
- dest[i] = Py_NewRef(o);
- }
- Py_DECREF(iterable);
- Py_RETURN_NONE;
- }
- it = PyObject_GetIter(iterable);
- if (it == NULL)
- return NULL;
- iternext = *Py_TYPE(it)->tp_iternext;
- /* Guess a result list size. */
- n = PyObject_LengthHint(iterable, 8);
- if (n < 0) {
- Py_DECREF(it);
- return NULL;
- }
- m = Py_SIZE(self);
- if (m > PY_SSIZE_T_MAX - n) {
- /* m + n overflowed; on the chance that n lied, and there really
- * is enough room, ignore it. If n was telling the truth, we'll
- * eventually run out of memory during the loop.
- */
- }
- else if (self->ob_item == NULL) {
- if (n && list_preallocate_exact(self, n) < 0)
- goto error;
- }
- else {
- /* Make room. */
- if (list_resize(self, m + n) < 0)
- goto error;
- /* Make the list sane again. */
- Py_SET_SIZE(self, m);
- }
- /* Run iterator to exhaustion. */
- for (;;) {
- PyObject *item = iternext(it);
- if (item == NULL) {
- if (PyErr_Occurred()) {
- if (PyErr_ExceptionMatches(PyExc_StopIteration))
- PyErr_Clear();
- else
- goto error;
- }
- break;
- }
- if (Py_SIZE(self) < self->allocated) {
- /* steals ref */
- PyList_SET_ITEM(self, Py_SIZE(self), item);
- Py_SET_SIZE(self, Py_SIZE(self) + 1);
- }
- else {
- if (_PyList_AppendTakeRef(self, item) < 0)
- goto error;
- }
- }
- /* Cut back result list if initial guess was too large. */
- if (Py_SIZE(self) < self->allocated) {
- if (list_resize(self, Py_SIZE(self)) < 0)
- goto error;
- }
- Py_DECREF(it);
- Py_RETURN_NONE;
- error:
- Py_DECREF(it);
- return NULL;
- }
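- /* Example (hypothetical, illustration only): the fast path above relies on
-  * PySequence_Fast(), which returns the object itself for lists and tuples
-  * and builds a list from any other iterable, so the items can be read
-  * through a plain C array.
-  */
- static Py_ssize_t
- example_sequence_length(PyObject *iterable)
- {
-     PyObject *seq = PySequence_Fast(iterable, "argument must be iterable");
-     if (seq == NULL) {
-         return -1;
-     }
-     Py_ssize_t n = PySequence_Fast_GET_SIZE(seq);
-     PyObject **items = PySequence_Fast_ITEMS(seq);  /* borrowed item array */
-     (void)items;  /* a real caller would walk items[0..n-1] here */
-     Py_DECREF(seq);
-     return n;
- }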
- PyObject *
- _PyList_Extend(PyListObject *self, PyObject *iterable)
- {
- return list_extend(self, iterable);
- }
- static PyObject *
- list_inplace_concat(PyListObject *self, PyObject *other)
- {
- PyObject *result;
- result = list_extend(self, other);
- if (result == NULL)
- return result;
- Py_DECREF(result);
- return Py_NewRef(self);
- }
- /*[clinic input]
- list.pop
- index: Py_ssize_t = -1
- /
- Remove and return item at index (default last).
- Raises IndexError if list is empty or index is out of range.
- [clinic start generated code]*/
- static PyObject *
- list_pop_impl(PyListObject *self, Py_ssize_t index)
- /*[clinic end generated code: output=6bd69dcb3f17eca8 input=b83675976f329e6f]*/
- {
- PyObject *v;
- int status;
- if (Py_SIZE(self) == 0) {
- /* Special-case most common failure cause */
- PyErr_SetString(PyExc_IndexError, "pop from empty list");
- return NULL;
- }
- if (index < 0)
- index += Py_SIZE(self);
- if (!valid_index(index, Py_SIZE(self))) {
- PyErr_SetString(PyExc_IndexError, "pop index out of range");
- return NULL;
- }
- PyObject **items = self->ob_item;
- v = items[index];
- const Py_ssize_t size_after_pop = Py_SIZE(self) - 1;
- if (size_after_pop == 0) {
- Py_INCREF(v);
- status = _list_clear(self);
- }
- else {
- if ((size_after_pop - index) > 0) {
- memmove(&items[index], &items[index+1], (size_after_pop - index) * sizeof(PyObject *));
- }
- status = list_resize(self, size_after_pop);
- }
- if (status >= 0) {
- return v; // and v now owns the reference the list had
- }
- else {
- // list resize failed, need to restore
- memmove(&items[index+1], &items[index], (size_after_pop - index)* sizeof(PyObject *));
- items[index] = v;
- return NULL;
- }
- }
- /* Reverse a slice of a list in place, from lo up to (exclusive) hi. */
- static void
- reverse_slice(PyObject **lo, PyObject **hi)
- {
- assert(lo && hi);
- --hi;
- while (lo < hi) {
- PyObject *t = *lo;
- *lo = *hi;
- *hi = t;
- ++lo;
- --hi;
- }
- }
- /* Lots of code for an adaptive, stable, natural mergesort. There are many
- * pieces to this algorithm; read listsort.txt for overviews and details.
- */
- /* A sortslice contains a pointer to an array of keys and a pointer to
- * an array of corresponding values. In other words, keys[i]
- * corresponds with values[i]. If values == NULL, then the keys are
- * also the values.
- *
- * Several convenience routines are provided here, so that keys and
- * values are always moved in sync.
- */
- typedef struct {
- PyObject **keys;
- PyObject **values;
- } sortslice;
- Py_LOCAL_INLINE(void)
- sortslice_copy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j)
- {
- s1->keys[i] = s2->keys[j];
- if (s1->values != NULL)
- s1->values[i] = s2->values[j];
- }
- Py_LOCAL_INLINE(void)
- sortslice_copy_incr(sortslice *dst, sortslice *src)
- {
- *dst->keys++ = *src->keys++;
- if (dst->values != NULL)
- *dst->values++ = *src->values++;
- }
- Py_LOCAL_INLINE(void)
- sortslice_copy_decr(sortslice *dst, sortslice *src)
- {
- *dst->keys-- = *src->keys--;
- if (dst->values != NULL)
- *dst->values-- = *src->values--;
- }
- Py_LOCAL_INLINE(void)
- sortslice_memcpy(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
- Py_ssize_t n)
- {
- memcpy(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
- if (s1->values != NULL)
- memcpy(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
- }
- Py_LOCAL_INLINE(void)
- sortslice_memmove(sortslice *s1, Py_ssize_t i, sortslice *s2, Py_ssize_t j,
- Py_ssize_t n)
- {
- memmove(&s1->keys[i], &s2->keys[j], sizeof(PyObject *) * n);
- if (s1->values != NULL)
- memmove(&s1->values[i], &s2->values[j], sizeof(PyObject *) * n);
- }
- Py_LOCAL_INLINE(void)
- sortslice_advance(sortslice *slice, Py_ssize_t n)
- {
- slice->keys += n;
- if (slice->values != NULL)
- slice->values += n;
- }
- /* Comparison function: ms->key_compare, which is set at run-time in
- * listsort_impl to optimize for various special cases.
- * Returns -1 on error, 1 if x < y, 0 if x >= y.
- */
- #define ISLT(X, Y) (*(ms->key_compare))(X, Y, ms)
- /* Compare X to Y via "<". Goto "fail" if the comparison raises an
- error. Else "k" is set to true iff X<Y, and an "if (k)" block is
- started. It makes more sense in context <wink>. X and Y are PyObject*s.
- */
- #define IFLT(X, Y) if ((k = ISLT(X, Y)) < 0) goto fail; \
- if (k)
- /* The maximum number of entries in a MergeState's pending-runs stack.
- * For a list with n elements, this needs at most floor(log2(n)) + 1 entries
- * even if we didn't force runs to a minimal length. So the number of bits
- * in a Py_ssize_t is plenty large enough for all cases.
- */
- #define MAX_MERGE_PENDING (SIZEOF_SIZE_T * 8)
- /* When we get into galloping mode, we stay there until both runs win less
- * often than MIN_GALLOP consecutive times. See listsort.txt for more info.
- */
- #define MIN_GALLOP 7
- /* Avoid malloc for small temp arrays. */
- #define MERGESTATE_TEMP_SIZE 256
- /* One MergeState exists on the stack per invocation of mergesort. It's just
- * a convenient way to pass state around among the helper functions.
- */
- struct s_slice {
- sortslice base;
- Py_ssize_t len; /* length of run */
- int power; /* node "level" for powersort merge strategy */
- };
- typedef struct s_MergeState MergeState;
- struct s_MergeState {
- /* This controls when we get *into* galloping mode. It's initialized
- * to MIN_GALLOP. merge_lo and merge_hi tend to nudge it higher for
- * random data, and lower for highly structured data.
- */
- Py_ssize_t min_gallop;
- Py_ssize_t listlen; /* len(input_list) - read only */
- PyObject **basekeys; /* base address of keys array - read only */
- /* 'a' is temp storage to help with merges. It contains room for
- * alloced entries.
- */
- sortslice a; /* may point to temparray below */
- Py_ssize_t alloced;
- /* A stack of n pending runs yet to be merged. Run #i starts at
- * address base[i] and extends for len[i] elements. It's always
- * true (so long as the indices are in bounds) that
- *
- * pending[i].base + pending[i].len == pending[i+1].base
- *
- * so we could cut the storage for this, but it's a minor amount,
- * and keeping all the info explicit simplifies the code.
- */
- int n;
- struct s_slice pending[MAX_MERGE_PENDING];
- /* 'a' points to this when possible, rather than muck with malloc. */
- PyObject *temparray[MERGESTATE_TEMP_SIZE];
- /* This is the function we will use to compare two keys,
- * even when none of our special cases apply and we have to use
- * safe_object_compare. */
- int (*key_compare)(PyObject *, PyObject *, MergeState *);
- /* This function is used by unsafe_object_compare to optimize comparisons
- * when we know our list is type-homogeneous but we can't assume anything else.
- * In the pre-sort check it is set equal to Py_TYPE(key)->tp_richcompare */
- PyObject *(*key_richcompare)(PyObject *, PyObject *, int);
- /* This function is used by unsafe_tuple_compare to compare the first elements
- * of tuples. It may be set to safe_object_compare, but the idea is that hopefully
- * we can assume more, and use one of the special-case compares. */
- int (*tuple_elem_compare)(PyObject *, PyObject *, MergeState *);
- };
- /* binarysort is the best method for sorting small arrays: it does
- few compares, but can do data movement quadratic in the number of
- elements.
- [lo, hi) is a contiguous slice of a list, and is sorted via
- binary insertion. This sort is stable.
- On entry, must have lo <= start <= hi, and that [lo, start) is already
- sorted (pass start == lo if you don't know!).
- If islt() complains, return -1; otherwise return 0.
- Even in case of error, the output slice will be some permutation of
- the input (nothing is lost or duplicated).
- */
- static int
- binarysort(MergeState *ms, sortslice lo, PyObject **hi, PyObject **start)
- {
- Py_ssize_t k;
- PyObject **l, **p, **r;
- PyObject *pivot;
- assert(lo.keys <= start && start <= hi);
- /* assert [lo, start) is sorted */
- if (lo.keys == start)
- ++start;
- for (; start < hi; ++start) {
- /* set l to where *start belongs */
- l = lo.keys;
- r = start;
- pivot = *r;
- /* Invariants:
- * pivot >= all in [lo, l).
- * pivot < all in [r, start).
- * The second is vacuously true at the start.
- */
- assert(l < r);
- do {
- p = l + ((r - l) >> 1);
- IFLT(pivot, *p)
- r = p;
- else
- l = p+1;
- } while (l < r);
- assert(l == r);
- /* The invariants still hold, so pivot >= all in [lo, l) and
- pivot < all in [l, start), so pivot belongs at l. Note
- that if there are elements equal to pivot, l points to the
- first slot after them -- that's why this sort is stable.
- Slide over to make room.
- Caution: using memmove is much slower under MSVC 5;
- we're not usually moving many slots. */
- for (p = start; p > l; --p)
- *p = *(p-1);
- *l = pivot;
- if (lo.values != NULL) {
- Py_ssize_t offset = lo.values - lo.keys;
- p = start + offset;
- pivot = *p;
- l += offset;
- for (p = start + offset; p > l; --p)
- *p = *(p-1);
- *l = pivot;
- }
- }
- return 0;
- fail:
- return -1;
- }
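- /* Example (hypothetical, illustration only): the same binary insertion idea
-  * on a plain int array, without the sortslice machinery or the user-defined
-  * comparison. Stability holds because the binary search finds the first
-  * slot after any elements equal to the pivot.
-  */
- static void
- example_binary_insertion_sort(int *a, Py_ssize_t n)
- {
-     for (Py_ssize_t start = 1; start < n; start++) {
-         int pivot = a[start];
-         Py_ssize_t l = 0, r = start;
-         while (l < r) {                    /* find where pivot belongs */
-             Py_ssize_t m = l + ((r - l) >> 1);
-             if (pivot < a[m])
-                 r = m;
-             else
-                 l = m + 1;
-         }
-         for (Py_ssize_t p = start; p > l; p--)  /* slide over to make room */
-             a[p] = a[p - 1];
-         a[l] = pivot;
-     }
- }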
- /*
- Return the length of the run beginning at lo, in the slice [lo, hi). lo < hi
- is required on entry. "A run" is the longest ascending sequence, with
- lo[0] <= lo[1] <= lo[2] <= ...
- or the longest descending sequence, with
- lo[0] > lo[1] > lo[2] > ...
- Boolean *descending is set to 0 in the former case, or to 1 in the latter.
- For its intended use in a stable mergesort, the strictness of the defn of
- "descending" is needed so that the caller can safely reverse a descending
- sequence without violating stability (strict > ensures there are no equal
- elements to get out of order).
- Returns -1 in case of error.
- */
- static Py_ssize_t
- count_run(MergeState *ms, PyObject **lo, PyObject **hi, int *descending)
- {
- Py_ssize_t k;
- Py_ssize_t n;
- assert(lo < hi);
- *descending = 0;
- ++lo;
- if (lo == hi)
- return 1;
- n = 2;
- IFLT(*lo, *(lo-1)) {
- *descending = 1;
- for (lo = lo+1; lo < hi; ++lo, ++n) {
- IFLT(*lo, *(lo-1))
- ;
- else
- break;
- }
- }
- else {
- for (lo = lo+1; lo < hi; ++lo, ++n) {
- IFLT(*lo, *(lo-1))
- break;
- }
- }
- return n;
- fail:
- return -1;
- }
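- /* Example (hypothetical, illustration only): the same run-counting rule on a
-  * plain int array. Ascending runs allow equal neighbors; descending runs
-  * must be strictly decreasing, exactly as required above for stability.
-  */
- static Py_ssize_t
- example_count_run(const int *a, Py_ssize_t n, int *descending)
- {
-     *descending = 0;
-     if (n <= 1)
-         return n;
-     Py_ssize_t k = 2;
-     if (a[1] < a[0]) {                 /* strictly descending run */
-         *descending = 1;
-         while (k < n && a[k] < a[k-1])
-             k++;
-     }
-     else {                             /* non-decreasing run */
-         while (k < n && a[k] >= a[k-1])
-             k++;
-     }
-     return k;
- }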
- /*
- Locate the proper position of key in a sorted vector; if the vector contains
- an element equal to key, return the position immediately to the left of
- the leftmost equal element. [gallop_right() does the same except returns
- the position to the right of the rightmost equal element (if any).]
- "a" is a sorted vector with n elements, starting at a[0]. n must be > 0.
- "hint" is an index at which to begin the search, 0 <= hint < n. The closer
- hint is to the final result, the faster this runs.
- The return value is the int k in 0..n such that
- a[k-1] < key <= a[k]
- pretending that *(a-1) is minus infinity and a[n] is plus infinity. IOW,
- key belongs at index k; or, IOW, the first k elements of a should precede
- key, and the last n-k should follow key.
- Returns -1 on error. See listsort.txt for info on the method.
- */
- static Py_ssize_t
- gallop_left(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
- {
- Py_ssize_t ofs;
- Py_ssize_t lastofs;
- Py_ssize_t k;
- assert(key && a && n > 0 && hint >= 0 && hint < n);
- a += hint;
- lastofs = 0;
- ofs = 1;
- IFLT(*a, key) {
- /* a[hint] < key -- gallop right, until
- * a[hint + lastofs] < key <= a[hint + ofs]
- */
- const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */
- while (ofs < maxofs) {
- IFLT(a[ofs], key) {
- lastofs = ofs;
- assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
- ofs = (ofs << 1) + 1;
- }
- else /* key <= a[hint + ofs] */
- break;
- }
- if (ofs > maxofs)
- ofs = maxofs;
- /* Translate back to offsets relative to &a[0]. */
- lastofs += hint;
- ofs += hint;
- }
- else {
- /* key <= a[hint] -- gallop left, until
- * a[hint - ofs] < key <= a[hint - lastofs]
- */
- const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */
- while (ofs < maxofs) {
- IFLT(*(a-ofs), key)
- break;
- /* key <= a[hint - ofs] */
- lastofs = ofs;
- assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
- ofs = (ofs << 1) + 1;
- }
- if (ofs > maxofs)
- ofs = maxofs;
- /* Translate back to positive offsets relative to &a[0]. */
- k = lastofs;
- lastofs = hint - ofs;
- ofs = hint - k;
- }
- a -= hint;
- assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
- /* Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
- * right of lastofs but no farther right than ofs. Do a binary
- * search, with invariant a[lastofs-1] < key <= a[ofs].
- */
- ++lastofs;
- while (lastofs < ofs) {
- Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
- IFLT(a[m], key)
- lastofs = m+1; /* a[m] < key */
- else
- ofs = m; /* key <= a[m] */
- }
- assert(lastofs == ofs); /* so a[ofs-1] < key <= a[ofs] */
- return ofs;
- fail:
- return -1;
- }
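- /* Example (hypothetical, illustration only): galloping is an exponential
-  * search followed by a binary search. This standalone version finds the
-  * leftmost insertion point for key in a sorted int array, starting from
-  * index 0 instead of a hint.
-  */
- static Py_ssize_t
- example_gallop_left(int key, const int *a, Py_ssize_t n)
- {
-     Py_ssize_t lastofs = 0, ofs = 1;
-     while (ofs < n && a[ofs - 1] < key) {  /* gallop: ofs = 1, 3, 7, 15, ... */
-         lastofs = ofs;
-         ofs = (ofs << 1) + 1;
-     }
-     if (ofs > n)
-         ofs = n;
-     while (lastofs < ofs) {                /* binary search in [lastofs, ofs] */
-         Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
-         if (a[m] < key)
-             lastofs = m + 1;
-         else
-             ofs = m;
-     }
-     return ofs;  /* a[ofs-1] < key <= a[ofs], with the usual infinities */
- }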
- /*
- Exactly like gallop_left(), except that if key already exists in a[0:n],
- finds the position immediately to the right of the rightmost equal value.
- The return value is the int k in 0..n such that
- a[k-1] <= key < a[k]
- or -1 if error.
- The code duplication is massive, but the two routines differ enough, given
- that we're sticking to "<" comparisons, that a single routine with yet
- another "left or right?" flag would be much harder to follow.
- */
- static Py_ssize_t
- gallop_right(MergeState *ms, PyObject *key, PyObject **a, Py_ssize_t n, Py_ssize_t hint)
- {
- Py_ssize_t ofs;
- Py_ssize_t lastofs;
- Py_ssize_t k;
- assert(key && a && n > 0 && hint >= 0 && hint < n);
- a += hint;
- lastofs = 0;
- ofs = 1;
- IFLT(key, *a) {
- /* key < a[hint] -- gallop left, until
- * a[hint - ofs] <= key < a[hint - lastofs]
- */
- const Py_ssize_t maxofs = hint + 1; /* &a[0] is lowest */
- while (ofs < maxofs) {
- IFLT(key, *(a-ofs)) {
- lastofs = ofs;
- assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
- ofs = (ofs << 1) + 1;
- }
- else /* a[hint - ofs] <= key */
- break;
- }
- if (ofs > maxofs)
- ofs = maxofs;
- /* Translate back to positive offsets relative to &a[0]. */
- k = lastofs;
- lastofs = hint - ofs;
- ofs = hint - k;
- }
- else {
- /* a[hint] <= key -- gallop right, until
- * a[hint + lastofs] <= key < a[hint + ofs]
- */
- const Py_ssize_t maxofs = n - hint; /* &a[n-1] is highest */
- while (ofs < maxofs) {
- IFLT(key, a[ofs])
- break;
- /* a[hint + ofs] <= key */
- lastofs = ofs;
- assert(ofs <= (PY_SSIZE_T_MAX - 1) / 2);
- ofs = (ofs << 1) + 1;
- }
- if (ofs > maxofs)
- ofs = maxofs;
- /* Translate back to offsets relative to &a[0]. */
- lastofs += hint;
- ofs += hint;
- }
- a -= hint;
- assert(-1 <= lastofs && lastofs < ofs && ofs <= n);
- /* Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
- * right of lastofs but no farther right than ofs. Do a binary
- * search, with invariant a[lastofs-1] <= key < a[ofs].
- */
- ++lastofs;
- while (lastofs < ofs) {
- Py_ssize_t m = lastofs + ((ofs - lastofs) >> 1);
- IFLT(key, a[m])
- ofs = m; /* key < a[m] */
- else
- lastofs = m+1; /* a[m] <= key */
- }
- assert(lastofs == ofs); /* so a[ofs-1] <= key < a[ofs] */
- return ofs;
- fail:
- return -1;
- }
- /* Conceptually a MergeState's constructor. */
- static void
- merge_init(MergeState *ms, Py_ssize_t list_size, int has_keyfunc,
- sortslice *lo)
- {
- assert(ms != NULL);
- if (has_keyfunc) {
- /* The temporary space for merging will need at most half the list
- * size rounded up. Use the minimum possible space so we can use the
- * rest of temparray for other things. In particular, if there is
- * enough extra space, listsort() will use it to store the keys.
- */
- ms->alloced = (list_size + 1) / 2;
- /* ms->alloced describes how many keys will be stored at
- ms->temparray, but we also need to store the values. Hence,
- ms->alloced is capped at half of MERGESTATE_TEMP_SIZE. */
- if (MERGESTATE_TEMP_SIZE / 2 < ms->alloced)
- ms->alloced = MERGESTATE_TEMP_SIZE / 2;
- ms->a.values = &ms->temparray[ms->alloced];
- }
- else {
- ms->alloced = MERGESTATE_TEMP_SIZE;
- ms->a.values = NULL;
- }
- ms->a.keys = ms->temparray;
- ms->n = 0;
- ms->min_gallop = MIN_GALLOP;
- ms->listlen = list_size;
- ms->basekeys = lo->keys;
- }
- /* Free all the temp memory owned by the MergeState. This must be called
- * when you're done with a MergeState, and may be called before then if
- * you want to free the temp memory early.
- */
- static void
- merge_freemem(MergeState *ms)
- {
- assert(ms != NULL);
- if (ms->a.keys != ms->temparray) {
- PyMem_Free(ms->a.keys);
- ms->a.keys = NULL;
- }
- }
- /* Ensure enough temp memory for 'need' array slots is available.
- * Returns 0 on success and -1 if the memory can't be gotten.
- */
- static int
- merge_getmem(MergeState *ms, Py_ssize_t need)
- {
- int multiplier;
- assert(ms != NULL);
- if (need <= ms->alloced)
- return 0;
- multiplier = ms->a.values != NULL ? 2 : 1;
- /* Don't realloc! That can cost cycles to copy the old data, but
- * we don't care what's in the block.
- */
- merge_freemem(ms);
- if ((size_t)need > PY_SSIZE_T_MAX / sizeof(PyObject *) / multiplier) {
- PyErr_NoMemory();
- return -1;
- }
- ms->a.keys = (PyObject **)PyMem_Malloc(multiplier * need
- * sizeof(PyObject *));
- if (ms->a.keys != NULL) {
- ms->alloced = need;
- if (ms->a.values != NULL)
- ms->a.values = &ms->a.keys[need];
- return 0;
- }
- PyErr_NoMemory();
- return -1;
- }
- #define MERGE_GETMEM(MS, NEED) ((NEED) <= (MS)->alloced ? 0 : \
- merge_getmem(MS, NEED))
- /* Merge the na elements starting at ssa with the nb elements starting at
- * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0.
- * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
- * should have na <= nb. See listsort.txt for more info. Return 0 if
- * successful, -1 if error.
- */
- static Py_ssize_t
- merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na,
- sortslice ssb, Py_ssize_t nb)
- {
- Py_ssize_t k;
- sortslice dest;
- int result = -1; /* guilty until proved innocent */
- Py_ssize_t min_gallop;
- assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
- assert(ssa.keys + na == ssb.keys);
- if (MERGE_GETMEM(ms, na) < 0)
- return -1;
- sortslice_memcpy(&ms->a, 0, &ssa, 0, na);
- dest = ssa;
- ssa = ms->a;
- sortslice_copy_incr(&dest, &ssb);
- --nb;
- if (nb == 0)
- goto Succeed;
- if (na == 1)
- goto CopyB;
- min_gallop = ms->min_gallop;
- for (;;) {
- Py_ssize_t acount = 0; /* # of times A won in a row */
- Py_ssize_t bcount = 0; /* # of times B won in a row */
- /* Do the straightforward thing until (if ever) one run
- * appears to win consistently.
- */
- for (;;) {
- assert(na > 1 && nb > 0);
- k = ISLT(ssb.keys[0], ssa.keys[0]);
- if (k) {
- if (k < 0)
- goto Fail;
- sortslice_copy_incr(&dest, &ssb);
- ++bcount;
- acount = 0;
- --nb;
- if (nb == 0)
- goto Succeed;
- if (bcount >= min_gallop)
- break;
- }
- else {
- sortslice_copy_incr(&dest, &ssa);
- ++acount;
- bcount = 0;
- --na;
- if (na == 1)
- goto CopyB;
- if (acount >= min_gallop)
- break;
- }
- }
- /* One run is winning so consistently that galloping may
- * be a huge win. So try that, and continue galloping until
- * (if ever) neither run appears to be winning consistently
- * anymore.
- */
- ++min_gallop;
- do {
- assert(na > 1 && nb > 0);
- min_gallop -= min_gallop > 1;
- ms->min_gallop = min_gallop;
- k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0);
- acount = k;
- if (k) {
- if (k < 0)
- goto Fail;
- sortslice_memcpy(&dest, 0, &ssa, 0, k);
- sortslice_advance(&dest, k);
- sortslice_advance(&ssa, k);
- na -= k;
- if (na == 1)
- goto CopyB;
- /* na==0 is impossible now if the comparison
- * function is consistent, but we can't assume
- * that it is.
- */
- if (na == 0)
- goto Succeed;
- }
- sortslice_copy_incr(&dest, &ssb);
- --nb;
- if (nb == 0)
- goto Succeed;
- k = gallop_left(ms, ssa.keys[0], ssb.keys, nb, 0);
- bcount = k;
- if (k) {
- if (k < 0)
- goto Fail;
- sortslice_memmove(&dest, 0, &ssb, 0, k);
- sortslice_advance(&dest, k);
- sortslice_advance(&ssb, k);
- nb -= k;
- if (nb == 0)
- goto Succeed;
- }
- sortslice_copy_incr(&dest, &ssa);
- --na;
- if (na == 1)
- goto CopyB;
- } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
- ++min_gallop; /* penalize it for leaving galloping mode */
- ms->min_gallop = min_gallop;
- }
- Succeed:
- result = 0;
- Fail:
- if (na)
- sortslice_memcpy(&dest, 0, &ssa, 0, na);
- return result;
- CopyB:
- assert(na == 1 && nb > 0);
- /* The last element of ssa belongs at the end of the merge. */
- sortslice_memmove(&dest, 0, &ssb, 0, nb);
- sortslice_copy(&dest, nb, &ssa, 0);
- return 0;
- }
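- /* A note on the min_gallop adaptation above (for exposition): the merge
- * enters galloping mode once one run wins min_gallop compares in a row
- * (initially MIN_GALLOP, i.e. 7). Every galloping pass that keeps paying
- * off lowers min_gallop toward 1, making it ever easier to stay in that
- * mode, while leaving galloping mode bumps it back up; highly structured
- * data therefore gallops nearly all the time, while random data quickly
- * settles back into the one-pair-at-a-time loop. */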
- /* Merge the na elements starting at ssa with the nb elements starting at
- * ssb.keys = ssa.keys + na in a stable way, in-place. na and nb must be > 0.
- * Must also have that ssa.keys[na-1] belongs at the end of the merge, and
- * should have na >= nb. See listsort.txt for more info. Return 0 if
- * successful, -1 if error.
- */
- static Py_ssize_t
- merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na,
- sortslice ssb, Py_ssize_t nb)
- {
- Py_ssize_t k;
- sortslice dest, basea, baseb;
- int result = -1; /* guilty until proved innocent */
- Py_ssize_t min_gallop;
- assert(ms && ssa.keys && ssb.keys && na > 0 && nb > 0);
- assert(ssa.keys + na == ssb.keys);
- if (MERGE_GETMEM(ms, nb) < 0)
- return -1;
- dest = ssb;
- sortslice_advance(&dest, nb-1);
- sortslice_memcpy(&ms->a, 0, &ssb, 0, nb);
- basea = ssa;
- baseb = ms->a;
- ssb.keys = ms->a.keys + nb - 1;
- if (ssb.values != NULL)
- ssb.values = ms->a.values + nb - 1;
- sortslice_advance(&ssa, na - 1);
- sortslice_copy_decr(&dest, &ssa);
- --na;
- if (na == 0)
- goto Succeed;
- if (nb == 1)
- goto CopyA;
- min_gallop = ms->min_gallop;
- for (;;) {
- Py_ssize_t acount = 0; /* # of times A won in a row */
- Py_ssize_t bcount = 0; /* # of times B won in a row */
- /* Do the straightforward thing until (if ever) one run
- * appears to win consistently.
- */
- for (;;) {
- assert(na > 0 && nb > 1);
- k = ISLT(ssb.keys[0], ssa.keys[0]);
- if (k) {
- if (k < 0)
- goto Fail;
- sortslice_copy_decr(&dest, &ssa);
- ++acount;
- bcount = 0;
- --na;
- if (na == 0)
- goto Succeed;
- if (acount >= min_gallop)
- break;
- }
- else {
- sortslice_copy_decr(&dest, &ssb);
- ++bcount;
- acount = 0;
- --nb;
- if (nb == 1)
- goto CopyA;
- if (bcount >= min_gallop)
- break;
- }
- }
- /* One run is winning so consistently that galloping may
- * be a huge win. So try that, and continue galloping until
- * (if ever) neither run appears to be winning consistently
- * anymore.
- */
- ++min_gallop;
- do {
- assert(na > 0 && nb > 1);
- min_gallop -= min_gallop > 1;
- ms->min_gallop = min_gallop;
- k = gallop_right(ms, ssb.keys[0], basea.keys, na, na-1);
- if (k < 0)
- goto Fail;
- k = na - k;
- acount = k;
- if (k) {
- sortslice_advance(&dest, -k);
- sortslice_advance(&ssa, -k);
- sortslice_memmove(&dest, 1, &ssa, 1, k);
- na -= k;
- if (na == 0)
- goto Succeed;
- }
- sortslice_copy_decr(&dest, &ssb);
- --nb;
- if (nb == 1)
- goto CopyA;
- k = gallop_left(ms, ssa.keys[0], baseb.keys, nb, nb-1);
- if (k < 0)
- goto Fail;
- k = nb - k;
- bcount = k;
- if (k) {
- sortslice_advance(&dest, -k);
- sortslice_advance(&ssb, -k);
- sortslice_memcpy(&dest, 1, &ssb, 1, k);
- nb -= k;
- if (nb == 1)
- goto CopyA;
- /* nb==0 is impossible now if the comparison
- * function is consistent, but we can't assume
- * that it is.
- */
- if (nb == 0)
- goto Succeed;
- }
- sortslice_copy_decr(&dest, &ssa);
- --na;
- if (na == 0)
- goto Succeed;
- } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP);
- ++min_gallop; /* penalize it for leaving galloping mode */
- ms->min_gallop = min_gallop;
- }
- Succeed:
- result = 0;
- Fail:
- if (nb)
- sortslice_memcpy(&dest, -(nb-1), &baseb, 0, nb);
- return result;
- CopyA:
- assert(nb == 1 && na > 0);
- /* The first element of ssb belongs at the front of the merge. */
- sortslice_memmove(&dest, 1-na, &ssa, 1-na, na);
- sortslice_advance(&dest, -na);
- sortslice_advance(&ssa, -na);
- sortslice_copy(&dest, 0, &ssb, 0);
- return 0;
- }
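- /* Note (for exposition): merge_hi() is the mirror image of merge_lo().
- * Here the right-hand run B is the smaller one, so B is copied into the
- * temp area and the merge is filled in from the right end toward the left;
- * either way the temp buffer never needs more than min(na, nb) slots. */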
- /* Merge the two runs at stack indices i and i+1.
- * Returns 0 on success, -1 on error.
- */
- static Py_ssize_t
- merge_at(MergeState *ms, Py_ssize_t i)
- {
- sortslice ssa, ssb;
- Py_ssize_t na, nb;
- Py_ssize_t k;
- assert(ms != NULL);
- assert(ms->n >= 2);
- assert(i >= 0);
- assert(i == ms->n - 2 || i == ms->n - 3);
- ssa = ms->pending[i].base;
- na = ms->pending[i].len;
- ssb = ms->pending[i+1].base;
- nb = ms->pending[i+1].len;
- assert(na > 0 && nb > 0);
- assert(ssa.keys + na == ssb.keys);
- /* Record the length of the combined runs; if i is the 3rd-last
- * run now, also slide over the last run (which isn't involved
- * in this merge). The current run i+1 goes away in any case.
- */
- ms->pending[i].len = na + nb;
- if (i == ms->n - 3)
- ms->pending[i+1] = ms->pending[i+2];
- --ms->n;
- /* Where does b start in a? Elements in a before that can be
- * ignored (already in place).
- */
- k = gallop_right(ms, *ssb.keys, ssa.keys, na, 0);
- if (k < 0)
- return -1;
- sortslice_advance(&ssa, k);
- na -= k;
- if (na == 0)
- return 0;
- /* Where does a end in b? Elements in b after that can be
- * ignored (already in place).
- */
- nb = gallop_left(ms, ssa.keys[na-1], ssb.keys, nb, nb-1);
- if (nb <= 0)
- return nb;
- /* Merge what remains of the runs, using a temp array with
- * min(na, nb) elements.
- */
- if (na <= nb)
- return merge_lo(ms, ssa, na, ssb, nb);
- else
- return merge_hi(ms, ssa, na, ssb, nb);
- }
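- /* Worked example of the pre-trimming above (illustrative): merging
- * A = [1, 3, 5, 7, 9] with B = [4, 6, 8, 20, 30], gallop_right() reports
- * that B[0] = 4 belongs at index 2 of A, so A[0..1] are already in place
- * and are skipped. gallop_left() then reports that A's last key, 9,
- * belongs before B[3] = 20, so B[3..4] are already in place too. Only
- * [5, 7, 9] and [4, 6, 8] are actually merged, and since na <= nb the
- * work is done by merge_lo() with a 3-element temp buffer. */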
- /* Two adjacent runs begin at index s1. The first run has length n1, and
- * the second run (starting at index s1+n1) has length n2. The list has total
- * length n.
- * Compute the "power" of the first run. See listsort.txt for details.
- */
- static int
- powerloop(Py_ssize_t s1, Py_ssize_t n1, Py_ssize_t n2, Py_ssize_t n)
- {
- int result = 0;
- assert(s1 >= 0);
- assert(n1 > 0 && n2 > 0);
- assert(s1 + n1 + n2 <= n);
- /* midpoints a and b:
- * a = s1 + n1/2
- * b = s1 + n1 + n2/2 = a + (n1 + n2)/2
- *
- * Those may not be integers, though, because of the "/2". So we work with
- * 2*a and 2*b instead, which are necessarily integers. It makes no
- * difference to the outcome, since the bits in the expansion of (2*i)/n
- * are merely shifted one position from those of i/n.
- */
- Py_ssize_t a = 2 * s1 + n1; /* 2*a */
- Py_ssize_t b = a + n1 + n2; /* 2*b */
- /* Emulate a/n and b/n one bit a time, until bits differ. */
- for (;;) {
- ++result;
- if (a >= n) { /* both quotient bits are 1 */
- assert(b >= a);
- a -= n;
- b -= n;
- }
- else if (b >= n) { /* a/n bit is 0, b/n bit is 1 */
- break;
- } /* else both quotient bits are 0 */
- assert(a < b && b < n);
- a <<= 1;
- b <<= 1;
- }
- return result;
- }
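- /* Worked example (illustrative): for a list of length n = 16, take a
- * first run of length n1 = 4 starting at s1 = 0 and a second run of
- * length n2 = 4. Then 2*a = 4 and 2*b = 12. On the first pass both are
- * < 16, so both quotient bits are 0 and we double: 2*a = 8, 2*b = 24.
- * On the second pass 8 < 16 <= 24, so the bits differ and the loop stops
- * with result = 2 -- exactly where the binary expansions of the midpoints
- * 2/16 = 0.0010... and 6/16 = 0.0110... first disagree. */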
- /* The next run has been identified, of length n2.
- * If there's already a run on the stack, apply the "powersort" merge strategy:
- * compute the topmost run's "power" (depth in a conceptual binary merge tree)
- * and merge adjacent runs on the stack with greater power. See listsort.txt
- * for more info.
- *
- * It's the caller's responsibility to push the new run on the stack when this
- * returns.
- *
- * Returns 0 on success, -1 on error.
- */
- static int
- found_new_run(MergeState *ms, Py_ssize_t n2)
- {
- assert(ms);
- if (ms->n) {
- assert(ms->n > 0);
- struct s_slice *p = ms->pending;
- Py_ssize_t s1 = p[ms->n - 1].base.keys - ms->basekeys; /* start index */
- Py_ssize_t n1 = p[ms->n - 1].len;
- int power = powerloop(s1, n1, n2, ms->listlen);
- while (ms->n > 1 && p[ms->n - 2].power > power) {
- if (merge_at(ms, ms->n - 2) < 0)
- return -1;
- }
- assert(ms->n < 2 || p[ms->n - 2].power < power);
- p[ms->n - 1].power = power;
- }
- return 0;
- }
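- /* Illustrative example: suppose the boundary between the two runs already
- * on the stack was assigned power 3, and the boundary between the topmost
- * run and the newly found run computes to power 2. Because 3 > 2, those
- * two stacked runs are merged first; only then is power 2 recorded on the
- * (now topmost) run, and the caller pushes the new run on top of it. */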
- /* Regardless of invariants, merge all runs on the stack until only one
- * remains. This is used at the end of the mergesort.
- *
- * Returns 0 on success, -1 on error.
- */
- static int
- merge_force_collapse(MergeState *ms)
- {
- struct s_slice *p = ms->pending;
- assert(ms);
- while (ms->n > 1) {
- Py_ssize_t n = ms->n - 2;
- if (n > 0 && p[n-1].len < p[n+1].len)
- --n;
- if (merge_at(ms, n) < 0)
- return -1;
- }
- return 0;
- }
- /* Compute a good value for the minimum run length; natural runs shorter
- * than this are boosted artificially via binary insertion.
- *
- * If n < 64, return n (it's too small to bother with fancy stuff).
- * Else if n is an exact power of 2, return 32.
- * Else return an int k, 32 <= k <= 64, such that n/k is close to, but
- * strictly less than, an exact power of 2.
- *
- * See listsort.txt for more info.
- */
- static Py_ssize_t
- merge_compute_minrun(Py_ssize_t n)
- {
- Py_ssize_t r = 0; /* becomes 1 if any 1 bits are shifted off */
- assert(n >= 0);
- while (n >= 64) {
- r |= n & 1;
- n >>= 1;
- }
- return n + r;
- }
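- /* Worked examples (illustrative): for n = 64 the loop shifts once and
- * returns 32 (an exact power of 2 maps to 32). For n = 2112, i.e.
- * 0b100001000000, the six high bits are 0b100001 = 33 and no 1 bits are
- * shifted off, so minrun = 33 and 2112 / 33 = 64: the forced runs merge
- * into a perfectly balanced tree, whereas minrun = 32 would end with a
- * lopsided 2048-vs-64 final merge (the example worked through in
- * listsort.txt). */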
- static void
- reverse_sortslice(sortslice *s, Py_ssize_t n)
- {
- reverse_slice(s->keys, &s->keys[n]);
- if (s->values != NULL)
- reverse_slice(s->values, &s->values[n]);
- }
- /* Here we define custom comparison functions to optimize for the cases one commonly
- * encounters in practice: homogeneous lists, often of one of the basic types. */
- /* This struct holds the comparison function and helper functions
- * selected in the pre-sort check. */
- /* These are the special case compare functions.
- * ms->key_compare will always point to one of these: */
- /* Heterogeneous compare: default, always safe to fall back on. */
- static int
- safe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
- {
- /* No assumptions necessary! */
- return PyObject_RichCompareBool(v, w, Py_LT);
- }
- /* Homogeneous compare: safe for any two comparable objects of the same type.
- * (ms->key_richcompare is set to ob_type->tp_richcompare in the
- * pre-sort check.)
- */
- static int
- unsafe_object_compare(PyObject *v, PyObject *w, MergeState *ms)
- {
- PyObject *res_obj; int res;
- /* No assumptions, because we check first: */
- if (Py_TYPE(v)->tp_richcompare != ms->key_richcompare)
- return PyObject_RichCompareBool(v, w, Py_LT);
- assert(ms->key_richcompare != NULL);
- res_obj = (*(ms->key_richcompare))(v, w, Py_LT);
- if (res_obj == Py_NotImplemented) {
- Py_DECREF(res_obj);
- return PyObject_RichCompareBool(v, w, Py_LT);
- }
- if (res_obj == NULL)
- return -1;
- if (PyBool_Check(res_obj)) {
- res = (res_obj == Py_True);
- }
- else {
- res = PyObject_IsTrue(res_obj);
- }
- Py_DECREF(res_obj);
- /* Note that we can't assert
- * res == PyObject_RichCompareBool(v, w, Py_LT);
- * because of evil compare functions like this:
- * lambda a, b: int(random.random() * 3) - 1
- * (which is actually in test_sort.py) */
- return res;
- }
- /* Latin string compare: safe for any two latin (one byte per char) strings. */
- static int
- unsafe_latin_compare(PyObject *v, PyObject *w, MergeState *ms)
- {
- Py_ssize_t len;
- int res;
- /* Modified from Objects/unicodeobject.c:unicode_compare, assuming: */
- assert(Py_IS_TYPE(v, &PyUnicode_Type));
- assert(Py_IS_TYPE(w, &PyUnicode_Type));
- assert(PyUnicode_KIND(v) == PyUnicode_KIND(w));
- assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND);
- len = Py_MIN(PyUnicode_GET_LENGTH(v), PyUnicode_GET_LENGTH(w));
- res = memcmp(PyUnicode_DATA(v), PyUnicode_DATA(w), len);
- res = (res != 0 ?
- res < 0 :
- PyUnicode_GET_LENGTH(v) < PyUnicode_GET_LENGTH(w));
- assert(res == PyObject_RichCompareBool(v, w, Py_LT));
- return res;
- }
- /* Bounded int compare: compare any two longs that fit in a single machine word. */
- static int
- unsafe_long_compare(PyObject *v, PyObject *w, MergeState *ms)
- {
- PyLongObject *vl, *wl;
- intptr_t v0, w0;
- int res;
- /* Modified from Objects/longobject.c:long_compare, assuming: */
- assert(Py_IS_TYPE(v, &PyLong_Type));
- assert(Py_IS_TYPE(w, &PyLong_Type));
- assert(_PyLong_IsCompact((PyLongObject *)v));
- assert(_PyLong_IsCompact((PyLongObject *)w));
- vl = (PyLongObject*)v;
- wl = (PyLongObject*)w;
- v0 = _PyLong_CompactValue(vl);
- w0 = _PyLong_CompactValue(wl);
- res = v0 < w0;
- assert(res == PyObject_RichCompareBool(v, w, Py_LT));
- return res;
- }
- /* Float compare: compare any two floats. */
- static int
- unsafe_float_compare(PyObject *v, PyObject *w, MergeState *ms)
- {
- int res;
- /* Modified from Objects/floatobject.c:float_richcompare, assuming: */
- assert(Py_IS_TYPE(v, &PyFloat_Type));
- assert(Py_IS_TYPE(w, &PyFloat_Type));
- res = PyFloat_AS_DOUBLE(v) < PyFloat_AS_DOUBLE(w);
- assert(res == PyObject_RichCompareBool(v, w, Py_LT));
- return res;
- }
- /* Tuple compare: compare *any* two tuples, using
- * ms->tuple_elem_compare to compare the first elements, which is set
- * using the same pre-sort check as we use for ms->key_compare,
- * but run on the list [x[0] for x in L]. This allows us to optimize compares
- * on two levels (as long as [x[0] for x in L] is type-homogeneous.) The idea is
- * that most tuple compares don't involve x[1:]. */
- static int
- unsafe_tuple_compare(PyObject *v, PyObject *w, MergeState *ms)
- {
- PyTupleObject *vt, *wt;
- Py_ssize_t i, vlen, wlen;
- int k;
- /* Modified from Objects/tupleobject.c:tuplerichcompare, assuming: */
- assert(Py_IS_TYPE(v, &PyTuple_Type));
- assert(Py_IS_TYPE(w, &PyTuple_Type));
- assert(Py_SIZE(v) > 0);
- assert(Py_SIZE(w) > 0);
- vt = (PyTupleObject *)v;
- wt = (PyTupleObject *)w;
- vlen = Py_SIZE(vt);
- wlen = Py_SIZE(wt);
- for (i = 0; i < vlen && i < wlen; i++) {
- k = PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_EQ);
- if (k < 0)
- return -1;
- if (!k)
- break;
- }
- if (i >= vlen || i >= wlen)
- return vlen < wlen;
- if (i == 0)
- return ms->tuple_elem_compare(vt->ob_item[i], wt->ob_item[i], ms);
- else
- return PyObject_RichCompareBool(vt->ob_item[i], wt->ob_item[i], Py_LT);
- }
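- /* Illustrative example: when sorting [(1, "b"), (1, "a"), (0, "z")] the
- * pre-sort check sees that every key is a non-empty tuple whose first
- * element is a compact int, so ms->key_compare is unsafe_tuple_compare and
- * ms->tuple_elem_compare is unsafe_long_compare. Comparing (0, "z") with
- * (1, "a") diverges at index 0 and takes the fast unsafe_long_compare path;
- * comparing (1, "a") with (1, "b") diverges at index 1 and falls back to
- * PyObject_RichCompareBool on the second elements. */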
- /* An adaptive, stable, natural mergesort. See listsort.txt.
- * Returns Py_None on success, NULL on error. Even in case of error, the
- * list will be some permutation of its input state (nothing is lost or
- * duplicated).
- */
- /*[clinic input]
- list.sort
- *
- key as keyfunc: object = None
- reverse: bool = False
- Sort the list in ascending order and return None.
- The sort is in-place (i.e. the list itself is modified) and stable (i.e. the
- order of two equal elements is maintained).
- If a key function is given, apply it once to each list item and sort them,
- ascending or descending, according to their function values.
- The reverse flag can be set to sort in descending order.
- [clinic start generated code]*/
- static PyObject *
- list_sort_impl(PyListObject *self, PyObject *keyfunc, int reverse)
- /*[clinic end generated code: output=57b9f9c5e23fbe42 input=a74c4cd3ec6b5c08]*/
- {
- MergeState ms;
- Py_ssize_t nremaining;
- Py_ssize_t minrun;
- sortslice lo;
- Py_ssize_t saved_ob_size, saved_allocated;
- PyObject **saved_ob_item;
- PyObject **final_ob_item;
- PyObject *result = NULL; /* guilty until proved innocent */
- Py_ssize_t i;
- PyObject **keys;
- assert(self != NULL);
- assert(PyList_Check(self));
- if (keyfunc == Py_None)
- keyfunc = NULL;
- /* The list is temporarily made empty, so that mutations performed
- * by comparison functions can't affect the slice of memory we're
- * sorting (allowing mutations during sorting is a core-dump
- * factory, since ob_item may change).
- */
- saved_ob_size = Py_SIZE(self);
- saved_ob_item = self->ob_item;
- saved_allocated = self->allocated;
- Py_SET_SIZE(self, 0);
- self->ob_item = NULL;
- self->allocated = -1; /* any operation will reset it to >= 0 */
- if (keyfunc == NULL) {
- keys = NULL;
- lo.keys = saved_ob_item;
- lo.values = NULL;
- }
- else {
- if (saved_ob_size < MERGESTATE_TEMP_SIZE/2)
- /* Leverage stack space we allocated but won't otherwise use */
- keys = &ms.temparray[saved_ob_size+1];
- else {
- keys = PyMem_Malloc(sizeof(PyObject *) * saved_ob_size);
- if (keys == NULL) {
- PyErr_NoMemory();
- goto keyfunc_fail;
- }
- }
- for (i = 0; i < saved_ob_size ; i++) {
- keys[i] = PyObject_CallOneArg(keyfunc, saved_ob_item[i]);
- if (keys[i] == NULL) {
- for (i=i-1 ; i>=0 ; i--)
- Py_DECREF(keys[i]);
- if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
- PyMem_Free(keys);
- goto keyfunc_fail;
- }
- }
- lo.keys = keys;
- lo.values = saved_ob_item;
- }
- /* The pre-sort check: here's where we decide which compare function to use.
- * How much optimization is safe? We test for homogeneity with respect to
- * several properties that are expensive to check at compare-time, and
- * set ms appropriately. */
- if (saved_ob_size > 1) {
- /* Assume the first element is representative of the whole list. */
- int keys_are_in_tuples = (Py_IS_TYPE(lo.keys[0], &PyTuple_Type) &&
- Py_SIZE(lo.keys[0]) > 0);
- PyTypeObject* key_type = (keys_are_in_tuples ?
- Py_TYPE(PyTuple_GET_ITEM(lo.keys[0], 0)) :
- Py_TYPE(lo.keys[0]));
- int keys_are_all_same_type = 1;
- int strings_are_latin = 1;
- int ints_are_bounded = 1;
- /* Prove that assumption by checking every key. */
- for (i=0; i < saved_ob_size; i++) {
- if (keys_are_in_tuples &&
- !(Py_IS_TYPE(lo.keys[i], &PyTuple_Type) && Py_SIZE(lo.keys[i]) != 0)) {
- keys_are_in_tuples = 0;
- keys_are_all_same_type = 0;
- break;
- }
- /* Note: for lists of tuples, key is the first element of the tuple
- * lo.keys[i], not lo.keys[i] itself! We verify type-homogeneity
- * for lists of tuples in the if-statement directly above. */
- PyObject *key = (keys_are_in_tuples ?
- PyTuple_GET_ITEM(lo.keys[i], 0) :
- lo.keys[i]);
- if (!Py_IS_TYPE(key, key_type)) {
- keys_are_all_same_type = 0;
- /* If keys are in tuples, we must loop over the whole list to make
- sure all items are tuples */
- if (!keys_are_in_tuples) {
- break;
- }
- }
- if (keys_are_all_same_type) {
- if (key_type == &PyLong_Type &&
- ints_are_bounded &&
- !_PyLong_IsCompact((PyLongObject *)key)) {
- ints_are_bounded = 0;
- }
- else if (key_type == &PyUnicode_Type &&
- strings_are_latin &&
- PyUnicode_KIND(key) != PyUnicode_1BYTE_KIND) {
- strings_are_latin = 0;
- }
- }
- }
- /* Choose the best compare, given what we now know about the keys. */
- if (keys_are_all_same_type) {
- if (key_type == &PyUnicode_Type && strings_are_latin) {
- ms.key_compare = unsafe_latin_compare;
- }
- else if (key_type == &PyLong_Type && ints_are_bounded) {
- ms.key_compare = unsafe_long_compare;
- }
- else if (key_type == &PyFloat_Type) {
- ms.key_compare = unsafe_float_compare;
- }
- else if ((ms.key_richcompare = key_type->tp_richcompare) != NULL) {
- ms.key_compare = unsafe_object_compare;
- }
- else {
- ms.key_compare = safe_object_compare;
- }
- }
- else {
- ms.key_compare = safe_object_compare;
- }
- if (keys_are_in_tuples) {
- /* Make sure we're not dealing with tuples of tuples
- * (remember: here, key_type refers to the list [key[0] for key in keys]) */
- if (key_type == &PyTuple_Type) {
- ms.tuple_elem_compare = safe_object_compare;
- }
- else {
- ms.tuple_elem_compare = ms.key_compare;
- }
- ms.key_compare = unsafe_tuple_compare;
- }
- }
- /* End of pre-sort check: ms is now set properly! */
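- /* Illustrative summary of the dispatch above: a list of latin-1 strings
- * gets unsafe_latin_compare; a list of machine-word ints gets
- * unsafe_long_compare; a list of floats gets unsafe_float_compare; a
- * homogeneous list of some other type that defines tp_richcompare gets
- * unsafe_object_compare; anything mixed falls back to safe_object_compare. */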
- merge_init(&ms, saved_ob_size, keys != NULL, &lo);
- nremaining = saved_ob_size;
- if (nremaining < 2)
- goto succeed;
- /* Reverse sort stability achieved by initially reversing the list,
- applying a stable forward sort, then reversing the final result. */
- if (reverse) {
- if (keys != NULL)
- reverse_slice(&keys[0], &keys[saved_ob_size]);
- reverse_slice(&saved_ob_item[0], &saved_ob_item[saved_ob_size]);
- }
- /* March over the array once, left to right, finding natural runs,
- * and extending short natural runs to minrun elements.
- */
- minrun = merge_compute_minrun(nremaining);
- do {
- int descending;
- Py_ssize_t n;
- /* Identify next run. */
- n = count_run(&ms, lo.keys, lo.keys + nremaining, &descending);
- if (n < 0)
- goto fail;
- if (descending)
- reverse_sortslice(&lo, n);
- /* If short, extend to min(minrun, nremaining). */
- if (n < minrun) {
- const Py_ssize_t force = nremaining <= minrun ?
- nremaining : minrun;
- if (binarysort(&ms, lo, lo.keys + force, lo.keys + n) < 0)
- goto fail;
- n = force;
- }
- /* Maybe merge pending runs. */
- assert(ms.n == 0 || ms.pending[ms.n -1].base.keys +
- ms.pending[ms.n-1].len == lo.keys);
- if (found_new_run(&ms, n) < 0)
- goto fail;
- /* Push new run on stack. */
- assert(ms.n < MAX_MERGE_PENDING);
- ms.pending[ms.n].base = lo;
- ms.pending[ms.n].len = n;
- ++ms.n;
- /* Advance to find next run. */
- sortslice_advance(&lo, n);
- nremaining -= n;
- } while (nremaining);
- if (merge_force_collapse(&ms) < 0)
- goto fail;
- assert(ms.n == 1);
- assert(keys == NULL
- ? ms.pending[0].base.keys == saved_ob_item
- : ms.pending[0].base.keys == &keys[0]);
- assert(ms.pending[0].len == saved_ob_size);
- lo = ms.pending[0].base;
- succeed:
- result = Py_None;
- fail:
- if (keys != NULL) {
- for (i = 0; i < saved_ob_size; i++)
- Py_DECREF(keys[i]);
- if (saved_ob_size >= MERGESTATE_TEMP_SIZE/2)
- PyMem_Free(keys);
- }
- if (self->allocated != -1 && result != NULL) {
- /* The user mucked with the list during the sort,
- * and we don't already have another error to report.
- */
- PyErr_SetString(PyExc_ValueError, "list modified during sort");
- result = NULL;
- }
- if (reverse && saved_ob_size > 1)
- reverse_slice(saved_ob_item, saved_ob_item + saved_ob_size);
- merge_freemem(&ms);
- keyfunc_fail:
- final_ob_item = self->ob_item;
- i = Py_SIZE(self);
- Py_SET_SIZE(self, saved_ob_size);
- self->ob_item = saved_ob_item;
- self->allocated = saved_allocated;
- if (final_ob_item != NULL) {
- /* we cannot use _list_clear() for this because it does not
- guarantee that the list is really empty when it returns */
- while (--i >= 0) {
- Py_XDECREF(final_ob_item[i]);
- }
- PyMem_Free(final_ob_item);
- }
- return Py_XNewRef(result);
- }
- #undef IFLT
- #undef ISLT
- int
- PyList_Sort(PyObject *v)
- {
- if (v == NULL || !PyList_Check(v)) {
- PyErr_BadInternalCall();
- return -1;
- }
- v = list_sort_impl((PyListObject *)v, NULL, 0);
- if (v == NULL)
- return -1;
- Py_DECREF(v);
- return 0;
- }
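- /* A minimal usage sketch (illustrative only; the helper name
- * sorted_tuple_from_ints is hypothetical and not part of this file):
- * building a list with the C API, sorting it in place via PyList_Sort(),
- * and snapshotting the result with PyList_AsTuple(). */
- #if 0
- static PyObject *
- sorted_tuple_from_ints(const long *values, Py_ssize_t n)
- {
-     PyObject *list = PyList_New(n);
-     if (list == NULL)
-         return NULL;
-     for (Py_ssize_t i = 0; i < n; i++) {
-         PyObject *v = PyLong_FromLong(values[i]);
-         if (v == NULL) {
-             Py_DECREF(list);
-             return NULL;
-         }
-         PyList_SET_ITEM(list, i, v);   /* steals the reference to v */
-     }
-     if (PyList_Sort(list) < 0) {       /* same as list.sort() */
-         Py_DECREF(list);
-         return NULL;
-     }
-     PyObject *tuple = PyList_AsTuple(list);
-     Py_DECREF(list);
-     return tuple;                      /* new reference, or NULL on error */
- }
- #endif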
- /*[clinic input]
- list.reverse
- Reverse *IN PLACE*.
- [clinic start generated code]*/
- static PyObject *
- list_reverse_impl(PyListObject *self)
- /*[clinic end generated code: output=482544fc451abea9 input=eefd4c3ae1bc9887]*/
- {
- if (Py_SIZE(self) > 1)
- reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
- Py_RETURN_NONE;
- }
- int
- PyList_Reverse(PyObject *v)
- {
- PyListObject *self = (PyListObject *)v;
- if (v == NULL || !PyList_Check(v)) {
- PyErr_BadInternalCall();
- return -1;
- }
- if (Py_SIZE(self) > 1)
- reverse_slice(self->ob_item, self->ob_item + Py_SIZE(self));
- return 0;
- }
- PyObject *
- PyList_AsTuple(PyObject *v)
- {
- if (v == NULL || !PyList_Check(v)) {
- PyErr_BadInternalCall();
- return NULL;
- }
- return _PyTuple_FromArray(((PyListObject *)v)->ob_item, Py_SIZE(v));
- }
- PyObject *
- _PyList_FromArraySteal(PyObject *const *src, Py_ssize_t n)
- {
- if (n == 0) {
- return PyList_New(0);
- }
- PyListObject *list = (PyListObject *)PyList_New(n);
- if (list == NULL) {
- for (Py_ssize_t i = 0; i < n; i++) {
- Py_DECREF(src[i]);
- }
- return NULL;
- }
- PyObject **dst = list->ob_item;
- memcpy(dst, src, n * sizeof(PyObject *));
- return (PyObject *)list;
- }
- /*[clinic input]
- list.index
- value: object
- start: slice_index(accept={int}) = 0
- stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize
- /
- Return first index of value.
- Raises ValueError if the value is not present.
- [clinic start generated code]*/
- static PyObject *
- list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start,
- Py_ssize_t stop)
- /*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/
- {
- Py_ssize_t i;
- if (start < 0) {
- start += Py_SIZE(self);
- if (start < 0)
- start = 0;
- }
- if (stop < 0) {
- stop += Py_SIZE(self);
- if (stop < 0)
- stop = 0;
- }
- for (i = start; i < stop && i < Py_SIZE(self); i++) {
- PyObject *obj = self->ob_item[i];
- Py_INCREF(obj);
- int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
- Py_DECREF(obj);
- if (cmp > 0)
- return PyLong_FromSsize_t(i);
- else if (cmp < 0)
- return NULL;
- }
- PyErr_Format(PyExc_ValueError, "%R is not in list", value);
- return NULL;
- }
- /*[clinic input]
- list.count
- value: object
- /
- Return number of occurrences of value.
- [clinic start generated code]*/
- static PyObject *
- list_count(PyListObject *self, PyObject *value)
- /*[clinic end generated code: output=b1f5d284205ae714 input=3bdc3a5e6f749565]*/
- {
- Py_ssize_t count = 0;
- Py_ssize_t i;
- for (i = 0; i < Py_SIZE(self); i++) {
- PyObject *obj = self->ob_item[i];
- if (obj == value) {
- count++;
- continue;
- }
- Py_INCREF(obj);
- int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
- Py_DECREF(obj);
- if (cmp > 0)
- count++;
- else if (cmp < 0)
- return NULL;
- }
- return PyLong_FromSsize_t(count);
- }
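- /* Note (for exposition): the obj == value identity test above mirrors the
- * identity shortcut inside PyObject_RichCompareBool(), so with
- * x = float('nan'), [x].count(x) is 1 even though x != x; the explicit
- * check only skips the call overhead in the common case. */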
- /*[clinic input]
- list.remove
- value: object
- /
- Remove first occurrence of value.
- Raises ValueError if the value is not present.
- [clinic start generated code]*/
- static PyObject *
- list_remove(PyListObject *self, PyObject *value)
- /*[clinic end generated code: output=f087e1951a5e30d1 input=2dc2ba5bb2fb1f82]*/
- {
- Py_ssize_t i;
- for (i = 0; i < Py_SIZE(self); i++) {
- PyObject *obj = self->ob_item[i];
- Py_INCREF(obj);
- int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
- Py_DECREF(obj);
- if (cmp > 0) {
- if (list_ass_slice(self, i, i+1,
- (PyObject *)NULL) == 0)
- Py_RETURN_NONE;
- return NULL;
- }
- else if (cmp < 0)
- return NULL;
- }
- PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
- return NULL;
- }
- static int
- list_traverse(PyListObject *o, visitproc visit, void *arg)
- {
- Py_ssize_t i;
- for (i = Py_SIZE(o); --i >= 0; )
- Py_VISIT(o->ob_item[i]);
- return 0;
- }
- static PyObject *
- list_richcompare(PyObject *v, PyObject *w, int op)
- {
- PyListObject *vl, *wl;
- Py_ssize_t i;
- if (!PyList_Check(v) || !PyList_Check(w))
- Py_RETURN_NOTIMPLEMENTED;
- vl = (PyListObject *)v;
- wl = (PyListObject *)w;
- if (Py_SIZE(vl) != Py_SIZE(wl) && (op == Py_EQ || op == Py_NE)) {
- /* Shortcut: if the lengths differ, the lists differ */
- if (op == Py_EQ)
- Py_RETURN_FALSE;
- else
- Py_RETURN_TRUE;
- }
- /* Search for the first index where items are different */
- for (i = 0; i < Py_SIZE(vl) && i < Py_SIZE(wl); i++) {
- PyObject *vitem = vl->ob_item[i];
- PyObject *witem = wl->ob_item[i];
- if (vitem == witem) {
- continue;
- }
- Py_INCREF(vitem);
- Py_INCREF(witem);
- int k = PyObject_RichCompareBool(vitem, witem, Py_EQ);
- Py_DECREF(vitem);
- Py_DECREF(witem);
- if (k < 0)
- return NULL;
- if (!k)
- break;
- }
- if (i >= Py_SIZE(vl) || i >= Py_SIZE(wl)) {
- /* No more items to compare -- compare sizes */
- Py_RETURN_RICHCOMPARE(Py_SIZE(vl), Py_SIZE(wl), op);
- }
- /* We have an item that differs -- shortcuts for EQ/NE */
- if (op == Py_EQ) {
- Py_RETURN_FALSE;
- }
- if (op == Py_NE) {
- Py_RETURN_TRUE;
- }
- /* Compare the final item again using the proper operator */
- PyObject *vitem = vl->ob_item[i];
- PyObject *witem = wl->ob_item[i];
- Py_INCREF(vitem);
- Py_INCREF(witem);
- PyObject *result = PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op);
- Py_DECREF(vitem);
- Py_DECREF(witem);
- return result;
- }
- /*[clinic input]
- list.__init__
- iterable: object(c_default="NULL") = ()
- /
- Built-in mutable sequence.
- If no argument is given, the constructor creates a new empty list.
- The argument must be an iterable if specified.
- [clinic start generated code]*/
- static int
- list___init___impl(PyListObject *self, PyObject *iterable)
- /*[clinic end generated code: output=0f3c21379d01de48 input=b3f3fe7206af8f6b]*/
- {
- /* Verify list invariants established by PyType_GenericAlloc() */
- assert(0 <= Py_SIZE(self));
- assert(Py_SIZE(self) <= self->allocated || self->allocated == -1);
- assert(self->ob_item != NULL ||
- self->allocated == 0 || self->allocated == -1);
- /* Empty previous contents */
- if (self->ob_item != NULL) {
- (void)_list_clear(self);
- }
- if (iterable != NULL) {
- PyObject *rv = list_extend(self, iterable);
- if (rv == NULL)
- return -1;
- Py_DECREF(rv);
- }
- return 0;
- }
- static PyObject *
- list_vectorcall(PyObject *type, PyObject * const*args,
- size_t nargsf, PyObject *kwnames)
- {
- if (!_PyArg_NoKwnames("list", kwnames)) {
- return NULL;
- }
- Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
- if (!_PyArg_CheckPositional("list", nargs, 0, 1)) {
- return NULL;
- }
- PyObject *list = PyType_GenericAlloc(_PyType_CAST(type), 0);
- if (list == NULL) {
- return NULL;
- }
- if (nargs) {
- if (list___init___impl((PyListObject *)list, args[0])) {
- Py_DECREF(list);
- return NULL;
- }
- }
- return list;
- }
- /*[clinic input]
- list.__sizeof__
- Return the size of the list in memory, in bytes.
- [clinic start generated code]*/
- static PyObject *
- list___sizeof___impl(PyListObject *self)
- /*[clinic end generated code: output=3417541f95f9a53e input=b8030a5d5ce8a187]*/
- {
- size_t res = _PyObject_SIZE(Py_TYPE(self));
- res += (size_t)self->allocated * sizeof(void*);
- return PyLong_FromSize_t(res);
- }
- static PyObject *list_iter(PyObject *seq);
- static PyObject *list_subscript(PyListObject*, PyObject*);
- static PyMethodDef list_methods[] = {
- {"__getitem__", (PyCFunction)list_subscript, METH_O|METH_COEXIST,
- PyDoc_STR("__getitem__($self, index, /)\n--\n\nReturn self[index].")},
- LIST___REVERSED___METHODDEF
- LIST___SIZEOF___METHODDEF
- LIST_CLEAR_METHODDEF
- LIST_COPY_METHODDEF
- LIST_APPEND_METHODDEF
- LIST_INSERT_METHODDEF
- LIST_EXTEND_METHODDEF
- LIST_POP_METHODDEF
- LIST_REMOVE_METHODDEF
- LIST_INDEX_METHODDEF
- LIST_COUNT_METHODDEF
- LIST_REVERSE_METHODDEF
- LIST_SORT_METHODDEF
- {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
- {NULL, NULL} /* sentinel */
- };
- static PySequenceMethods list_as_sequence = {
- (lenfunc)list_length, /* sq_length */
- (binaryfunc)list_concat, /* sq_concat */
- (ssizeargfunc)list_repeat, /* sq_repeat */
- (ssizeargfunc)list_item, /* sq_item */
- 0, /* sq_slice */
- (ssizeobjargproc)list_ass_item, /* sq_ass_item */
- 0, /* sq_ass_slice */
- (objobjproc)list_contains, /* sq_contains */
- (binaryfunc)list_inplace_concat, /* sq_inplace_concat */
- (ssizeargfunc)list_inplace_repeat, /* sq_inplace_repeat */
- };
- static PyObject *
- list_subscript(PyListObject* self, PyObject* item)
- {
- if (_PyIndex_Check(item)) {
- Py_ssize_t i;
- i = PyNumber_AsSsize_t(item, PyExc_IndexError);
- if (i == -1 && PyErr_Occurred())
- return NULL;
- if (i < 0)
- i += PyList_GET_SIZE(self);
- return list_item(self, i);
- }
- else if (PySlice_Check(item)) {
- Py_ssize_t start, stop, step, slicelength, i;
- size_t cur;
- PyObject* result;
- PyObject* it;
- PyObject **src, **dest;
- if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
- return NULL;
- }
- slicelength = PySlice_AdjustIndices(Py_SIZE(self), &start, &stop,
- step);
- if (slicelength <= 0) {
- return PyList_New(0);
- }
- else if (step == 1) {
- return list_slice(self, start, stop);
- }
- else {
- result = list_new_prealloc(slicelength);
- if (!result) return NULL;
- src = self->ob_item;
- dest = ((PyListObject *)result)->ob_item;
- for (cur = start, i = 0; i < slicelength;
- cur += (size_t)step, i++) {
- it = Py_NewRef(src[cur]);
- dest[i] = it;
- }
- Py_SET_SIZE(result, slicelength);
- return result;
- }
- }
- else {
- PyErr_Format(PyExc_TypeError,
- "list indices must be integers or slices, not %.200s",
- Py_TYPE(item)->tp_name);
- return NULL;
- }
- }
- static Py_ssize_t
- adjust_slice_indexes(PyListObject *lst,
- Py_ssize_t *start, Py_ssize_t *stop,
- Py_ssize_t step)
- {
- Py_ssize_t slicelength = PySlice_AdjustIndices(Py_SIZE(lst), start, stop,
- step);
- /* Make sure s[5:2] = [..] inserts at the right place:
- before 5, not before 2. */
- if ((step < 0 && *start < *stop) ||
- (step > 0 && *start > *stop))
- *stop = *start;
- return slicelength;
- }
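- /* Worked example (illustrative): for a list of length 8, s[5:2] = [x]
- * unpacks to start=5, stop=2, step=1, and PySlice_AdjustIndices() reports
- * slicelength 0. Since step > 0 and start > stop, stop is pulled up to 5,
- * so the assignment is treated as a zero-length replacement at index 5,
- * i.e. an insertion before element 5 rather than before element 2. */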
- static int
- list_ass_subscript(PyListObject* self, PyObject* item, PyObject* value)
- {
- if (_PyIndex_Check(item)) {
- Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
- if (i == -1 && PyErr_Occurred())
- return -1;
- if (i < 0)
- i += PyList_GET_SIZE(self);
- return list_ass_item(self, i, value);
- }
- else if (PySlice_Check(item)) {
- Py_ssize_t start, stop, step;
- if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
- return -1;
- }
- if (value == NULL) {
- /* delete slice */
- PyObject **garbage;
- size_t cur;
- Py_ssize_t i;
- int res;
- Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop,
- step);
- if (step == 1)
- return list_ass_slice(self, start, stop, value);
- if (slicelength <= 0)
- return 0;
- if (step < 0) {
- stop = start + 1;
- start = stop + step*(slicelength - 1) - 1;
- step = -step;
- }
- garbage = (PyObject**)
- PyMem_Malloc(slicelength*sizeof(PyObject*));
- if (!garbage) {
- PyErr_NoMemory();
- return -1;
- }
- /* Drawing pictures might help in understanding these for
- loops. Basically, we memmove the parts of the
- list that are *not* part of the slice: step-1
- items for each item that is part of the slice,
- and then the tail end of the list that was not
- covered by the slice. */
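- /* Worked example (illustrative): del a[1::3] on a list of length 8 deletes
- indices 1, 4 and 7 (slicelength 3). The first pass moves items 2-3 left
- by one slot, the second moves items 5-6 left by two, the third has
- nothing after it to move, and no tail remains past the slice, leaving
- the 5 surviving items packed at the front before list_resize(). */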
- for (cur = start, i = 0;
- cur < (size_t)stop;
- cur += step, i++) {
- Py_ssize_t lim = step - 1;
- garbage[i] = PyList_GET_ITEM(self, cur);
- if (cur + step >= (size_t)Py_SIZE(self)) {
- lim = Py_SIZE(self) - cur - 1;
- }
- memmove(self->ob_item + cur - i,
- self->ob_item + cur + 1,
- lim * sizeof(PyObject *));
- }
- cur = start + (size_t)slicelength * step;
- if (cur < (size_t)Py_SIZE(self)) {
- memmove(self->ob_item + cur - slicelength,
- self->ob_item + cur,
- (Py_SIZE(self) - cur) *
- sizeof(PyObject *));
- }
- Py_SET_SIZE(self, Py_SIZE(self) - slicelength);
- res = list_resize(self, Py_SIZE(self));
- for (i = 0; i < slicelength; i++) {
- Py_DECREF(garbage[i]);
- }
- PyMem_Free(garbage);
- return res;
- }
- else {
- /* assign slice */
- PyObject *ins, *seq;
- PyObject **garbage, **seqitems, **selfitems;
- Py_ssize_t i;
- size_t cur;
- /* protect against a[::-1] = a */
- if (self == (PyListObject*)value) {
- seq = list_slice((PyListObject*)value, 0,
- PyList_GET_SIZE(value));
- }
- else {
- seq = PySequence_Fast(value,
- "must assign iterable "
- "to extended slice");
- }
- if (!seq)
- return -1;
- Py_ssize_t slicelength = adjust_slice_indexes(self, &start, &stop,
- step);
- if (step == 1) {
- int res = list_ass_slice(self, start, stop, seq);
- Py_DECREF(seq);
- return res;
- }
- if (PySequence_Fast_GET_SIZE(seq) != slicelength) {
- PyErr_Format(PyExc_ValueError,
- "attempt to assign sequence of "
- "size %zd to extended slice of "
- "size %zd",
- PySequence_Fast_GET_SIZE(seq),
- slicelength);
- Py_DECREF(seq);
- return -1;
- }
- if (!slicelength) {
- Py_DECREF(seq);
- return 0;
- }
- garbage = (PyObject**)
- PyMem_Malloc(slicelength*sizeof(PyObject*));
- if (!garbage) {
- Py_DECREF(seq);
- PyErr_NoMemory();
- return -1;
- }
- selfitems = self->ob_item;
- seqitems = PySequence_Fast_ITEMS(seq);
- for (cur = start, i = 0; i < slicelength;
- cur += (size_t)step, i++) {
- garbage[i] = selfitems[cur];
- ins = Py_NewRef(seqitems[i]);
- selfitems[cur] = ins;
- }
- for (i = 0; i < slicelength; i++) {
- Py_DECREF(garbage[i]);
- }
- PyMem_Free(garbage);
- Py_DECREF(seq);
- return 0;
- }
- }
- else {
- PyErr_Format(PyExc_TypeError,
- "list indices must be integers or slices, not %.200s",
- Py_TYPE(item)->tp_name);
- return -1;
- }
- }
- static PyMappingMethods list_as_mapping = {
- (lenfunc)list_length,
- (binaryfunc)list_subscript,
- (objobjargproc)list_ass_subscript
- };
- PyTypeObject PyList_Type = {
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
- "list",
- sizeof(PyListObject),
- 0,
- (destructor)list_dealloc, /* tp_dealloc */
- 0, /* tp_vectorcall_offset */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_as_async */
- (reprfunc)list_repr, /* tp_repr */
- 0, /* tp_as_number */
- &list_as_sequence, /* tp_as_sequence */
- &list_as_mapping, /* tp_as_mapping */
- PyObject_HashNotImplemented, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- PyObject_GenericGetAttr, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
- Py_TPFLAGS_BASETYPE | Py_TPFLAGS_LIST_SUBCLASS |
- _Py_TPFLAGS_MATCH_SELF | Py_TPFLAGS_SEQUENCE, /* tp_flags */
- list___init____doc__, /* tp_doc */
- (traverseproc)list_traverse, /* tp_traverse */
- (inquiry)_list_clear, /* tp_clear */
- list_richcompare, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- list_iter, /* tp_iter */
- 0, /* tp_iternext */
- list_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)list___init__, /* tp_init */
- PyType_GenericAlloc, /* tp_alloc */
- PyType_GenericNew, /* tp_new */
- PyObject_GC_Del, /* tp_free */
- .tp_vectorcall = list_vectorcall,
- };
- /*********************** List Iterator **************************/
- static void listiter_dealloc(_PyListIterObject *);
- static int listiter_traverse(_PyListIterObject *, visitproc, void *);
- static PyObject *listiter_next(_PyListIterObject *);
- static PyObject *listiter_len(_PyListIterObject *, PyObject *);
- static PyObject *listiter_reduce_general(void *_it, int forward);
- static PyObject *listiter_reduce(_PyListIterObject *, PyObject *);
- static PyObject *listiter_setstate(_PyListIterObject *, PyObject *state);
- PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
- PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
- PyDoc_STRVAR(setstate_doc, "Set state information for unpickling.");
- static PyMethodDef listiter_methods[] = {
- {"__length_hint__", (PyCFunction)listiter_len, METH_NOARGS, length_hint_doc},
- {"__reduce__", (PyCFunction)listiter_reduce, METH_NOARGS, reduce_doc},
- {"__setstate__", (PyCFunction)listiter_setstate, METH_O, setstate_doc},
- {NULL, NULL} /* sentinel */
- };
- PyTypeObject PyListIter_Type = {
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
- "list_iterator", /* tp_name */
- sizeof(_PyListIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)listiter_dealloc, /* tp_dealloc */
- 0, /* tp_vectorcall_offset */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_as_async */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- PyObject_GenericGetAttr, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
- 0, /* tp_doc */
- (traverseproc)listiter_traverse, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- PyObject_SelfIter, /* tp_iter */
- (iternextfunc)listiter_next, /* tp_iternext */
- listiter_methods, /* tp_methods */
- 0, /* tp_members */
- };
- static PyObject *
- list_iter(PyObject *seq)
- {
- _PyListIterObject *it;
- if (!PyList_Check(seq)) {
- PyErr_BadInternalCall();
- return NULL;
- }
- it = PyObject_GC_New(_PyListIterObject, &PyListIter_Type);
- if (it == NULL)
- return NULL;
- it->it_index = 0;
- it->it_seq = (PyListObject *)Py_NewRef(seq);
- _PyObject_GC_TRACK(it);
- return (PyObject *)it;
- }
- static void
- listiter_dealloc(_PyListIterObject *it)
- {
- _PyObject_GC_UNTRACK(it);
- Py_XDECREF(it->it_seq);
- PyObject_GC_Del(it);
- }
- static int
- listiter_traverse(_PyListIterObject *it, visitproc visit, void *arg)
- {
- Py_VISIT(it->it_seq);
- return 0;
- }
- static PyObject *
- listiter_next(_PyListIterObject *it)
- {
- PyListObject *seq;
- PyObject *item;
- assert(it != NULL);
- seq = it->it_seq;
- if (seq == NULL)
- return NULL;
- assert(PyList_Check(seq));
- if (it->it_index < PyList_GET_SIZE(seq)) {
- item = PyList_GET_ITEM(seq, it->it_index);
- ++it->it_index;
- return Py_NewRef(item);
- }
- it->it_seq = NULL;
- Py_DECREF(seq);
- return NULL;
- }
- static PyObject *
- listiter_len(_PyListIterObject *it, PyObject *Py_UNUSED(ignored))
- {
- Py_ssize_t len;
- if (it->it_seq) {
- len = PyList_GET_SIZE(it->it_seq) - it->it_index;
- if (len >= 0)
- return PyLong_FromSsize_t(len);
- }
- return PyLong_FromLong(0);
- }
- static PyObject *
- listiter_reduce(_PyListIterObject *it, PyObject *Py_UNUSED(ignored))
- {
- return listiter_reduce_general(it, 1);
- }
- static PyObject *
- listiter_setstate(_PyListIterObject *it, PyObject *state)
- {
- Py_ssize_t index = PyLong_AsSsize_t(state);
- if (index == -1 && PyErr_Occurred())
- return NULL;
- if (it->it_seq != NULL) {
- if (index < 0)
- index = 0;
- else if (index > PyList_GET_SIZE(it->it_seq))
- index = PyList_GET_SIZE(it->it_seq); /* iterator exhausted */
- it->it_index = index;
- }
- Py_RETURN_NONE;
- }
- /*********************** List Reverse Iterator **************************/
- typedef struct {
- PyObject_HEAD
- Py_ssize_t it_index;
- PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
- } listreviterobject;
- static void listreviter_dealloc(listreviterobject *);
- static int listreviter_traverse(listreviterobject *, visitproc, void *);
- static PyObject *listreviter_next(listreviterobject *);
- static PyObject *listreviter_len(listreviterobject *, PyObject *);
- static PyObject *listreviter_reduce(listreviterobject *, PyObject *);
- static PyObject *listreviter_setstate(listreviterobject *, PyObject *);
- static PyMethodDef listreviter_methods[] = {
- {"__length_hint__", (PyCFunction)listreviter_len, METH_NOARGS, length_hint_doc},
- {"__reduce__", (PyCFunction)listreviter_reduce, METH_NOARGS, reduce_doc},
- {"__setstate__", (PyCFunction)listreviter_setstate, METH_O, setstate_doc},
- {NULL, NULL} /* sentinel */
- };
- PyTypeObject PyListRevIter_Type = {
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
- "list_reverseiterator", /* tp_name */
- sizeof(listreviterobject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)listreviter_dealloc, /* tp_dealloc */
- 0, /* tp_vectorcall_offset */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_as_async */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- PyObject_GenericGetAttr, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
- 0, /* tp_doc */
- (traverseproc)listreviter_traverse, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- PyObject_SelfIter, /* tp_iter */
- (iternextfunc)listreviter_next, /* tp_iternext */
- listreviter_methods, /* tp_methods */
- 0, /* tp_members */
- };
- /*[clinic input]
- list.__reversed__
- Return a reverse iterator over the list.
- [clinic start generated code]*/
- static PyObject *
- list___reversed___impl(PyListObject *self)
- /*[clinic end generated code: output=b166f073208c888c input=eadb6e17f8a6a280]*/
- {
- listreviterobject *it;
- it = PyObject_GC_New(listreviterobject, &PyListRevIter_Type);
- if (it == NULL)
- return NULL;
- assert(PyList_Check(self));
- it->it_index = PyList_GET_SIZE(self) - 1;
- it->it_seq = (PyListObject*)Py_NewRef(self);
- PyObject_GC_Track(it);
- return (PyObject *)it;
- }
- static void
- listreviter_dealloc(listreviterobject *it)
- {
- PyObject_GC_UnTrack(it);
- Py_XDECREF(it->it_seq);
- PyObject_GC_Del(it);
- }
- static int
- listreviter_traverse(listreviterobject *it, visitproc visit, void *arg)
- {
- Py_VISIT(it->it_seq);
- return 0;
- }
- static PyObject *
- listreviter_next(listreviterobject *it)
- {
- PyObject *item;
- Py_ssize_t index;
- PyListObject *seq;
- assert(it != NULL);
- seq = it->it_seq;
- if (seq == NULL) {
- return NULL;
- }
- assert(PyList_Check(seq));
- index = it->it_index;
- if (index>=0 && index < PyList_GET_SIZE(seq)) {
- item = PyList_GET_ITEM(seq, index);
- it->it_index--;
- return Py_NewRef(item);
- }
- it->it_index = -1;
- it->it_seq = NULL;
- Py_DECREF(seq);
- return NULL;
- }
- static PyObject *
- listreviter_len(listreviterobject *it, PyObject *Py_UNUSED(ignored))
- {
- Py_ssize_t len = it->it_index + 1;
- if (it->it_seq == NULL || PyList_GET_SIZE(it->it_seq) < len)
- len = 0;
- return PyLong_FromSsize_t(len);
- }
- static PyObject *
- listreviter_reduce(listreviterobject *it, PyObject *Py_UNUSED(ignored))
- {
- return listiter_reduce_general(it, 0);
- }
- static PyObject *
- listreviter_setstate(listreviterobject *it, PyObject *state)
- {
- Py_ssize_t index = PyLong_AsSsize_t(state);
- if (index == -1 && PyErr_Occurred())
- return NULL;
- if (it->it_seq != NULL) {
- if (index < -1)
- index = -1;
- else if (index > PyList_GET_SIZE(it->it_seq) - 1)
- index = PyList_GET_SIZE(it->it_seq) - 1;
- it->it_index = index;
- }
- Py_RETURN_NONE;
- }
- /* common pickling support */
- static PyObject *
- listiter_reduce_general(void *_it, int forward)
- {
- PyObject *list;
- PyObject *iter;
- /* _PyEval_GetBuiltin can invoke arbitrary code, so it must be called
- * before the iterator's fields are accessed.
- * See issue #101765. */
- /* The forward and reverse iterators are different struct types whose
- * it_index fields mean different things, so _it is only cast inside the
- * matching branch below. */
- if (forward) {
- iter = _PyEval_GetBuiltin(&_Py_ID(iter));
- if (!iter) {
- return NULL;
- }
- _PyListIterObject *it = (_PyListIterObject *)_it;
- if (it->it_seq) {
- return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
- }
- } else {
- iter = _PyEval_GetBuiltin(&_Py_ID(reversed));
- if (!iter) {
- return NULL;
- }
- listreviterobject *it = (listreviterobject *)_it;
- if (it->it_seq) {
- return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
- }
- }
- /* empty iterator, create an empty list */
- list = PyList_New(0);
- if (list == NULL)
- return NULL;
- return Py_BuildValue("N(N)", iter, list);
- }
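- /* Illustrative example: for a live forward iterator this returns
- * (iter, (the_list,), index), and for a live reverse iterator
- * (reversed, (the_list,), index); unpickling calls __setstate__ with the
- * saved index so iteration resumes where it left off. Exhausted iterators
- * reduce to (iter, ([],)) or (reversed, ([],)) instead. */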