mpdecimal.c 242 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015
  1. /*
  2. * Copyright (c) 2008-2020 Stefan Krah. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright
  12. * notice, this list of conditions and the following disclaimer in the
  13. * documentation and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
  16. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  18. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  21. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  22. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  23. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  24. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  25. * SUCH DAMAGE.
  26. */
  27. #include "mpdecimal.h"
  28. #include <assert.h>
  29. #include <limits.h>
  30. #include <math.h>
  31. #include <stdio.h>
  32. #include <stdlib.h>
  33. #include <string.h>
  34. #include "basearith.h"
  35. #include "bits.h"
  36. #include "constants.h"
  37. #include "convolute.h"
  38. #include "crt.h"
  39. #include "mpalloc.h"
  40. #include "typearith.h"
  41. #ifdef PPRO
  42. #if defined(_MSC_VER)
  43. #include <float.h>
  44. #pragma float_control(precise, on)
  45. #pragma fenv_access(on)
  46. #elif !defined(__OpenBSD__) && !defined(__NetBSD__)
  47. /* C99 */
  48. #include <fenv.h>
  49. #pragma STDC FENV_ACCESS ON
  50. #endif
  51. #endif
  52. /* Disable warning that is part of -Wextra since gcc 7.0. */
  53. #if defined(__GNUC__) && !defined(__INTEL_COMPILER) && __GNUC__ >= 7
  54. #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
  55. #endif
  56. #if defined(_MSC_VER)
  57. #define ALWAYS_INLINE __forceinline
  58. #elif defined (__IBMC__) || defined(LEGACY_COMPILER)
  59. #define ALWAYS_INLINE
  60. #undef inline
  61. #define inline
  62. #else
  63. #ifdef TEST_COVERAGE
  64. #define ALWAYS_INLINE
  65. #else
  66. #define ALWAYS_INLINE inline __attribute__ ((always_inline))
  67. #endif
  68. #endif
  69. /* ClangCL claims to support 128-bit int, but doesn't */
  70. #if defined(__SIZEOF_INT128__) && defined(__clang__) && defined(_MSC_VER)
  71. #undef __SIZEOF_INT128__
  72. #endif
  73. #define MPD_NEWTONDIV_CUTOFF 1024L
  74. #define MPD_NEW_STATIC(name, flags, exp, digits, len) \
  75. mpd_uint_t name##_data[MPD_MINALLOC_MAX]; \
  76. mpd_t name = {flags|MPD_STATIC|MPD_STATIC_DATA, exp, digits, \
  77. len, MPD_MINALLOC_MAX, name##_data}
  78. #define MPD_NEW_CONST(name, flags, exp, digits, len, alloc, initval) \
  79. mpd_uint_t name##_data[alloc] = {initval}; \
  80. mpd_t name = {flags|MPD_STATIC|MPD_CONST_DATA, exp, digits, \
  81. len, alloc, name##_data}
  82. #define MPD_NEW_SHARED(name, a) \
  83. mpd_t name = {(a->flags&~MPD_DATAFLAGS)|MPD_STATIC|MPD_SHARED_DATA, \
  84. a->exp, a->digits, a->len, a->alloc, a->data}
/* Shared coefficient storage and read-only decimal constants used
 * throughout this file. The mpd_t initializers are {flags, exp, digits,
 * len, alloc, data}; 'one' and 'minus_one' deliberately share data_one. */
static mpd_uint_t data_one[1] = {1};
static mpd_uint_t data_zero[1] = {0};
static const mpd_t one = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_one};
static const mpd_t minus_one = {MPD_NEG|MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1,
                                data_one};
static const mpd_t zero = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_zero};
  91. static inline void _mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx,
  92. uint32_t *status);
  93. static void _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a,
  94. mpd_ssize_t exp);
  95. static inline mpd_ssize_t _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size);
  96. static int _mpd_cmp_abs(const mpd_t *a, const mpd_t *b);
  97. static void _mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
  98. const mpd_context_t *ctx, uint32_t *status);
  99. static inline void _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
  100. const mpd_context_t *ctx, uint32_t *status);
  101. static void _mpd_base_ndivmod(mpd_t *q, mpd_t *r, const mpd_t *a,
  102. const mpd_t *b, uint32_t *status);
  103. static inline void _mpd_qpow_uint(mpd_t *result, const mpd_t *base,
  104. mpd_uint_t exp, uint8_t resultsign,
  105. const mpd_context_t *ctx, uint32_t *status);
  106. static mpd_uint_t mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n);
  107. /******************************************************************************/
  108. /* Version */
  109. /******************************************************************************/
  110. const char *
  111. mpd_version(void)
  112. {
  113. return MPD_VERSION;
  114. }
  115. /******************************************************************************/
  116. /* Performance critical inline functions */
  117. /******************************************************************************/
  118. #ifdef CONFIG_64
/* Digits in a word, primarily useful for the most significant word.
 * 64-bit variant: a word holds at most 20 decimal digits. Implemented as
 * a hand-rolled binary search over the mpd_pow10 table so the common
 * (small) cases take few comparisons. */
ALWAYS_INLINE int
mpd_word_digits(mpd_uint_t word)
{
    if (word < mpd_pow10[9]) {
        /* 1..9 digits */
        if (word < mpd_pow10[4]) {
            if (word < mpd_pow10[2]) {
                return (word < mpd_pow10[1]) ? 1 : 2;
            }
            return (word < mpd_pow10[3]) ? 3 : 4;
        }
        if (word < mpd_pow10[6]) {
            return (word < mpd_pow10[5]) ? 5 : 6;
        }
        if (word < mpd_pow10[8]) {
            return (word < mpd_pow10[7]) ? 7 : 8;
        }
        return 9;
    }
    /* 10..14 digits */
    if (word < mpd_pow10[14]) {
        if (word < mpd_pow10[11]) {
            return (word < mpd_pow10[10]) ? 10 : 11;
        }
        if (word < mpd_pow10[13]) {
            return (word < mpd_pow10[12]) ? 12 : 13;
        }
        return 14;
    }
    /* 15..20 digits */
    if (word < mpd_pow10[18]) {
        if (word < mpd_pow10[16]) {
            return (word < mpd_pow10[15]) ? 15 : 16;
        }
        return (word < mpd_pow10[17]) ? 17 : 18;
    }
    return (word < mpd_pow10[19]) ? 19 : 20;
}
  155. #else
/* Digits in a word, 32-bit variant: a word holds at most 10 decimal
 * digits. Binary search over the mpd_pow10 table, mirroring the 64-bit
 * version above. */
ALWAYS_INLINE int
mpd_word_digits(mpd_uint_t word)
{
    if (word < mpd_pow10[4]) {
        if (word < mpd_pow10[2]) {
            return (word < mpd_pow10[1]) ? 1 : 2;
        }
        return (word < mpd_pow10[3]) ? 3 : 4;
    }
    if (word < mpd_pow10[6]) {
        return (word < mpd_pow10[5]) ? 5 : 6;
    }
    if (word < mpd_pow10[8]) {
        return (word < mpd_pow10[7]) ? 7 : 8;
    }
    return (word < mpd_pow10[9]) ? 9 : 10;
}
  173. #endif
  174. /* Adjusted exponent */
  175. ALWAYS_INLINE mpd_ssize_t
  176. mpd_adjexp(const mpd_t *dec)
  177. {
  178. return (dec->exp + dec->digits) - 1;
  179. }
  180. /* Etiny */
  181. ALWAYS_INLINE mpd_ssize_t
  182. mpd_etiny(const mpd_context_t *ctx)
  183. {
  184. return ctx->emin - (ctx->prec - 1);
  185. }
  186. /* Etop: used for folding down in IEEE clamping */
  187. ALWAYS_INLINE mpd_ssize_t
  188. mpd_etop(const mpd_context_t *ctx)
  189. {
  190. return ctx->emax - (ctx->prec - 1);
  191. }
  192. /* Most significant word */
  193. ALWAYS_INLINE mpd_uint_t
  194. mpd_msword(const mpd_t *dec)
  195. {
  196. assert(dec->len > 0);
  197. return dec->data[dec->len-1];
  198. }
  199. /* Most significant digit of a word */
  200. inline mpd_uint_t
  201. mpd_msd(mpd_uint_t word)
  202. {
  203. int n;
  204. n = mpd_word_digits(word);
  205. return word / mpd_pow10[n-1];
  206. }
  207. /* Least significant digit of a word */
  208. ALWAYS_INLINE mpd_uint_t
  209. mpd_lsd(mpd_uint_t word)
  210. {
  211. return word % 10;
  212. }
  213. /* Coefficient size needed to store 'digits' */
  214. mpd_ssize_t
  215. mpd_digits_to_size(mpd_ssize_t digits)
  216. {
  217. mpd_ssize_t q, r;
  218. _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
  219. return (r == 0) ? q : q+1;
  220. }
  221. /* Number of digits in the exponent. Not defined for MPD_SSIZE_MIN. */
  222. inline int
  223. mpd_exp_digits(mpd_ssize_t exp)
  224. {
  225. exp = (exp < 0) ? -exp : exp;
  226. return mpd_word_digits(exp);
  227. }
/* Canonical: every valid mpd_t is canonical in this implementation,
 * so this always returns 1. The parameter is unused by design. */
ALWAYS_INLINE int
mpd_iscanonical(const mpd_t *dec)
{
    (void)dec; /* suppress unused-parameter warning */
    return 1;
}
  235. /* Finite */
  236. ALWAYS_INLINE int
  237. mpd_isfinite(const mpd_t *dec)
  238. {
  239. return !(dec->flags & MPD_SPECIAL);
  240. }
  241. /* Infinite */
  242. ALWAYS_INLINE int
  243. mpd_isinfinite(const mpd_t *dec)
  244. {
  245. return dec->flags & MPD_INF;
  246. }
  247. /* NaN */
  248. ALWAYS_INLINE int
  249. mpd_isnan(const mpd_t *dec)
  250. {
  251. return dec->flags & (MPD_NAN|MPD_SNAN);
  252. }
  253. /* Negative */
  254. ALWAYS_INLINE int
  255. mpd_isnegative(const mpd_t *dec)
  256. {
  257. return dec->flags & MPD_NEG;
  258. }
  259. /* Positive */
  260. ALWAYS_INLINE int
  261. mpd_ispositive(const mpd_t *dec)
  262. {
  263. return !(dec->flags & MPD_NEG);
  264. }
  265. /* qNaN */
  266. ALWAYS_INLINE int
  267. mpd_isqnan(const mpd_t *dec)
  268. {
  269. return dec->flags & MPD_NAN;
  270. }
  271. /* Signed */
  272. ALWAYS_INLINE int
  273. mpd_issigned(const mpd_t *dec)
  274. {
  275. return dec->flags & MPD_NEG;
  276. }
  277. /* sNaN */
  278. ALWAYS_INLINE int
  279. mpd_issnan(const mpd_t *dec)
  280. {
  281. return dec->flags & MPD_SNAN;
  282. }
  283. /* Special */
  284. ALWAYS_INLINE int
  285. mpd_isspecial(const mpd_t *dec)
  286. {
  287. return dec->flags & MPD_SPECIAL;
  288. }
  289. /* Zero */
  290. ALWAYS_INLINE int
  291. mpd_iszero(const mpd_t *dec)
  292. {
  293. return !mpd_isspecial(dec) && mpd_msword(dec) == 0;
  294. }
  295. /* Test for zero when specials have been ruled out already */
  296. ALWAYS_INLINE int
  297. mpd_iszerocoeff(const mpd_t *dec)
  298. {
  299. return mpd_msword(dec) == 0;
  300. }
  301. /* Normal */
  302. inline int
  303. mpd_isnormal(const mpd_t *dec, const mpd_context_t *ctx)
  304. {
  305. if (mpd_isspecial(dec)) return 0;
  306. if (mpd_iszerocoeff(dec)) return 0;
  307. return mpd_adjexp(dec) >= ctx->emin;
  308. }
  309. /* Subnormal */
  310. inline int
  311. mpd_issubnormal(const mpd_t *dec, const mpd_context_t *ctx)
  312. {
  313. if (mpd_isspecial(dec)) return 0;
  314. if (mpd_iszerocoeff(dec)) return 0;
  315. return mpd_adjexp(dec) < ctx->emin;
  316. }
  317. /* Odd word */
  318. ALWAYS_INLINE int
  319. mpd_isoddword(mpd_uint_t word)
  320. {
  321. return word & 1;
  322. }
  323. /* Odd coefficient */
  324. ALWAYS_INLINE int
  325. mpd_isoddcoeff(const mpd_t *dec)
  326. {
  327. return mpd_isoddword(dec->data[0]);
  328. }
  329. /* 0 if dec is positive, 1 if dec is negative */
  330. ALWAYS_INLINE uint8_t
  331. mpd_sign(const mpd_t *dec)
  332. {
  333. return dec->flags & MPD_NEG;
  334. }
  335. /* 1 if dec is positive, -1 if dec is negative */
  336. ALWAYS_INLINE int
  337. mpd_arith_sign(const mpd_t *dec)
  338. {
  339. return 1 - 2 * mpd_isnegative(dec);
  340. }
/* Radix: this implementation is decimal, so the radix is always 10. */
ALWAYS_INLINE long
mpd_radix(void)
{
    return 10;
}
  347. /* Dynamic decimal */
  348. ALWAYS_INLINE int
  349. mpd_isdynamic(const mpd_t *dec)
  350. {
  351. return !(dec->flags & MPD_STATIC);
  352. }
  353. /* Static decimal */
  354. ALWAYS_INLINE int
  355. mpd_isstatic(const mpd_t *dec)
  356. {
  357. return dec->flags & MPD_STATIC;
  358. }
  359. /* Data of decimal is dynamic */
  360. ALWAYS_INLINE int
  361. mpd_isdynamic_data(const mpd_t *dec)
  362. {
  363. return !(dec->flags & MPD_DATAFLAGS);
  364. }
  365. /* Data of decimal is static */
  366. ALWAYS_INLINE int
  367. mpd_isstatic_data(const mpd_t *dec)
  368. {
  369. return dec->flags & MPD_STATIC_DATA;
  370. }
  371. /* Data of decimal is shared */
  372. ALWAYS_INLINE int
  373. mpd_isshared_data(const mpd_t *dec)
  374. {
  375. return dec->flags & MPD_SHARED_DATA;
  376. }
  377. /* Data of decimal is const */
  378. ALWAYS_INLINE int
  379. mpd_isconst_data(const mpd_t *dec)
  380. {
  381. return dec->flags & MPD_CONST_DATA;
  382. }
  383. /******************************************************************************/
  384. /* Inline memory handling */
  385. /******************************************************************************/
  386. /* Fill destination with zeros */
  387. ALWAYS_INLINE void
  388. mpd_uint_zero(mpd_uint_t *dest, mpd_size_t len)
  389. {
  390. mpd_size_t i;
  391. for (i = 0; i < len; i++) {
  392. dest[i] = 0;
  393. }
  394. }
  395. /* Free a decimal */
  396. ALWAYS_INLINE void
  397. mpd_del(mpd_t *dec)
  398. {
  399. if (mpd_isdynamic_data(dec)) {
  400. mpd_free(dec->data);
  401. }
  402. if (mpd_isdynamic(dec)) {
  403. mpd_free(dec);
  404. }
  405. }
  406. /*
  407. * Resize the coefficient. Existing data up to 'nwords' is left untouched.
  408. * Return 1 on success, 0 otherwise.
  409. *
  410. * Input invariant: MPD_MINALLOC <= result->alloc.
  411. *
  412. * Case nwords == result->alloc:
  413. * 'result' is unchanged. Return 1.
  414. *
  415. * Case nwords > result->alloc:
  416. * Case realloc success:
  417. * The value of 'result' does not change. Return 1.
  418. * Case realloc failure:
  419. * 'result' is NaN, status is updated with MPD_Malloc_error. Return 0.
  420. *
  421. * Case nwords < result->alloc:
  422. * Case is_static_data or realloc failure [1]:
  423. * 'result' is unchanged. Return 1.
  424. * Case realloc success:
  425. * The value of result is undefined (expected). Return 1.
  426. *
  427. *
  428. * [1] In that case the old (now oversized) area is still valid.
  429. */
/* Resize the coefficient to 'nwords' (see the contract in the comment
 * block above). Returns 1 on success, 0 on allocation failure (result
 * is then NaN and *status gains MPD_Malloc_error). */
ALWAYS_INLINE int
mpd_qresize(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
{
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
    assert(MPD_MINALLOC <= result->alloc);

    /* Never shrink below the configured minimum allocation. */
    nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
    if (nwords == result->alloc) {
        return 1;
    }
    if (mpd_isstatic_data(result)) {
        if (nwords > result->alloc) {
            /* Static buffer too small: migrate to a heap buffer. */
            return mpd_switch_to_dyn(result, nwords, status);
        }
        /* Shrinking a static buffer is a no-op. */
        return 1;
    }
    return mpd_realloc_dyn(result, nwords, status);
}
/* Same as mpd_qresize, but do not set the result to NaN on failure
 * (used by the C++ bindings, which signal failure via the return value). */
static ALWAYS_INLINE int
mpd_qresize_cxx(mpd_t *result, mpd_ssize_t nwords)
{
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
    assert(MPD_MINALLOC <= result->alloc);

    /* Never shrink below the configured minimum allocation. */
    nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
    if (nwords == result->alloc) {
        return 1;
    }
    if (mpd_isstatic_data(result)) {
        if (nwords > result->alloc) {
            return mpd_switch_to_dyn_cxx(result, nwords);
        }
        return 1;
    }
    return mpd_realloc_dyn_cxx(result, nwords);
}
/* Same as mpd_qresize, but the complete coefficient (including the old
 * memory area!) is initialized to zero. Returns 1 on success, 0 on
 * allocation failure. */
ALWAYS_INLINE int
mpd_qresize_zero(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
{
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
    assert(MPD_MINALLOC <= result->alloc);

    /* Never shrink below the configured minimum allocation. */
    nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
    if (nwords != result->alloc) {
        if (mpd_isstatic_data(result)) {
            if (nwords > result->alloc) {
                /* mpd_switch_to_dyn_zero zeroes the new area itself. */
                return mpd_switch_to_dyn_zero(result, nwords, status);
            }
        }
        else if (!mpd_realloc_dyn(result, nwords, status)) {
            return 0;
        }
    }
    /* Zero the (possibly reused) coefficient area. */
    mpd_uint_zero(result->data, nwords);
    return 1;
}
/*
 * Reduce memory size for the coefficient to MPD_MINALLOC. In theory,
 * realloc may fail even when reducing the memory size. But in that case
 * the old memory area is always big enough, so checking for MPD_Malloc_error
 * is not imperative.
 */
ALWAYS_INLINE void
mpd_minalloc(mpd_t *result)
{
    assert(!mpd_isconst_data(result)); /* illegal operation for a const */
    assert(!mpd_isshared_data(result)); /* illegal operation for a shared */

    if (!mpd_isstatic_data(result) && result->alloc > MPD_MINALLOC) {
        uint8_t err = 0;
        result->data = mpd_realloc(result->data, MPD_MINALLOC,
                                   sizeof *result->data, &err);
        if (!err) {
            /* Only record the smaller size if realloc succeeded;
             * on failure the old, larger area remains valid. */
            result->alloc = MPD_MINALLOC;
        }
    }
}
  509. int
  510. mpd_resize(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
  511. {
  512. uint32_t status = 0;
  513. if (!mpd_qresize(result, nwords, &status)) {
  514. mpd_addstatus_raise(ctx, status);
  515. return 0;
  516. }
  517. return 1;
  518. }
  519. int
  520. mpd_resize_zero(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
  521. {
  522. uint32_t status = 0;
  523. if (!mpd_qresize_zero(result, nwords, &status)) {
  524. mpd_addstatus_raise(ctx, status);
  525. return 0;
  526. }
  527. return 1;
  528. }
  529. /******************************************************************************/
  530. /* Set attributes of a decimal */
  531. /******************************************************************************/
  532. /* Set digits. Assumption: result->len is initialized and > 0. */
  533. inline void
  534. mpd_setdigits(mpd_t *result)
  535. {
  536. mpd_ssize_t wdigits = mpd_word_digits(mpd_msword(result));
  537. result->digits = wdigits + (result->len-1) * MPD_RDIGITS;
  538. }
  539. /* Set sign */
  540. ALWAYS_INLINE void
  541. mpd_set_sign(mpd_t *result, uint8_t sign)
  542. {
  543. result->flags &= ~MPD_NEG;
  544. result->flags |= sign;
  545. }
  546. /* Copy sign from another decimal */
  547. ALWAYS_INLINE void
  548. mpd_signcpy(mpd_t *result, const mpd_t *a)
  549. {
  550. uint8_t sign = a->flags&MPD_NEG;
  551. result->flags &= ~MPD_NEG;
  552. result->flags |= sign;
  553. }
  554. /* Set infinity */
  555. ALWAYS_INLINE void
  556. mpd_set_infinity(mpd_t *result)
  557. {
  558. result->flags &= ~MPD_SPECIAL;
  559. result->flags |= MPD_INF;
  560. }
  561. /* Set qNaN */
  562. ALWAYS_INLINE void
  563. mpd_set_qnan(mpd_t *result)
  564. {
  565. result->flags &= ~MPD_SPECIAL;
  566. result->flags |= MPD_NAN;
  567. }
  568. /* Set sNaN */
  569. ALWAYS_INLINE void
  570. mpd_set_snan(mpd_t *result)
  571. {
  572. result->flags &= ~MPD_SPECIAL;
  573. result->flags |= MPD_SNAN;
  574. }
  575. /* Set to negative */
  576. ALWAYS_INLINE void
  577. mpd_set_negative(mpd_t *result)
  578. {
  579. result->flags |= MPD_NEG;
  580. }
  581. /* Set to positive */
  582. ALWAYS_INLINE void
  583. mpd_set_positive(mpd_t *result)
  584. {
  585. result->flags &= ~MPD_NEG;
  586. }
  587. /* Set to dynamic */
  588. ALWAYS_INLINE void
  589. mpd_set_dynamic(mpd_t *result)
  590. {
  591. result->flags &= ~MPD_STATIC;
  592. }
  593. /* Set to static */
  594. ALWAYS_INLINE void
  595. mpd_set_static(mpd_t *result)
  596. {
  597. result->flags |= MPD_STATIC;
  598. }
  599. /* Set data to dynamic */
  600. ALWAYS_INLINE void
  601. mpd_set_dynamic_data(mpd_t *result)
  602. {
  603. result->flags &= ~MPD_DATAFLAGS;
  604. }
  605. /* Set data to static */
  606. ALWAYS_INLINE void
  607. mpd_set_static_data(mpd_t *result)
  608. {
  609. result->flags &= ~MPD_DATAFLAGS;
  610. result->flags |= MPD_STATIC_DATA;
  611. }
  612. /* Set data to shared */
  613. ALWAYS_INLINE void
  614. mpd_set_shared_data(mpd_t *result)
  615. {
  616. result->flags &= ~MPD_DATAFLAGS;
  617. result->flags |= MPD_SHARED_DATA;
  618. }
  619. /* Set data to const */
  620. ALWAYS_INLINE void
  621. mpd_set_const_data(mpd_t *result)
  622. {
  623. result->flags &= ~MPD_DATAFLAGS;
  624. result->flags |= MPD_CONST_DATA;
  625. }
  626. /* Clear flags, preserving memory attributes. */
  627. ALWAYS_INLINE void
  628. mpd_clear_flags(mpd_t *result)
  629. {
  630. result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
  631. }
  632. /* Set flags, preserving memory attributes. */
  633. ALWAYS_INLINE void
  634. mpd_set_flags(mpd_t *result, uint8_t flags)
  635. {
  636. result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
  637. result->flags |= flags;
  638. }
  639. /* Copy flags, preserving memory attributes of result. */
  640. ALWAYS_INLINE void
  641. mpd_copy_flags(mpd_t *result, const mpd_t *a)
  642. {
  643. uint8_t aflags = a->flags;
  644. result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
  645. result->flags |= (aflags & ~(MPD_STATIC|MPD_DATAFLAGS));
  646. }
  647. /* Initialize a workcontext from ctx. Set traps, flags and newtrap to 0. */
  648. static inline void
  649. mpd_workcontext(mpd_context_t *workctx, const mpd_context_t *ctx)
  650. {
  651. workctx->prec = ctx->prec;
  652. workctx->emax = ctx->emax;
  653. workctx->emin = ctx->emin;
  654. workctx->round = ctx->round;
  655. workctx->traps = 0;
  656. workctx->status = 0;
  657. workctx->newtrap = 0;
  658. workctx->clamp = ctx->clamp;
  659. workctx->allcr = ctx->allcr;
  660. }
  661. /******************************************************************************/
  662. /* Getting and setting parts of decimals */
  663. /******************************************************************************/
  664. /* Flip the sign of a decimal */
  665. static inline void
  666. _mpd_negate(mpd_t *dec)
  667. {
  668. dec->flags ^= MPD_NEG;
  669. }
  670. /* Set coefficient to zero */
  671. void
  672. mpd_zerocoeff(mpd_t *result)
  673. {
  674. mpd_minalloc(result);
  675. result->digits = 1;
  676. result->len = 1;
  677. result->data[0] = 0;
  678. }
/* Set the coefficient to all nines, i.e. the largest coefficient that
 * fits in ctx->prec digits. On allocation failure, result is NaN and
 * *status gains MPD_Malloc_error. */
void
mpd_qmaxcoeff(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t len, r;

    /* words needed for prec digits, rounded up */
    _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
    len = (r == 0) ? len : len+1;

    if (!mpd_qresize(result, len, status)) {
        return;
    }

    result->len = len;
    result->digits = ctx->prec;

    --len;
    if (r > 0) {
        /* partial most significant word: r nines */
        result->data[len--] = mpd_pow10[r]-1;
    }
    /* remaining words: MPD_RDIGITS nines each */
    for (; len >= 0; --len) {
        result->data[len] = MPD_RADIX-1;
    }
}
/*
 * Cut off the most significant digits so that the rest fits in ctx->prec.
 * Cannot fail.
 */
static void
_mpd_cap(mpd_t *result, const mpd_context_t *ctx)
{
    uint32_t dummy;
    mpd_ssize_t len, r;

    if (result->len > 0 && result->digits > ctx->prec) {
        /* words needed for prec digits, rounded up */
        _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
        len = (r == 0) ? len : len+1;

        if (r != 0) {
            /* drop excess digits from the new most significant word */
            result->data[len-1] %= mpd_pow10[r];
        }

        len = _mpd_real_size(result->data, len);
        /* resize to fewer words cannot fail */
        mpd_qresize(result, len, &dummy);
        result->len = len;
        mpd_setdigits(result);
    }
    if (mpd_iszero(result)) {
        /* normalize a zero result, keeping sign and exponent */
        _settriple(result, mpd_sign(result), 0, result->exp);
    }
}
/*
 * Cut off the most significant digits of a NaN payload so that the rest
 * fits in ctx->prec - ctx->clamp. Cannot fail.
 */
static void
_mpd_fix_nan(mpd_t *result, const mpd_context_t *ctx)
{
    uint32_t dummy;
    mpd_ssize_t prec;
    mpd_ssize_t len, r;

    prec = ctx->prec - ctx->clamp;
    if (result->len > 0 && result->digits > prec) {
        if (prec == 0) {
            /* no payload allowed at all */
            mpd_minalloc(result);
            result->len = result->digits = 0;
        }
        else {
            /* words needed for prec digits, rounded up */
            _mpd_idiv_word(&len, &r, prec, MPD_RDIGITS);
            len = (r == 0) ? len : len+1;

            if (r != 0) {
                /* drop excess digits from the new most significant word */
                result->data[len-1] %= mpd_pow10[r];
            }

            len = _mpd_real_size(result->data, len);
            /* resize to fewer words cannot fail */
            mpd_qresize(result, len, &dummy);
            result->len = len;
            mpd_setdigits(result);
            if (mpd_iszerocoeff(result)) {
                /* NaN0 is not a valid representation */
                result->len = result->digits = 0;
            }
        }
    }
}
  758. /*
  759. * Get n most significant digits from a decimal, where 0 < n <= MPD_UINT_DIGITS.
  760. * Assumes MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for 32 and 64 bit
  761. * machines.
  762. *
  763. * The result of the operation will be in lo. If the operation is impossible,
  764. * hi will be nonzero. This is used to indicate an error.
  765. */
static inline void
_mpd_get_msdigits(mpd_uint_t *hi, mpd_uint_t *lo, const mpd_t *dec,
                  unsigned int n)
{
    mpd_uint_t r, tmp;

    assert(0 < n && n <= MPD_RDIGITS+1);

    _mpd_div_word(&tmp, &r, dec->digits, MPD_RDIGITS);
    r = (r == 0) ? MPD_RDIGITS : r; /* digits in the most significant word */

    *hi = 0;
    *lo = dec->data[dec->len-1];
    if (n <= r) {
        /* all requested digits are in the most significant word */
        *lo /= mpd_pow10[r-n];
    }
    else if (dec->len > 1) {
        /* at this point 1 <= r < n <= MPD_RDIGITS+1 */
        /* shift the msword left and fill with digits from the next word */
        _mpd_mul_words(hi, lo, *lo, mpd_pow10[n-r]);
        tmp = dec->data[dec->len-2] / mpd_pow10[MPD_RDIGITS-(n-r)];
        *lo = *lo + tmp;
        /* carry from the addition propagates into hi */
        if (*lo < tmp) (*hi)++;
    }
}
  787. /******************************************************************************/
  788. /* Gathering information about a decimal */
  789. /******************************************************************************/
  790. /* The real size of the coefficient without leading zero words. */
  791. static inline mpd_ssize_t
  792. _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size)
  793. {
  794. while (size > 1 && data[size-1] == 0) {
  795. size--;
  796. }
  797. return size;
  798. }
  799. /* Return number of trailing zeros. No errors are possible. */
  800. mpd_ssize_t
  801. mpd_trail_zeros(const mpd_t *dec)
  802. {
  803. mpd_uint_t word;
  804. mpd_ssize_t i, tz = 0;
  805. for (i=0; i < dec->len; ++i) {
  806. if (dec->data[i] != 0) {
  807. word = dec->data[i];
  808. tz = i * MPD_RDIGITS;
  809. while (word % 10 == 0) {
  810. word /= 10;
  811. tz++;
  812. }
  813. break;
  814. }
  815. }
  816. return tz;
  817. }
  818. /* Integer: Undefined for specials */
  819. static int
  820. _mpd_isint(const mpd_t *dec)
  821. {
  822. mpd_ssize_t tz;
  823. if (mpd_iszerocoeff(dec)) {
  824. return 1;
  825. }
  826. tz = mpd_trail_zeros(dec);
  827. return (dec->exp + tz >= 0);
  828. }
  829. /* Integer */
  830. int
  831. mpd_isinteger(const mpd_t *dec)
  832. {
  833. if (mpd_isspecial(dec)) {
  834. return 0;
  835. }
  836. return _mpd_isint(dec);
  837. }
  838. /* Word is a power of 10 */
  839. static int
  840. mpd_word_ispow10(mpd_uint_t word)
  841. {
  842. int n;
  843. n = mpd_word_digits(word);
  844. if (word == mpd_pow10[n-1]) {
  845. return 1;
  846. }
  847. return 0;
  848. }
  849. /* Coefficient is a power of 10 */
  850. static int
  851. mpd_coeff_ispow10(const mpd_t *dec)
  852. {
  853. if (mpd_word_ispow10(mpd_msword(dec))) {
  854. if (_mpd_isallzero(dec->data, dec->len-1)) {
  855. return 1;
  856. }
  857. }
  858. return 0;
  859. }
  860. /* All digits of a word are nines */
  861. static int
  862. mpd_word_isallnine(mpd_uint_t word)
  863. {
  864. int n;
  865. n = mpd_word_digits(word);
  866. if (word == mpd_pow10[n]-1) {
  867. return 1;
  868. }
  869. return 0;
  870. }
  871. /* All digits of the coefficient are nines */
  872. static int
  873. mpd_coeff_isallnine(const mpd_t *dec)
  874. {
  875. if (mpd_word_isallnine(mpd_msword(dec))) {
  876. if (_mpd_isallnine(dec->data, dec->len-1)) {
  877. return 1;
  878. }
  879. }
  880. return 0;
  881. }
/* Odd decimal: Undefined for non-integers! A zero is even. For a
 * negative exponent the value is integral only because of trailing
 * coefficient zeros, so the units digit must be dug out of the word
 * that holds digit position -exp. */
int
mpd_isodd(const mpd_t *dec)
{
    mpd_uint_t q, r;
    assert(mpd_isinteger(dec));
    if (mpd_iszerocoeff(dec)) return 0;
    if (dec->exp < 0) {
        /* word index q and digit offset r of the units digit */
        _mpd_div_word(&q, &r, -dec->exp, MPD_RDIGITS);
        q = dec->data[q] / mpd_pow10[r];
        return mpd_isoddword(q);
    }
    /* exp > 0 implies at least one trailing zero, hence even */
    return dec->exp == 0 && mpd_isoddword(dec->data[0]);
}
  896. /* Even: Undefined for non-integers! */
  897. int
  898. mpd_iseven(const mpd_t *dec)
  899. {
  900. return !mpd_isodd(dec);
  901. }
  902. /******************************************************************************/
  903. /* Getting and setting decimals */
  904. /******************************************************************************/
  905. /* Internal function: Set a static decimal from a triple, no error checking. */
  906. static void
  907. _ssettriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
  908. {
  909. mpd_set_flags(result, sign);
  910. result->exp = exp;
  911. _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
  912. result->len = (result->data[1] == 0) ? 1 : 2;
  913. mpd_setdigits(result);
  914. }
  915. /* Internal function: Set a decimal from a triple, no error checking. */
  916. static void
  917. _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
  918. {
  919. mpd_minalloc(result);
  920. mpd_set_flags(result, sign);
  921. result->exp = exp;
  922. _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
  923. result->len = (result->data[1] == 0) ? 1 : 2;
  924. mpd_setdigits(result);
  925. }
  926. /* Set a special number from a triple */
  927. void
  928. mpd_setspecial(mpd_t *result, uint8_t sign, uint8_t type)
  929. {
  930. mpd_minalloc(result);
  931. result->flags &= ~(MPD_NEG|MPD_SPECIAL);
  932. result->flags |= (sign|type);
  933. result->exp = result->digits = result->len = 0;
  934. }
  935. /* Set result of NaN with an error status */
  936. void
  937. mpd_seterror(mpd_t *result, uint32_t flags, uint32_t *status)
  938. {
  939. mpd_minalloc(result);
  940. mpd_set_qnan(result);
  941. mpd_set_positive(result);
  942. result->exp = result->digits = result->len = 0;
  943. *status |= flags;
  944. }
/* quietly set a static decimal from an mpd_ssize_t */
void
mpd_qsset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
    uint32_t *status)
{
    mpd_uint_t u;
    uint8_t sign = MPD_POS;
    if (a < 0) {
        if (a == MPD_SSIZE_MIN) {
            /* -MPD_SSIZE_MIN would be signed overflow (UB); build its
             * magnitude from MPD_SSIZE_MAX instead. The parenthesized
             * term is 0 or 1 depending on the integer representation. */
            u = (mpd_uint_t)MPD_SSIZE_MAX +
                (-(MPD_SSIZE_MIN+MPD_SSIZE_MAX));
        }
        else {
            u = -a;
        }
        sign = MPD_NEG;
    }
    else {
        u = a;
    }
    _ssettriple(result, sign, u, 0);
    /* Round/clamp to the context; may set status flags. */
    mpd_qfinalize(result, ctx, status);
}
  968. /* quietly set a static decimal from an mpd_uint_t */
  969. void
  970. mpd_qsset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
  971. uint32_t *status)
  972. {
  973. _ssettriple(result, MPD_POS, a, 0);
  974. mpd_qfinalize(result, ctx, status);
  975. }
  976. /* quietly set a static decimal from an int32_t */
  977. void
  978. mpd_qsset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
  979. uint32_t *status)
  980. {
  981. mpd_qsset_ssize(result, a, ctx, status);
  982. }
  983. /* quietly set a static decimal from a uint32_t */
  984. void
  985. mpd_qsset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
  986. uint32_t *status)
  987. {
  988. mpd_qsset_uint(result, a, ctx, status);
  989. }
  990. #ifdef CONFIG_64
  991. /* quietly set a static decimal from an int64_t */
  992. void
  993. mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
  994. uint32_t *status)
  995. {
  996. mpd_qsset_ssize(result, a, ctx, status);
  997. }
  998. /* quietly set a static decimal from a uint64_t */
  999. void
  1000. mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
  1001. uint32_t *status)
  1002. {
  1003. mpd_qsset_uint(result, a, ctx, status);
  1004. }
  1005. #endif
  1006. /* quietly set a decimal from an mpd_ssize_t */
  1007. void
  1008. mpd_qset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
  1009. uint32_t *status)
  1010. {
  1011. mpd_minalloc(result);
  1012. mpd_qsset_ssize(result, a, ctx, status);
  1013. }
  1014. /* quietly set a decimal from an mpd_uint_t */
  1015. void
  1016. mpd_qset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
  1017. uint32_t *status)
  1018. {
  1019. _settriple(result, MPD_POS, a, 0);
  1020. mpd_qfinalize(result, ctx, status);
  1021. }
  1022. /* quietly set a decimal from an int32_t */
  1023. void
  1024. mpd_qset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
  1025. uint32_t *status)
  1026. {
  1027. mpd_qset_ssize(result, a, ctx, status);
  1028. }
  1029. /* quietly set a decimal from a uint32_t */
  1030. void
  1031. mpd_qset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
  1032. uint32_t *status)
  1033. {
  1034. mpd_qset_uint(result, a, ctx, status);
  1035. }
  1036. #if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
  1037. /* set a decimal from a uint64_t */
  1038. static void
  1039. _c32setu64(mpd_t *result, uint64_t u, uint8_t sign, uint32_t *status)
  1040. {
  1041. mpd_uint_t w[3];
  1042. uint64_t q;
  1043. int i, len;
  1044. len = 0;
  1045. do {
  1046. q = u / MPD_RADIX;
  1047. w[len] = (mpd_uint_t)(u - q * MPD_RADIX);
  1048. u = q; len++;
  1049. } while (u != 0);
  1050. if (!mpd_qresize(result, len, status)) {
  1051. return;
  1052. }
  1053. for (i = 0; i < len; i++) {
  1054. result->data[i] = w[i];
  1055. }
  1056. mpd_set_flags(result, sign);
  1057. result->exp = 0;
  1058. result->len = len;
  1059. mpd_setdigits(result);
  1060. }
  1061. static void
  1062. _c32_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
  1063. uint32_t *status)
  1064. {
  1065. _c32setu64(result, a, MPD_POS, status);
  1066. mpd_qfinalize(result, ctx, status);
  1067. }
/* set a decimal from an int64_t (32-bit configuration) */
static void
_c32_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
    uint32_t *status)
{
    uint64_t u;
    uint8_t sign = MPD_POS;
    if (a < 0) {
        if (a == INT64_MIN) {
            /* -INT64_MIN would be signed overflow (UB); build its
             * magnitude from INT64_MAX instead. */
            u = (uint64_t)INT64_MAX + (-(INT64_MIN+INT64_MAX));
        }
        else {
            u = -a;
        }
        sign = MPD_NEG;
    }
    else {
        u = a;
    }
    _c32setu64(result, u, sign, status);
    /* Round/clamp to the context; may set status flags. */
    mpd_qfinalize(result, ctx, status);
}
  1090. #endif /* CONFIG_32 && !LEGACY_COMPILER */
  1091. #ifndef LEGACY_COMPILER
/* quietly set a decimal from an int64_t */
void
mpd_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
    uint32_t *status)
{
#ifdef CONFIG_64
    /* mpd_ssize_t is 64 bits: forward directly. */
    mpd_qset_ssize(result, a, ctx, status);
#else
    /* 32-bit words: use the multi-word setter. */
    _c32_qset_i64(result, a, ctx, status);
#endif
}
/* quietly set a decimal from an int64_t, use a maxcontext for conversion */
void
mpd_qset_i64_exact(mpd_t *result, int64_t a, uint32_t *status)
{
    mpd_context_t maxcontext;
    mpd_maxcontext(&maxcontext);
#ifdef CONFIG_64
    mpd_qset_ssize(result, a, &maxcontext, status);
#else
    _c32_qset_i64(result, a, &maxcontext, status);
#endif
    if (*status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
        /* we want exact results */
        mpd_seterror(result, MPD_Invalid_operation, status);
    }
    /* Hide informational flags; keep only error conditions. */
    *status &= MPD_Errors;
}
/* quietly set a decimal from a uint64_t */
void
mpd_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
    uint32_t *status)
{
#ifdef CONFIG_64
    /* mpd_uint_t is 64 bits: forward directly. */
    mpd_qset_uint(result, a, ctx, status);
#else
    /* 32-bit words: use the multi-word setter. */
    _c32_qset_u64(result, a, ctx, status);
#endif
}
/* quietly set a decimal from a uint64_t, use a maxcontext for conversion */
void
mpd_qset_u64_exact(mpd_t *result, uint64_t a, uint32_t *status)
{
    mpd_context_t maxcontext;
    mpd_maxcontext(&maxcontext);
#ifdef CONFIG_64
    mpd_qset_uint(result, a, &maxcontext, status);
#else
    _c32_qset_u64(result, a, &maxcontext, status);
#endif
    if (*status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
        /* we want exact results */
        mpd_seterror(result, MPD_Invalid_operation, status);
    }
    /* Hide informational flags; keep only error conditions. */
    *status &= MPD_Errors;
}
  1148. #endif /* !LEGACY_COMPILER */
/*
 * Quietly get an mpd_uint_t from a decimal. Assumes
 * MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for
 * 32 and 64 bit machines.
 *
 * If the operation is impossible, MPD_Invalid_operation is set.
 * 'use_sign' != 0 rejects negative operands; otherwise the absolute
 * value is converted.
 */
static mpd_uint_t
_mpd_qget_uint(int use_sign, const mpd_t *a, uint32_t *status)
{
    mpd_t tmp;
    mpd_uint_t tmp_data[2];
    mpd_uint_t lo, hi;
    if (mpd_isspecial(a)) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }
    if (mpd_iszero(a)) {
        return 0;
    }
    if (use_sign && mpd_isnegative(a)) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }
    /* More than MPD_RDIGITS+1 integral digits cannot fit in a word. */
    if (a->digits+a->exp > MPD_RDIGITS+1) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }
    if (a->exp < 0) {
        /* Negative exponent: only convertible if the fractional part
         * is all zeros. */
        if (!_mpd_isint(a)) {
            *status |= MPD_Invalid_operation;
            return MPD_UINT_MAX;
        }
        /* At this point a->digits+a->exp <= MPD_RDIGITS+1,
         * so the shift fits. Shift into a stack-allocated copy and
         * let 'a' point at it for the rest of the function. */
        tmp.data = tmp_data;
        tmp.flags = MPD_STATIC|MPD_STATIC_DATA;
        tmp.alloc = 2;
        mpd_qsshiftr(&tmp, a, -a->exp);
        tmp.exp = 0;
        a = &tmp;
    }
    /* Extract the most significant MPD_RDIGITS+1 digits; any overflow
     * into 'hi' means the value does not fit. */
    _mpd_get_msdigits(&hi, &lo, a, MPD_RDIGITS+1);
    if (hi) {
        *status |= MPD_Invalid_operation;
        return MPD_UINT_MAX;
    }
    if (a->exp > 0) {
        /* Apply the remaining positive exponent; 'hi' != 0 signals
         * overflow of the word. */
        _mpd_mul_words(&hi, &lo, lo, mpd_pow10[a->exp]);
        if (hi) {
            *status |= MPD_Invalid_operation;
            return MPD_UINT_MAX;
        }
    }
    return lo;
}
  1205. /*
  1206. * Sets Invalid_operation for:
  1207. * - specials
  1208. * - negative numbers (except negative zero)
  1209. * - non-integers
  1210. * - overflow
  1211. */
  1212. mpd_uint_t
  1213. mpd_qget_uint(const mpd_t *a, uint32_t *status)
  1214. {
  1215. return _mpd_qget_uint(1, a, status);
  1216. }
  1217. /* Same as above, but gets the absolute value, i.e. the sign is ignored. */
  1218. mpd_uint_t
  1219. mpd_qabs_uint(const mpd_t *a, uint32_t *status)
  1220. {
  1221. return _mpd_qget_uint(0, a, status);
  1222. }
/* quietly get an mpd_ssize_t from a decimal */
mpd_ssize_t
mpd_qget_ssize(const mpd_t *a, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_uint_t u;
    int isneg;
    /* Convert the magnitude first, then apply the sign. */
    u = mpd_qabs_uint(a, &workstatus);
    if (workstatus&MPD_Invalid_operation) {
        *status |= workstatus;
        return MPD_SSIZE_MAX;
    }
    isneg = mpd_isnegative(a);
    if (u <= MPD_SSIZE_MAX) {
        return isneg ? -((mpd_ssize_t)u) : (mpd_ssize_t)u;
    }
    /* u == |MPD_SSIZE_MIN| is representable only when negative. The
     * comparison is written to avoid overflow in unsigned arithmetic. */
    else if (isneg && u+(MPD_SSIZE_MIN+MPD_SSIZE_MAX) == MPD_SSIZE_MAX) {
        return MPD_SSIZE_MIN;
    }
    *status |= MPD_Invalid_operation;
    return MPD_SSIZE_MAX;
}
  1245. #if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
/*
 * Quietly get a uint64_t from a decimal. If the operation is impossible,
 * MPD_Invalid_operation is set.
 *
 * 32-bit configuration only: a uint64_t spans up to three radix-10**9
 * words. 'use_sign' != 0 rejects negative operands.
 */
static uint64_t
_c32_qget_u64(int use_sign, const mpd_t *a, uint32_t *status)
{
    /* MPD_NEW_STATIC declares 'tmp' with static storage 'tmp_data'
     * (3 words). Preload it with UINT64_MAX = 18446744073709551615
     * for the range check below. */
    MPD_NEW_STATIC(tmp,0,0,20,3);
    mpd_context_t maxcontext;
    uint64_t ret;
    tmp_data[0] = 709551615;
    tmp_data[1] = 446744073;
    tmp_data[2] = 18;
    if (mpd_isspecial(a)) {
        *status |= MPD_Invalid_operation;
        return UINT64_MAX;
    }
    if (mpd_iszero(a)) {
        return 0;
    }
    if (use_sign && mpd_isnegative(a)) {
        *status |= MPD_Invalid_operation;
        return UINT64_MAX;
    }
    if (!_mpd_isint(a)) {
        *status |= MPD_Invalid_operation;
        return UINT64_MAX;
    }
    /* |a| must not exceed UINT64_MAX. */
    if (_mpd_cmp_abs(a, &tmp) > 0) {
        *status |= MPD_Invalid_operation;
        return UINT64_MAX;
    }
    /* Normalize to exponent 0; the rescale is exact for integers. */
    mpd_maxcontext(&maxcontext);
    mpd_qrescale(&tmp, a, 0, &maxcontext, &maxcontext.status);
    maxcontext.status &= ~MPD_Rounded;
    if (maxcontext.status != 0) {
        *status |= (maxcontext.status|MPD_Invalid_operation); /* GCOV_NOT_REACHED */
        return UINT64_MAX; /* GCOV_NOT_REACHED */
    }
    /* Recombine the base-10**9 words into a uint64_t. */
    ret = 0;
    switch (tmp.len) {
    case 3:
        ret += (uint64_t)tmp_data[2] * 1000000000000000000ULL;
        /* fallthrough */
    case 2:
        ret += (uint64_t)tmp_data[1] * 1000000000ULL;
        /* fallthrough */
    case 1:
        ret += tmp_data[0];
        break;
    default:
        abort(); /* GCOV_NOT_REACHED */
    }
    return ret;
}
/* Quietly get an int64_t from a decimal (32-bit configuration). */
static int64_t
_c32_qget_i64(const mpd_t *a, uint32_t *status)
{
    uint64_t u;
    int isneg;
    /* Convert the magnitude first (sign ignored), then apply the sign. */
    u = _c32_qget_u64(0, a, status);
    if (*status&MPD_Invalid_operation) {
        return INT64_MAX;
    }
    isneg = mpd_isnegative(a);
    if (u <= INT64_MAX) {
        return isneg ? -((int64_t)u) : (int64_t)u;
    }
    /* u == |INT64_MIN| is representable only when negative; written to
     * avoid overflow in unsigned arithmetic. */
    else if (isneg && u+(INT64_MIN+INT64_MAX) == INT64_MAX) {
        return INT64_MIN;
    }
    *status |= MPD_Invalid_operation;
    return INT64_MAX;
}
  1318. #endif /* CONFIG_32 && !LEGACY_COMPILER */
  1319. #ifdef CONFIG_64
  1320. /* quietly get a uint64_t from a decimal */
  1321. uint64_t
  1322. mpd_qget_u64(const mpd_t *a, uint32_t *status)
  1323. {
  1324. return mpd_qget_uint(a, status);
  1325. }
  1326. /* quietly get an int64_t from a decimal */
  1327. int64_t
  1328. mpd_qget_i64(const mpd_t *a, uint32_t *status)
  1329. {
  1330. return mpd_qget_ssize(a, status);
  1331. }
  1332. /* quietly get a uint32_t from a decimal */
  1333. uint32_t
  1334. mpd_qget_u32(const mpd_t *a, uint32_t *status)
  1335. {
  1336. uint32_t workstatus = 0;
  1337. uint64_t x = mpd_qget_uint(a, &workstatus);
  1338. if (workstatus&MPD_Invalid_operation) {
  1339. *status |= workstatus;
  1340. return UINT32_MAX;
  1341. }
  1342. if (x > UINT32_MAX) {
  1343. *status |= MPD_Invalid_operation;
  1344. return UINT32_MAX;
  1345. }
  1346. return (uint32_t)x;
  1347. }
  1348. /* quietly get an int32_t from a decimal */
  1349. int32_t
  1350. mpd_qget_i32(const mpd_t *a, uint32_t *status)
  1351. {
  1352. uint32_t workstatus = 0;
  1353. int64_t x = mpd_qget_ssize(a, &workstatus);
  1354. if (workstatus&MPD_Invalid_operation) {
  1355. *status |= workstatus;
  1356. return INT32_MAX;
  1357. }
  1358. if (x < INT32_MIN || x > INT32_MAX) {
  1359. *status |= MPD_Invalid_operation;
  1360. return INT32_MAX;
  1361. }
  1362. return (int32_t)x;
  1363. }
  1364. #else
  1365. #ifndef LEGACY_COMPILER
  1366. /* quietly get a uint64_t from a decimal */
  1367. uint64_t
  1368. mpd_qget_u64(const mpd_t *a, uint32_t *status)
  1369. {
  1370. uint32_t workstatus = 0;
  1371. uint64_t x = _c32_qget_u64(1, a, &workstatus);
  1372. *status |= workstatus;
  1373. return x;
  1374. }
  1375. /* quietly get an int64_t from a decimal */
  1376. int64_t
  1377. mpd_qget_i64(const mpd_t *a, uint32_t *status)
  1378. {
  1379. uint32_t workstatus = 0;
  1380. int64_t x = _c32_qget_i64(a, &workstatus);
  1381. *status |= workstatus;
  1382. return x;
  1383. }
  1384. #endif
  1385. /* quietly get a uint32_t from a decimal */
  1386. uint32_t
  1387. mpd_qget_u32(const mpd_t *a, uint32_t *status)
  1388. {
  1389. return mpd_qget_uint(a, status);
  1390. }
  1391. /* quietly get an int32_t from a decimal */
  1392. int32_t
  1393. mpd_qget_i32(const mpd_t *a, uint32_t *status)
  1394. {
  1395. return mpd_qget_ssize(a, status);
  1396. }
  1397. #endif
  1398. /******************************************************************************/
  1399. /* Filtering input of functions, finalizing output of functions */
  1400. /******************************************************************************/
  1401. /*
  1402. * Check if the operand is NaN, copy to result and return 1 if this is
  1403. * the case. Copying can fail since NaNs are allowed to have a payload that
  1404. * does not fit in MPD_MINALLOC.
  1405. */
  1406. int
  1407. mpd_qcheck_nan(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  1408. uint32_t *status)
  1409. {
  1410. if (mpd_isnan(a)) {
  1411. *status |= mpd_issnan(a) ? MPD_Invalid_operation : 0;
  1412. mpd_qcopy(result, a, status);
  1413. mpd_set_qnan(result);
  1414. _mpd_fix_nan(result, ctx);
  1415. return 1;
  1416. }
  1417. return 0;
  1418. }
/*
 * Check if either operand is NaN, copy to result and return 1 if this
 * is the case. Copying can fail since NaNs are allowed to have a payload
 * that does not fit in MPD_MINALLOC.
 */
int
mpd_qcheck_nans(mpd_t *result, const mpd_t *a, const mpd_t *b,
    const mpd_context_t *ctx, uint32_t *status)
{
    if ((a->flags|b->flags)&(MPD_NAN|MPD_SNAN)) {
        /* Payload precedence: sNaN(a) > sNaN(b) > qNaN(a) > qNaN(b). */
        const mpd_t *choice = b;
        if (mpd_issnan(a)) {
            choice = a;
            *status |= MPD_Invalid_operation;
        }
        else if (mpd_issnan(b)) {
            *status |= MPD_Invalid_operation;
        }
        else if (mpd_isqnan(a)) {
            choice = a;
        }
        /* The result is always a quiet NaN, payload clamped to the context. */
        mpd_qcopy(result, choice, status);
        mpd_set_qnan(result);
        _mpd_fix_nan(result, ctx);
        return 1;
    }
    return 0;
}
/*
 * Check if one of the operands is NaN, copy to result and return 1 if this
 * is the case. Copying can fail since NaNs are allowed to have a payload
 * that does not fit in MPD_MINALLOC.
 */
static int
mpd_qcheck_3nans(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
    const mpd_context_t *ctx, uint32_t *status)
{
    if ((a->flags|b->flags|c->flags)&(MPD_NAN|MPD_SNAN)) {
        /* Payload precedence: sNaN(a) > sNaN(b) > sNaN(c)
         *                   > qNaN(a) > qNaN(b) > qNaN(c). */
        const mpd_t *choice = c;
        if (mpd_issnan(a)) {
            choice = a;
            *status |= MPD_Invalid_operation;
        }
        else if (mpd_issnan(b)) {
            choice = b;
            *status |= MPD_Invalid_operation;
        }
        else if (mpd_issnan(c)) {
            *status |= MPD_Invalid_operation;
        }
        else if (mpd_isqnan(a)) {
            choice = a;
        }
        else if (mpd_isqnan(b)) {
            choice = b;
        }
        /* The result is always a quiet NaN, payload clamped to the context. */
        mpd_qcopy(result, choice, status);
        mpd_set_qnan(result);
        _mpd_fix_nan(result, ctx);
        return 1;
    }
    return 0;
}
/* Check if rounding digit 'rnd' leads to an increment.
 * 'rnd' is the most significant of the discarded digits (0 if all
 * discarded digits were zero). Returns 1 to round up, 0 to truncate. */
static inline int
_mpd_rnd_incr(const mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx)
{
    int ld;
    switch (ctx->round) {
    case MPD_ROUND_DOWN: case MPD_ROUND_TRUNC:
        /* Always truncate toward zero. */
        return 0;
    case MPD_ROUND_HALF_UP:
        return (rnd >= 5);
    case MPD_ROUND_HALF_EVEN:
        /* Ties go to the even neighbor. */
        return (rnd > 5) || ((rnd == 5) && mpd_isoddcoeff(dec));
    case MPD_ROUND_CEILING:
        /* Round toward +Infinity: increment nonzero positives. */
        return !(rnd == 0 || mpd_isnegative(dec));
    case MPD_ROUND_FLOOR:
        /* Round toward -Infinity: increment nonzero negatives. */
        return !(rnd == 0 || mpd_ispositive(dec));
    case MPD_ROUND_HALF_DOWN:
        return (rnd > 5);
    case MPD_ROUND_UP:
        /* Round away from zero on any discarded digit. */
        return !(rnd == 0);
    case MPD_ROUND_05UP:
        /* Round away from zero only if the last kept digit is 0 or 5. */
        ld = (int)mpd_lsd(dec->data[0]);
        return (!(rnd == 0) && (ld == 0 || ld == 5));
    default:
        /* Without a valid context, further results will be undefined. */
        return 0; /* GCOV_NOT_REACHED */
    }
}
/*
 * Apply rounding to a decimal that has been right-shifted into a full
 * precision decimal. If an increment leads to an overflow of the precision,
 * adjust the coefficient and the exponent and check the new exponent for
 * overflow.
 */
static inline void
_mpd_apply_round(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
    uint32_t *status)
{
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
        /* We have a number with exactly ctx->prec digits. The increment
         * can only lead to an overflow if the decimal is all nines. In
         * that case, the result is a power of ten with prec+1 digits.
         *
         * If the precision is a multiple of MPD_RDIGITS, this situation is
         * detected by _mpd_baseincr returning a carry.
         * If the precision is not a multiple of MPD_RDIGITS, we have to
         * check if the result has one digit too many.
         */
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
        if (carry) {
            /* 999...9 -> 100...0: replace the top word and bump the
             * exponent; the length is unchanged. */
            dec->data[dec->len-1] = mpd_pow10[MPD_RDIGITS-1];
            dec->exp += 1;
            _mpd_check_exp(dec, ctx, status);
            return;
        }
        mpd_setdigits(dec);
        if (dec->digits > ctx->prec) {
            /* prec not a multiple of MPD_RDIGITS: drop the excess digit.
             * The shifted-out digit is zero, so no re-rounding is needed. */
            mpd_qshiftr_inplace(dec, 1);
            dec->exp += 1;
            dec->digits = ctx->prec;
            _mpd_check_exp(dec, ctx, status);
        }
    }
}
/*
 * Apply rounding to a decimal. Allow overflow of the precision:
 * an all-nines coefficient may grow by one word.
 */
static inline void
_mpd_apply_round_excess(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
    uint32_t *status)
{
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
        if (carry) {
            /* 999...9 -> 1000...0: append a new most significant word. */
            if (!mpd_qresize(dec, dec->len+1, status)) {
                return; /* status carries MPD_Malloc_error */
            }
            dec->data[dec->len] = 1;
            dec->len += 1;
        }
        mpd_setdigits(dec);
    }
}
/*
 * Apply rounding to a decimal that has been right-shifted into a decimal
 * with full precision or less. Return failure (0) if an increment would
 * overflow the precision; return 1 on success.
 */
static inline int
_mpd_apply_round_fit(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
    uint32_t *status)
{
    if (_mpd_rnd_incr(dec, rnd, ctx)) {
        mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
        if (carry) {
            /* 999...9 -> 1000...0: append a new most significant word. */
            if (!mpd_qresize(dec, dec->len+1, status)) {
                return 0;
            }
            dec->data[dec->len] = 1;
            dec->len += 1;
        }
        mpd_setdigits(dec);
        /* The incremented coefficient must still fit the precision. */
        if (dec->digits > ctx->prec) {
            mpd_seterror(dec, MPD_Invalid_operation, status);
            return 0;
        }
    }
    return 1;
}
/* Check a normal number for overflow, underflow, clamping. If the operand
   is modified, it will be zero, special or (sub)normal with a coefficient
   that fits into the current context precision. */
static inline void
_mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t adjexp, etiny, shift;
    int rnd;
    adjexp = mpd_adjexp(dec);
    /* Case 1: adjusted exponent above emax -> overflow. */
    if (adjexp > ctx->emax) {
        if (mpd_iszerocoeff(dec)) {
            /* A zero cannot overflow; just clamp its exponent to the
             * largest allowed value. */
            dec->exp = ctx->emax;
            if (ctx->clamp) {
                dec->exp -= (ctx->prec-1);
            }
            mpd_zerocoeff(dec);
            *status |= MPD_Clamped;
            return;
        }
        /* Overflow result depends on the rounding mode: either
         * +/-Infinity or the largest finite number. */
        switch (ctx->round) {
        case MPD_ROUND_HALF_UP: case MPD_ROUND_HALF_EVEN:
        case MPD_ROUND_HALF_DOWN: case MPD_ROUND_UP:
        case MPD_ROUND_TRUNC:
            mpd_setspecial(dec, mpd_sign(dec), MPD_INF);
            break;
        case MPD_ROUND_DOWN: case MPD_ROUND_05UP:
            mpd_qmaxcoeff(dec, ctx, status);
            dec->exp = ctx->emax - ctx->prec + 1;
            break;
        case MPD_ROUND_CEILING:
            /* Toward +Infinity: negatives saturate at -(largest finite). */
            if (mpd_isnegative(dec)) {
                mpd_qmaxcoeff(dec, ctx, status);
                dec->exp = ctx->emax - ctx->prec + 1;
            }
            else {
                mpd_setspecial(dec, MPD_POS, MPD_INF);
            }
            break;
        case MPD_ROUND_FLOOR:
            /* Toward -Infinity: positives saturate at the largest finite. */
            if (mpd_ispositive(dec)) {
                mpd_qmaxcoeff(dec, ctx, status);
                dec->exp = ctx->emax - ctx->prec + 1;
            }
            else {
                mpd_setspecial(dec, MPD_NEG, MPD_INF);
            }
            break;
        default: /* debug */
            abort(); /* GCOV_NOT_REACHED */
        }
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
    } /* fold down */
    /* Case 2: clamp enabled and exponent above etop -> fold down by
     * padding the coefficient with zeros. */
    else if (ctx->clamp && dec->exp > mpd_etop(ctx)) {
        /* At this point adjexp=exp+digits-1 <= emax and exp > etop=emax-prec+1:
         * (1) shift = exp -emax+prec-1 > 0
         * (2) digits+shift = exp+digits-1 - emax + prec <= prec */
        shift = dec->exp - mpd_etop(ctx);
        if (!mpd_qshiftl(dec, dec, shift, status)) {
            return; /* status carries MPD_Malloc_error */
        }
        dec->exp -= shift;
        *status |= MPD_Clamped;
        if (!mpd_iszerocoeff(dec) && adjexp < ctx->emin) {
            /* Underflow is impossible, since exp < etiny=emin-prec+1
             * and exp > etop=emax-prec+1 would imply emax < emin. */
            *status |= MPD_Subnormal;
        }
    }
    /* Case 3: adjusted exponent below emin -> subnormal, possibly
     * rounded at etiny (underflow). */
    else if (adjexp < ctx->emin) {
        etiny = mpd_etiny(ctx);
        if (mpd_iszerocoeff(dec)) {
            /* A zero merely has its exponent clamped at etiny. */
            if (dec->exp < etiny) {
                dec->exp = etiny;
                mpd_zerocoeff(dec);
                *status |= MPD_Clamped;
            }
            return;
        }
        *status |= MPD_Subnormal;
        if (dec->exp < etiny) {
            /* At this point adjexp=exp+digits-1 < emin and exp < etiny=emin-prec+1:
             * (1) shift = emin-prec+1 - exp > 0
             * (2) digits-shift = exp+digits-1 - emin + prec < prec */
            shift = etiny - dec->exp;
            rnd = (int)mpd_qshiftr_inplace(dec, shift);
            dec->exp = etiny;
            /* We always have a spare digit in case of an increment. */
            _mpd_apply_round_excess(dec, rnd, ctx, status);
            *status |= MPD_Rounded;
            if (rnd) {
                *status |= (MPD_Inexact|MPD_Underflow);
                if (mpd_iszerocoeff(dec)) {
                    /* Rounded all the way to zero. */
                    mpd_zerocoeff(dec);
                    *status |= MPD_Clamped;
                }
            }
        }
        /* Case exp >= etiny=emin-prec+1:
         * (1) adjexp=exp+digits-1 < emin
         * (2) digits < emin-exp+1 <= prec */
    }
}
/* Transcendental functions do not always set Underflow reliably,
 * since they only use as much precision as is necessary for correct
 * rounding. If a result like 1.0000000000e-101 is finalized, there
 * is no rounding digit that would trigger Underflow. But we can
 * assume Inexact, so a short check suffices. */
static inline void
mpd_check_underflow(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
{
    /* Subnormal (adjexp < emin), nonzero, and exponent below etiny:
     * the result must have lost digits, so flag Underflow. */
    if (mpd_adjexp(dec) < ctx->emin && !mpd_iszero(dec) &&
        dec->exp < mpd_etiny(ctx)) {
        *status |= MPD_Underflow;
    }
}
/* Check if a normal number must be rounded after the exponent has been checked. */
static inline void
_mpd_check_round(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t rnd;
    mpd_ssize_t shift;
    /* must handle specials: _mpd_check_exp() can produce infinities or NaNs */
    if (mpd_isspecial(dec)) {
        return;
    }
    if (dec->digits > ctx->prec) {
        /* Shift out the excess digits; 'rnd' is the most significant
         * discarded digit, used to decide the increment. */
        shift = dec->digits - ctx->prec;
        rnd = mpd_qshiftr_inplace(dec, shift);
        dec->exp += shift;
        _mpd_apply_round(dec, rnd, ctx, status);
        *status |= MPD_Rounded;
        if (rnd) {
            *status |= MPD_Inexact;
        }
    }
}
  1727. /* Finalize all operations. */
  1728. void
  1729. mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
  1730. {
  1731. if (mpd_isspecial(result)) {
  1732. if (mpd_isnan(result)) {
  1733. _mpd_fix_nan(result, ctx);
  1734. }
  1735. return;
  1736. }
  1737. _mpd_check_exp(result, ctx, status);
  1738. _mpd_check_round(result, ctx, status);
  1739. }
  1740. /******************************************************************************/
  1741. /* Copying */
  1742. /******************************************************************************/
  1743. /* Internal function: Copy a decimal, share data with src: USE WITH CARE! */
  1744. static inline void
  1745. _mpd_copy_shared(mpd_t *dest, const mpd_t *src)
  1746. {
  1747. dest->flags = src->flags;
  1748. dest->exp = src->exp;
  1749. dest->digits = src->digits;
  1750. dest->len = src->len;
  1751. dest->alloc = src->alloc;
  1752. dest->data = src->data;
  1753. mpd_set_shared_data(dest);
  1754. }
  1755. /*
  1756. * Copy a decimal. In case of an error, status is set to MPD_Malloc_error.
  1757. */
  1758. int
  1759. mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status)
  1760. {
  1761. if (result == a) return 1;
  1762. if (!mpd_qresize(result, a->len, status)) {
  1763. return 0;
  1764. }
  1765. mpd_copy_flags(result, a);
  1766. result->exp = a->exp;
  1767. result->digits = a->digits;
  1768. result->len = a->len;
  1769. memcpy(result->data, a->data, a->len * (sizeof *result->data));
  1770. return 1;
  1771. }
  1772. /* Same as mpd_qcopy, but do not set the result to NaN on failure. */
  1773. int
  1774. mpd_qcopy_cxx(mpd_t *result, const mpd_t *a)
  1775. {
  1776. if (result == a) return 1;
  1777. if (!mpd_qresize_cxx(result, a->len)) {
  1778. return 0;
  1779. }
  1780. mpd_copy_flags(result, a);
  1781. result->exp = a->exp;
  1782. result->digits = a->digits;
  1783. result->len = a->len;
  1784. memcpy(result->data, a->data, a->len * (sizeof *result->data));
  1785. return 1;
  1786. }
  1787. /*
  1788. * Copy to a decimal with a static buffer. The caller has to make sure that
  1789. * the buffer is big enough. Cannot fail.
  1790. */
  1791. static void
  1792. mpd_qcopy_static(mpd_t *result, const mpd_t *a)
  1793. {
  1794. if (result == a) return;
  1795. memcpy(result->data, a->data, a->len * (sizeof *result->data));
  1796. mpd_copy_flags(result, a);
  1797. result->exp = a->exp;
  1798. result->digits = a->digits;
  1799. result->len = a->len;
  1800. }
  1801. /*
  1802. * Return a newly allocated copy of the operand. In case of an error,
  1803. * status is set to MPD_Malloc_error and the return value is NULL.
  1804. */
  1805. mpd_t *
  1806. mpd_qncopy(const mpd_t *a)
  1807. {
  1808. mpd_t *result;
  1809. if ((result = mpd_qnew_size(a->len)) == NULL) {
  1810. return NULL;
  1811. }
  1812. memcpy(result->data, a->data, a->len * (sizeof *result->data));
  1813. mpd_copy_flags(result, a);
  1814. result->exp = a->exp;
  1815. result->digits = a->digits;
  1816. result->len = a->len;
  1817. return result;
  1818. }
  1819. /*
  1820. * Copy a decimal and set the sign to positive. In case of an error, the
  1821. * status is set to MPD_Malloc_error.
  1822. */
  1823. int
  1824. mpd_qcopy_abs(mpd_t *result, const mpd_t *a, uint32_t *status)
  1825. {
  1826. if (!mpd_qcopy(result, a, status)) {
  1827. return 0;
  1828. }
  1829. mpd_set_positive(result);
  1830. return 1;
  1831. }
  1832. /*
  1833. * Copy a decimal and negate the sign. In case of an error, the
  1834. * status is set to MPD_Malloc_error.
  1835. */
  1836. int
  1837. mpd_qcopy_negate(mpd_t *result, const mpd_t *a, uint32_t *status)
  1838. {
  1839. if (!mpd_qcopy(result, a, status)) {
  1840. return 0;
  1841. }
  1842. _mpd_negate(result);
  1843. return 1;
  1844. }
  1845. /*
  1846. * Copy a decimal, setting the sign of the first operand to the sign of the
  1847. * second operand. In case of an error, the status is set to MPD_Malloc_error.
  1848. */
  1849. int
  1850. mpd_qcopy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
  1851. {
  1852. uint8_t sign_b = mpd_sign(b); /* result may equal b! */
  1853. if (!mpd_qcopy(result, a, status)) {
  1854. return 0;
  1855. }
  1856. mpd_set_sign(result, sign_b);
  1857. return 1;
  1858. }
  1859. /******************************************************************************/
  1860. /* Comparisons */
  1861. /******************************************************************************/
  1862. /*
  1863. * For all functions that compare two operands and return an int the usual
  1864. * convention applies to the return value:
  1865. *
  1866. * -1 if op1 < op2
  1867. * 0 if op1 == op2
  1868. * 1 if op1 > op2
  1869. *
  1870. * INT_MAX for error
  1871. */
/* Convenience macro. If a and b are not equal, return from the calling
 * function with the correct comparison value.
 * NOTE: evaluates 'a' and 'b' twice and expands to an unbraced if;
 * call sites pass simple lvalues/array elements only. */
#define CMP_EQUAL_OR_RETURN(a, b)  \
        if (a != b) {              \
            if (a < b) {           \
                return -1;         \
            }                      \
            return 1;              \
        }
/*
 * Compare the data of big and small. This function does the equivalent
 * of first shifting small to the left and then comparing the data of
 * big and small, except that no allocation for the left shift is needed.
 *
 * big has n words, small has m words; small is compared as if it were
 * multiplied by 10**shift. Returns -1, 0 or 1.
 */
static int
_mpd_basecmp(mpd_uint_t *big, mpd_uint_t *small, mpd_size_t n, mpd_size_t m,
             mpd_size_t shift)
{
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
    /* spurious uninitialized warnings */
    mpd_uint_t l=l, lprev=lprev, h=h;
#else
    mpd_uint_t l, lprev, h;
#endif
    mpd_uint_t q, r;
    mpd_uint_t ph, x;

    assert(m > 0 && n >= m && shift > 0);

    /* Split the shift into q whole words and r remaining digits. */
    _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);

    if (r != 0) {
        ph = mpd_pow10[r];

        --m; --n;
        /* Split small's most significant word: the top r digits (h) spill
         * into the word above; lprev holds the digits that stay below. */
        _mpd_divmod_pow10(&h, &lprev, small[m--], MPD_RDIGITS-r);
        if (h != 0) {
            CMP_EQUAL_OR_RETURN(big[n], h)
            --n;
        }
        /* Walk down both operands, recombining the digit groups of small
         * that straddle word boundaries after the virtual shift. */
        for (; m != MPD_SIZE_MAX; m--,n--) {
            _mpd_divmod_pow10(&h, &l, small[m], MPD_RDIGITS-r);
            x = ph * lprev + h;
            CMP_EQUAL_OR_RETURN(big[n], x)
            lprev = l;
        }
        /* Lowest non-zero word of the shifted small (word index q). */
        x = ph * lprev;
        CMP_EQUAL_OR_RETURN(big[q], x)
    }
    else {
        /* The shift is a whole number of words: compare word by word,
         * with small offset by q words. */
        while (--m != MPD_SIZE_MAX) {
            CMP_EQUAL_OR_RETURN(big[m+q], small[m])
        }
    }

    /* All overlapping words are equal: big is greater exactly when its
     * low q words (below the shifted small) are not all zero. */
    return !_mpd_isallzero(big, q);
}
/* Compare two decimals with the same adjusted exponent. Returns -1, 0 or 1. */
static int
_mpd_cmp_same_adjexp(const mpd_t *a, const mpd_t *b)
{
    mpd_ssize_t shift, i;

    if (a->exp != b->exp) {
        /* Cannot wrap: a->exp + a->digits = b->exp + b->digits, so
         * a->exp - b->exp = b->digits - a->digits. */
        shift = a->exp - b->exp;
        if (shift > 0) {
            /* a has fewer digits: compare b against a shifted left. */
            return -1 * _mpd_basecmp(b->data, a->data, b->len, a->len, shift);
        }
        else {
            return _mpd_basecmp(a->data, b->data, a->len, b->len, -shift);
        }
    }

    /*
     * At this point adjexp(a) == adjexp(b) and a->exp == b->exp,
     * so a->digits == b->digits, therefore a->len == b->len.
     */
    for (i = a->len-1; i >= 0; --i) {
        CMP_EQUAL_OR_RETURN(a->data[i], b->data[i])
    }

    return 0;
}
  1949. /* Compare two numerical values. */
  1950. static int
  1951. _mpd_cmp(const mpd_t *a, const mpd_t *b)
  1952. {
  1953. mpd_ssize_t adjexp_a, adjexp_b;
  1954. /* equal pointers */
  1955. if (a == b) {
  1956. return 0;
  1957. }
  1958. /* infinities */
  1959. if (mpd_isinfinite(a)) {
  1960. if (mpd_isinfinite(b)) {
  1961. return mpd_isnegative(b) - mpd_isnegative(a);
  1962. }
  1963. return mpd_arith_sign(a);
  1964. }
  1965. if (mpd_isinfinite(b)) {
  1966. return -mpd_arith_sign(b);
  1967. }
  1968. /* zeros */
  1969. if (mpd_iszerocoeff(a)) {
  1970. if (mpd_iszerocoeff(b)) {
  1971. return 0;
  1972. }
  1973. return -mpd_arith_sign(b);
  1974. }
  1975. if (mpd_iszerocoeff(b)) {
  1976. return mpd_arith_sign(a);
  1977. }
  1978. /* different signs */
  1979. if (mpd_sign(a) != mpd_sign(b)) {
  1980. return mpd_sign(b) - mpd_sign(a);
  1981. }
  1982. /* different adjusted exponents */
  1983. adjexp_a = mpd_adjexp(a);
  1984. adjexp_b = mpd_adjexp(b);
  1985. if (adjexp_a != adjexp_b) {
  1986. if (adjexp_a < adjexp_b) {
  1987. return -1 * mpd_arith_sign(a);
  1988. }
  1989. return mpd_arith_sign(a);
  1990. }
  1991. /* same adjusted exponents */
  1992. return _mpd_cmp_same_adjexp(a, b) * mpd_arith_sign(a);
  1993. }
  1994. /* Compare the absolutes of two numerical values. */
  1995. static int
  1996. _mpd_cmp_abs(const mpd_t *a, const mpd_t *b)
  1997. {
  1998. mpd_ssize_t adjexp_a, adjexp_b;
  1999. /* equal pointers */
  2000. if (a == b) {
  2001. return 0;
  2002. }
  2003. /* infinities */
  2004. if (mpd_isinfinite(a)) {
  2005. if (mpd_isinfinite(b)) {
  2006. return 0;
  2007. }
  2008. return 1;
  2009. }
  2010. if (mpd_isinfinite(b)) {
  2011. return -1;
  2012. }
  2013. /* zeros */
  2014. if (mpd_iszerocoeff(a)) {
  2015. if (mpd_iszerocoeff(b)) {
  2016. return 0;
  2017. }
  2018. return -1;
  2019. }
  2020. if (mpd_iszerocoeff(b)) {
  2021. return 1;
  2022. }
  2023. /* different adjusted exponents */
  2024. adjexp_a = mpd_adjexp(a);
  2025. adjexp_b = mpd_adjexp(b);
  2026. if (adjexp_a != adjexp_b) {
  2027. if (adjexp_a < adjexp_b) {
  2028. return -1;
  2029. }
  2030. return 1;
  2031. }
  2032. /* same adjusted exponents */
  2033. return _mpd_cmp_same_adjexp(a, b);
  2034. }
  2035. /* Compare two values and return an integer result. */
  2036. int
  2037. mpd_qcmp(const mpd_t *a, const mpd_t *b, uint32_t *status)
  2038. {
  2039. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2040. if (mpd_isnan(a) || mpd_isnan(b)) {
  2041. *status |= MPD_Invalid_operation;
  2042. return INT_MAX;
  2043. }
  2044. }
  2045. return _mpd_cmp(a, b);
  2046. }
  2047. /*
  2048. * Compare a and b, convert the usual integer result to a decimal and
  2049. * store it in 'result'. For convenience, the integer result of the comparison
  2050. * is returned. Comparisons involving NaNs return NaN/INT_MAX.
  2051. */
  2052. int
  2053. mpd_qcompare(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2054. const mpd_context_t *ctx, uint32_t *status)
  2055. {
  2056. int c;
  2057. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2058. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2059. return INT_MAX;
  2060. }
  2061. }
  2062. c = _mpd_cmp(a, b);
  2063. _settriple(result, (c < 0), (c != 0), 0);
  2064. return c;
  2065. }
  2066. /* Same as mpd_compare(), but signal for all NaNs, i.e. also for quiet NaNs. */
  2067. int
  2068. mpd_qcompare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b,
  2069. const mpd_context_t *ctx, uint32_t *status)
  2070. {
  2071. int c;
  2072. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  2073. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  2074. *status |= MPD_Invalid_operation;
  2075. return INT_MAX;
  2076. }
  2077. }
  2078. c = _mpd_cmp(a, b);
  2079. _settriple(result, (c < 0), (c != 0), 0);
  2080. return c;
  2081. }
/* Compare the operands using a total order: every value, including NaNs,
 * has a defined place in the ordering. Returns -1, 0 or 1. */
int
mpd_cmp_total(const mpd_t *a, const mpd_t *b)
{
    mpd_t aa, bb;
    int nan_a, nan_b;
    int c;

    /* different signs decide immediately (negative sorts first) */
    if (mpd_sign(a) != mpd_sign(b)) {
        return mpd_sign(b) - mpd_sign(a);
    }

    if (mpd_isnan(a)) {
        c = 1;
        if (mpd_isnan(b)) {
            /* quiet NaN sorts above signaling NaN of the same sign */
            nan_a = (mpd_isqnan(a)) ? 1 : 0;
            nan_b = (mpd_isqnan(b)) ? 1 : 0;
            if (nan_b == nan_a) {
                if (a->len > 0 && b->len > 0) {
                    /* shallow copies so the payloads can be compared
                     * as plain integers without touching a or b */
                    _mpd_copy_shared(&aa, a);
                    _mpd_copy_shared(&bb, b);
                    aa.exp = bb.exp = 0;
                    /* compare payload */
                    c = _mpd_cmp_abs(&aa, &bb);
                }
                else {
                    /* a NaN with a payload sorts above one without */
                    c = (a->len > 0) - (b->len > 0);
                }
            }
            else {
                c = nan_a - nan_b;
            }
        }
    }
    else if (mpd_isnan(b)) {
        c = -1;
    }
    else {
        c = _mpd_cmp_abs(a, b);
        if (c == 0 && a->exp != b->exp) {
            /* numerically equal: the smaller exponent sorts first */
            c = (a->exp < b->exp) ? -1 : 1;
        }
    }

    /* fold the common sign back in */
    return c * mpd_arith_sign(a);
}
  2125. /*
  2126. * Compare a and b according to a total order, convert the usual integer result
  2127. * to a decimal and store it in 'result'. For convenience, the integer result
  2128. * of the comparison is returned.
  2129. */
  2130. int
  2131. mpd_compare_total(mpd_t *result, const mpd_t *a, const mpd_t *b)
  2132. {
  2133. int c;
  2134. c = mpd_cmp_total(a, b);
  2135. _settriple(result, (c < 0), (c != 0), 0);
  2136. return c;
  2137. }
  2138. /* Compare the magnitude of the operands using a total order. */
  2139. int
  2140. mpd_cmp_total_mag(const mpd_t *a, const mpd_t *b)
  2141. {
  2142. mpd_t aa, bb;
  2143. _mpd_copy_shared(&aa, a);
  2144. _mpd_copy_shared(&bb, b);
  2145. mpd_set_positive(&aa);
  2146. mpd_set_positive(&bb);
  2147. return mpd_cmp_total(&aa, &bb);
  2148. }
  2149. /*
  2150. * Compare the magnitude of a and b according to a total order, convert the
  2151. * the usual integer result to a decimal and store it in 'result'.
  2152. * For convenience, the integer result of the comparison is returned.
  2153. */
  2154. int
  2155. mpd_compare_total_mag(mpd_t *result, const mpd_t *a, const mpd_t *b)
  2156. {
  2157. int c;
  2158. c = mpd_cmp_total_mag(a, b);
  2159. _settriple(result, (c < 0), (c != 0), 0);
  2160. return c;
  2161. }
  2162. /* Determine an ordering for operands that are numerically equal. */
  2163. static inline int
  2164. _mpd_cmp_numequal(const mpd_t *a, const mpd_t *b)
  2165. {
  2166. int sign_a, sign_b;
  2167. int c;
  2168. sign_a = mpd_sign(a);
  2169. sign_b = mpd_sign(b);
  2170. if (sign_a != sign_b) {
  2171. c = sign_b - sign_a;
  2172. }
  2173. else {
  2174. c = (a->exp < b->exp) ? -1 : 1;
  2175. c *= mpd_arith_sign(a);
  2176. }
  2177. return c;
  2178. }
  2179. /******************************************************************************/
  2180. /* Shifting the coefficient */
  2181. /******************************************************************************/
  2182. /*
  2183. * Shift the coefficient of the operand to the left, no check for specials.
  2184. * Both operands may be the same pointer. If the result length has to be
  2185. * increased, mpd_qresize() might fail with MPD_Malloc_error.
  2186. */
  2187. int
  2188. mpd_qshiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
  2189. {
  2190. mpd_ssize_t size;
  2191. assert(!mpd_isspecial(a));
  2192. assert(n >= 0);
  2193. if (mpd_iszerocoeff(a) || n == 0) {
  2194. return mpd_qcopy(result, a, status);
  2195. }
  2196. size = mpd_digits_to_size(a->digits+n);
  2197. if (!mpd_qresize(result, size, status)) {
  2198. return 0; /* result is NaN */
  2199. }
  2200. _mpd_baseshiftl(result->data, a->data, size, a->len, n);
  2201. mpd_copy_flags(result, a);
  2202. result->exp = a->exp;
  2203. result->digits = a->digits+n;
  2204. result->len = size;
  2205. return 1;
  2206. }
  2207. /* Determine the rounding indicator if all digits of the coefficient are shifted
  2208. * out of the picture. */
  2209. static mpd_uint_t
  2210. _mpd_get_rnd(const mpd_uint_t *data, mpd_ssize_t len, int use_msd)
  2211. {
  2212. mpd_uint_t rnd = 0, rest = 0, word;
  2213. word = data[len-1];
  2214. /* special treatment for the most significant digit if shift == digits */
  2215. if (use_msd) {
  2216. _mpd_divmod_pow10(&rnd, &rest, word, mpd_word_digits(word)-1);
  2217. if (len > 1 && rest == 0) {
  2218. rest = !_mpd_isallzero(data, len-1);
  2219. }
  2220. }
  2221. else {
  2222. rest = !_mpd_isallzero(data, len);
  2223. }
  2224. return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd;
  2225. }
/*
 * Same as mpd_qshiftr(), but 'result' is an mpd_t with a static coefficient.
 * It is the caller's responsibility to ensure that the coefficient is big
 * enough. The function cannot fail. Returns the rounding indicator for
 * mpd_rnd_incr().
 */
static mpd_uint_t
mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n)
{
    mpd_uint_t rnd;
    mpd_ssize_t size;

    assert(!mpd_isspecial(a));
    assert(n >= 0);

    if (mpd_iszerocoeff(a) || n == 0) {
        mpd_qcopy_static(result, a);
        return 0;
    }

    if (n >= a->digits) {
        /* all digits are shifted out: result becomes zero, but the
         * rounding indicator is computed from the dropped digits */
        rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
        mpd_zerocoeff(result);
    }
    else {
        result->digits = a->digits-n;
        size = mpd_digits_to_size(result->digits);
        rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
        result->len = size;
    }

    mpd_copy_flags(result, a);
    result->exp = a->exp;

    return rnd;
}
/*
 * Inplace shift of the coefficient to the right, no check for specials.
 * Returns the rounding indicator for mpd_rnd_incr().
 * The function cannot fail.
 */
mpd_uint_t
mpd_qshiftr_inplace(mpd_t *result, mpd_ssize_t n)
{
    uint32_t dummy;
    mpd_uint_t rnd;
    mpd_ssize_t size;

    assert(!mpd_isspecial(result));
    assert(n >= 0);

    if (mpd_iszerocoeff(result) || n == 0) {
        return 0;
    }

    if (n >= result->digits) {
        /* all digits shifted out: coefficient becomes zero, keep the
         * rounding indicator from the dropped digits */
        rnd = _mpd_get_rnd(result->data, result->len, (n==result->digits));
        mpd_zerocoeff(result);
    }
    else {
        rnd = _mpd_baseshiftr(result->data, result->data, result->len, n);
        result->digits -= n;
        size = mpd_digits_to_size(result->digits);
        /* reducing the size cannot fail */
        mpd_qresize(result, size, &dummy);
        result->len = size;
    }

    return rnd;
}
/*
 * Shift the coefficient of the operand to the right, no check for specials.
 * Both operands may be the same pointer. Returns the rounding indicator to
 * be used by mpd_rnd_incr(). If the result length has to be increased,
 * mpd_qcopy() or mpd_qresize() might fail with MPD_Malloc_error. In those
 * cases, MPD_UINT_MAX is returned.
 */
mpd_uint_t
mpd_qshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
{
    mpd_uint_t rnd;
    mpd_ssize_t size;

    assert(!mpd_isspecial(a));
    assert(n >= 0);

    if (mpd_iszerocoeff(a) || n == 0) {
        if (!mpd_qcopy(result, a, status)) {
            return MPD_UINT_MAX;
        }
        return 0;
    }

    if (n >= a->digits) {
        /* all digits shifted out: result becomes zero, keep the
         * rounding indicator from the dropped digits */
        rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
        mpd_zerocoeff(result);
    }
    else {
        result->digits = a->digits-n;
        size = mpd_digits_to_size(result->digits);
        if (result == a) {
            /* in place: shift first, then shrink (shrinking cannot fail) */
            rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
            /* reducing the size cannot fail */
            mpd_qresize(result, size, status);
        }
        else {
            /* distinct operands: grow result before writing into it */
            if (!mpd_qresize(result, size, status)) {
                return MPD_UINT_MAX;
            }
            rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
        }
        result->len = size;
    }

    mpd_copy_flags(result, a);
    result->exp = a->exp;

    return rnd;
}
  2330. /******************************************************************************/
  2331. /* Miscellaneous operations */
  2332. /******************************************************************************/
/* Logical And. Operands must be finite, non-negative integers with
 * exponent 0 whose coefficients consist only of the digits 0 and 1. */
void
mpd_qand(mpd_t *result, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words of the shorter operand */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        /* and the decimal digits pairwise; any digit > 1 is invalid */
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit&ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit&ybit) ? mpd_pow10[k] : 0;
    }
    result->data[i++] = z;

    /* scan the rest of y for digits > 1 (x&0 == 0: nothing to copy) */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
    }
    /* scan the rest of big for digits > 1 */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
    }

    /* the result has at most small->len words: drop leading zero words */
    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, small->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
  2417. /* Class of an operand. Returns a pointer to the constant name. */
  2418. const char *
  2419. mpd_class(const mpd_t *a, const mpd_context_t *ctx)
  2420. {
  2421. if (mpd_isnan(a)) {
  2422. if (mpd_isqnan(a))
  2423. return "NaN";
  2424. else
  2425. return "sNaN";
  2426. }
  2427. else if (mpd_ispositive(a)) {
  2428. if (mpd_isinfinite(a))
  2429. return "+Infinity";
  2430. else if (mpd_iszero(a))
  2431. return "+Zero";
  2432. else if (mpd_isnormal(a, ctx))
  2433. return "+Normal";
  2434. else
  2435. return "+Subnormal";
  2436. }
  2437. else {
  2438. if (mpd_isinfinite(a))
  2439. return "-Infinity";
  2440. else if (mpd_iszero(a))
  2441. return "-Zero";
  2442. else if (mpd_isnormal(a, ctx))
  2443. return "-Normal";
  2444. else
  2445. return "-Subnormal";
  2446. }
  2447. }
/* Logical Not. Invert each digit of the operand (0 -> 1, 1 -> 0), padded
 * with ones up to the context precision. The operand must be a finite,
 * non-negative integer with exponent 0 consisting of the digits 0 and 1. */
void
mpd_qinvert(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
            uint32_t *status)
{
    mpd_uint_t x, z, xbit;
    mpd_ssize_t i, digits, len;
    mpd_ssize_t q, r;
    int k;

    if (mpd_isspecial(a) || mpd_isnegative(a) || a->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* the result always spans the full context precision */
    digits = (a->digits < ctx->prec) ? ctx->prec : a->digits;
    _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
    len = (r == 0) ? q : q+1;

    if (!mpd_qresize(result, len, status)) {
        return;
    }

    for (i = 0; i < len; i++) {
        /* words above a->len are treated as all-zero and invert to ones */
        x = (i < a->len) ? a->data[i] : 0;
        z = 0;
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            if (xbit > 1) {
                goto invalid_operation;
            }
            z += !xbit ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
  2490. /* Exponent of the magnitude of the most significant digit of the operand. */
  2491. void
  2492. mpd_qlogb(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  2493. uint32_t *status)
  2494. {
  2495. if (mpd_isspecial(a)) {
  2496. if (mpd_qcheck_nan(result, a, ctx, status)) {
  2497. return;
  2498. }
  2499. mpd_setspecial(result, MPD_POS, MPD_INF);
  2500. }
  2501. else if (mpd_iszerocoeff(a)) {
  2502. mpd_setspecial(result, MPD_NEG, MPD_INF);
  2503. *status |= MPD_Division_by_zero;
  2504. }
  2505. else {
  2506. mpd_qset_ssize(result, mpd_adjexp(a), ctx, status);
  2507. }
  2508. }
/* Logical Or. Operands must be finite, non-negative integers with
 * exponent 0 whose coefficients consist only of the digits 0 and 1. */
void
mpd_qor(mpd_t *result, const mpd_t *a, const mpd_t *b,
        const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words of the shorter operand */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        /* or the decimal digits pairwise; any digit > 1 is invalid */
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit|ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit|ybit) ? mpd_pow10[k] : 0;
    }
    /* scan for digits > 1 and copy the rest of y (x|0 == x) */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
        z += ybit*mpd_pow10[k];
    }
    result->data[i++] = z;

    /* scan for digits > 1 and copy the rest of big */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
        result->data[i] = big->data[i];
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, big->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
/*
 * Rotate the coefficient of 'a' by 'b' digits. 'b' must be an integer with
 * exponent 0 in the range [-prec, prec]; positive 'b' rotates to the left.
 */
void
mpd_qrotate(mpd_t *result, const mpd_t *a, const mpd_t *b,
            const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    MPD_NEW_STATIC(tmp,0,0,0,0);
    MPD_NEW_STATIC(big,0,0,0,0);
    MPD_NEW_STATIC(small,0,0,0,0);
    mpd_ssize_t n, lshift, rshift;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
    }
    /* b must be a finite integer with exponent 0 */
    if (b->exp != 0 || mpd_isinfinite(b)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    n = mpd_qget_ssize(b, &workstatus);
    if (workstatus&MPD_Invalid_operation) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* the rotation count is limited to the context precision */
    if (n > ctx->prec || n < -ctx->prec) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (mpd_isinfinite(a)) {
        mpd_qcopy(result, a, status);
        return;
    }

    /* A rotation by n is a left shift by lshift combined with a right
     * shift by rshift, where lshift + rshift == prec. */
    if (n >= 0) {
        lshift = n;
        rshift = ctx->prec-n;
    }
    else {
        lshift = ctx->prec+n;
        rshift = -n;
    }

    /* cap the operand to prec digits before rotating */
    if (a->digits > ctx->prec) {
        if (!mpd_qcopy(&tmp, a, status)) {
            mpd_seterror(result, MPD_Malloc_error, status);
            goto finish;
        }
        _mpd_cap(&tmp, ctx);
        a = &tmp;
    }

    if (!mpd_qshiftl(&big, a, lshift, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        goto finish;
    }
    _mpd_cap(&big, ctx);

    if (mpd_qshiftr(&small, a, rshift, status) == MPD_UINT_MAX) {
        mpd_seterror(result, MPD_Malloc_error, status);
        goto finish;
    }

    /* recombine the two halves of the rotation */
    _mpd_qadd(result, &big, &small, ctx, status);

finish:
    mpd_del(&tmp);
    mpd_del(&big);
    mpd_del(&small);
}
/*
 * b must be an integer with exponent 0 and in the range +-2*(emax + prec).
 * XXX: In my opinion +-(2*emax + prec) would be more sensible.
 * The result is a with the value of b added to its exponent.
 */
void
mpd_qscaleb(mpd_t *result, const mpd_t *a, const mpd_t *b,
            const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_uint_t n, maxjump;
#ifndef LEGACY_COMPILER
    int64_t exp;
#else
    mpd_uint_t x;
    int x_sign, n_sign;
    mpd_ssize_t exp;
#endif

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
    }
    /* b must be a finite integer with exponent 0 */
    if (b->exp != 0 || mpd_isinfinite(b)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    n = mpd_qabs_uint(b, &workstatus);
    /* the spec demands this */
    maxjump = 2 * (mpd_uint_t)(ctx->emax + ctx->prec);

    if (n > maxjump || workstatus&MPD_Invalid_operation) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (mpd_isinfinite(a)) {
        mpd_qcopy(result, a, status);
        return;
    }

#ifndef LEGACY_COMPILER
    /* 64-bit arithmetic, clamped to the representable exponent range */
    exp = a->exp + (int64_t)n * mpd_arith_sign(b);
    exp = (exp > MPD_EXP_INF) ? MPD_EXP_INF : exp;
    exp = (exp < MPD_EXP_CLAMP) ? MPD_EXP_CLAMP : exp;
#else
    /* Without int64_t: sign/magnitude arithmetic in mpd_uint_t,
     * saturating on overflow. */
    x = (a->exp < 0) ? -a->exp : a->exp;
    x_sign = (a->exp < 0) ? 1 : 0;
    n_sign = mpd_isnegative(b) ? 1 : 0;

    if (x_sign == n_sign) {
        x = x + n;
        if (x < n) x = MPD_UINT_MAX; /* unsigned wraparound: saturate */
    }
    else {
        x_sign = (x >= n) ? x_sign : n_sign;
        x = (x >= n) ? x - n : n - x;
    }
    if (!x_sign && x > MPD_EXP_INF) x = MPD_EXP_INF;
    if (x_sign && x > -MPD_EXP_CLAMP) x = -MPD_EXP_CLAMP;
    exp = x_sign ? -((mpd_ssize_t)x) : (mpd_ssize_t)x;
#endif

    mpd_qcopy(result, a, status);
    result->exp = (mpd_ssize_t)exp;

    mpd_qfinalize(result, ctx, status);
}
  2723. /*
  2724. * Shift the coefficient by n digits, positive n is a left shift. In the case
  2725. * of a left shift, the result is decapitated to fit the context precision. If
  2726. * you don't want that, use mpd_shiftl().
  2727. */
  2728. void
  2729. mpd_qshiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, const mpd_context_t *ctx,
  2730. uint32_t *status)
  2731. {
  2732. if (mpd_isspecial(a)) {
  2733. if (mpd_qcheck_nan(result, a, ctx, status)) {
  2734. return;
  2735. }
  2736. mpd_qcopy(result, a, status);
  2737. return;
  2738. }
  2739. if (n >= 0 && n <= ctx->prec) {
  2740. mpd_qshiftl(result, a, n, status);
  2741. _mpd_cap(result, ctx);
  2742. }
  2743. else if (n < 0 && n >= -ctx->prec) {
  2744. if (!mpd_qcopy(result, a, status)) {
  2745. return;
  2746. }
  2747. _mpd_cap(result, ctx);
  2748. mpd_qshiftr_inplace(result, -n);
  2749. }
  2750. else {
  2751. mpd_seterror(result, MPD_Invalid_operation, status);
  2752. }
  2753. }
/*
 * Same as mpd_shiftn(), but the shift is specified by the decimal b, which
 * must be an integer with a zero exponent. Infinities remain infinities.
 */
void
mpd_qshift(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
           uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_ssize_t n;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
    }
    /* b must be a finite integer with exponent 0 */
    if (b->exp != 0 || mpd_isinfinite(b)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    n = mpd_qget_ssize(b, &workstatus);
    if (workstatus&MPD_Invalid_operation) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* the shift count is limited to the context precision */
    if (n > ctx->prec || n < -ctx->prec) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (mpd_isinfinite(a)) {
        mpd_qcopy(result, a, status);
        return;
    }

    if (n >= 0) {
        /* left shift, then decapitate to the context precision */
        mpd_qshiftl(result, a, n, status);
        _mpd_cap(result, ctx);
    }
    else {
        if (!mpd_qcopy(result, a, status)) {
            return;
        }
        /* cap first so at most prec digits are shifted right */
        _mpd_cap(result, ctx);
        mpd_qshiftr_inplace(result, -n);
    }
}
/* Logical Xor. Operands must be finite, non-negative integers with
 * exponent 0 whose coefficients consist only of the digits 0 and 1. */
void
mpd_qxor(mpd_t *result, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t x, y, z, xbit, ybit;
    int k, mswdigits;
    mpd_ssize_t i;

    if (mpd_isspecial(a) || mpd_isspecial(b) ||
        mpd_isnegative(a) || mpd_isnegative(b) ||
        a->exp != 0 || b->exp != 0) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (b->digits > a->digits) {
        big = b;
        small = a;
    }
    if (!mpd_qresize(result, big->len, status)) {
        return;
    }

    /* full words of the shorter operand */
    for (i = 0; i < small->len-1; i++) {
        x = small->data[i];
        y = big->data[i];
        z = 0;
        /* xor the decimal digits pairwise; any digit > 1 is invalid */
        for (k = 0; k < MPD_RDIGITS; k++) {
            xbit = x % 10;
            x /= 10;
            ybit = y % 10;
            y /= 10;
            if (xbit > 1 || ybit > 1) {
                goto invalid_operation;
            }
            z += (xbit^ybit) ? mpd_pow10[k] : 0;
        }
        result->data[i] = z;
    }
    /* most significant word of small */
    x = small->data[i];
    y = big->data[i];
    z = 0;
    mswdigits = mpd_word_digits(x);
    for (k = 0; k < mswdigits; k++) {
        xbit = x % 10;
        x /= 10;
        ybit = y % 10;
        y /= 10;
        if (xbit > 1 || ybit > 1) {
            goto invalid_operation;
        }
        z += (xbit^ybit) ? mpd_pow10[k] : 0;
    }
    /* scan for digits > 1 and copy the rest of y (x^0 == x) */
    for (; k < MPD_RDIGITS; k++) {
        ybit = y % 10;
        y /= 10;
        if (ybit > 1) {
            goto invalid_operation;
        }
        z += ybit*mpd_pow10[k];
    }
    result->data[i++] = z;

    /* scan for digits > 1 and copy the rest of big */
    for (; i < big->len; i++) {
        y = big->data[i];
        for (k = 0; k < MPD_RDIGITS; k++) {
            ybit = y % 10;
            y /= 10;
            if (ybit > 1) {
                goto invalid_operation;
            }
        }
        result->data[i] = big->data[i];
    }

    mpd_clear_flags(result);
    result->exp = 0;
    result->len = _mpd_real_size(result->data, big->len);
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
    _mpd_cap(result, ctx);
    return;

invalid_operation:
    mpd_seterror(result, MPD_Invalid_operation, status);
}
  2884. /******************************************************************************/
  2885. /* Arithmetic operations */
  2886. /******************************************************************************/
  2887. /*
  2888. * The absolute value of a. If a is negative, the result is the same
  2889. * as the result of the minus operation. Otherwise, the result is the
  2890. * result of the plus operation.
  2891. */
  2892. void
  2893. mpd_qabs(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  2894. uint32_t *status)
  2895. {
  2896. if (mpd_isspecial(a)) {
  2897. if (mpd_qcheck_nan(result, a, ctx, status)) {
  2898. return;
  2899. }
  2900. }
  2901. if (mpd_isnegative(a)) {
  2902. mpd_qminus(result, a, ctx, status);
  2903. }
  2904. else {
  2905. mpd_qplus(result, a, ctx, status);
  2906. }
  2907. }
  2908. static inline void
  2909. _mpd_ptrswap(const mpd_t **a, const mpd_t **b)
  2910. {
  2911. const mpd_t *t = *a;
  2912. *a = *b;
  2913. *b = t;
  2914. }
  2915. /* Add or subtract infinities. */
  2916. static void
  2917. _mpd_qaddsub_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
  2918. uint32_t *status)
  2919. {
  2920. if (mpd_isinfinite(a)) {
  2921. if (mpd_sign(a) != sign_b && mpd_isinfinite(b)) {
  2922. mpd_seterror(result, MPD_Invalid_operation, status);
  2923. }
  2924. else {
  2925. mpd_setspecial(result, mpd_sign(a), MPD_INF);
  2926. }
  2927. return;
  2928. }
  2929. assert(mpd_isinfinite(b));
  2930. mpd_setspecial(result, sign_b, MPD_INF);
  2931. }
/* Add or subtract non-special numbers. sign_b is b's effective sign (the
 * caller has already inverted it for subtraction). The computation is exact
 * except for the possible substitution of 'tiny' below, which is constructed
 * so that the rounded result is unchanged. */
static void
_mpd_qaddsub(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
             const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big, *small;
    MPD_NEW_STATIC(big_aligned,0,0,0,0); /* left-shifted copy of 'big' */
    MPD_NEW_CONST(tiny,0,0,1,1,1,1);     /* stand-in operand for huge shifts */
    mpd_uint_t carry;
    mpd_ssize_t newsize, shift;
    mpd_ssize_t exp, i;
    int swap = 0;  /* parity records which operand currently plays 'big' */

    /* compare exponents */
    big = a; small = b;
    if (big->exp != small->exp) {
        if (small->exp > big->exp) {
            _mpd_ptrswap(&big, &small);
            swap++;
        }
        /* align the coefficients */
        if (!mpd_iszerocoeff(big)) {
            exp = big->exp - 1;
            exp += (big->digits > ctx->prec) ? 0 : big->digits-ctx->prec-1;
            if (mpd_adjexp(small) < exp) {
                /*
                 * Avoid huge shifts by substituting a value for small that is
                 * guaranteed to produce the same results.
                 *
                 * adjexp(small) < exp if and only if:
                 *
                 *   bdigits <= prec AND
                 *   bdigits+shift >= prec+2+sdigits AND
                 *   exp = bexp+bdigits-prec-2
                 *
                 *     1234567000000000  ->  bdigits + shift
                 *     ----------XX1234  ->  sdigits
                 *     ----------X1      ->  tiny-digits
                 *     |- prec -|
                 *
                 *   OR
                 *
                 *   bdigits > prec AND
                 *   shift > sdigits AND
                 *   exp = bexp-1
                 *
                 *     1234567892100000  ->  bdigits + shift
                 *     ----------XX1234  ->  sdigits
                 *     ----------X1      ->  tiny-digits
                 *     |- prec -|
                 *
                 * If tiny is zero, adding or subtracting is a no-op.
                 * Otherwise, adding tiny generates a non-zero digit either
                 * below the rounding digit or the least significant digit
                 * of big. When subtracting, tiny is in the same position as
                 * the carry that would be generated by subtracting sdigits.
                 */
                mpd_copy_flags(&tiny, small);
                tiny.exp = exp;
                tiny.digits = 1;
                tiny.len = 1;
                tiny.data[0] = mpd_iszerocoeff(small) ? 0 : 1;
                small = &tiny;
            }
            /* This cannot wrap: the difference is positive and <= maxprec */
            shift = big->exp - small->exp;
            if (!mpd_qshiftl(&big_aligned, big, shift, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                goto finish;
            }
            big = &big_aligned;
        }
    }
    result->exp = small->exp;

    /* compare length of coefficients */
    if (big->len < small->len) {
        _mpd_ptrswap(&big, &small);
        swap++;
    }

    newsize = big->len;
    if (!mpd_qresize(result, newsize, status)) {
        goto finish;
    }

    if (mpd_sign(a) == sign_b) {
        /* equal effective signs: plain coefficient addition */
        carry = _mpd_baseadd(result->data, big->data, small->data,
                             big->len, small->len);
        if (carry) {
            newsize = big->len + 1;
            if (!mpd_qresize(result, newsize, status)) {
                goto finish;
            }
            result->data[newsize-1] = carry;
        }
        result->len = newsize;
        mpd_set_flags(result, sign_b);
    }
    else {
        /* opposite signs: subtract the smaller coefficient from the larger */
        if (big->len == small->len) {
            /* same length: locate the larger coefficient by scanning from
             * the most significant word */
            for (i=big->len-1; i >= 0; --i) {
                if (big->data[i] != small->data[i]) {
                    if (big->data[i] < small->data[i]) {
                        _mpd_ptrswap(&big, &small);
                        swap++;
                    }
                    break;
                }
            }
        }

        _mpd_basesub(result->data, big->data, small->data,
                     big->len, small->len);
        newsize = _mpd_real_size(result->data, big->len);
        /* resize to smaller cannot fail */
        (void)mpd_qresize(result, newsize, status);
        result->len = newsize;
        /* even swap count: 'big' is a, so the result takes a's sign;
         * odd swap count: 'big' is b, so the result takes sign_b */
        sign_b = (swap & 1) ? sign_b : mpd_sign(a);
        mpd_set_flags(result, sign_b);

        if (mpd_iszerocoeff(result)) {
            /* an exact zero result is positive, except under ROUND_FLOOR */
            mpd_set_positive(result);
            if (ctx->round == MPD_ROUND_FLOOR) {
                mpd_set_negative(result);
            }
        }
    }

    mpd_setdigits(result);

finish:
    mpd_del(&big_aligned);
}
/* Add a and b. No specials, no finalizing: thin wrapper that forwards b's
 * own sign to _mpd_qaddsub(). */
static void
_mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
}
/* Subtract b from a. No specials, no finalizing: subtraction is addition
 * with b's sign inverted. */
static void
_mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
}
  3072. /* Add a and b. */
  3073. void
  3074. mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
  3075. const mpd_context_t *ctx, uint32_t *status)
  3076. {
  3077. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  3078. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  3079. return;
  3080. }
  3081. _mpd_qaddsub_inf(result, a, b, mpd_sign(b), status);
  3082. return;
  3083. }
  3084. _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
  3085. mpd_qfinalize(result, ctx, status);
  3086. }
  3087. /* Add a and b. Set NaN/Invalid_operation if the result is inexact. */
  3088. static void
  3089. _mpd_qadd_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
  3090. const mpd_context_t *ctx, uint32_t *status)
  3091. {
  3092. uint32_t workstatus = 0;
  3093. mpd_qadd(result, a, b, ctx, &workstatus);
  3094. *status |= workstatus;
  3095. if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
  3096. mpd_seterror(result, MPD_Invalid_operation, status);
  3097. }
  3098. }
  3099. /* Subtract b from a. */
  3100. void
  3101. mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
  3102. const mpd_context_t *ctx, uint32_t *status)
  3103. {
  3104. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  3105. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  3106. return;
  3107. }
  3108. _mpd_qaddsub_inf(result, a, b, !mpd_sign(b), status);
  3109. return;
  3110. }
  3111. _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
  3112. mpd_qfinalize(result, ctx, status);
  3113. }
  3114. /* Subtract b from a. Set NaN/Invalid_operation if the result is inexact. */
  3115. static void
  3116. _mpd_qsub_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
  3117. const mpd_context_t *ctx, uint32_t *status)
  3118. {
  3119. uint32_t workstatus = 0;
  3120. mpd_qsub(result, a, b, ctx, &workstatus);
  3121. *status |= workstatus;
  3122. if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
  3123. mpd_seterror(result, MPD_Invalid_operation, status);
  3124. }
  3125. }
  3126. /* Add decimal and mpd_ssize_t. */
  3127. void
  3128. mpd_qadd_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  3129. const mpd_context_t *ctx, uint32_t *status)
  3130. {
  3131. mpd_context_t maxcontext;
  3132. MPD_NEW_STATIC(bb,0,0,0,0);
  3133. mpd_maxcontext(&maxcontext);
  3134. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  3135. mpd_qadd(result, a, &bb, ctx, status);
  3136. mpd_del(&bb);
  3137. }
  3138. /* Add decimal and mpd_uint_t. */
  3139. void
  3140. mpd_qadd_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  3141. const mpd_context_t *ctx, uint32_t *status)
  3142. {
  3143. mpd_context_t maxcontext;
  3144. MPD_NEW_STATIC(bb,0,0,0,0);
  3145. mpd_maxcontext(&maxcontext);
  3146. mpd_qsset_uint(&bb, b, &maxcontext, status);
  3147. mpd_qadd(result, a, &bb, ctx, status);
  3148. mpd_del(&bb);
  3149. }
  3150. /* Subtract mpd_ssize_t from decimal. */
  3151. void
  3152. mpd_qsub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  3153. const mpd_context_t *ctx, uint32_t *status)
  3154. {
  3155. mpd_context_t maxcontext;
  3156. MPD_NEW_STATIC(bb,0,0,0,0);
  3157. mpd_maxcontext(&maxcontext);
  3158. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  3159. mpd_qsub(result, a, &bb, ctx, status);
  3160. mpd_del(&bb);
  3161. }
  3162. /* Subtract mpd_uint_t from decimal. */
  3163. void
  3164. mpd_qsub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  3165. const mpd_context_t *ctx, uint32_t *status)
  3166. {
  3167. mpd_context_t maxcontext;
  3168. MPD_NEW_STATIC(bb,0,0,0,0);
  3169. mpd_maxcontext(&maxcontext);
  3170. mpd_qsset_uint(&bb, b, &maxcontext, status);
  3171. mpd_qsub(result, a, &bb, ctx, status);
  3172. mpd_del(&bb);
  3173. }
/* Add decimal and int32_t. int32_t always fits into mpd_ssize_t, so this
 * delegates unconditionally. */
void
mpd_qadd_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qadd_ssize(result, a, b, ctx, status);
}
/* Add decimal and uint32_t. uint32_t always fits into mpd_uint_t, so this
 * delegates unconditionally. */
void
mpd_qadd_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qadd_uint(result, a, b, ctx, status);
}
#ifdef CONFIG_64
/* Add decimal and int64_t: on 64-bit builds the mpd_ssize_t path already
 * covers the full int64_t range. */
void
mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qadd_ssize(result, a, b, ctx, status);
}

/* Add decimal and uint64_t: on 64-bit builds the mpd_uint_t path already
 * covers the full uint64_t range. */
void
mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qadd_uint(result, a, b, ctx, status);
}
#elif !defined(LEGACY_COMPILER)
/* Add decimal and int64_t: 32-bit build, so convert b exactly under a
 * maximal context first. */
void
mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    mpd_maxcontext(&maxcontext);
    mpd_qset_i64(&bb, b, &maxcontext, status);
    mpd_qadd(result, a, &bb, ctx, status);
    mpd_del(&bb);
}

/* Add decimal and uint64_t: 32-bit build, so convert b exactly under a
 * maximal context first. */
void
mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    mpd_maxcontext(&maxcontext);
    mpd_qset_u64(&bb, b, &maxcontext, status);
    mpd_qadd(result, a, &bb, ctx, status);
    mpd_del(&bb);
}
#endif
/* Subtract int32_t from decimal. int32_t always fits into mpd_ssize_t, so
 * this delegates unconditionally. */
void
mpd_qsub_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qsub_ssize(result, a, b, ctx, status);
}
/* Subtract uint32_t from decimal. uint32_t always fits into mpd_uint_t, so
 * this delegates unconditionally. */
void
mpd_qsub_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qsub_uint(result, a, b, ctx, status);
}
#ifdef CONFIG_64
/* Subtract int64_t from decimal: on 64-bit builds the mpd_ssize_t path
 * already covers the full int64_t range. */
void
mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qsub_ssize(result, a, b, ctx, status);
}

/* Subtract uint64_t from decimal: on 64-bit builds the mpd_uint_t path
 * already covers the full uint64_t range. */
void
mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qsub_uint(result, a, b, ctx, status);
}
#elif !defined(LEGACY_COMPILER)
/* Subtract int64_t from decimal: 32-bit build, so convert b exactly under
 * a maximal context first. */
void
mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    mpd_maxcontext(&maxcontext);
    mpd_qset_i64(&bb, b, &maxcontext, status);
    mpd_qsub(result, a, &bb, ctx, status);
    mpd_del(&bb);
}

/* Subtract uint64_t from decimal: 32-bit build, so convert b exactly under
 * a maximal context first. */
void
mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    mpd_maxcontext(&maxcontext);
    mpd_qset_u64(&bb, b, &maxcontext, status);
    mpd_qsub(result, a, &bb, ctx, status);
    mpd_del(&bb);
}
#endif
/* Divide infinities. At least one operand is infinite; NaNs have been
 * handled by the caller.
 *   inf / inf    -> Invalid_operation
 *   inf / finite -> correctly signed infinity
 *   finite / inf -> correctly signed zero at etiny, with Clamped set */
static void
_mpd_qdiv_inf(mpd_t *result, const mpd_t *a, const mpd_t *b,
              const mpd_context_t *ctx, uint32_t *status)
{
    if (mpd_isinfinite(a)) {
        if (mpd_isinfinite(b)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
        return;
    }
    assert(mpd_isinfinite(b));
    _settriple(result, mpd_sign(a)^mpd_sign(b), 0, mpd_etiny(ctx));
    *status |= MPD_Clamped;
}
/* 'action' argument for _mpd_qdiv(): whether an exact quotient's exponent
 * is moved towards the ideal exponent by stripping trailing zeros. */
enum {NO_IDEAL_EXP, SET_IDEAL_EXP};

/* Divide a by b into q, rounding to ctx->prec via mpd_qfinalize(). */
static void
_mpd_qdiv(int action, mpd_t *q, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0); /* left-shifted copy of a or b */
    mpd_uint_t ld;                   /* least significant digit of q */
    mpd_ssize_t shift, exp, tz;
    mpd_ssize_t newsize;
    mpd_ssize_t ideal_exp;
    mpd_uint_t rem;                  /* nonzero iff the division is inexact */
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_b = mpd_sign(b);

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            return;
        }
        _mpd_qdiv_inf(q, a, b, ctx, status);
        return;
    }

    /* b == 0: 0/0 is Division_undefined, x/0 is a signed infinity */
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            mpd_seterror(q, MPD_Division_undefined, status);
        }
        else {
            mpd_setspecial(q, sign_a^sign_b, MPD_INF);
            *status |= MPD_Division_by_zero;
        }
        return;
    }

    /* a == 0: zero quotient with the exponent difference */
    if (mpd_iszerocoeff(a)) {
        exp = a->exp - b->exp;
        _settriple(q, sign_a^sign_b, 0, exp);
        mpd_qfinalize(q, ctx, status);
        return;
    }

    /* Shift a (or b, for negative shift) so the integer quotient has
     * prec+1 digits; the extra digit disambiguates the final rounding. */
    shift = (b->digits - a->digits) + ctx->prec + 1;
    ideal_exp = a->exp - b->exp;
    exp = ideal_exp - shift;
    if (shift > 0) {
        if (!mpd_qshiftl(&aligned, a, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        a = &aligned;
    }
    else if (shift < 0) {
        shift = -shift;
        if (!mpd_qshiftl(&aligned, b, shift, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        b = &aligned;
    }

    newsize = a->len - b->len + 1;
    if ((q != b && q != a) || (q == b && newsize > b->len)) {
        if (!mpd_qresize(q, newsize, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
    }

    /* pick the division algorithm by the size of the divisor */
    if (b->len == 1) {
        rem = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
    }
    else if (b->len <= MPD_NEWTONDIV_CUTOFF) {
        int ret = _mpd_basedivmod(q->data, NULL, a->data, b->data,
                                  a->len, b->len);
        if (ret < 0) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto finish;
        }
        rem = ret;
    }
    else {
        MPD_NEW_STATIC(r,0,0,0,0);
        _mpd_base_ndivmod(q, &r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(&r)) {
            mpd_setspecial(q, MPD_POS, MPD_NAN);
            mpd_del(&r);
            goto finish;
        }
        rem = !mpd_iszerocoeff(&r);
        mpd_del(&r);
        newsize = q->len;
    }

    newsize = _mpd_real_size(q->data, newsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, newsize, status);
    mpd_set_flags(q, sign_a^sign_b);
    q->len = newsize;
    mpd_setdigits(q);

    shift = ideal_exp - exp;
    if (rem) {
        /* Inexact result: nudge a trailing 0 or 5 to 1 or 6 so the later
         * rounding step cannot mistake the quotient for an exact half. */
        ld = mpd_lsd(q->data[0]);
        if (ld == 0 || ld == 5) {
            q->data[0] += 1;
        }
    }
    else if (action == SET_IDEAL_EXP && shift > 0) {
        /* Exact result: remove up to 'shift' trailing zeros, moving the
         * exponent towards ideal_exp. */
        tz = mpd_trail_zeros(q);
        shift = (tz > shift) ? shift : tz;
        mpd_qshiftr_inplace(q, shift);
        exp += shift;
    }

    q->exp = exp;

finish:
    mpd_del(&aligned);
    mpd_qfinalize(q, ctx, status);
}
/* Divide a by b. Operands may alias the result; aliased operands are
 * copied first. */
void
mpd_qdiv(mpd_t *q, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aa,0,0,0,0);
    MPD_NEW_STATIC(bb,0,0,0,0);
    uint32_t xstatus = 0;

    /* make private copies when q aliases an operand */
    if (q == a) {
        if (!mpd_qcopy(&aa, a, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto out;
        }
        a = &aa;
    }

    if (q == b) {
        if (!mpd_qcopy(&bb, b, status)) {
            mpd_seterror(q, MPD_Malloc_error, status);
            goto out;
        }
        b = &bb;
    }

    _mpd_qdiv(SET_IDEAL_EXP, q, a, b, ctx, &xstatus);

    if (xstatus & (MPD_Malloc_error|MPD_Division_impossible)) {
        /* Inexact quotients (the usual case) fill the entire context precision,
         * which can lead to the above errors for very high precisions. Retry
         * the operation with a lower precision in case the result is exact.
         *
         * We need an upper bound for the number of digits of a_coeff / b_coeff
         * when the result is exact.  If a_coeff' * 1 / b_coeff' is in lowest
         * terms, then maxdigits(a_coeff') + maxdigits(1 / b_coeff') is a suitable
         * bound.
         *
         * 1 / b_coeff' is exact iff b_coeff' exclusively has prime factors 2 or 5.
         * The largest amount of digits is generated if b_coeff' is a power of 2 or
         * a power of 5 and is less than or equal to log5(b_coeff') <= log2(b_coeff').
         *
         * We arrive at a total upper bound:
         *
         *   maxdigits(a_coeff') + maxdigits(1 / b_coeff') <=
         *   log10(a_coeff) + log2(b_coeff) =
         *   log10(a_coeff) + log10(b_coeff) / log10(2) <=
         *   a->digits + b->digits * 4;
         */
        mpd_context_t workctx = *ctx;
        uint32_t ystatus = 0;

        workctx.prec = a->digits + b->digits * 4;
        if (workctx.prec >= ctx->prec) {
            *status |= (xstatus&MPD_Errors);
            goto out;  /* No point in retrying, keep the original error. */
        }

        _mpd_qdiv(SET_IDEAL_EXP, q, a, b, &workctx, &ystatus);
        if (ystatus != 0) {
            /* the retry failed as well: merge the error flags */
            ystatus = *status | ((ystatus|xstatus)&MPD_Errors);
            mpd_seterror(q, ystatus, status);
        }
    }
    else {
        *status |= xstatus;
    }

out:
    mpd_del(&aa);
    mpd_del(&bb);
}
/* Internal function: integer quotient q and remainder r of a / b.
 * Specials and b == 0 have been handled by the caller. On any failure
 * both q and r are set to NaN. */
static void
_mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
             const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(aligned,0,0,0,0); /* left-shifted copy of a or b */
    mpd_ssize_t qsize, rsize;
    mpd_ssize_t ideal_exp, expdiff, shift;
    uint8_t sign_a = mpd_sign(a);
    uint8_t sign_ab = mpd_sign(a)^mpd_sign(b);

    /* ideal exponent of the remainder: min(a->exp, b->exp) */
    ideal_exp = (a->exp > b->exp) ? b->exp : a->exp;
    if (mpd_iszerocoeff(a)) {
        /* 0 / b: quotient is a signed zero, remainder is a at ideal_exp */
        if (!mpd_qcopy(r, a, status)) {
            goto nanresult; /* GCOV_NOT_REACHED */
        }
        r->exp = ideal_exp;
        _settriple(q, sign_ab, 0, 0);
        return;
    }

    expdiff = mpd_adjexp(a) - mpd_adjexp(b);
    if (expdiff < 0) {
        /* |a| < |b|: the integer quotient is zero and a is the remainder */
        if (a->exp > b->exp) {
            /* positive and less than b->digits - a->digits */
            shift = a->exp - b->exp;
            if (!mpd_qshiftl(r, a, shift, status)) {
                goto nanresult;
            }
            r->exp = ideal_exp;
        }
        else {
            if (!mpd_qcopy(r, a, status)) {
                goto nanresult;
            }
        }
        _settriple(q, sign_ab, 0, 0);
        return;
    }
    if (expdiff > ctx->prec) {
        /* the quotient would exceed the context precision */
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /*
     * At this point we have:
     *   (1) 0 <= a->exp + a->digits - b->exp - b->digits <= prec
     *   (2) a->exp - b->exp >= b->digits - a->digits
     *   (3) a->exp - b->exp <= prec + b->digits - a->digits
     */
    if (a->exp != b->exp) {
        shift = a->exp - b->exp;
        if (shift > 0) {
            /* by (3), after the shift a->digits <= prec + b->digits */
            if (!mpd_qshiftl(&aligned, a, shift, status)) {
                goto nanresult;
            }
            a = &aligned;
        }
        else  {
            shift = -shift;
            /* by (2), after the shift b->digits <= a->digits */
            if (!mpd_qshiftl(&aligned, b, shift, status)) {
                goto nanresult;
            }
            b = &aligned;
        }
    }

    /* resize unless the output aliases an operand that is already large
     * enough to hold the result */
    qsize = a->len - b->len + 1;
    if (!(q == a && qsize < a->len) && !(q == b && qsize < b->len)) {
        if (!mpd_qresize(q, qsize, status)) {
            goto nanresult;
        }
    }

    rsize = b->len;
    if (!(r == a && rsize < a->len)) {
        if (!mpd_qresize(r, rsize, status)) {
            goto nanresult;
        }
    }

    /* pick the division algorithm by the size of the divisor */
    if (b->len == 1) {
        assert(b->data[0] != 0); /* annotation for scan-build */
        if (a->len == 1) {
            _mpd_div_word(&q->data[0], &r->data[0], a->data[0], b->data[0]);
        }
        else {
            r->data[0] = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
        }
    }
    else if (b->len <= MPD_NEWTONDIV_CUTOFF) {
        int ret;
        ret = _mpd_basedivmod(q->data, r->data, a->data, b->data,
                              a->len, b->len);
        if (ret == -1) {
            *status |= MPD_Malloc_error;
            goto nanresult;
        }
    }
    else {
        _mpd_base_ndivmod(q, r, a, b, status);
        if (mpd_isspecial(q) || mpd_isspecial(r)) {
            goto nanresult;
        }
        qsize = q->len;
        rsize = r->len;
    }

    qsize = _mpd_real_size(q->data, qsize);
    /* resize to smaller cannot fail */
    mpd_qresize(q, qsize, status);
    q->len = qsize;
    mpd_setdigits(q);
    mpd_set_flags(q, sign_ab);
    q->exp = 0;
    if (q->digits > ctx->prec) {
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    rsize = _mpd_real_size(r->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(r, rsize, status);
    r->len = rsize;
    mpd_setdigits(r);
    /* the remainder takes a's sign and the ideal exponent */
    mpd_set_flags(r, sign_a);
    r->exp = ideal_exp;

out:
    mpd_del(&aligned);
    return;

nanresult:
    mpd_setspecial(q, MPD_POS, MPD_NAN);
    mpd_setspecial(r, MPD_POS, MPD_NAN);
    goto out;
}
/* Integer division with remainder: q = integer part of a / b, r = a - q*b.
 * Both results are finalized against ctx. */
void
mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
            const mpd_context_t *ctx, uint32_t *status)
{
    uint8_t sign = mpd_sign(a)^mpd_sign(b);

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            /* propagate the NaN into both outputs */
            mpd_qcopy(r, q, status);
            return;
        }
        if (mpd_isinfinite(a)) {
            /* inf // inf -> NaN; inf // finite -> signed inf.
             * The remainder is NaN and the operation is invalid. */
            if (mpd_isinfinite(b)) {
                mpd_setspecial(q, MPD_POS, MPD_NAN);
            }
            else {
                mpd_setspecial(q, sign, MPD_INF);
            }
            mpd_setspecial(r, MPD_POS, MPD_NAN);
            *status |= MPD_Invalid_operation;
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite // inf: quotient is a signed zero, remainder is a */
            if (!mpd_qcopy(r, a, status)) {
                mpd_seterror(q, MPD_Malloc_error, status);
                return;
            }
            mpd_qfinalize(r, ctx, status);
            _settriple(q, sign, 0, 0);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    if (mpd_iszerocoeff(b)) {
        /* x // 0: undefined for x == 0, otherwise infinity with NaN rest */
        if (mpd_iszerocoeff(a)) {
            mpd_setspecial(q, MPD_POS, MPD_NAN);
            mpd_setspecial(r, MPD_POS, MPD_NAN);
            *status |= MPD_Division_undefined;
        }
        else {
            mpd_setspecial(q, sign, MPD_INF);
            mpd_setspecial(r, MPD_POS, MPD_NAN);
            *status |= (MPD_Division_by_zero|MPD_Invalid_operation);
        }
        return;
    }

    _mpd_qdivmod(q, r, a, b, ctx, status);
    mpd_qfinalize(q, ctx, status);
    mpd_qfinalize(r, ctx, status);
}
/* Integer division: q = integer part of a / b; the remainder is computed
 * internally and discarded. */
void
mpd_qdivint(mpd_t *q, const mpd_t *a, const mpd_t *b,
            const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_STATIC(r,0,0,0,0); /* scratch remainder, discarded below */
    uint8_t sign = mpd_sign(a)^mpd_sign(b);

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(q, a, b, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
            /* inf // inf */
            mpd_seterror(q, MPD_Invalid_operation, status);
            return;
        }
        if (mpd_isinfinite(a)) {
            mpd_setspecial(q, sign, MPD_INF);
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite // inf: signed zero */
            _settriple(q, sign, 0, 0);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    if (mpd_iszerocoeff(b)) {
        /* x // 0: undefined for x == 0, otherwise a signed infinity */
        if (mpd_iszerocoeff(a)) {
            mpd_seterror(q, MPD_Division_undefined, status);
        }
        else {
            mpd_setspecial(q, sign, MPD_INF);
            *status |= MPD_Division_by_zero;
        }
        return;
    }

    _mpd_qdivmod(q, &r, a, b, ctx, status);
    mpd_del(&r);
    mpd_qfinalize(q, ctx, status);
}
  3694. /* Divide decimal by mpd_ssize_t. */
  3695. void
  3696. mpd_qdiv_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  3697. const mpd_context_t *ctx, uint32_t *status)
  3698. {
  3699. mpd_context_t maxcontext;
  3700. MPD_NEW_STATIC(bb,0,0,0,0);
  3701. mpd_maxcontext(&maxcontext);
  3702. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  3703. mpd_qdiv(result, a, &bb, ctx, status);
  3704. mpd_del(&bb);
  3705. }
  3706. /* Divide decimal by mpd_uint_t. */
  3707. void
  3708. mpd_qdiv_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  3709. const mpd_context_t *ctx, uint32_t *status)
  3710. {
  3711. mpd_context_t maxcontext;
  3712. MPD_NEW_STATIC(bb,0,0,0,0);
  3713. mpd_maxcontext(&maxcontext);
  3714. mpd_qsset_uint(&bb, b, &maxcontext, status);
  3715. mpd_qdiv(result, a, &bb, ctx, status);
  3716. mpd_del(&bb);
  3717. }
/* Divide decimal by int32_t. int32_t always fits into mpd_ssize_t, so this
 * delegates unconditionally. */
void
mpd_qdiv_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_ssize(result, a, b, ctx, status);
}
/* Divide decimal by uint32_t. uint32_t always fits into mpd_uint_t, so this
 * delegates unconditionally. */
void
mpd_qdiv_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_uint(result, a, b, ctx, status);
}
#ifdef CONFIG_64
/* Divide decimal by int64_t: on 64-bit builds the mpd_ssize_t path already
 * covers the full int64_t range. */
void
mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_ssize(result, a, b, ctx, status);
}

/* Divide decimal by uint64_t: on 64-bit builds the mpd_uint_t path already
 * covers the full uint64_t range. */
void
mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qdiv_uint(result, a, b, ctx, status);
}
#elif !defined(LEGACY_COMPILER)
/* Divide decimal by int64_t: 32-bit build, so convert b exactly under a
 * maximal context first. */
void
mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    mpd_maxcontext(&maxcontext);
    mpd_qset_i64(&bb, b, &maxcontext, status);
    mpd_qdiv(result, a, &bb, ctx, status);
    mpd_del(&bb);
}

/* Divide decimal by uint64_t: 32-bit build, so convert b exactly under a
 * maximal context first. */
void
mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    mpd_maxcontext(&maxcontext);
    mpd_qset_u64(&bb, b, &maxcontext, status);
    mpd_qdiv(result, a, &bb, ctx, status);
    mpd_del(&bb);
}
#endif
  3773. /* Pad the result with trailing zeros if it has fewer digits than prec. */
  3774. static void
  3775. _mpd_zeropad(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
  3776. {
  3777. if (!mpd_isspecial(result) && !mpd_iszero(result) &&
  3778. result->digits < ctx->prec) {
  3779. mpd_ssize_t shift = ctx->prec - result->digits;
  3780. mpd_qshiftl(result, result, shift, status);
  3781. result->exp -= shift;
  3782. }
  3783. }
/* Check if the result of exp(a) is guaranteed to round to one:
 * if abs(a) <= 9 * 10**(-prec-1), set result to 1 (with Rounded|Inexact)
 * and return 1; otherwise return 0. */
static int
_mpd_qexp_check_one(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
                    uint32_t *status)
{
    MPD_NEW_CONST(lim,0,-(ctx->prec+1),1,1,1,9); /* 9 * 10**(-prec-1) */
    MPD_NEW_SHARED(aa, a);

    mpd_set_positive(&aa); /* compare by absolute value */

    /* abs(a) <= 9 * 10**(-prec-1) */
    if (_mpd_cmp(&aa, &lim) <= 0) {
        _settriple(result, 0, 1, 0);
        *status |= MPD_Rounded|MPD_Inexact;
        return 1;
    }

    return 0;
}
/*
 * Get the number of iterations for the Horner scheme in _mpd_qexp():
 * r is the reduced operand (abs(r) < 1), p the working precision.
 * Returns at least 3; MPD_SSIZE_MAX signals that p is too large for the
 * 53-bit floating-point estimate below.
 */
static inline mpd_ssize_t
_mpd_get_exp_iterations(const mpd_t *r, mpd_ssize_t p)
{
    mpd_ssize_t log10pbyr; /* lower bound for log10(p / abs(r)) */
    mpd_ssize_t n;

    assert(p >= 10);
    assert(!mpd_iszero(r));
    assert(-p < mpd_adjexp(r) && mpd_adjexp(r) <= -1);

#ifdef CONFIG_64
    if (p > (mpd_ssize_t)(1ULL<<52)) {
        /* p no longer fits the double-precision estimate below */
        return MPD_SSIZE_MAX;
    }
#endif

    /*
     * Lower bound for log10(p / abs(r)): adjexp(p) - (adjexp(r) + 1)
     * At this point (for CONFIG_64, CONFIG_32 is not problematic):
     *    1) 10 <= p <= 2**52
     *    2) -p < adjexp(r) <= -1
     *    3) 1 <= log10pbyr <= 2**52 + 14
     */
    log10pbyr = (mpd_word_digits(p)-1) - (mpd_adjexp(r)+1);

    /*
     * The numerator in the paper is 1.435 * p - 1.182, calculated
     * exactly. We compensate for rounding errors by using 1.43503.
     * ACL2 proofs:
     *    1) exp-iter-approx-lower-bound: The term below evaluated
     *       in 53-bit floating point arithmetic is greater than or
     *       equal to the exact term used in the paper.
     *    2) exp-iter-approx-upper-bound: The term below is less than
     *       or equal to 3/2 * p <= 3/2 * 2**52.
     */
    n = (mpd_ssize_t)ceil((1.43503*(double)p - 1.182) / (double)log10pbyr);
    return n >= 3 ? n : 3;
}
  3837. /*
  3838. * Internal function, specials have been dealt with. Apart from Overflow
  3839. * and Underflow, two cases must be considered for the error of the result:
  3840. *
  3841. * 1) abs(a) <= 9 * 10**(-prec-1) ==> result == 1
  3842. *
  3843. * Absolute error: abs(1 - e**x) < 10**(-prec)
  3844. * -------------------------------------------
  3845. *
  3846. * 2) abs(a) > 9 * 10**(-prec-1)
  3847. *
  3848. * Relative error: abs(result - e**x) < 0.5 * 10**(-prec) * e**x
  3849. * -------------------------------------------------------------
  3850. *
  3851. * The algorithm is from Hull&Abrham, Variable Precision Exponential Function,
  3852. * ACM Transactions on Mathematical Software, Vol. 12, No. 2, June 1986.
  3853. *
  3854. * Main differences:
  3855. *
  3856. * - The number of iterations for the Horner scheme is calculated using
  3857. * 53-bit floating point arithmetic.
  3858. *
  3859. * - In the error analysis for ER (relative error accumulated in the
  3860. * evaluation of the truncated series) the reduced operand r may
  3861. * have any number of digits.
  3862. * ACL2 proof: exponent-relative-error
  3863. *
  3864. * - The analysis for early abortion has been adapted for the mpd_t
  3865. * ranges.
  3866. */
static void
_mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
          uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(tmp,0,0,0,0);
    MPD_NEW_STATIC(sum,0,0,0,0);
    MPD_NEW_CONST(word,0,0,1,1,1,1);
    mpd_ssize_t j, n, t;

    assert(!mpd_isspecial(a));

    /* exp(0) == 1, exactly */
    if (mpd_iszerocoeff(a)) {
        _settriple(result, MPD_POS, 1, 0);
        return;
    }

    /*
     * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where abs(r) < 1 and t >= 0.
     *
     * If t > 0, we have:
     *
     *   (1) 0.1 <= r < 1, so e^0.1 <= e^r. If t > MAX_T, overflow occurs:
     *
     *     MAX-EMAX+1 < log10(e^(0.1*10*t)) <= log10(e^(r*10^t)) < adjexp(e^(r*10^t))+1
     *
     *   (2) -1 < r <= -0.1, so e^r <= e^-0.1. If t > MAX_T, underflow occurs:
     *
     *     adjexp(e^(r*10^t)) <= log10(e^(r*10^t)) <= log10(e^(-0.1*10^t)) < MIN-ETINY
     */
#if defined(CONFIG_64)
  #define MPD_EXP_MAX_T 19
#elif defined(CONFIG_32)
  #define MPD_EXP_MAX_T 10
#endif
    t = a->digits + a->exp;
    t = (t > 0) ? t : 0;
    if (t > MPD_EXP_MAX_T) {
        if (mpd_ispositive(a)) {
            /* Guaranteed overflow: +Infinity */
            mpd_setspecial(result, MPD_POS, MPD_INF);
            *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
        }
        else {
            /* Guaranteed underflow: set 0E(etiny) */
            _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
            *status |= (MPD_Inexact|MPD_Rounded|MPD_Subnormal|
                        MPD_Underflow|MPD_Clamped);
        }
        return;
    }

    /* abs(a) <= 9 * 10**(-prec-1): result rounds to 1 */
    if (_mpd_qexp_check_one(result, a, ctx, status)) {
        return;
    }

    mpd_maxcontext(&workctx);
    workctx.prec = ctx->prec + t + 2;
    workctx.prec = (workctx.prec < 10) ? 10 : workctx.prec;
    workctx.round = MPD_ROUND_HALF_EVEN;

    /* result := r = a * 10**-t, the reduced operand with abs(r) < 1 */
    if (!mpd_qcopy(result, a, status)) {
        return;
    }
    result->exp -= t;

    /*
     * At this point:
     *   1) 9 * 10**(-prec-1) < abs(a)
     *   2) 9 * 10**(-prec-t-1) < abs(r)
     *   3) log10(9) - prec - t - 1 < log10(abs(r)) < adjexp(abs(r)) + 1
     *   4) - prec - t - 2 < adjexp(abs(r)) <= -1
     */
    n = _mpd_get_exp_iterations(result, workctx.prec);
    if (n == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */
        return; /* GCOV_UNLIKELY */
    }

    /* Horner evaluation of the truncated Taylor series of e^r:
     *   sum = 1 + r/1 * (1 + r/2 * (... * (1 + r/(n-1)))) */
    _settriple(&sum, MPD_POS, 1, 0);

    for (j = n-1; j >= 1; j--) {
        word.data[0] = j;
        mpd_setdigits(&word);
        mpd_qdiv(&tmp, result, &word, &workctx, &workctx.status);
        mpd_qfma(&sum, &sum, &tmp, &one, &workctx, &workctx.status);
    }

    /* Undo the argument reduction: result = sum**(10**t) */
#ifdef CONFIG_64
    _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
#else
    if (t <= MPD_MAX_POW10) {
        _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
    }
    else {
        /* 10**t does not fit in a single word: exponentiate in two steps. */
        t -= MPD_MAX_POW10;
        _mpd_qpow_uint(&tmp, &sum, mpd_pow10[MPD_MAX_POW10], MPD_POS,
                       &workctx, status);
        _mpd_qpow_uint(result, &tmp, mpd_pow10[t], MPD_POS, &workctx, status);
    }
#endif

    mpd_del(&tmp);
    mpd_del(&sum);
    *status |= (workctx.status&MPD_Errors);
    *status |= (MPD_Inexact|MPD_Rounded);
}
  3962. /* exp(a) */
void
mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
         uint32_t *status)
{
    mpd_context_t workctx;

    /* Specials: NaNs propagate; exp(-Inf) == 0, exp(+Inf) == +Inf */
    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            _settriple(result, MPD_POS, 0, 0);
        }
        else {
            mpd_setspecial(result, MPD_POS, MPD_INF);
        }
        return;
    }
    /* exp(0) == 1, exactly */
    if (mpd_iszerocoeff(a)) {
        _settriple(result, MPD_POS, 1, 0);
        return;
    }

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    if (ctx->allcr) {
        /* Correct rounding requested: recompute with increasing precision
         * until rounding result-ulp and result+ulp gives the same value. */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;
        mpd_ssize_t ulpexp;
        uint32_t workstatus;

        /* result may alias the input: work on a copy */
        if (result == a) {
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            workstatus = 0;
            _mpd_qexp(result, a, &workctx, &workstatus);
            *status |= workstatus;

            ulpexp = result->exp + result->digits - workctx.prec;
            if (workstatus & MPD_Underflow) {
                /* The effective work precision is result->digits. */
                ulpexp = result->exp;
            }
            _ssettriple(&ulp, MPD_POS, 1, ulpexp);

            /*
             * At this point [1]:
             *   1) abs(result - e**x) < 0.5 * 10**(-prec) * e**x
             *   2) result - ulp < e**x < result + ulp
             *   3) result - ulp < result < result + ulp
             *
             * If round(result-ulp)==round(result+ulp), then
             * round(result)==round(e**x). Therefore the result
             * is correctly rounded.
             *
             * [1] If abs(a) <= 9 * 10**(-prec-1), use the absolute
             *     error for a similar argument.
             */
            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                _mpd_zeropad(result, &workctx, status);
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            /* Not yet decidable: retry with more digits */
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        _mpd_qexp(result, a, &workctx, status);
        _mpd_zeropad(result, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
        mpd_qfinalize(result, &workctx, status);
    }
}
  4052. /* Fused multiply-add: (a * b) + c, with a single final rounding. */
  4053. void
  4054. mpd_qfma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
  4055. const mpd_context_t *ctx, uint32_t *status)
  4056. {
  4057. uint32_t workstatus = 0;
  4058. mpd_t *cc = NULL;
  4059. if (result == c) {
  4060. if ((cc = mpd_qncopy(c)) == NULL) {
  4061. mpd_seterror(result, MPD_Malloc_error, status);
  4062. return;
  4063. }
  4064. c = cc;
  4065. }
  4066. _mpd_qmul(result, a, b, ctx, &workstatus);
  4067. if (!(workstatus&MPD_Invalid_operation)) {
  4068. mpd_qadd(result, result, c, ctx, &workstatus);
  4069. }
  4070. if (cc) mpd_del(cc);
  4071. *status |= workstatus;
  4072. }
  4073. /*
  4074. * Schedule the optimal precision increase for the Newton iteration.
  4075. * v := input operand
  4076. * z_0 := initial approximation
  4077. * initprec := natural number such that abs(log(v) - z_0) < 10**-initprec
  4078. * maxprec := target precision
  4079. *
  4080. * For convenience the output klist contains the elements in reverse order:
  4081. * klist := [k_n-1, ..., k_0], where
  4082. * 1) k_0 <= initprec and
  4083. * 2) abs(log(v) - result) < 10**(-2*k_n-1 + 1) <= 10**-maxprec.
  4084. */
  4085. static inline int
  4086. ln_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2], mpd_ssize_t maxprec,
  4087. mpd_ssize_t initprec)
  4088. {
  4089. mpd_ssize_t k;
  4090. int i;
  4091. assert(maxprec >= 2 && initprec >= 2);
  4092. if (maxprec <= initprec) return -1;
  4093. i = 0; k = maxprec;
  4094. do {
  4095. k = (k+2) / 2;
  4096. klist[i++] = k;
  4097. } while (k > initprec);
  4098. return i-1;
  4099. }
/* The constants have been verified with both decimal.py and mpfr. */
#ifdef CONFIG_64
#if MPD_RDIGITS != 19
  #error "mpdecimal.c: MPD_RDIGITS must be 19."
#endif
/* Digits of ln(10), stored least significant word first: the last array
 * element holds the leading 19 digits, 2.302585092994045684... */
static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
  6983716328982174407ULL, 9089704281976336583ULL, 1515961135648465461ULL,
  4416816335727555703ULL, 2900988039194170265ULL, 2307925037472986509ULL,
   107598438319191292ULL, 3466624107184669231ULL, 4450099781311469159ULL,
  9807828059751193854ULL, 7713456862091670584ULL, 1492198849978748873ULL,
  6528728696511086257ULL, 2385392051446341972ULL, 8692180205189339507ULL,
  6518769751037497088ULL, 2375253577097505395ULL, 9095610299291824318ULL,
   982748238504564801ULL, 5438635917781170543ULL, 7547331541421808427ULL,
   752371033310119785ULL, 3171643095059950878ULL, 9785265383207606726ULL,
  2932258279850258550ULL, 5497347726624257094ULL, 2976979522110718264ULL,
  9221477656763693866ULL, 1979650047149510504ULL, 6674183485704422507ULL,
  9702766860595249671ULL, 9278096762712757753ULL, 9314848524948644871ULL,
  6826928280848118428ULL,  754403708474699401ULL,  230105703089634572ULL,
  1929203337658714166ULL, 7589402567763113569ULL, 4208241314695689016ULL,
  2922455440575892572ULL, 9356734206705811364ULL, 2684916746550586856ULL,
   644507064800027750ULL, 9476834636167921018ULL, 5659121373450747856ULL,
  2835522011480466371ULL, 6470806855677432162ULL, 7141748003688084012ULL,
  9619404400222105101ULL, 5504893431493939147ULL, 6674744042432743651ULL,
  2287698219886746543ULL, 7773262884616336622ULL, 1985283935053089653ULL,
  4680843799894826233ULL, 8168948290720832555ULL, 8067566662873690987ULL,
  6248633409525465082ULL, 9829834196778404228ULL, 3524802359972050895ULL,
  3327900967572609677ULL,  110148862877297603ULL,  179914546843642076ULL,
  2302585092994045684ULL
};
#else
#if MPD_RDIGITS != 9
  #error "mpdecimal.c: MPD_RDIGITS must be 9."
#endif
/* Digits of ln(10), stored least significant word first: the last array
 * element holds the leading 9 digits, 2.30258509... */
static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
  401682692UL, 708474699UL, 720754403UL,  30896345UL, 602301057UL, 765871416UL,
  192920333UL, 763113569UL, 589402567UL, 956890167UL,  82413146UL, 589257242UL,
  245544057UL, 811364292UL, 734206705UL, 868569356UL, 167465505UL, 775026849UL,
  706480002UL,  18064450UL, 636167921UL, 569476834UL, 734507478UL, 156591213UL,
  148046637UL, 283552201UL, 677432162UL, 470806855UL, 880840126UL, 417480036UL,
  210510171UL, 940440022UL, 939147961UL, 893431493UL, 436515504UL, 440424327UL,
  654366747UL, 821988674UL, 622228769UL, 884616336UL, 537773262UL, 350530896UL,
  319852839UL, 989482623UL, 468084379UL, 720832555UL, 168948290UL, 736909878UL,
  675666628UL, 546508280UL, 863340952UL, 404228624UL, 834196778UL, 508959829UL,
   23599720UL, 967735248UL,  96757260UL, 603332790UL, 862877297UL, 760110148UL,
  468436420UL, 401799145UL, 299404568UL, 230258509UL
};
#endif
/* _mpd_ln10 is used directly for precisions smaller than MINALLOC_MAX*RDIGITS.
   Otherwise, it serves as the initial approximation for calculating ln(10). */
static const mpd_t _mpd_ln10 = {
  MPD_STATIC|MPD_CONST_DATA, -(MPD_MINALLOC_MAX*MPD_RDIGITS-1),
  MPD_MINALLOC_MAX*MPD_RDIGITS, MPD_MINALLOC_MAX, MPD_MINALLOC_MAX,
  (mpd_uint_t *)mpd_ln10_data
};
  4154. /*
  4155. * Set 'result' to log(10).
  4156. * Ulp error: abs(result - log(10)) < ulp(log(10))
  4157. * Relative error: abs(result - log(10)) < 5 * 10**-prec * log(10)
  4158. *
  4159. * NOTE: The relative error is not derived from the ulp error, but
  4160. * calculated separately using the fact that 23/10 < log(10) < 24/10.
  4161. */
void
mpd_qln10(mpd_t *result, mpd_ssize_t prec, uint32_t *status)
{
    mpd_context_t varcontext, maxcontext;
    MPD_NEW_STATIC(tmp, 0,0,0,0);
    MPD_NEW_CONST(static10, 0,0,2,1,1,10);
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
    mpd_uint_t rnd;
    mpd_ssize_t shift;
    int i;

    assert(prec >= 1);

    /* Take the most significant 'prec' digits of the precomputed table value. */
    shift = MPD_MINALLOC_MAX*MPD_RDIGITS-prec;
    shift = shift < 0 ? 0 : shift;

    rnd = mpd_qshiftr(result, &_mpd_ln10, shift, status);
    if (rnd == MPD_UINT_MAX) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }
    result->exp = -(result->digits-1);

    mpd_maxcontext(&maxcontext);
    if (prec < MPD_MINALLOC_MAX*MPD_RDIGITS) {
        /* The table already has enough digits: round the shifted-out
         * excess and return. */
        maxcontext.prec = prec;
        _mpd_apply_round_excess(result, rnd, &maxcontext, status);
        *status |= (MPD_Inexact|MPD_Rounded);
        return;
    }

    /* Extend the table value by the Newton iteration for f(x) = e^x - 10:
     *   x_{k+1} = x_k + 10*e^(-x_k) - 1,
     * roughly doubling the number of correct digits per step. */
    mpd_maxcontext(&varcontext);
    varcontext.round = MPD_ROUND_TRUNC;

    i = ln_schedule_prec(klist, prec+2, -result->exp);
    for (; i >= 0; i--) {
        varcontext.prec = 2*klist[i]+3;
        result->flags ^= MPD_NEG;    /* tmp := e^(-result) */
        _mpd_qexp(&tmp, result, &varcontext, status);
        result->flags ^= MPD_NEG;
        mpd_qmul(&tmp, &static10, &tmp, &varcontext, status);
        mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
        mpd_qadd(result, result, &tmp, &maxcontext, status);
        if (mpd_isspecial(result)) {
            break;
        }
    }

    mpd_del(&tmp);
    maxcontext.prec = prec;
    mpd_qfinalize(result, &maxcontext, status);
}
  4207. /*
  4208. * Initial approximations for the ln() iteration. The values have the
  4209. * following properties (established with both decimal.py and mpfr):
  4210. *
  4211. * Index 0 - 400, logarithms of x in [1.00, 5.00]:
  4212. * abs(lnapprox[i] * 10**-3 - log((i+100)/100)) < 10**-2
  4213. * abs(lnapprox[i] * 10**-3 - log((i+1+100)/100)) < 10**-2
  4214. *
  4215. * Index 401 - 899, logarithms of x in (0.500, 0.999]:
  4216. * abs(-lnapprox[i] * 10**-3 - log((i+100)/1000)) < 10**-2
  4217. * abs(-lnapprox[i] * 10**-3 - log((i+1+100)/1000)) < 10**-2
  4218. */
static const uint16_t lnapprox[900] = {
  /* index 0 - 400: log((i+100)/100) * 1000, i.e. x in [1.00, 5.00] */
  0, 10, 20, 30, 39, 49, 58, 68, 77, 86, 95, 104, 113, 122, 131, 140, 148, 157,
  166, 174, 182, 191, 199, 207, 215, 223, 231, 239, 247, 255, 262, 270, 278,
  285, 293, 300, 308, 315, 322, 329, 336, 344, 351, 358, 365, 372, 378, 385,
  392, 399, 406, 412, 419, 425, 432, 438, 445, 451, 457, 464, 470, 476, 482,
  489, 495, 501, 507, 513, 519, 525, 531, 536, 542, 548, 554, 560, 565, 571,
  577, 582, 588, 593, 599, 604, 610, 615, 621, 626, 631, 637, 642, 647, 652,
  658, 663, 668, 673, 678, 683, 688, 693, 698, 703, 708, 713, 718, 723, 728,
  732, 737, 742, 747, 751, 756, 761, 766, 770, 775, 779, 784, 788, 793, 798,
  802, 806, 811, 815, 820, 824, 829, 833, 837, 842, 846, 850, 854, 859, 863,
  867, 871, 876, 880, 884, 888, 892, 896, 900, 904, 908, 912, 916, 920, 924,
  928, 932, 936, 940, 944, 948, 952, 956, 959, 963, 967, 971, 975, 978, 982,
  986, 990, 993, 997, 1001, 1004, 1008, 1012, 1015, 1019, 1022, 1026, 1030,
  1033, 1037, 1040, 1044, 1047, 1051, 1054, 1058, 1061, 1065, 1068, 1072, 1075,
  1078, 1082, 1085, 1089, 1092, 1095, 1099, 1102, 1105, 1109, 1112, 1115, 1118,
  1122, 1125, 1128, 1131, 1135, 1138, 1141, 1144, 1147, 1151, 1154, 1157, 1160,
  1163, 1166, 1169, 1172, 1176, 1179, 1182, 1185, 1188, 1191, 1194, 1197, 1200,
  1203, 1206, 1209, 1212, 1215, 1218, 1221, 1224, 1227, 1230, 1233, 1235, 1238,
  1241, 1244, 1247, 1250, 1253, 1256, 1258, 1261, 1264, 1267, 1270, 1273, 1275,
  1278, 1281, 1284, 1286, 1289, 1292, 1295, 1297, 1300, 1303, 1306, 1308, 1311,
  1314, 1316, 1319, 1322, 1324, 1327, 1330, 1332, 1335, 1338, 1340, 1343, 1345,
  1348, 1351, 1353, 1356, 1358, 1361, 1364, 1366, 1369, 1371, 1374, 1376, 1379,
  1381, 1384, 1386, 1389, 1391, 1394, 1396, 1399, 1401, 1404, 1406, 1409, 1411,
  1413, 1416, 1418, 1421, 1423, 1426, 1428, 1430, 1433, 1435, 1437, 1440, 1442,
  1445, 1447, 1449, 1452, 1454, 1456, 1459, 1461, 1463, 1466, 1468, 1470, 1472,
  1475, 1477, 1479, 1482, 1484, 1486, 1488, 1491, 1493, 1495, 1497, 1500, 1502,
  1504, 1506, 1509, 1511, 1513, 1515, 1517, 1520, 1522, 1524, 1526, 1528, 1530,
  1533, 1535, 1537, 1539, 1541, 1543, 1545, 1548, 1550, 1552, 1554, 1556, 1558,
  1560, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585,
  1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609,
  /* index 401 - 899: -log((i+100)/1000) * 1000, i.e. x in (0.500, 0.999] */
  691, 689, 687, 685, 683, 681, 679, 677, 675, 673, 671, 669, 668, 666, 664,
  662, 660, 658, 656, 654, 652, 650, 648, 646, 644, 642, 641, 639, 637, 635,
  633, 631, 629, 627, 626, 624, 622, 620, 618, 616, 614, 612, 611, 609, 607,
  605, 603, 602, 600, 598, 596, 594, 592, 591, 589, 587, 585, 583, 582, 580,
  578, 576, 574, 573, 571, 569, 567, 566, 564, 562, 560, 559, 557, 555, 553,
  552, 550, 548, 546, 545, 543, 541, 540, 538, 536, 534, 533, 531, 529, 528,
  526, 524, 523, 521, 519, 518, 516, 514, 512, 511, 509, 508, 506, 504, 502,
  501, 499, 498, 496, 494, 493, 491, 489, 488, 486, 484, 483, 481, 480, 478,
  476, 475, 473, 472, 470, 468, 467, 465, 464, 462, 460, 459, 457, 456, 454,
  453, 451, 449, 448, 446, 445, 443, 442, 440, 438, 437, 435, 434, 432, 431,
  429, 428, 426, 425, 423, 422, 420, 419, 417, 416, 414, 412, 411, 410, 408,
  406, 405, 404, 402, 400, 399, 398, 396, 394, 393, 392, 390, 389, 387, 386,
  384, 383, 381, 380, 378, 377, 375, 374, 372, 371, 370, 368, 367, 365, 364,
  362, 361, 360, 358, 357, 355, 354, 352, 351, 350, 348, 347, 345, 344, 342,
  341, 340, 338, 337, 336, 334, 333, 331, 330, 328, 327, 326, 324, 323, 322,
  320, 319, 318, 316, 315, 313, 312, 311, 309, 308, 306, 305, 304, 302, 301,
  300, 298, 297, 296, 294, 293, 292, 290, 289, 288, 286, 285, 284, 282, 281,
  280, 278, 277, 276, 274, 273, 272, 270, 269, 268, 267, 265, 264, 263, 261,
  260, 259, 258, 256, 255, 254, 252, 251, 250, 248, 247, 246, 245, 243, 242,
  241, 240, 238, 237, 236, 234, 233, 232, 231, 229, 228, 227, 226, 224, 223,
  222, 221, 219, 218, 217, 216, 214, 213, 212, 211, 210, 208, 207, 206, 205,
  203, 202, 201, 200, 198, 197, 196, 195, 194, 192, 191, 190, 189, 188, 186,
  185, 184, 183, 182, 180, 179, 178, 177, 176, 174, 173, 172, 171, 170, 168,
  167, 166, 165, 164, 162, 161, 160, 159, 158, 157, 156, 154, 153, 152, 151,
  150, 148, 147, 146, 145, 144, 143, 142, 140, 139, 138, 137, 136, 135, 134,
  132, 131, 130, 129, 128, 127, 126, 124, 123, 122, 121, 120, 119, 118, 116,
  115, 114, 113, 112, 111, 110, 109, 108, 106, 105, 104, 103, 102, 101, 100,
  99, 98, 97, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 84, 83, 82, 81, 80, 79,
  78, 77, 76, 75, 74, 73, 72, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59,
  58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39,
  38, 37, 36, 35, 34, 33, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
  18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
};
  4284. /*
  4285. * Internal ln() function that does not check for specials, zero or one.
  4286. * Relative error: abs(result - log(a)) < 0.1 * 10**-prec * abs(log(a))
  4287. */
static void
_mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
         uint32_t *status)
{
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = result;
    MPD_NEW_STATIC(v,0,0,0,0);
    MPD_NEW_STATIC(vtmp,0,0,0,0);
    MPD_NEW_STATIC(tmp,0,0,0,0);
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
    mpd_ssize_t maxprec, shift, t;
    mpd_ssize_t a_digits, a_exp;
    mpd_uint_t dummy, x;
    int i;

    assert(!mpd_isspecial(a) && !mpd_iszerocoeff(a));

    /*
     * We are calculating ln(a) = ln(v * 10^t) = ln(v) + t*ln(10),
     * where 0.5 < v <= 5.
     */
    if (!mpd_qcopy(&v, a, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        goto finish;
    }

    /* Initial approximation: we have at least one non-zero digit.
     * x is scaled into [100, 999] and then offset to index lnapprox. */
    _mpd_get_msdigits(&dummy, &x, &v, 3);
    if (x < 10) x *= 10;
    if (x < 100) x *= 10;
    x -= 100;

    /* a may equal z: save what is needed before z is clobbered */
    a_digits = a->digits;
    a_exp = a->exp;

    /* z := table approximation of ln(v), one word, scale 10**-3 */
    mpd_minalloc(z);
    mpd_clear_flags(z);
    z->data[0] = lnapprox[x];
    z->len = 1;
    z->exp = -3;
    mpd_setdigits(z);

    if (x <= 400) {
        /* Reduce the input operand to 1.00 <= v <= 5.00. Let y = x + 100,
         * so 100 <= y <= 500. Since y contains the most significant digits
         * of v, y/100 <= v < (y+1)/100 and abs(z - log(v)) < 10**-2. */
        v.exp = -(a_digits - 1);
        t = a_exp + a_digits - 1;
    }
    else {
        /* Reduce the input operand to 0.500 < v <= 0.999. Let y = x + 100,
         * so 500 < y <= 999. Since y contains the most significant digits
         * of v, y/1000 <= v < (y+1)/1000 and abs(z - log(v)) < 10**-2. */
        v.exp = -a_digits;
        t = a_exp + a_digits;
        mpd_set_negative(z);
    }

    mpd_maxcontext(&maxcontext);
    mpd_maxcontext(&varcontext);
    varcontext.round = MPD_ROUND_TRUNC;

    maxprec = ctx->prec + 2;
    if (t == 0 && (x <= 15 || x >= 800)) {
        /* 0.900 <= v <= 1.15: Estimate the magnitude of the logarithm.
         * If ln(v) will underflow, skip the loop. Otherwise, adjust the
         * precision upwards in order to obtain a sufficient number of
         * significant digits.
         *
         *   Case v > 1:
         *      abs((v-1)/10) < abs((v-1)/v) < abs(ln(v)) < abs(v-1)
         *   Case v < 1:
         *      abs(v-1) < abs(ln(v)) < abs((v-1)/v) < abs((v-1)*10)
         */
        int cmp = _mpd_cmp(&v, &one);

        /* Upper bound (assume v > 1): abs(v-1), unrounded */
        _mpd_qsub(&tmp, &v, &one, &maxcontext, &maxcontext.status);
        if (maxcontext.status & MPD_Errors) {
            mpd_seterror(result, MPD_Malloc_error, status);
            goto finish;
        }

        if (cmp < 0) {
            /* v < 1: abs((v-1)*10) */
            tmp.exp += 1;
        }

        if (mpd_adjexp(&tmp) < mpd_etiny(ctx)) {
            /* The upper bound is less than etiny: Underflow to zero */
            _settriple(result, (cmp<0), 1, mpd_etiny(ctx)-1);
            goto finish;
        }
        /* Lower bound: abs((v-1)/10) or abs(v-1) */
        tmp.exp -= 1;
        if (mpd_adjexp(&tmp) < 0) {
            /* Absolute error of the loop: abs(z - log(v)) < 10**-p. If
             * p = ctx->prec+2-adjexp(lower), then the relative error of
             * the result is (using 10**adjexp(x) <= abs(x)):
             *
             *   abs(z - log(v)) / abs(log(v)) < 10**-p / abs(log(v))
             *                                 <= 10**(-ctx->prec-2)
             */
            maxprec = maxprec - mpd_adjexp(&tmp);
        }
    }

    /* Newton iteration for f(z) = e^z - v:
     *   z_{k+1} = z_k + v*e^(-z_k) - 1 */
    i = ln_schedule_prec(klist, maxprec, 2);
    for (; i >= 0; i--) {
        varcontext.prec = 2*klist[i]+3;
        z->flags ^= MPD_NEG;    /* tmp := e^(-z) */
        _mpd_qexp(&tmp, z, &varcontext, status);
        z->flags ^= MPD_NEG;

        if (v.digits > varcontext.prec) {
            /* Truncate v to the working precision before multiplying. */
            shift = v.digits - varcontext.prec;
            mpd_qshiftr(&vtmp, &v, shift, status);
            vtmp.exp += shift;
            mpd_qmul(&tmp, &vtmp, &tmp, &varcontext, status);
        }
        else {
            mpd_qmul(&tmp, &v, &tmp, &varcontext, status);
        }

        mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
        mpd_qadd(z, z, &tmp, &maxcontext, status);
        if (mpd_isspecial(z)) {
            break;
        }
    }

    /*
     * Case t == 0:
     *    t * log(10) == 0, the result does not change and the analysis
     *    above applies. If v < 0.900 or v > 1.15, the relative error is
     *    less than 10**(-ctx.prec-1).
     * Case t != 0:
     *      z := approx(log(v))
     *      y := approx(log(10))
     *      p := maxprec = ctx->prec + 2
     *   Absolute errors:
     *      1) abs(z - log(v)) < 10**-p
     *      2) abs(y - log(10)) < 10**-p
     *   The multiplication is exact, so:
     *      3) abs(t*y - t*log(10)) < t*10**-p
     *   The sum is exact, so:
     *      4) abs((z + t*y) - (log(v) + t*log(10))) < (abs(t) + 1) * 10**-p
     *   Bounds for log(v) and log(10):
     *      5) -7/10 < log(v) < 17/10
     *      6) 23/10 < log(10) < 24/10
     *   Using 4), 5), 6) and t != 0, the relative error is:
     *
     *      7) relerr < ((abs(t) + 1)*10**-p) / abs(log(v) + t*log(10))
     *                < 0.5 * 10**(-p + 1) = 0.5 * 10**(-ctx->prec-1)
     */
    mpd_qln10(&v, maxprec+1, status);
    mpd_qmul_ssize(&tmp, &v, t, &maxcontext, status);
    mpd_qadd(result, &tmp, z, &maxcontext, status);

finish:
    *status |= (MPD_Inexact|MPD_Rounded);
    mpd_del(&v);
    mpd_del(&vtmp);
    mpd_del(&tmp);
}
  4438. /* ln(a) */
void
mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
        uint32_t *status)
{
    mpd_context_t workctx;
    mpd_ssize_t adjexp, t;

    /* Specials: NaNs propagate; ln(-x) and ln(-Inf) are invalid;
     * ln(+Inf) == +Inf */
    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    /* ln(0) == -Inf */
    if (mpd_iszerocoeff(a)) {
        mpd_setspecial(result, MPD_NEG, MPD_INF);
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* ln(1) == 0, exactly */
    if (_mpd_cmp(a, &one) == 0) {
        _settriple(result, MPD_POS, 0, 0);
        return;
    }

    /*
     * Check if the result will overflow (0 < x, x != 1):
     *   1) log10(x) < 0 iff adjexp(x) < 0
     *   2) 0 < x /\ x <= y ==> adjexp(x) <= adjexp(y)
     *   3) 0 < x /\ x != 1 ==> 2 * abs(log10(x)) < abs(log(x))
     *   4) adjexp(x) <= log10(x) < adjexp(x) + 1
     *
     * Case adjexp(x) >= 0:
     *   5) 2 * adjexp(x) < abs(log(x))
     *   Case adjexp(x) > 0:
     *     6) adjexp(2 * adjexp(x)) <= adjexp(abs(log(x)))
     *   Case adjexp(x) == 0:
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
     *
     * Case adjexp(x) < 0:
     *   7) 2 * (-adjexp(x) - 1) < abs(log(x))
     *   Case adjexp(x) < -1:
     *     8) adjexp(2 * (-adjexp(x) - 1)) <= adjexp(abs(log(x)))
     *   Case adjexp(x) == -1:
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
     */
    adjexp = mpd_adjexp(a);
    t = (adjexp < 0) ? -adjexp-1 : adjexp;
    t *= 2;
    if (mpd_exp_digits(t)-1 > ctx->emax) {
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
        mpd_setspecial(result, (adjexp<0), MPD_INF);
        return;
    }

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    if (ctx->allcr) {
        /* Correct rounding: recompute with increasing precision until
         * rounding result-ulp and result+ulp gives the same value. */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;

        /* result may alias the input: work on a copy */
        if (result == a) {
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            _mpd_qln(result, a, &workctx, status);
            _ssettriple(&ulp, MPD_POS, 1,
                        result->exp + result->digits-workctx.prec);

            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            /* Not yet decidable: retry with more digits */
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        _mpd_qln(result, a, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
        mpd_qfinalize(result, &workctx, status);
    }
}
  4542. /*
  4543. * Internal log10() function that does not check for specials, zero or one.
  4544. * Case SKIP_FINALIZE:
  4545. * Relative error: abs(result - log10(a)) < 0.1 * 10**-prec * abs(log10(a))
  4546. * Case DO_FINALIZE:
  4547. * Ulp error: abs(result - log10(a)) < ulp(log10(a))
  4548. */
  4549. enum {SKIP_FINALIZE, DO_FINALIZE};
  4550. static void
  4551. _mpd_qlog10(int action, mpd_t *result, const mpd_t *a,
  4552. const mpd_context_t *ctx, uint32_t *status)
  4553. {
  4554. mpd_context_t workctx;
  4555. MPD_NEW_STATIC(ln10,0,0,0,0);
  4556. mpd_maxcontext(&workctx);
  4557. workctx.prec = ctx->prec + 3;
  4558. /* relative error: 0.1 * 10**(-p-3). The specific underflow shortcut
  4559. * in _mpd_qln() does not change the final result. */
  4560. _mpd_qln(result, a, &workctx, status);
  4561. /* relative error: 5 * 10**(-p-3) */
  4562. mpd_qln10(&ln10, workctx.prec, status);
  4563. if (action == DO_FINALIZE) {
  4564. workctx = *ctx;
  4565. workctx.round = MPD_ROUND_HALF_EVEN;
  4566. }
  4567. /* SKIP_FINALIZE: relative error: 5 * 10**(-p-3) */
  4568. _mpd_qdiv(NO_IDEAL_EXP, result, result, &ln10, &workctx, status);
  4569. mpd_del(&ln10);
  4570. }
  4571. /* log10(a) */
void
mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
           uint32_t *status)
{
    mpd_context_t workctx;
    mpd_ssize_t adjexp, t;

    workctx = *ctx;
    workctx.round = MPD_ROUND_HALF_EVEN;

    /* Specials: NaNs propagate; log10(-x) and log10(-Inf) are invalid;
     * log10(+Inf) == +Inf */
    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    /* log10(0) == -Inf */
    if (mpd_iszerocoeff(a)) {
        mpd_setspecial(result, MPD_NEG, MPD_INF);
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    /* Exact shortcut: log10 of a power of ten is the adjusted exponent */
    if (mpd_coeff_ispow10(a)) {
        uint8_t sign = 0;
        adjexp = mpd_adjexp(a);
        if (adjexp < 0) {
            sign = 1;
            adjexp = -adjexp;
        }
        _settriple(result, sign, adjexp, 0);
        mpd_qfinalize(result, &workctx, status);
        return;
    }

    /*
     * Check if the result will overflow (0 < x, x != 1):
     *   1) log10(x) < 0 iff adjexp(x) < 0
     *   2) 0 < x /\ x <= y ==> adjexp(x) <= adjexp(y)
     *   3) adjexp(x) <= log10(x) < adjexp(x) + 1
     *
     * Case adjexp(x) >= 0:
     *   4) adjexp(x) <= abs(log10(x))
     *   Case adjexp(x) > 0:
     *     5) adjexp(adjexp(x)) <= adjexp(abs(log10(x)))
     *   Case adjexp(x) == 0:
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
     *
     * Case adjexp(x) < 0:
     *   6) -adjexp(x) - 1 < abs(log10(x))
     *   Case adjexp(x) < -1:
     *     7) adjexp(-adjexp(x) - 1) <= adjexp(abs(log(x)))
     *   Case adjexp(x) == -1:
     *     mpd_exp_digits(t)-1 == 0 <= emax (the shortcut is not triggered)
     */
    adjexp = mpd_adjexp(a);
    t = (adjexp < 0) ? -adjexp-1 : adjexp;
    if (mpd_exp_digits(t)-1 > ctx->emax) {
        *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
        mpd_setspecial(result, (adjexp<0), MPD_INF);
        return;
    }

    if (ctx->allcr) {
        /* Correct rounding: recompute with increasing precision until
         * rounding result-ulp and result+ulp gives the same value. */
        MPD_NEW_STATIC(t1, 0,0,0,0);
        MPD_NEW_STATIC(t2, 0,0,0,0);
        MPD_NEW_STATIC(ulp, 0,0,0,0);
        MPD_NEW_STATIC(aa, 0,0,0,0);
        mpd_ssize_t prec;

        /* result may alias the input: work on a copy */
        if (result == a) {
            if (!mpd_qcopy(&aa, a, status)) {
                mpd_seterror(result, MPD_Malloc_error, status);
                return;
            }
            a = &aa;
        }

        workctx.clamp = 0;
        prec = ctx->prec + 3;
        while (1) {
            workctx.prec = prec;
            _mpd_qlog10(SKIP_FINALIZE, result, a, &workctx, status);
            _ssettriple(&ulp, MPD_POS, 1,
                        result->exp + result->digits-workctx.prec);

            workctx.prec = ctx->prec;
            mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
            mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
            if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
                mpd_qcmp(&t1, &t2, status) == 0) {
                workctx.clamp = ctx->clamp;
                mpd_check_underflow(result, &workctx, status);
                mpd_qfinalize(result, &workctx, status);
                break;
            }
            /* Not yet decidable: retry with more digits */
            prec += MPD_RDIGITS;
        }
        mpd_del(&t1);
        mpd_del(&t2);
        mpd_del(&ulp);
        mpd_del(&aa);
    }
    else {
        _mpd_qlog10(DO_FINALIZE, result, a, &workctx, status);
        mpd_check_underflow(result, &workctx, status);
    }
}
  4679. /*
  4680. * Maximum of the two operands. Attention: If one operand is a quiet NaN and the
  4681. * other is numeric, the numeric operand is returned. This may not be what one
  4682. * expects.
  4683. */
  4684. void
  4685. mpd_qmax(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4686. const mpd_context_t *ctx, uint32_t *status)
  4687. {
  4688. int c;
  4689. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4690. mpd_qcopy(result, b, status);
  4691. }
  4692. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4693. mpd_qcopy(result, a, status);
  4694. }
  4695. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4696. return;
  4697. }
  4698. else {
  4699. c = _mpd_cmp(a, b);
  4700. if (c == 0) {
  4701. c = _mpd_cmp_numequal(a, b);
  4702. }
  4703. if (c < 0) {
  4704. mpd_qcopy(result, b, status);
  4705. }
  4706. else {
  4707. mpd_qcopy(result, a, status);
  4708. }
  4709. }
  4710. mpd_qfinalize(result, ctx, status);
  4711. }
  4712. /*
  4713. * Maximum magnitude: Same as mpd_max(), but compares the operands with their
  4714. * sign ignored.
  4715. */
  4716. void
  4717. mpd_qmax_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4718. const mpd_context_t *ctx, uint32_t *status)
  4719. {
  4720. int c;
  4721. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4722. mpd_qcopy(result, b, status);
  4723. }
  4724. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4725. mpd_qcopy(result, a, status);
  4726. }
  4727. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4728. return;
  4729. }
  4730. else {
  4731. c = _mpd_cmp_abs(a, b);
  4732. if (c == 0) {
  4733. c = _mpd_cmp_numequal(a, b);
  4734. }
  4735. if (c < 0) {
  4736. mpd_qcopy(result, b, status);
  4737. }
  4738. else {
  4739. mpd_qcopy(result, a, status);
  4740. }
  4741. }
  4742. mpd_qfinalize(result, ctx, status);
  4743. }
  4744. /*
  4745. * Minimum of the two operands. Attention: If one operand is a quiet NaN and the
  4746. * other is numeric, the numeric operand is returned. This may not be what one
  4747. * expects.
  4748. */
  4749. void
  4750. mpd_qmin(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4751. const mpd_context_t *ctx, uint32_t *status)
  4752. {
  4753. int c;
  4754. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4755. mpd_qcopy(result, b, status);
  4756. }
  4757. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4758. mpd_qcopy(result, a, status);
  4759. }
  4760. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4761. return;
  4762. }
  4763. else {
  4764. c = _mpd_cmp(a, b);
  4765. if (c == 0) {
  4766. c = _mpd_cmp_numequal(a, b);
  4767. }
  4768. if (c < 0) {
  4769. mpd_qcopy(result, a, status);
  4770. }
  4771. else {
  4772. mpd_qcopy(result, b, status);
  4773. }
  4774. }
  4775. mpd_qfinalize(result, ctx, status);
  4776. }
  4777. /*
  4778. * Minimum magnitude: Same as mpd_min(), but compares the operands with their
  4779. * sign ignored.
  4780. */
  4781. void
  4782. mpd_qmin_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
  4783. const mpd_context_t *ctx, uint32_t *status)
  4784. {
  4785. int c;
  4786. if (mpd_isqnan(a) && !mpd_isnan(b)) {
  4787. mpd_qcopy(result, b, status);
  4788. }
  4789. else if (mpd_isqnan(b) && !mpd_isnan(a)) {
  4790. mpd_qcopy(result, a, status);
  4791. }
  4792. else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  4793. return;
  4794. }
  4795. else {
  4796. c = _mpd_cmp_abs(a, b);
  4797. if (c == 0) {
  4798. c = _mpd_cmp_numequal(a, b);
  4799. }
  4800. if (c < 0) {
  4801. mpd_qcopy(result, a, status);
  4802. }
  4803. else {
  4804. mpd_qcopy(result, b, status);
  4805. }
  4806. }
  4807. mpd_qfinalize(result, ctx, status);
  4808. }
  4809. /* Minimum space needed for the result array in _karatsuba_rec(). */
  4810. static inline mpd_size_t
  4811. _kmul_resultsize(mpd_size_t la, mpd_size_t lb)
  4812. {
  4813. mpd_size_t n, m;
  4814. n = add_size_t(la, lb);
  4815. n = add_size_t(n, 1);
  4816. m = (la+1)/2 + 1;
  4817. m = mul_size_t(m, 3);
  4818. return (m > n) ? m : n;
  4819. }
  4820. /* Work space needed in _karatsuba_rec(). lim >= 4 */
  4821. static inline mpd_size_t
  4822. _kmul_worksize(mpd_size_t n, mpd_size_t lim)
  4823. {
  4824. mpd_size_t m;
  4825. if (n <= lim) {
  4826. return 0;
  4827. }
  4828. m = (n+1)/2 + 1;
  4829. return add_size_t(mul_size_t(m, 2), _kmul_worksize(m, lim));
  4830. }
  4831. #define MPD_KARATSUBA_BASECASE 16 /* must be >= 4 */
  4832. /*
  4833. * Add the product of a and b to c.
  4834. * c must be _kmul_resultsize(la, lb) in size.
  4835. * w is used as a work array and must be _kmul_worksize(a, lim) in size.
  4836. * Roman E. Maeder, Storage Allocation for the Karatsuba Integer Multiplication
  4837. * Algorithm. In "Design and implementation of symbolic computation systems",
  4838. * Springer, 1993, ISBN 354057235X, 9783540572350.
  4839. */
static void
_karatsuba_rec(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
               mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
{
    mpd_size_t m, lt;

    assert(la >= lb && lb > 0);
    assert(la <= MPD_KARATSUBA_BASECASE || w != NULL);

    if (la <= MPD_KARATSUBA_BASECASE) {
        /* Base case: schoolbook multiplication. */
        _mpd_basemul(c, a, b, la, lb);
        return;
    }

    m = (la+1)/2;  /* ceil(la/2) */

    /* lb <= m < la */
    if (lb <= m) {
        /* Unbalanced operands: split a into al (low m words) and
         * ah (high la-m words) and multiply b by each half. */
        if (lb > la-m) {
            lt = lb + lb + 1;      /* space needed for result array */
            mpd_uint_zero(w, lt);  /* clear result array */
            _karatsuba_rec(w, b, a+m, w+lt, lb, la-m);  /* b*ah */
        }
        else {
            lt = (la-m) + (la-m) + 1;  /* space needed for result array */
            mpd_uint_zero(w, lt);      /* clear result array */
            _karatsuba_rec(w, a+m, b, w+lt, la-m, lb);  /* ah*b */
        }
        _mpd_baseaddto(c+m, w, (la-m)+lb);  /* add ah*b*B**m */

        lt = m + m + 1;        /* space needed for the result array */
        mpd_uint_zero(w, lt);  /* clear result array */
        _karatsuba_rec(w, a, b, w+lt, m, lb);  /* al*b */
        _mpd_baseaddto(c, w, m+lb);  /* add al*b */
        return;
    }

    /* la >= lb > m: balanced case, three recursive products. */
    memcpy(w, a, m * sizeof *w);        /* w[0..m-1] = al */
    w[m] = 0;
    _mpd_baseaddto(w, a+m, la-m);       /* w[0..m]   = al + ah */
    memcpy(w+(m+1), b, m * sizeof *w);  /* w[m+1..2m] = bl */
    w[m+1+m] = 0;
    _mpd_baseaddto(w+(m+1), b+m, lb-m); /* w[m+1..2m+1] = bl + bh */

    /* c += (al+ah)*(bl+bh)*B**m */
    _karatsuba_rec(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1);

    /* High product ah*bh: added at B**2m, subtracted again at B**m. */
    lt = (la-m) + (la-m) + 1;
    mpd_uint_zero(w, lt);
    _karatsuba_rec(w, a+m, b+m, w+lt, la-m, lb-m);
    _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
    _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));

    /* Low product al*bl: added at B**0, subtracted again at B**m. */
    lt = m + m + 1;
    mpd_uint_zero(w, lt);
    _karatsuba_rec(w, a, b, w+lt, m, m);
    _mpd_baseaddto(c, w, m+m);
    _mpd_basesubfrom(c+m, w, m+m);

    return;
}
  4892. /*
  4893. * Multiply u and v, using Karatsuba multiplication. Returns a pointer
  4894. * to the result or NULL in case of failure (malloc error).
  4895. * Conditions: ulen >= vlen, ulen >= 4
  4896. */
  4897. static mpd_uint_t *
  4898. _mpd_kmul(const mpd_uint_t *u, const mpd_uint_t *v,
  4899. mpd_size_t ulen, mpd_size_t vlen,
  4900. mpd_size_t *rsize)
  4901. {
  4902. mpd_uint_t *result = NULL, *w = NULL;
  4903. mpd_size_t m;
  4904. assert(ulen >= 4);
  4905. assert(ulen >= vlen);
  4906. *rsize = _kmul_resultsize(ulen, vlen);
  4907. if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
  4908. return NULL;
  4909. }
  4910. m = _kmul_worksize(ulen, MPD_KARATSUBA_BASECASE);
  4911. if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
  4912. mpd_free(result);
  4913. return NULL;
  4914. }
  4915. _karatsuba_rec(result, u, v, w, ulen, vlen);
  4916. if (w) mpd_free(w);
  4917. return result;
  4918. }
  4919. /*
  4920. * Determine the minimum length for the number theoretic transform. Valid
  4921. * transform lengths are 2**n or 3*2**n, where 2**n <= MPD_MAXTRANSFORM_2N.
  4922. * The function finds the shortest length m such that rsize <= m.
  4923. */
  4924. static inline mpd_size_t
  4925. _mpd_get_transform_len(mpd_size_t rsize)
  4926. {
  4927. mpd_size_t log2rsize;
  4928. mpd_size_t x, step;
  4929. assert(rsize >= 4);
  4930. log2rsize = mpd_bsr(rsize);
  4931. if (rsize <= 1024) {
  4932. /* 2**n is faster in this range. */
  4933. x = ((mpd_size_t)1)<<log2rsize;
  4934. return (rsize == x) ? x : x<<1;
  4935. }
  4936. else if (rsize <= MPD_MAXTRANSFORM_2N) {
  4937. x = ((mpd_size_t)1)<<log2rsize;
  4938. if (rsize == x) return x;
  4939. step = x>>1;
  4940. x += step;
  4941. return (rsize <= x) ? x : x + step;
  4942. }
  4943. else if (rsize <= MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2) {
  4944. return MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2;
  4945. }
  4946. else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
  4947. return 3*MPD_MAXTRANSFORM_2N;
  4948. }
  4949. else {
  4950. return MPD_SIZE_MAX;
  4951. }
  4952. }
#ifdef PPRO
#ifndef _MSC_VER
/* Read the current x87 FPU control word. */
static inline unsigned short
_mpd_get_control87(void)
{
    unsigned short cw;

    __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
    return cw;
}

/* Load a new x87 FPU control word. */
static inline void
_mpd_set_control87(unsigned short cw)
{
    __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
}
#endif

/* Put the FPU into the state the PPRO transform code needs (all
 * exceptions masked; on MSVC explicitly chop rounding and 64-bit
 * precision, the GCC path sets the equivalent bits via 0xF3F).
 * Returns the previous control word for mpd_restore_fenv(). */
static unsigned int
mpd_set_fenv(void)
{
    unsigned int cw;
#ifdef _MSC_VER
    unsigned int flags =
        _EM_INVALID|_EM_DENORMAL|_EM_ZERODIVIDE|_EM_OVERFLOW|
        _EM_UNDERFLOW|_EM_INEXACT|_RC_CHOP|_PC_64;
    unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
    unsigned int dummy;

    __control87_2(0, 0, &cw, NULL);  /* save current control word */
    __control87_2(flags, mask, &dummy, NULL);
#else
    cw = _mpd_get_control87();
    _mpd_set_control87(cw|0xF3F);  /* set exception-mask, RC and PC bits */
#endif
    return cw;
}

/* Restore a control word previously saved by mpd_set_fenv(). */
static void
mpd_restore_fenv(unsigned int cw)
{
#ifdef _MSC_VER
    unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
    unsigned int dummy;

    __control87_2(cw, mask, &dummy, NULL);
#else
    _mpd_set_control87((unsigned short)cw);
#endif
}
#endif /* PPRO */
  4998. /*
  4999. * Multiply u and v, using the fast number theoretic transform. Returns
  5000. * a pointer to the result or NULL in case of failure (malloc error).
  5001. */
static mpd_uint_t *
_mpd_fntmul(const mpd_uint_t *u, const mpd_uint_t *v,
            mpd_size_t ulen, mpd_size_t vlen,
            mpd_size_t *rsize)
{
    mpd_uint_t *c1 = NULL, *c2 = NULL, *c3 = NULL, *vtmp = NULL;
    mpd_size_t n;

#ifdef PPRO
    unsigned int cw;
    /* PPRO path: force a known FPU control state for the transform. */
    cw = mpd_set_fenv();
#endif

    *rsize = add_size_t(ulen, vlen);
    /* No valid transform length for this result size. */
    if ((n = _mpd_get_transform_len(*rsize)) == MPD_SIZE_MAX) {
        goto malloc_error;
    }

    /* The product is computed as three convolutions modulo the primes
     * P1, P2, P3 and recombined by crt3(). calloc zero-pads each copy
     * of u to the transform length n. */
    if ((c1 = mpd_calloc(n, sizeof *c1)) == NULL) {
        goto malloc_error;
    }
    if ((c2 = mpd_calloc(n, sizeof *c2)) == NULL) {
        goto malloc_error;
    }
    if ((c3 = mpd_calloc(n, sizeof *c3)) == NULL) {
        goto malloc_error;
    }

    memcpy(c1, u, ulen * (sizeof *c1));
    memcpy(c2, u, ulen * (sizeof *c2));
    memcpy(c3, u, ulen * (sizeof *c3));

    if (u == v) {
        /* Squaring: autoconvolution needs no second operand buffer. */
        if (!fnt_autoconvolute(c1, n, P1) ||
            !fnt_autoconvolute(c2, n, P2) ||
            !fnt_autoconvolute(c3, n, P3)) {
            goto malloc_error;
        }
    }
    else {
        if ((vtmp = mpd_calloc(n, sizeof *vtmp)) == NULL) {
            goto malloc_error;
        }
        /* First pass: the tail of vtmp is still zero from calloc. */
        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        if (!fnt_convolute(c1, vtmp, n, P1)) {
            mpd_free(vtmp);
            goto malloc_error;
        }
        /* fnt_convolute clobbers vtmp: reload v, re-zero the tail. */
        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        mpd_uint_zero(vtmp+vlen, n-vlen);
        if (!fnt_convolute(c2, vtmp, n, P2)) {
            mpd_free(vtmp);
            goto malloc_error;
        }
        memcpy(vtmp, v, vlen * (sizeof *vtmp));
        mpd_uint_zero(vtmp+vlen, n-vlen);
        if (!fnt_convolute(c3, vtmp, n, P3)) {
            mpd_free(vtmp);
            goto malloc_error;
        }
        mpd_free(vtmp);
    }

    /* Combine the three residue vectors; the final product ends up in c1. */
    crt3(c1, c2, c3, *rsize);

out:
#ifdef PPRO
    mpd_restore_fenv(cw);
#endif
    if (c2) mpd_free(c2);
    if (c3) mpd_free(c3);
    return c1;  /* NULL when arriving via malloc_error */

malloc_error:
    if (c1) mpd_free(c1);
    c1 = NULL;
    goto out;
}
  5072. /*
  5073. * Karatsuba multiplication with FNT/basemul as the base case.
  5074. */
/*
 * Karatsuba multiplication with FNT/basemul as the base case.
 * Returns 1 on success, 0 on malloc failure inside _mpd_fntmul().
 */
static int
_karatsuba_rec_fnt(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
                   mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
{
    mpd_size_t m, lt;

    assert(la >= lb && lb > 0);
    assert(la <= 3*(MPD_MAXTRANSFORM_2N/2) || w != NULL);

    if (la <= 3*(MPD_MAXTRANSFORM_2N/2)) {
        /* Base case: schoolbook for small operands, FNT for the rest. */
        if (lb <= 192) {
            _mpd_basemul(c, b, a, lb, la);
        }
        else {
            mpd_uint_t *result;
            mpd_size_t dummy;

            if ((result = _mpd_fntmul(a, b, la, lb, &dummy)) == NULL) {
                return 0;  /* malloc error */
            }
            memcpy(c, result, (la+lb) * (sizeof *result));
            mpd_free(result);
        }
        return 1;
    }

    m = (la+1)/2;  /* ceil(la/2) */

    /* lb <= m < la */
    if (lb <= m) {
        /* Unbalanced operands: multiply b by the high and low halves of
         * a separately (same scheme as _karatsuba_rec). */
        if (lb > la-m) {
            lt = lb + lb + 1;      /* space needed for result array */
            mpd_uint_zero(w, lt);  /* clear result array */
            if (!_karatsuba_rec_fnt(w, b, a+m, w+lt, lb, la-m)) { /* b*ah */
                return 0; /* GCOV_UNLIKELY */
            }
        }
        else {
            lt = (la-m) + (la-m) + 1;  /* space needed for result array */
            mpd_uint_zero(w, lt);      /* clear result array */
            if (!_karatsuba_rec_fnt(w, a+m, b, w+lt, la-m, lb)) { /* ah*b */
                return 0; /* GCOV_UNLIKELY */
            }
        }
        _mpd_baseaddto(c+m, w, (la-m)+lb);  /* add ah*b*B**m */

        lt = m + m + 1;        /* space needed for the result array */
        mpd_uint_zero(w, lt);  /* clear result array */
        if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, lb)) { /* al*b */
            return 0; /* GCOV_UNLIKELY */
        }
        _mpd_baseaddto(c, w, m+lb);  /* add al*b */
        return 1;
    }

    /* la >= lb > m: balanced case, three recursive products. */
    memcpy(w, a, m * sizeof *w);        /* al */
    w[m] = 0;
    _mpd_baseaddto(w, a+m, la-m);       /* al + ah */
    memcpy(w+(m+1), b, m * sizeof *w);  /* bl */
    w[m+1+m] = 0;
    _mpd_baseaddto(w+(m+1), b+m, lb-m); /* bl + bh */
    /* c += (al+ah)*(bl+bh)*B**m */
    if (!_karatsuba_rec_fnt(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1)) {
        return 0; /* GCOV_UNLIKELY */
    }

    /* High product ah*bh: added at B**2m, subtracted again at B**m. */
    lt = (la-m) + (la-m) + 1;
    mpd_uint_zero(w, lt);
    if (!_karatsuba_rec_fnt(w, a+m, b+m, w+lt, la-m, lb-m)) {
        return 0; /* GCOV_UNLIKELY */
    }
    _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
    _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));

    /* Low product al*bl: added at B**0, subtracted again at B**m. */
    lt = m + m + 1;
    mpd_uint_zero(w, lt);
    if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, m)) {
        return 0; /* GCOV_UNLIKELY */
    }
    _mpd_baseaddto(c, w, m+m);
    _mpd_basesubfrom(c+m, w, m+m);
    return 1;
}
  5150. /*
  5151. * Multiply u and v, using Karatsuba multiplication with the FNT as the
  5152. * base case. Returns a pointer to the result or NULL in case of failure
  5153. * (malloc error). Conditions: ulen >= vlen, ulen >= 4.
  5154. */
  5155. static mpd_uint_t *
  5156. _mpd_kmul_fnt(const mpd_uint_t *u, const mpd_uint_t *v,
  5157. mpd_size_t ulen, mpd_size_t vlen,
  5158. mpd_size_t *rsize)
  5159. {
  5160. mpd_uint_t *result = NULL, *w = NULL;
  5161. mpd_size_t m;
  5162. assert(ulen >= 4);
  5163. assert(ulen >= vlen);
  5164. *rsize = _kmul_resultsize(ulen, vlen);
  5165. if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
  5166. return NULL;
  5167. }
  5168. m = _kmul_worksize(ulen, 3*(MPD_MAXTRANSFORM_2N/2));
  5169. if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
  5170. mpd_free(result); /* GCOV_UNLIKELY */
  5171. return NULL; /* GCOV_UNLIKELY */
  5172. }
  5173. if (!_karatsuba_rec_fnt(result, u, v, w, ulen, vlen)) {
  5174. mpd_free(result);
  5175. result = NULL;
  5176. }
  5177. if (w) mpd_free(w);
  5178. return result;
  5179. }
  5180. /* Deal with the special cases of multiplying infinities. */
  5181. static void
  5182. _mpd_qmul_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
  5183. {
  5184. if (mpd_isinfinite(a)) {
  5185. if (mpd_iszero(b)) {
  5186. mpd_seterror(result, MPD_Invalid_operation, status);
  5187. }
  5188. else {
  5189. mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
  5190. }
  5191. return;
  5192. }
  5193. assert(mpd_isinfinite(b));
  5194. if (mpd_iszero(a)) {
  5195. mpd_seterror(result, MPD_Invalid_operation, status);
  5196. }
  5197. else {
  5198. mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
  5199. }
  5200. }
  5201. /*
  5202. * Internal function: Multiply a and b. _mpd_qmul deals with specials but
  5203. * does NOT finalize the result. This is for use in mpd_fma().
  5204. */
static inline void
_mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
          const mpd_context_t *ctx, uint32_t *status)
{
    const mpd_t *big = a, *small = b;
    mpd_uint_t *rdata = NULL;
    mpd_uint_t rbuf[MPD_MINALLOC_MAX];  /* stack buffer for small products */
    mpd_size_t rsize, i;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
        _mpd_qmul_inf(result, a, b, status);
        return;
    }

    /* Ensure big->len >= small->len. */
    if (small->len > big->len) {
        _mpd_ptrswap(&big, &small);
    }

    rsize = big->len + small->len;

    if (big->len == 1) {
        /* Both operands are single words: product fits in result->data. */
        _mpd_singlemul(result->data, big->data[0], small->data[0]);
        goto finish;
    }

    if (rsize <= (mpd_size_t)MPD_MINALLOC_MAX) {
        /* Small product: compute into the stack buffer, then copy out. */
        if (big->len == 2) {
            _mpd_mul_2_le2(rbuf, big->data, small->data, small->len);
        }
        else {
            mpd_uint_zero(rbuf, rsize);
            if (small->len == 1) {
                _mpd_shortmul(rbuf, big->data, big->len, small->data[0]);
            }
            else {
                _mpd_basemul(rbuf, small->data, big->data, small->len, big->len);
            }
        }
        if (!mpd_qresize(result, rsize, status)) {
            return;
        }
        for(i = 0; i < rsize; i++) {
            result->data[i] = rbuf[i];
        }
        goto finish;
    }

    /* Large product: pick an algorithm by operand/result size.
     * Note the _mpd_kmul*/_mpd_fntmul calls rewrite rsize. */
    if (small->len <= 256) {
        rdata = mpd_calloc(rsize, sizeof *rdata);
        if (rdata != NULL) {
            if (small->len == 1) {
                _mpd_shortmul(rdata, big->data, big->len, small->data[0]);
            }
            else {
                _mpd_basemul(rdata, small->data, big->data, small->len, big->len);
            }
        }
    }
    else if (rsize <= 1024) {
        rdata = _mpd_kmul(big->data, small->data, big->len, small->len, &rsize);
    }
    else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
        rdata = _mpd_fntmul(big->data, small->data, big->len, small->len, &rsize);
    }
    else {
        rdata = _mpd_kmul_fnt(big->data, small->data, big->len, small->len, &rsize);
    }

    if (rdata == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }

    /* Transfer ownership of the freshly allocated buffer to result. */
    if (mpd_isdynamic_data(result)) {
        mpd_free(result->data);
    }
    result->data = rdata;
    result->alloc = rsize;
    mpd_set_dynamic_data(result);

finish:
    mpd_set_flags(result, mpd_sign(a)^mpd_sign(b));
    result->exp = big->exp + small->exp;
    result->len = _mpd_real_size(result->data, rsize);
    /* resize to smaller cannot fail */
    mpd_qresize(result, result->len, status);
    mpd_setdigits(result);
}
  5287. /* Multiply a and b. */
void
mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
         const mpd_context_t *ctx, uint32_t *status)
{
    /* _mpd_qmul() handles specials and computes the full-length product;
     * mpd_qfinalize() then rounds it to the context. */
    _mpd_qmul(result, a, b, ctx, status);
    mpd_qfinalize(result, ctx, status);
}
  5295. /* Multiply a and b. Set NaN/Invalid_operation if the result is inexact. */
  5296. static void
  5297. _mpd_qmul_exact(mpd_t *result, const mpd_t *a, const mpd_t *b,
  5298. const mpd_context_t *ctx, uint32_t *status)
  5299. {
  5300. uint32_t workstatus = 0;
  5301. mpd_qmul(result, a, b, ctx, &workstatus);
  5302. *status |= workstatus;
  5303. if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
  5304. mpd_seterror(result, MPD_Invalid_operation, status);
  5305. }
  5306. }
  5307. /* Multiply decimal and mpd_ssize_t. */
  5308. void
  5309. mpd_qmul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
  5310. const mpd_context_t *ctx, uint32_t *status)
  5311. {
  5312. mpd_context_t maxcontext;
  5313. MPD_NEW_STATIC(bb,0,0,0,0);
  5314. mpd_maxcontext(&maxcontext);
  5315. mpd_qsset_ssize(&bb, b, &maxcontext, status);
  5316. mpd_qmul(result, a, &bb, ctx, status);
  5317. mpd_del(&bb);
  5318. }
  5319. /* Multiply decimal and mpd_uint_t. */
  5320. void
  5321. mpd_qmul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
  5322. const mpd_context_t *ctx, uint32_t *status)
  5323. {
  5324. mpd_context_t maxcontext;
  5325. MPD_NEW_STATIC(bb,0,0,0,0);
  5326. mpd_maxcontext(&maxcontext);
  5327. mpd_qsset_uint(&bb, b, &maxcontext, status);
  5328. mpd_qmul(result, a, &bb, ctx, status);
  5329. mpd_del(&bb);
  5330. }
/* Multiply decimal and int32_t: delegate to the ssize variant. */
void
mpd_qmul_i32(mpd_t *result, const mpd_t *a, int32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qmul_ssize(result, a, b, ctx, status);
}
/* Multiply decimal and uint32_t: delegate to the uint variant. */
void
mpd_qmul_u32(mpd_t *result, const mpd_t *a, uint32_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qmul_uint(result, a, b, ctx, status);
}
#ifdef CONFIG_64
/* Multiply decimal and int64_t. CONFIG_64: the ssize variant is assumed
 * to be 64-bit here, so delegate directly. */
void
mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qmul_ssize(result, a, b, ctx, status);
}

/* Multiply decimal and uint64_t. */
void
mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_qmul_uint(result, a, b, ctx, status);
}
#elif !defined(LEGACY_COMPILER)
/* Multiply decimal and int64_t. */
void
mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    /* Convert b exactly under the maximum context, then multiply. */
    mpd_maxcontext(&maxcontext);
    mpd_qset_i64(&bb, b, &maxcontext, status);
    mpd_qmul(result, a, &bb, ctx, status);
    mpd_del(&bb);
}

/* Multiply decimal and uint64_t. */
void
mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
             const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(bb,0,0,0,0);

    /* Convert b exactly under the maximum context, then multiply. */
    mpd_maxcontext(&maxcontext);
    mpd_qset_u64(&bb, b, &maxcontext, status);
    mpd_qmul(result, a, &bb, ctx, status);
    mpd_del(&bb);
}
#endif
  5382. /* Like the minus operator. */
  5383. void
  5384. mpd_qminus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5385. uint32_t *status)
  5386. {
  5387. if (mpd_isspecial(a)) {
  5388. if (mpd_qcheck_nan(result, a, ctx, status)) {
  5389. return;
  5390. }
  5391. }
  5392. if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
  5393. mpd_qcopy_abs(result, a, status);
  5394. }
  5395. else {
  5396. mpd_qcopy_negate(result, a, status);
  5397. }
  5398. mpd_qfinalize(result, ctx, status);
  5399. }
  5400. /* Like the plus operator. */
  5401. void
  5402. mpd_qplus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  5403. uint32_t *status)
  5404. {
  5405. if (mpd_isspecial(a)) {
  5406. if (mpd_qcheck_nan(result, a, ctx, status)) {
  5407. return;
  5408. }
  5409. }
  5410. if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
  5411. mpd_qcopy_abs(result, a, status);
  5412. }
  5413. else {
  5414. mpd_qcopy(result, a, status);
  5415. }
  5416. mpd_qfinalize(result, ctx, status);
  5417. }
  5418. /* The largest representable number that is smaller than the operand. */
void
mpd_qnext_minus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
                uint32_t *status)
{
    mpd_context_t workctx;
    /* tiny := 1 * 10**(etiny-1): below the smallest representable step. */
    MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }

        assert(mpd_isinfinite(a));
        if (mpd_isnegative(a)) {
            /* next_minus(-Inf) == -Inf */
            mpd_qcopy(result, a, status);
            return;
        }
        else {
            /* next_minus(+Inf) == largest finite number: maximum
             * coefficient with the top exponent. */
            mpd_clear_flags(result);
            mpd_qmaxcoeff(result, ctx, status);
            if (mpd_isnan(result)) {
                return;  /* mpd_qmaxcoeff failed */
            }
            result->exp = mpd_etop(ctx);
            return;
        }
    }

    mpd_workcontext(&workctx, ctx);
    workctx.round = MPD_ROUND_FLOOR;

    if (!mpd_qcopy(result, a, status)) {
        return;
    }

    /* If rounding a toward -Inf is already inexact, that rounded value
     * is the answer. */
    mpd_qfinalize(result, &workctx, &workctx.status);
    if (workctx.status&(MPD_Inexact|MPD_Errors)) {
        *status |= (workctx.status&MPD_Errors);
        return;
    }

    /* Otherwise subtract tiny, rounding toward -Inf. */
    workctx.status = 0;
    mpd_qsub(result, a, &tiny, &workctx, &workctx.status);
    *status |= (workctx.status&MPD_Errors);
}
  5458. /* The smallest representable number that is larger than the operand. */
void
mpd_qnext_plus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
               uint32_t *status)
{
    mpd_context_t workctx;
    /* tiny := 1 * 10**(etiny-1): below the smallest representable step. */
    MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }

        assert(mpd_isinfinite(a));
        if (mpd_ispositive(a)) {
            /* next_plus(+Inf) == +Inf */
            mpd_qcopy(result, a, status);
        }
        else {
            /* next_plus(-Inf) == most negative finite number: maximum
             * coefficient with the top exponent, negated. */
            mpd_clear_flags(result);
            mpd_qmaxcoeff(result, ctx, status);
            if (mpd_isnan(result)) {
                return;  /* mpd_qmaxcoeff failed */
            }
            mpd_set_flags(result, MPD_NEG);
            result->exp = mpd_etop(ctx);
        }
        return;
    }

    mpd_workcontext(&workctx, ctx);
    workctx.round = MPD_ROUND_CEILING;

    if (!mpd_qcopy(result, a, status)) {
        return;
    }

    /* If rounding a toward +Inf is already inexact, that rounded value
     * is the answer. */
    mpd_qfinalize(result, &workctx, &workctx.status);
    if (workctx.status & (MPD_Inexact|MPD_Errors)) {
        *status |= (workctx.status&MPD_Errors);
        return;
    }

    /* Otherwise add tiny, rounding toward +Inf. */
    workctx.status = 0;
    mpd_qadd(result, a, &tiny, &workctx, &workctx.status);
    *status |= (workctx.status&MPD_Errors);
}
  5498. /*
  5499. * The number closest to the first operand that is in the direction towards
  5500. * the second operand.
  5501. */
  5502. void
  5503. mpd_qnext_toward(mpd_t *result, const mpd_t *a, const mpd_t *b,
  5504. const mpd_context_t *ctx, uint32_t *status)
  5505. {
  5506. int c;
  5507. if (mpd_qcheck_nans(result, a, b, ctx, status)) {
  5508. return;
  5509. }
  5510. c = _mpd_cmp(a, b);
  5511. if (c == 0) {
  5512. mpd_qcopy_sign(result, a, b, status);
  5513. return;
  5514. }
  5515. if (c < 0) {
  5516. mpd_qnext_plus(result, a, ctx, status);
  5517. }
  5518. else {
  5519. mpd_qnext_minus(result, a, ctx, status);
  5520. }
  5521. if (mpd_isinfinite(result)) {
  5522. *status |= (MPD_Overflow|MPD_Rounded|MPD_Inexact);
  5523. }
  5524. else if (mpd_adjexp(result) < ctx->emin) {
  5525. *status |= (MPD_Underflow|MPD_Subnormal|MPD_Rounded|MPD_Inexact);
  5526. if (mpd_iszero(result)) {
  5527. *status |= MPD_Clamped;
  5528. }
  5529. }
  5530. }
  5531. /*
  5532. * Internal function: Integer power with mpd_uint_t exponent. The function
  5533. * can fail with MPD_Malloc_error.
  5534. *
  5535. * The error is equal to the error incurred in k-1 multiplications. Assuming
  5536. * the upper bound for the relative error in each operation:
  5537. *
  5538. * abs(err) = 5 * 10**-prec
  5539. * result = x**k * (1 + err)**(k-1)
  5540. */
static inline void
_mpd_qpow_uint(mpd_t *result, const mpd_t *base, mpd_uint_t exp,
               uint8_t resultsign, const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_uint_t n;

    if (exp == 0) {
        _settriple(result, resultsign, 1, 0); /* GCOV_NOT_REACHED */
        return; /* GCOV_NOT_REACHED */
    }

    if (!mpd_qcopy(result, base, status)) {
        return;
    }

    /* Left-to-right binary exponentiation. n starts as the highest set
     * bit of exp (result already holds base for that bit) and walks one
     * bit down per iteration: square, then multiply by base if the bit
     * is set. */
    n = mpd_bits[mpd_bsr(exp)];
    while (n >>= 1) {
        mpd_qmul(result, result, result, ctx, &workstatus);
        if (exp & n) {
            mpd_qmul(result, result, base, ctx, &workstatus);
        }
        /* Stop early once the result is special or has collapsed to a
         * clamped zero coefficient — further multiplies cannot change it. */
        if (mpd_isspecial(result) ||
            (mpd_iszerocoeff(result) && (workstatus & MPD_Clamped))) {
            break;
        }
    }

    *status |= workstatus;
    mpd_set_sign(result, resultsign);
}
  5568. /*
  5569. * Internal function: Integer power with mpd_t exponent, tbase and texp
  5570. * are modified!! Function can fail with MPD_Malloc_error.
  5571. *
  5572. * The error is equal to the error incurred in k multiplications. Assuming
  5573. * the upper bound for the relative error in each operation:
  5574. *
  5575. * abs(err) = 5 * 10**-prec
  5576. * result = x**k * (1 + err)**k
  5577. */
static inline void
_mpd_qpow_mpd(mpd_t *result, mpd_t *tbase, mpd_t *texp, uint8_t resultsign,
              const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_context_t maxctx;
    MPD_NEW_CONST(two,0,0,1,1,1,2);

    mpd_maxcontext(&maxctx);

    /* resize to smaller cannot fail */
    mpd_qcopy(result, &one, status);

    /* Right-to-left binary exponentiation: texp is halved each round
     * while tbase is squared; whenever texp is odd, tbase is folded
     * into the result. tbase and texp are destroyed in the process. */
    while (!mpd_iszero(texp)) {
        if (mpd_isodd(texp)) {
            mpd_qmul(result, result, tbase, ctx, &workstatus);
            *status |= workstatus;
            /* Stop early once the result is special or has collapsed to
             * a clamped zero coefficient. */
            if (mpd_isspecial(result) ||
                (mpd_iszerocoeff(result) && (workstatus & MPD_Clamped))) {
                break;
            }
        }
        mpd_qmul(tbase, tbase, tbase, ctx, &workstatus);
        mpd_qdivint(texp, texp, &two, &maxctx, &workstatus);
        /* A failure in the updates above leaves a NaN behind. */
        if (mpd_isnan(tbase) || mpd_isnan(texp)) {
            mpd_seterror(result, workstatus&MPD_Errors, status);
            return;
        }
    }
    mpd_set_sign(result, resultsign);
}
/*
 * The power function for integer exponents. Relative error _before_ the
 * final rounding to prec:
 *   abs(result - base**exp) < 0.1 * 10**-prec * abs(base**exp)
 */
static void
_mpd_qpow_int(mpd_t *result, const mpd_t *base, const mpd_t *exp,
              uint8_t resultsign,
              const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(tbase,0,0,0,0);
    MPD_NEW_STATIC(texp,0,0,0,0);
    mpd_uint_t n;

    mpd_workcontext(&workctx, ctx);
    /* Extra working precision to absorb the accumulated multiplication
     * error; sized by the magnitude of the exponent. */
    workctx.prec += (exp->digits + exp->exp + 2);
    workctx.round = MPD_ROUND_HALF_EVEN;
    workctx.clamp = 0;
    if (mpd_isnegative(exp)) {
        /* Negative exponent: compute (1/base)**abs(exp). */
        uint32_t workstatus = 0;
        workctx.prec += 1;
        mpd_qdiv(&tbase, &one, base, &workctx, &workstatus);
        *status |= workstatus;
        if (workstatus&MPD_Errors) {
            mpd_setspecial(result, MPD_POS, MPD_NAN);
            goto finish;
        }
    }
    else {
        if (!mpd_qcopy(&tbase, base, status)) {
            mpd_setspecial(result, MPD_POS, MPD_NAN);
            goto finish;
        }
    }

    /* Try to fit abs(exp) into a single machine word; fall back to the
     * mpd_t exponent loop if it does not fit. */
    n = mpd_qabs_uint(exp, &workctx.status);
    if (workctx.status&MPD_Invalid_operation) {
        if (!mpd_qcopy(&texp, exp, status)) {
            mpd_setspecial(result, MPD_POS, MPD_NAN); /* GCOV_UNLIKELY */
            goto finish; /* GCOV_UNLIKELY */
        }
        _mpd_qpow_mpd(result, &tbase, &texp, resultsign, &workctx, status);
    }
    else {
        _mpd_qpow_uint(result, &tbase, n, resultsign, &workctx, status);
    }

    if (mpd_isinfinite(result)) {
        /* for ROUND_DOWN, ROUND_FLOOR, etc. */
        _settriple(result, resultsign, 1, MPD_EXP_INF);
    }

finish:
    mpd_del(&tbase);
    mpd_del(&texp);
    mpd_qfinalize(result, ctx, status);
}
  5660. /*
  5661. * If the exponent is infinite and base equals one, the result is one
  5662. * with a coefficient of length prec. Otherwise, result is undefined.
  5663. * Return the value of the comparison against one.
  5664. */
  5665. static int
  5666. _qcheck_pow_one_inf(mpd_t *result, const mpd_t *base, uint8_t resultsign,
  5667. const mpd_context_t *ctx, uint32_t *status)
  5668. {
  5669. mpd_ssize_t shift;
  5670. int cmp;
  5671. if ((cmp = _mpd_cmp(base, &one)) == 0) {
  5672. shift = ctx->prec-1;
  5673. mpd_qshiftl(result, &one, shift, status);
  5674. result->exp = -shift;
  5675. mpd_set_flags(result, resultsign);
  5676. *status |= (MPD_Inexact|MPD_Rounded);
  5677. }
  5678. return cmp;
  5679. }
/*
 * If abs(base) equals one, calculate the correct power of one result.
 * Otherwise, result is undefined. Return the value of the comparison
 * against 1.
 *
 * This is an internal function that does not check for specials.
 */
static int
_qcheck_pow_one(mpd_t *result, const mpd_t *base, const mpd_t *exp,
                uint8_t resultsign,
                const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_ssize_t shift;
    int cmp;

    if ((cmp = _mpd_cmp_abs(base, &one)) == 0) {
        if (_mpd_isint(exp)) {
            if (mpd_isnegative(exp)) {
                /* abs(base)**(negative int) == 1 exactly */
                _settriple(result, resultsign, 1, 0);
                return 0;
            }
            /* 1.000**3 = 1.000000000: number of fractional digits of the
             * result is (-base->exp) * exp */
            mpd_qmul_ssize(result, exp, -base->exp, ctx, &workstatus);
            if (workstatus&MPD_Errors) {
                *status |= (workstatus&MPD_Errors);
                return 0;
            }
            /* digits-1 after exponentiation */
            shift = mpd_qget_ssize(result, &workstatus);
            /* shift is MPD_SSIZE_MAX if result is too large */
            if (shift > ctx->prec-1) {
                /* cap at the context precision */
                shift = ctx->prec-1;
                *status |= MPD_Rounded;
            }
        }
        else if (mpd_ispositive(base)) {
            /* non-integer exponent: result fills the full precision */
            shift = ctx->prec-1;
            *status |= (MPD_Inexact|MPD_Rounded);
        }
        else {
            /* negative base with non-integer exponent is rejected
             * before this function is called */
            return -2; /* GCOV_NOT_REACHED */
        }
        if (!mpd_qshiftl(result, &one, shift, status)) {
            return 0;
        }
        result->exp = -shift;
        mpd_set_flags(result, resultsign);
    }

    return cmp;
}
  5730. /*
  5731. * Detect certain over/underflow of x**y.
  5732. * ACL2 proof: pow-bounds.lisp.
  5733. *
  5734. * Symbols:
  5735. *
  5736. * e: EXP_INF or EXP_CLAMP
  5737. * x: base
  5738. * y: exponent
  5739. *
  5740. * omega(e) = log10(abs(e))
  5741. * zeta(x) = log10(abs(log10(x)))
  5742. * theta(y) = log10(abs(y))
  5743. *
  5744. * Upper and lower bounds:
  5745. *
  5746. * ub_omega(e) = ceil(log10(abs(e)))
  5747. * lb_theta(y) = floor(log10(abs(y)))
  5748. *
  5749. * | floor(log10(floor(abs(log10(x))))) if x < 1/10 or x >= 10
  5750. * lb_zeta(x) = | floor(log10(abs(x-1)/10)) if 1/10 <= x < 1
  5751. * | floor(log10(abs((x-1)/100))) if 1 < x < 10
  5752. *
  5753. * ub_omega(e) and lb_theta(y) are obviously upper and lower bounds
  5754. * for omega(e) and theta(y).
  5755. *
  5756. * lb_zeta is a lower bound for zeta(x):
  5757. *
  5758. * x < 1/10 or x >= 10:
  5759. *
  5760. * abs(log10(x)) >= 1, so the outer log10 is well defined. Since log10
  5761. * is strictly increasing, the end result is a lower bound.
  5762. *
  5763. * 1/10 <= x < 1:
  5764. *
  5765. * We use: log10(x) <= (x-1)/log(10)
  5766. * abs(log10(x)) >= abs(x-1)/log(10)
  5767. * abs(log10(x)) >= abs(x-1)/10
  5768. *
  5769. * 1 < x < 10:
  5770. *
  5771. * We use: (x-1)/(x*log(10)) < log10(x)
  5772. * abs((x-1)/100) < abs(log10(x))
  5773. *
  5774. * XXX: abs((x-1)/10) would work, need ACL2 proof.
  5775. *
  5776. *
  5777. * Let (0 < x < 1 and y < 0) or (x > 1 and y > 0). (H1)
  5778. * Let ub_omega(exp_inf) < lb_zeta(x) + lb_theta(y) (H2)
  5779. *
  5780. * Then:
  5781. * log10(abs(exp_inf)) < log10(abs(log10(x))) + log10(abs(y)). (1)
  5782. * exp_inf < log10(x) * y (2)
  5783. * 10**exp_inf < x**y (3)
  5784. *
  5785. * Let (0 < x < 1 and y > 0) or (x > 1 and y < 0). (H3)
  5786. * Let ub_omega(exp_clamp) < lb_zeta(x) + lb_theta(y) (H4)
  5787. *
  5788. * Then:
  5789. * log10(abs(exp_clamp)) < log10(abs(log10(x))) + log10(abs(y)). (4)
  5790. * log10(x) * y < exp_clamp (5)
  5791. * x**y < 10**exp_clamp (6)
  5792. *
  5793. */
  5794. static mpd_ssize_t
  5795. _lower_bound_zeta(const mpd_t *x, uint32_t *status)
  5796. {
  5797. mpd_context_t maxctx;
  5798. MPD_NEW_STATIC(scratch,0,0,0,0);
  5799. mpd_ssize_t t, u;
  5800. t = mpd_adjexp(x);
  5801. if (t > 0) {
  5802. /* x >= 10 -> floor(log10(floor(abs(log10(x))))) */
  5803. return mpd_exp_digits(t) - 1;
  5804. }
  5805. else if (t < -1) {
  5806. /* x < 1/10 -> floor(log10(floor(abs(log10(x))))) */
  5807. return mpd_exp_digits(t+1) - 1;
  5808. }
  5809. else {
  5810. mpd_maxcontext(&maxctx);
  5811. mpd_qsub(&scratch, x, &one, &maxctx, status);
  5812. if (mpd_isspecial(&scratch)) {
  5813. mpd_del(&scratch);
  5814. return MPD_SSIZE_MAX;
  5815. }
  5816. u = mpd_adjexp(&scratch);
  5817. mpd_del(&scratch);
  5818. /* t == -1, 1/10 <= x < 1 -> floor(log10(abs(x-1)/10))
  5819. * t == 0, 1 < x < 10 -> floor(log10(abs(x-1)/100)) */
  5820. return (t == 0) ? u-2 : u-1;
  5821. }
  5822. }
/*
 * Detect cases of certain overflow/underflow in the power function.
 * Assumptions: x != 1, y != 0. The proof above is for positive x.
 * If x is negative and y is an odd integer, x**y == -(abs(x)**y),
 * so the analysis does not change.
 *
 * Returns 1 (and sets result) when the outcome is a certain
 * overflow/underflow, 0 when the full power algorithm must run.
 */
static int
_qcheck_pow_bounds(mpd_t *result, const mpd_t *x, const mpd_t *y,
                   uint8_t resultsign,
                   const mpd_context_t *ctx, uint32_t *status)
{
    MPD_NEW_SHARED(abs_x, x);
    mpd_ssize_t ub_omega, lb_zeta, lb_theta;
    uint8_t sign;

    mpd_set_positive(&abs_x);

    /* lower bounds for log10(abs(y)) and log10(abs(log10(abs(x)))) */
    lb_theta = mpd_adjexp(y);
    lb_zeta = _lower_bound_zeta(&abs_x, status);
    if (lb_zeta == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return 1;
    }

    /* sign == 0: x**y grows; sign == 1: x**y shrinks (see H1/H3 above) */
    sign = (mpd_adjexp(&abs_x) < 0) ^ mpd_sign(y);
    if (sign == 0) {
        /* (0 < |x| < 1 and y < 0) or (|x| > 1 and y > 0) */
        ub_omega = mpd_exp_digits(ctx->emax);
        if (ub_omega < lb_zeta + lb_theta) {
            /* certain overflow: hand a huge exponent to finalize */
            _settriple(result, resultsign, 1, MPD_EXP_INF);
            mpd_qfinalize(result, ctx, status);
            return 1;
        }
    }
    else {
        /* (0 < |x| < 1 and y > 0) or (|x| > 1 and y < 0). */
        ub_omega = mpd_exp_digits(mpd_etiny(ctx));
        if (ub_omega < lb_zeta + lb_theta) {
            /* certain underflow: exponent below etiny forces clamping */
            _settriple(result, resultsign, 1, mpd_etiny(ctx)-1);
            mpd_qfinalize(result, ctx, status);
            return 1;
        }
    }

    return 0;
}
  5865. /*
  5866. * TODO: Implement algorithm for computing exact powers from decimal.py.
  5867. * In order to prevent infinite loops, this has to be called before
  5868. * using Ziv's strategy for correct rounding.
  5869. */
  5870. /*
  5871. static int
  5872. _mpd_qpow_exact(mpd_t *result, const mpd_t *base, const mpd_t *exp,
  5873. const mpd_context_t *ctx, uint32_t *status)
  5874. {
  5875. return 0;
  5876. }
  5877. */
/*
 * The power function for real exponents: base**exp = e**(exp * ln(base)).
 * Relative error: abs(result - e**y) < e**y * 1/5 * 10**(-prec - 1)
 */
static void
_mpd_qpow_real(mpd_t *result, const mpd_t *base, const mpd_t *exp,
               const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(texp,0,0,0,0);

    /* Private copy of exp, since result may alias exp. */
    if (!mpd_qcopy(&texp, exp, status)) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }

    mpd_maxcontext(&workctx);
    workctx.prec = (base->digits > ctx->prec) ? base->digits : ctx->prec;
    workctx.prec += (4 + MPD_EXPDIGITS);
    workctx.round = MPD_ROUND_HALF_EVEN;
    workctx.allcr = ctx->allcr;

    /*
     * extra := MPD_EXPDIGITS = MPD_EXP_MAX_T
     * wp := prec + 4 + extra
     * abs(err) < 5 * 10**-wp
     * y := log(base) * exp
     * Calculate:
     *   1) e**(y * (1 + err)**2) * (1 + err)
     *        = e**y * e**(y * (2*err + err**2)) * (1 + err)
     *                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     * Relative error of the underlined term:
     *   2) abs(e**(y * (2*err + err**2)) - 1)
     * Case abs(y) >= 10**extra:
     *   3) adjexp(y)+1 > log10(abs(y)) >= extra
     *      This triggers the Overflow/Underflow shortcut in _mpd_qexp(),
     *      so no further analysis is necessary.
     * Case abs(y) < 10**extra:
     *   4) abs(y * (2*err + err**2)) < 1/5 * 10**(-prec - 2)
     *      Use (see _mpd_qexp):
     *   5) abs(x) <= 9/10 * 10**-p ==> abs(e**x - 1) < 10**-p
     *      With 2), 4) and 5):
     *   6) abs(e**(y * (2*err + err**2)) - 1) < 10**(-prec - 2)
     * The complete relative error of 1) is:
     *   7) abs(result - e**y) < e**y * 1/5 * 10**(-prec - 1)
     */
    mpd_qln(result, base, &workctx, &workctx.status);
    mpd_qmul(result, result, &texp, &workctx, &workctx.status);
    mpd_qexp(result, result, &workctx, status);

    mpd_del(&texp);
    *status |= (workctx.status&MPD_Errors);
    /* a real power is never exact */
    *status |= (MPD_Inexact|MPD_Rounded);
}
/*
 * The power function: base**exp.
 * Special cases are handled first (NaNs, zero base, negative base with
 * non-integer exponent, infinities, power of one, certain
 * overflow/underflow), then the work is dispatched to the integer or
 * real power algorithm.
 */
void
mpd_qpow(mpd_t *result, const mpd_t *base, const mpd_t *exp,
         const mpd_context_t *ctx, uint32_t *status)
{
    uint8_t resultsign = 0;
    int intexp = 0;
    int cmp;

    if (mpd_isspecial(base) || mpd_isspecial(exp)) {
        if (mpd_qcheck_nans(result, base, exp, ctx, status)) {
            return;
        }
    }
    if (mpd_isinteger(exp)) {
        intexp = 1;
        /* a negative base with an odd integer exponent gives a
         * negative result */
        resultsign = mpd_isnegative(base) && mpd_isodd(exp);
    }

    if (mpd_iszero(base)) {
        if (mpd_iszero(exp)) {
            /* 0**0 is undefined */
            mpd_seterror(result, MPD_Invalid_operation, status);
        }
        else if (mpd_isnegative(exp)) {
            /* 0**(negative) == signed infinity */
            mpd_setspecial(result, resultsign, MPD_INF);
        }
        else {
            /* 0**(positive) == signed zero */
            _settriple(result, resultsign, 0, 0);
        }
        return;
    }
    if (mpd_isnegative(base)) {
        /* negative base requires a finite integer exponent */
        if (!intexp || mpd_isinfinite(exp)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
    }
    if (mpd_isinfinite(exp)) {
        /* power of one */
        cmp = _qcheck_pow_one_inf(result, base, resultsign, ctx, status);
        if (cmp == 0) {
            return;
        }
        else {
            /* cmp < 0 after sign adjustment means abs(base**exp) -> 0 */
            cmp *= mpd_arith_sign(exp);
            if (cmp < 0) {
                _settriple(result, resultsign, 0, 0);
            }
            else {
                mpd_setspecial(result, resultsign, MPD_INF);
            }
        }
        return;
    }
    if (mpd_isinfinite(base)) {
        if (mpd_iszero(exp)) {
            _settriple(result, resultsign, 1, 0);
        }
        else if (mpd_isnegative(exp)) {
            _settriple(result, resultsign, 0, 0);
        }
        else {
            mpd_setspecial(result, resultsign, MPD_INF);
        }
        return;
    }
    if (mpd_iszero(exp)) {
        /* x**0 == 1 for nonzero finite x */
        _settriple(result, resultsign, 1, 0);
        return;
    }
    if (_qcheck_pow_one(result, base, exp, resultsign, ctx, status) == 0) {
        return;
    }
    if (_qcheck_pow_bounds(result, base, exp, resultsign, ctx, status)) {
        return;
    }

    if (intexp) {
        _mpd_qpow_int(result, base, exp, resultsign, ctx, status);
    }
    else {
        _mpd_qpow_real(result, base, exp, ctx, status);
        /* A non-special result that compares equal to 1 is normalized to
         * a full-precision coefficient (1.000...0). */
        if (!mpd_isspecial(result) && _mpd_cmp(result, &one) == 0) {
            mpd_ssize_t shift = ctx->prec-1;
            mpd_qshiftl(result, &one, shift, status);
            result->exp = -shift;
        }
        if (mpd_isinfinite(result)) {
            /* for ROUND_DOWN, ROUND_FLOOR, etc. */
            _settriple(result, MPD_POS, 1, MPD_EXP_INF);
        }
        mpd_qfinalize(result, ctx, status);
    }
}
  6019. /*
  6020. * Internal function: Integer powmod with mpd_uint_t exponent, base is modified!
  6021. * Function can fail with MPD_Malloc_error.
  6022. */
  6023. static inline void
  6024. _mpd_qpowmod_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
  6025. const mpd_t *mod, uint32_t *status)
  6026. {
  6027. mpd_context_t maxcontext;
  6028. mpd_maxcontext(&maxcontext);
  6029. /* resize to smaller cannot fail */
  6030. mpd_qcopy(result, &one, status);
  6031. while (exp > 0) {
  6032. if (exp & 1) {
  6033. _mpd_qmul_exact(result, result, base, &maxcontext, status);
  6034. mpd_qrem(result, result, mod, &maxcontext, status);
  6035. }
  6036. _mpd_qmul_exact(base, base, base, &maxcontext, status);
  6037. mpd_qrem(base, base, mod, &maxcontext, status);
  6038. exp >>= 1;
  6039. }
  6040. }
/*
 * The powmod function: (base**exp) % mod.
 * All three operands must be finite integers; exp must be non-negative
 * and mod nonzero with at most prec integral digits. Invalid inputs
 * signal MPD_Invalid_operation.
 */
void
mpd_qpowmod(mpd_t *result, const mpd_t *base, const mpd_t *exp,
            const mpd_t *mod,
            const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(tbase,0,0,0,0);
    MPD_NEW_STATIC(texp,0,0,0,0);
    MPD_NEW_STATIC(tmod,0,0,0,0);
    MPD_NEW_STATIC(tmp,0,0,0,0);
    MPD_NEW_CONST(two,0,0,1,1,1,2);
    mpd_ssize_t tbase_exp, texp_exp;
    mpd_ssize_t i;
    mpd_t t;
    mpd_uint_t r;
    uint8_t sign;

    /* Specials: quiet NaN propagation, everything else is invalid. */
    if (mpd_isspecial(base) || mpd_isspecial(exp) || mpd_isspecial(mod)) {
        if (mpd_qcheck_3nans(result, base, exp, mod, ctx, status)) {
            return;
        }
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (!_mpd_isint(base) || !_mpd_isint(exp) || !_mpd_isint(mod)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (mpd_iszerocoeff(mod)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (mod->digits+mod->exp > ctx->prec) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* negative base and odd exponent yield a negative result */
    sign = (mpd_isnegative(base)) && (mpd_isodd(exp));
    if (mpd_iszerocoeff(exp)) {
        if (mpd_iszerocoeff(base)) {
            /* 0**0 is undefined */
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        /* x**0 == 1, reduced mod 1 gives 0 */
        r = (_mpd_cmp_abs(mod, &one)==0) ? 0 : 1;
        _settriple(result, sign, r, 0);
        return;
    }
    if (mpd_isnegative(exp)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }
    if (mpd_iszerocoeff(base)) {
        _settriple(result, sign, 0, 0);
        return;
    }

    mpd_maxcontext(&maxcontext);

    /* tmod := abs(mod) rescaled to exponent 0 */
    mpd_qrescale(&tmod, mod, 0, &maxcontext, &maxcontext.status);
    if (maxcontext.status&MPD_Errors) {
        mpd_seterror(result, maxcontext.status&MPD_Errors, status);
        goto out;
    }
    maxcontext.status = 0;
    mpd_set_positive(&tmod);

    /* split base and exp into coefficient and decimal exponent */
    mpd_qround_to_int(&tbase, base, &maxcontext, status);
    mpd_set_positive(&tbase);
    tbase_exp = tbase.exp;
    tbase.exp = 0;

    mpd_qround_to_int(&texp, exp, &maxcontext, status);
    texp_exp = texp.exp;
    texp.exp = 0;

    /* base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo */
    mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
    mpd_qshiftl(result, &one, tbase_exp, status);
    mpd_qrem(result, result, &tmod, &maxcontext, status);
    _mpd_qmul_exact(&tbase, &tbase, result, &maxcontext, status);
    mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
    if (mpd_isspecial(&tbase) ||
        mpd_isspecial(&texp) ||
        mpd_isspecial(&tmod)) {
        goto mpd_errors;
    }

    /* fold the exponent's own power of ten into the base:
     * tbase := tbase**(10**texp_exp) % tmod */
    for (i = 0; i < texp_exp; i++) {
        _mpd_qpowmod_uint(&tmp, &tbase, 10, &tmod, status);
        /* swap tmp and tbase by struct copy */
        t = tmp;
        tmp = tbase;
        tbase = t;
    }
    if (mpd_isspecial(&tbase)) {
        goto mpd_errors; /* GCOV_UNLIKELY */
    }

    /* resize to smaller cannot fail */
    mpd_qcopy(result, &one, status);
    /* square-and-multiply over the remaining multiword exponent */
    while (mpd_isfinite(&texp) && !mpd_iszero(&texp)) {
        if (mpd_isodd(&texp)) {
            _mpd_qmul_exact(result, result, &tbase, &maxcontext, status);
            mpd_qrem(result, result, &tmod, &maxcontext, status);
        }
        _mpd_qmul_exact(&tbase, &tbase, &tbase, &maxcontext, status);
        mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
        mpd_qdivint(&texp, &texp, &two, &maxcontext, status);
    }
    if (mpd_isspecial(&texp) || mpd_isspecial(&tbase) ||
        mpd_isspecial(&tmod) || mpd_isspecial(result)) {
        /* MPD_Malloc_error */
        goto mpd_errors;
    }
    else {
        mpd_set_sign(result, sign);
    }

out:
    mpd_del(&tbase);
    mpd_del(&texp);
    mpd_del(&tmod);
    mpd_del(&tmp);
    return;

mpd_errors:
    mpd_setspecial(result, MPD_POS, MPD_NAN);
    goto out;
}
/*
 * Quantize a so that it has the same exponent as b, rounding the
 * coefficient with ctx->round if digits must be discarded. Signals
 * MPD_Invalid_operation if the result would not fit in prec digits or
 * its exponent would leave [etiny, emax].
 */
void
mpd_qquantize(mpd_t *result, const mpd_t *a, const mpd_t *b,
              const mpd_context_t *ctx, uint32_t *status)
{
    uint32_t workstatus = 0;
    /* save b->exp up front: result may alias b */
    mpd_ssize_t b_exp = b->exp;
    mpd_ssize_t expdiff, shift;
    mpd_uint_t rnd;

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(result, a, b, ctx, status)) {
            return;
        }
        /* two infinities quantize to an infinity */
        if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
            mpd_qcopy(result, a, status);
            return;
        }
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* the target exponent itself must be representable in the context */
    if (b->exp > ctx->emax || b->exp < mpd_etiny(ctx)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (mpd_iszero(a)) {
        _settriple(result, mpd_sign(a), 0, b->exp);
        mpd_qfinalize(result, ctx, status);
        return;
    }


    expdiff = a->exp - b->exp;
    /* padding with expdiff zeros would exceed prec */
    if (a->digits + expdiff > ctx->prec) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (expdiff >= 0) {
        /* pad with zeros to reach the smaller exponent */
        shift = expdiff;
        if (!mpd_qshiftl(result, a, shift, status)) {
            return;
        }
        result->exp = b_exp;
    }
    else {
        /* At this point expdiff < 0 and a->digits+expdiff <= prec,
         * so the shift before an increment will fit in prec. */
        shift = -expdiff;
        rnd = mpd_qshiftr(result, a, shift, status);
        if (rnd == MPD_UINT_MAX) {
            return;
        }
        result->exp = b_exp;
        if (!_mpd_apply_round_fit(result, rnd, ctx, status)) {
            return;
        }
        workstatus |= MPD_Rounded;
        if (rnd) {
            workstatus |= MPD_Inexact;
        }
    }

    /* rounding up may have pushed the adjusted exponent out of range */
    if (mpd_adjexp(result) > ctx->emax ||
        mpd_adjexp(result) < mpd_etiny(ctx)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    *status |= workstatus;
    mpd_qfinalize(result, ctx, status);
}
  6224. void
  6225. mpd_qreduce(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  6226. uint32_t *status)
  6227. {
  6228. mpd_ssize_t shift, maxexp, maxshift;
  6229. uint8_t sign_a = mpd_sign(a);
  6230. if (mpd_isspecial(a)) {
  6231. if (mpd_qcheck_nan(result, a, ctx, status)) {
  6232. return;
  6233. }
  6234. mpd_qcopy(result, a, status);
  6235. return;
  6236. }
  6237. if (!mpd_qcopy(result, a, status)) {
  6238. return;
  6239. }
  6240. mpd_qfinalize(result, ctx, status);
  6241. if (mpd_isspecial(result)) {
  6242. return;
  6243. }
  6244. if (mpd_iszero(result)) {
  6245. _settriple(result, sign_a, 0, 0);
  6246. return;
  6247. }
  6248. shift = mpd_trail_zeros(result);
  6249. maxexp = (ctx->clamp) ? mpd_etop(ctx) : ctx->emax;
  6250. /* After the finalizing above result->exp <= maxexp. */
  6251. maxshift = maxexp - result->exp;
  6252. shift = (shift > maxshift) ? maxshift : shift;
  6253. mpd_qshiftr_inplace(result, shift);
  6254. result->exp += shift;
  6255. }
  6256. void
  6257. mpd_qrem(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
  6258. uint32_t *status)
  6259. {
  6260. MPD_NEW_STATIC(q,0,0,0,0);
  6261. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  6262. if (mpd_qcheck_nans(r, a, b, ctx, status)) {
  6263. return;
  6264. }
  6265. if (mpd_isinfinite(a)) {
  6266. mpd_seterror(r, MPD_Invalid_operation, status);
  6267. return;
  6268. }
  6269. if (mpd_isinfinite(b)) {
  6270. mpd_qcopy(r, a, status);
  6271. mpd_qfinalize(r, ctx, status);
  6272. return;
  6273. }
  6274. /* debug */
  6275. abort(); /* GCOV_NOT_REACHED */
  6276. }
  6277. if (mpd_iszerocoeff(b)) {
  6278. if (mpd_iszerocoeff(a)) {
  6279. mpd_seterror(r, MPD_Division_undefined, status);
  6280. }
  6281. else {
  6282. mpd_seterror(r, MPD_Invalid_operation, status);
  6283. }
  6284. return;
  6285. }
  6286. _mpd_qdivmod(&q, r, a, b, ctx, status);
  6287. mpd_del(&q);
  6288. mpd_qfinalize(r, ctx, status);
  6289. }
/*
 * remainder-near: a - b * n, where n is the integer nearest to a/b
 * (ties go to the even integer). The remainder therefore satisfies
 * abs(r) <= abs(b)/2.
 */
void
mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
              const mpd_context_t *ctx, uint32_t *status)
{
    mpd_context_t workctx;
    MPD_NEW_STATIC(btmp,0,0,0,0);
    MPD_NEW_STATIC(q,0,0,0,0);
    mpd_ssize_t expdiff, qdigits;
    int cmp, isodd, allnine;

    assert(r != NULL); /* annotation for scan-build */

    if (mpd_isspecial(a) || mpd_isspecial(b)) {
        if (mpd_qcheck_nans(r, a, b, ctx, status)) {
            return;
        }
        if (mpd_isinfinite(a)) {
            mpd_seterror(r, MPD_Invalid_operation, status);
            return;
        }
        if (mpd_isinfinite(b)) {
            /* finite remainder-near inf == the dividend itself */
            mpd_qcopy(r, a, status);
            mpd_qfinalize(r, ctx, status);
            return;
        }
        /* debug */
        abort(); /* GCOV_NOT_REACHED */
    }
    if (mpd_iszerocoeff(b)) {
        if (mpd_iszerocoeff(a)) {
            mpd_seterror(r, MPD_Division_undefined, status);
        }
        else {
            mpd_seterror(r, MPD_Invalid_operation, status);
        }
        return;
    }

    /* r may alias b: keep a private copy of b */
    if (r == b) {
        if (!mpd_qcopy(&btmp, b, status)) {
            mpd_seterror(r, MPD_Malloc_error, status);
            return;
        }
        b = &btmp;
    }

    /* start from the truncating remainder */
    _mpd_qdivmod(&q, r, a, b, ctx, status);
    if (mpd_isnan(&q) || mpd_isnan(r)) {
        goto finish;
    }
    if (mpd_iszerocoeff(r)) {
        goto finish;
    }

    /* Only when abs(r) may exceed abs(b)/2 is a correction needed. */
    expdiff = mpd_adjexp(b) - mpd_adjexp(r);
    if (-1 <= expdiff && expdiff <= 1) {

        /* save quotient properties before reusing q as a scratch var */
        allnine = mpd_coeff_isallnine(&q);
        qdigits = q.digits;
        isodd = mpd_isodd(&q);

        mpd_maxcontext(&workctx);
        if (mpd_sign(a) == mpd_sign(b)) {
            /* sign(r) == sign(b): candidate correction is r - b */
            _mpd_qsub(&q, r, b, &workctx, &workctx.status);
        }
        else {
            /* sign(r) != sign(b): candidate correction is r + b */
            _mpd_qadd(&q, r, b, &workctx, &workctx.status);
        }
        if (workctx.status&MPD_Errors) {
            mpd_seterror(r, workctx.status&MPD_Errors, status);
            goto finish;
        }
        cmp = _mpd_cmp_abs(&q, r);
        if (cmp < 0 || (cmp == 0 && isodd)) {
            /* abs(r) > abs(b)/2 or abs(r) == abs(b)/2 and isodd(quotient) */
            if (allnine && qdigits == ctx->prec) {
                /* abs(quotient) + 1 == 10**prec */
                mpd_seterror(r, MPD_Division_impossible, status);
                goto finish;
            }
            mpd_qcopy(r, &q, status);
        }
    }

finish:
    mpd_del(&btmp);
    mpd_del(&q);
    mpd_qfinalize(r, ctx, status);
}
/*
 * Worker for mpd_qrescale/mpd_qrescale_fmt: set result to a with
 * exponent 'exp', padding with zeros or rounding off digits as needed.
 * Bounds on 'exp' are checked by the callers.
 */
static void
_mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
              const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t expdiff, shift;
    mpd_uint_t rnd;

    /* specials are quietly copied */
    if (mpd_isspecial(a)) {
        mpd_qcopy(result, a, status);
        return;
    }

    if (mpd_iszero(a)) {
        _settriple(result, mpd_sign(a), 0, exp);
        return;
    }

    expdiff = a->exp - exp;
    if (expdiff >= 0) {
        /* pad with expdiff zeros */
        shift = expdiff;
        if (a->digits + shift > MPD_MAX_PREC+1) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        if (!mpd_qshiftl(result, a, shift, status)) {
            return;
        }
        result->exp = exp;
    }
    else {
        /* drop digits; rnd captures the discarded part for rounding */
        shift = -expdiff;
        rnd = mpd_qshiftr(result, a, shift, status);
        if (rnd == MPD_UINT_MAX) {
            return;
        }
        result->exp = exp;
        _mpd_apply_round_excess(result, rnd, ctx, status);
        *status |= MPD_Rounded;
        if (rnd) {
            *status |= MPD_Inexact;
        }
    }

    if (mpd_issubnormal(result, ctx)) {
        *status |= MPD_Subnormal;
    }
}
  6416. /*
  6417. * Rescale a number so that it has exponent 'exp'. Does not regard context
  6418. * precision, emax, emin, but uses the rounding mode. Special numbers are
  6419. * quietly copied. Restrictions:
  6420. *
  6421. * MPD_MIN_ETINY <= exp <= MPD_MAX_EMAX+1
  6422. * result->digits <= MPD_MAX_PREC+1
  6423. */
  6424. void
  6425. mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
  6426. const mpd_context_t *ctx, uint32_t *status)
  6427. {
  6428. if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY) {
  6429. mpd_seterror(result, MPD_Invalid_operation, status);
  6430. return;
  6431. }
  6432. _mpd_qrescale(result, a, exp, ctx, status);
  6433. }
  6434. /*
  6435. * Same as mpd_qrescale, but with relaxed restrictions. The result of this
  6436. * function should only be used for formatting a number and never as input
  6437. * for other operations.
  6438. *
  6439. * MPD_MIN_ETINY-MPD_MAX_PREC <= exp <= MPD_MAX_EMAX+1
  6440. * result->digits <= MPD_MAX_PREC+1
  6441. */
  6442. void
  6443. mpd_qrescale_fmt(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
  6444. const mpd_context_t *ctx, uint32_t *status)
  6445. {
  6446. if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY-MPD_MAX_PREC) {
  6447. mpd_seterror(result, MPD_Invalid_operation, status);
  6448. return;
  6449. }
  6450. _mpd_qrescale(result, a, exp, ctx, status);
  6451. }
/* Round to an integer according to 'action' and ctx->round.
 * TO_INT_EXACT signals Rounded/Inexact, TO_INT_SILENT rounds quietly,
 * TO_INT_TRUNC truncates (ignores ctx->round). */
enum {TO_INT_EXACT, TO_INT_SILENT, TO_INT_TRUNC};
static void
_mpd_qround_to_integral(int action, mpd_t *result, const mpd_t *a,
                        const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t rnd;

    if (mpd_isspecial(a)) {
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        /* infinities are copied unchanged */
        mpd_qcopy(result, a, status);
        return;
    }
    /* non-negative exponent: already an integer */
    if (a->exp >= 0) {
        mpd_qcopy(result, a, status);
        return;
    }
    if (mpd_iszerocoeff(a)) {
        _settriple(result, mpd_sign(a), 0, 0);
        return;
    }

    /* drop all fractional digits; rnd holds the discarded part */
    rnd = mpd_qshiftr(result, a, -a->exp, status);
    if (rnd == MPD_UINT_MAX) {
        return;
    }
    result->exp = 0;

    if (action == TO_INT_EXACT || action == TO_INT_SILENT) {
        _mpd_apply_round_excess(result, rnd, ctx, status);
        if (action == TO_INT_EXACT) {
            *status |= MPD_Rounded;
            if (rnd) {
                *status |= MPD_Inexact;
            }
        }
    }
}
  6489. void
  6490. mpd_qround_to_intx(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  6491. uint32_t *status)
  6492. {
  6493. (void)_mpd_qround_to_integral(TO_INT_EXACT, result, a, ctx, status);
  6494. }
  6495. void
  6496. mpd_qround_to_int(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  6497. uint32_t *status)
  6498. {
  6499. (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a, ctx, status);
  6500. }
  6501. void
  6502. mpd_qtrunc(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  6503. uint32_t *status)
  6504. {
  6505. if (mpd_isspecial(a)) {
  6506. mpd_seterror(result, MPD_Invalid_operation, status);
  6507. return;
  6508. }
  6509. (void)_mpd_qround_to_integral(TO_INT_TRUNC, result, a, ctx, status);
  6510. }
  6511. void
  6512. mpd_qfloor(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  6513. uint32_t *status)
  6514. {
  6515. mpd_context_t workctx = *ctx;
  6516. if (mpd_isspecial(a)) {
  6517. mpd_seterror(result, MPD_Invalid_operation, status);
  6518. return;
  6519. }
  6520. workctx.round = MPD_ROUND_FLOOR;
  6521. (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
  6522. &workctx, status);
  6523. }
  6524. void
  6525. mpd_qceil(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
  6526. uint32_t *status)
  6527. {
  6528. mpd_context_t workctx = *ctx;
  6529. if (mpd_isspecial(a)) {
  6530. mpd_seterror(result, MPD_Invalid_operation, status);
  6531. return;
  6532. }
  6533. workctx.round = MPD_ROUND_CEILING;
  6534. (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
  6535. &workctx, status);
  6536. }
  6537. int
  6538. mpd_same_quantum(const mpd_t *a, const mpd_t *b)
  6539. {
  6540. if (mpd_isspecial(a) || mpd_isspecial(b)) {
  6541. return ((mpd_isnan(a) && mpd_isnan(b)) ||
  6542. (mpd_isinfinite(a) && mpd_isinfinite(b)));
  6543. }
  6544. return a->exp == b->exp;
  6545. }
  6546. /* Schedule the increase in precision for the Newton iteration. */
  6547. static inline int
  6548. recpr_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
  6549. mpd_ssize_t maxprec, mpd_ssize_t initprec)
  6550. {
  6551. mpd_ssize_t k;
  6552. int i;
  6553. assert(maxprec > 0 && initprec > 0);
  6554. if (maxprec <= initprec) return -1;
  6555. i = 0; k = maxprec;
  6556. do {
  6557. k = (k+1) / 2;
  6558. klist[i++] = k;
  6559. } while (k > initprec);
  6560. return i-1;
  6561. }
/*
 * Initial approximation for the reciprocal:
 *   k_0 := MPD_RDIGITS-2
 *   z_0 := 10**(-k_0) * floor(10**(2*k_0 + 2) / floor(v * 10**(k_0 + 2)))
 * Absolute error:
 *   |1/v - z_0| < 10**(-k_0)
 * ACL2 proof: maxerror-inverse-approx
 */
static void
_mpd_qreciprocal_approx(mpd_t *z, const mpd_t *v, uint32_t *status)
{
    /* numerator 10**(2*k_0 + 2) as a two-word coefficient */
    mpd_uint_t p10data[2] = {0, mpd_pow10[MPD_RDIGITS-2]};
    mpd_uint_t dummy, word;
    int n;

    /* v must be normalized to 0.1 <= v < 1 by the caller */
    assert(v->exp == -v->digits);

    /* take the MPD_RDIGITS most significant digits of v ... */
    _mpd_get_msdigits(&dummy, &word, v, MPD_RDIGITS);
    n = mpd_word_digits(word);
    /* ... left-justified in a single word */
    word *= mpd_pow10[MPD_RDIGITS-n];

    mpd_qresize(z, 2, status);
    /* z := floor(10**(2*k_0 + 2) / word) */
    (void)_mpd_shortdiv(z->data, p10data, 2, word);

    mpd_clear_flags(z);
    z->exp = -(MPD_RDIGITS-2);
    z->len = (z->data[1] == 0) ? 1 : 2;
    mpd_setdigits(z);
}
/*
 * Reciprocal, calculated with Newton's Method. Assumption: result != a.
 * NOTE: The comments in the function show that certain operations are
 * exact. The proof for the maximum error is too long to fit in here.
 * ACL2 proof: maxerror-inverse-complete
 */
static void
_mpd_qreciprocal(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
                 uint32_t *status)
{
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = result;         /* current approximation */
    mpd_t *v;                  /* a, normalized to a number between 0.1 and 1 */
    MPD_NEW_SHARED(vtmp, a);   /* v shares data with a */
    MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
    MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
    MPD_NEW_CONST(two,0,0,1,1,1,2); /* const 2 */
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
    mpd_ssize_t adj, maxprec, initprec;
    uint8_t sign = mpd_sign(a);
    int i;

    assert(result != a);

    /* Normalize |a| to 0.1 <= v < 1; 'adj' records the exponent shift
       that is undone at the end. */
    v = &vtmp;
    mpd_clear_flags(v);
    adj = v->digits + v->exp;
    v->exp = -v->digits;

    /* Initial approximation */
    _mpd_qreciprocal_approx(z, v, status);

    /* Working contexts: truncating rounding, extended exponent range so
       that no intermediate result overflows or underflows. */
    mpd_maxcontext(&varcontext);
    mpd_maxcontext(&maxcontext);
    varcontext.round = maxcontext.round = MPD_ROUND_TRUNC;
    varcontext.emax = maxcontext.emax = MPD_MAX_EMAX + 100;
    varcontext.emin = maxcontext.emin = MPD_MIN_EMIN - 100;
    maxcontext.prec = MPD_MAX_PREC + 100;

    maxprec = ctx->prec;
    maxprec += 2;
    initprec = MPD_RDIGITS-3;

    /* Newton iteration: z := z * (2 - v*z), with the working precision
       doubling at each step according to the schedule in klist. */
    i = recpr_schedule_prec(klist, maxprec, initprec);
    for (; i >= 0; i--) {
        /* Loop invariant: z->digits <= klist[i]+7 */
        /* Let s := z**2, exact result */
        _mpd_qmul_exact(&s, z, z, &maxcontext, status);
        varcontext.prec = 2*klist[i] + 5;
        if (v->digits > varcontext.prec) {
            /* Let t := v, truncated to n >= 2*k+5 fraction digits */
            mpd_qshiftr(&t, v, v->digits-varcontext.prec, status);
            t.exp = -varcontext.prec;
            /* Let t := trunc(v)*s, truncated to n >= 2*k+1 fraction digits */
            mpd_qmul(&t, &t, &s, &varcontext, status);
        }
        else { /* v->digits <= 2*k+5 */
            /* Let t := v*s, truncated to n >= 2*k+1 fraction digits */
            mpd_qmul(&t, v, &s, &varcontext, status);
        }
        /* Let s := 2*z, exact result */
        _mpd_qmul_exact(&s, z, &two, &maxcontext, status);
        /* s.digits < t.digits <= 2*k+5, |adjexp(s)-adjexp(t)| <= 1,
         * so the subtraction generates at most 2*k+6 <= klist[i+1]+7
         * digits. The loop invariant is preserved. */
        _mpd_qsub_exact(z, &s, &t, &maxcontext, status);
    }

    /* Undo the initial normalization and restore the sign of a. */
    if (!mpd_isspecial(z)) {
        z->exp -= adj;
        mpd_set_flags(z, sign);
    }

    mpd_del(&s);
    mpd_del(&t);
    mpd_qfinalize(z, ctx, status);
}
/*
 * Internal function for large numbers:
 *
 *     q, r = divmod(coeff(a), coeff(b))
 *
 * Strategy: Multiply the dividend by the reciprocal of the divisor. The
 * inexact result is fixed by a small loop, using at most one iteration.
 *
 * ACL2 proofs:
 * ------------
 *    1) q is a natural number. (ndivmod-quotient-natp)
 *    2) r is a natural number. (ndivmod-remainder-natp)
 *    3) a = q * b + r (ndivmod-q*b+r==a)
 *    4) r < b (ndivmod-remainder-<-b)
 */
static void
_mpd_base_ndivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
                  uint32_t *status)
{
    mpd_context_t workctx;
    mpd_t *qq = q, *rr = r;    /* actual output targets (may be diverted) */
    mpd_t aa, bb;              /* positive, exponent-0 views of a and b */
    int k;

    /* Shared copies: only the coefficients are used, so force sign
       positive and exponent zero without touching a or b. */
    _mpd_copy_shared(&aa, a);
    _mpd_copy_shared(&bb, b);

    mpd_set_positive(&aa);
    mpd_set_positive(&bb);
    aa.exp = 0;
    bb.exp = 0;

    /* If an output operand aliases an input, divert the output to a
       freshly allocated mpd_t and copy back at the end. */
    if (q == a || q == b) {
        if ((qq = mpd_qnew()) == NULL) {
            *status |= MPD_Malloc_error;
            goto nanresult;
        }
    }
    if (r == a || r == b) {
        if ((rr = mpd_qnew()) == NULL) {
            *status |= MPD_Malloc_error;
            goto nanresult;
        }
    }

    mpd_maxcontext(&workctx);

    /* Let prec := adigits - bdigits + 4 */
    workctx.prec = a->digits - b->digits + 1 + 3;
    if (a->digits > MPD_MAX_PREC || workctx.prec > MPD_MAX_PREC) {
        *status |= MPD_Division_impossible;
        goto nanresult;
    }

    /* Let x := _mpd_qreciprocal(b, prec)
     * Then x is bounded by:
     *    1) 1/b - 10**(-prec - bdigits) < x < 1/b + 10**(-prec - bdigits)
     *    2) 1/b - 10**(-adigits - 4) < x < 1/b + 10**(-adigits - 4)
     */
    _mpd_qreciprocal(rr, &bb, &workctx, &workctx.status);

    /* Get an estimate for the quotient. Let q := a * x
     * Then q is bounded by:
     *    3) a/b - 10**-4 < q < a/b + 10**-4
     */
    _mpd_qmul(qq, &aa, rr, &workctx, &workctx.status);

    /* Truncate q to an integer:
     *    4) a/b - 2 < trunc(q) < a/b + 1
     */
    mpd_qtrunc(qq, qq, &workctx, &workctx.status);

    workctx.prec = aa.digits + 3;
    workctx.emax = MPD_MAX_EMAX + 3;
    workctx.emin = MPD_MIN_EMIN - 3;

    /* Multiply the estimate for q by b:
     *    5) a - 2 * b < trunc(q) * b < a + b
     */
    _mpd_qmul(rr, &bb, qq, &workctx, &workctx.status);

    /* Get the estimate for r such that a = q * b + r. */
    _mpd_qsub_exact(rr, &aa, rr, &workctx, &workctx.status);

    /* Fix the result. At this point -b < r < 2*b, so the correction loop
       takes at most one iteration. */
    for (k = 0;; k++) {
        if (mpd_isspecial(qq) || mpd_isspecial(rr)) {
            *status |= (workctx.status&MPD_Errors);
            goto nanresult;
        }
        if (k > 2) { /* Allow two iterations despite the proof. */
            mpd_err_warn("libmpdec: internal error in "       /* GCOV_NOT_REACHED */
                         "_mpd_base_ndivmod: please report"); /* GCOV_NOT_REACHED */
            *status |= MPD_Invalid_operation;                 /* GCOV_NOT_REACHED */
            goto nanresult;                                   /* GCOV_NOT_REACHED */
        }
        /* r < 0 */
        else if (_mpd_cmp(&zero, rr) == 1) {
            _mpd_qadd_exact(rr, rr, &bb, &workctx, &workctx.status);
            _mpd_qadd_exact(qq, qq, &minus_one, &workctx, &workctx.status);
        }
        /* 0 <= r < b */
        else if (_mpd_cmp(rr, &bb) == -1) {
            break;
        }
        /* r >= b */
        else {
            _mpd_qsub_exact(rr, rr, &bb, &workctx, &workctx.status);
            _mpd_qadd_exact(qq, qq, &one, &workctx, &workctx.status);
        }
    }

    /* Copy diverted results back into the caller's output operands. */
    if (qq != q) {
        if (!mpd_qcopy(q, qq, status)) {
            goto nanresult; /* GCOV_UNLIKELY */
        }
        mpd_del(qq);
    }
    if (rr != r) {
        if (!mpd_qcopy(r, rr, status)) {
            goto nanresult; /* GCOV_UNLIKELY */
        }
        mpd_del(rr);
    }

    *status |= (workctx.status&MPD_Errors);
    return;

nanresult:
    /* Error path: free any diverted temporaries, set both outputs to NaN. */
    if (qq && qq != q) mpd_del(qq);
    if (rr && rr != r) mpd_del(rr);
    mpd_setspecial(q, MPD_POS, MPD_NAN);
    mpd_setspecial(r, MPD_POS, MPD_NAN);
}
  6776. /* LIBMPDEC_ONLY */
  6777. /*
  6778. * Schedule the optimal precision increase for the Newton iteration.
  6779. * v := input operand
  6780. * z_0 := initial approximation
  6781. * initprec := natural number such that abs(sqrt(v) - z_0) < 10**-initprec
  6782. * maxprec := target precision
  6783. *
  6784. * For convenience the output klist contains the elements in reverse order:
  6785. * klist := [k_n-1, ..., k_0], where
  6786. * 1) k_0 <= initprec and
  6787. * 2) abs(sqrt(v) - result) < 10**(-2*k_n-1 + 2) <= 10**-maxprec.
  6788. */
  6789. static inline int
  6790. invroot_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
  6791. mpd_ssize_t maxprec, mpd_ssize_t initprec)
  6792. {
  6793. mpd_ssize_t k;
  6794. int i;
  6795. assert(maxprec >= 3 && initprec >= 3);
  6796. if (maxprec <= initprec) return -1;
  6797. i = 0; k = maxprec;
  6798. do {
  6799. k = (k+3) / 2;
  6800. klist[i++] = k;
  6801. } while (k > initprec);
  6802. return i-1;
  6803. }
  6804. /*
  6805. * Initial approximation for the inverse square root function.
  6806. * Input:
  6807. * v := rational number, with 1 <= v < 100
  6808. * vhat := floor(v * 10**6)
  6809. * Output:
  6810. * z := approximation to 1/sqrt(v), such that abs(z - 1/sqrt(v)) < 10**-3.
  6811. */
  6812. static inline void
  6813. _invroot_init_approx(mpd_t *z, mpd_uint_t vhat)
  6814. {
  6815. mpd_uint_t lo = 1000;
  6816. mpd_uint_t hi = 10000;
  6817. mpd_uint_t a, sq;
  6818. assert(lo*lo <= vhat && vhat < (hi+1)*(hi+1));
  6819. for(;;) {
  6820. a = (lo + hi) / 2;
  6821. sq = a * a;
  6822. if (vhat >= sq) {
  6823. if (vhat < sq + 2*a + 1) {
  6824. break;
  6825. }
  6826. lo = a + 1;
  6827. }
  6828. else {
  6829. hi = a - 1;
  6830. }
  6831. }
  6832. /*
  6833. * After the binary search we have:
  6834. * 1) a**2 <= floor(v * 10**6) < (a + 1)**2
  6835. * This implies:
  6836. * 2) a**2 <= v * 10**6 < (a + 1)**2
  6837. * 3) a <= sqrt(v) * 10**3 < a + 1
  6838. * Since 10**3 <= a:
  6839. * 4) 0 <= 10**prec/a - 1/sqrt(v) < 10**-prec
  6840. * We have:
  6841. * 5) 10**3/a - 10**-3 < floor(10**9/a) * 10**-6 <= 10**3/a
  6842. * Merging 4) and 5):
  6843. * 6) abs(floor(10**9/a) * 10**-6 - 1/sqrt(v)) < 10**-3
  6844. */
  6845. mpd_minalloc(z);
  6846. mpd_clear_flags(z);
  6847. z->data[0] = 1000000000UL / a;
  6848. z->len = 1;
  6849. z->exp = -6;
  6850. mpd_setdigits(z);
  6851. }
/*
 * Set 'result' to 1/sqrt(a).
 * Relative error: abs(result - 1/sqrt(a)) < 10**-prec * 1/sqrt(a)
 */
static void
_mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
              uint32_t *status)
{
    uint32_t workstatus = 0;
    mpd_context_t varcontext, maxcontext;
    mpd_t *z = result;         /* current approximation */
    mpd_t *v;                  /* a, normalized to a number between 1 and 100 */
    MPD_NEW_SHARED(vtmp, a);   /* by default v will share data with a */
    MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
    MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
    MPD_NEW_CONST(one_half,0,-1,1,1,1,5);
    MPD_NEW_CONST(three,0,0,1,1,1,3);
    mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
    mpd_ssize_t ideal_exp, shift;
    mpd_ssize_t adj, tz;
    mpd_ssize_t maxprec, fracdigits;
    mpd_uint_t vhat, dummy;
    int i, n;

    /* Ideal exponent of the result (half of a's exponent, made even). */
    ideal_exp = -(a->exp - (a->exp & 1)) / 2;

    v = &vtmp;
    if (result == a) {
        /* result aliases a: operate on a full copy of a instead. */
        if ((v = mpd_qncopy(a)) == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status);
            return;
        }
    }

    /* normalize a to 1 <= v < 100 */
    if ((v->digits+v->exp) & 1) {
        /* odd adjusted exponent: one integer digit, 1 <= v < 10 */
        fracdigits = v->digits - 1;
        v->exp = -fracdigits;
        n = (v->digits > 7) ? 7 : (int)v->digits;
        /* Let vhat := floor(v * 10**(2*initprec)) */
        _mpd_get_msdigits(&dummy, &vhat, v, n);
        if (n < 7) {
            vhat *= mpd_pow10[7-n];
        }
    }
    else {
        /* even adjusted exponent: two integer digits, 10 <= v < 100 */
        fracdigits = v->digits - 2;
        v->exp = -fracdigits;
        n = (v->digits > 8) ? 8 : (int)v->digits;
        /* Let vhat := floor(v * 10**(2*initprec)) */
        _mpd_get_msdigits(&dummy, &vhat, v, n);
        if (n < 8) {
            vhat *= mpd_pow10[8-n];
        }
    }
    /* Exponent shift applied by the normalization; undone below. */
    adj = (a->exp-v->exp) / 2;

    /* initial approximation */
    _invroot_init_approx(z, vhat);

    mpd_maxcontext(&maxcontext);
    mpd_maxcontext(&varcontext);
    varcontext.round = MPD_ROUND_TRUNC;
    maxprec = ctx->prec + 1;

    /* initprec == 3 */
    i = invroot_schedule_prec(klist, maxprec, 3);
    for (; i >= 0; i--) {
        /* Newton step: z := z * (3 - v*z*z) / 2, with truncated
           working precision 2*k+2. */
        varcontext.prec = 2*klist[i]+2;
        mpd_qmul(&s, z, z, &maxcontext, &workstatus);
        if (v->digits > varcontext.prec) {
            /* Truncate v to the working precision before multiplying. */
            shift = v->digits - varcontext.prec;
            mpd_qshiftr(&t, v, shift, &workstatus);
            t.exp += shift;
            mpd_qmul(&t, &t, &s, &varcontext, &workstatus);
        }
        else {
            mpd_qmul(&t, v, &s, &varcontext, &workstatus);
        }
        mpd_qsub(&t, &three, &t, &maxcontext, &workstatus);
        mpd_qmul(z, z, &t, &varcontext, &workstatus);
        mpd_qmul(z, z, &one_half, &maxcontext, &workstatus);
    }

    z->exp -= adj;

    /* Remove trailing zeros, but do not go below the ideal exponent. */
    tz = mpd_trail_zeros(result);
    shift = ideal_exp - result->exp;
    shift = (tz > shift) ? shift : tz;
    if (shift > 0) {
        mpd_qshiftr_inplace(result, shift);
        result->exp += shift;
    }

    mpd_del(&s);
    mpd_del(&t);
    if (v != &vtmp) mpd_del(v);
    *status |= (workstatus&MPD_Errors);
    *status |= (MPD_Rounded|MPD_Inexact);
}
/* Quiet inverse square root: result := 1/sqrt(a), rounded to the context.
 * Note that the order of the special-case checks matters: zero is tested
 * before the negative check so that -0 yields -Infinity/Division_by_zero
 * rather than Invalid_operation. */
void
mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
             uint32_t *status)
{
    mpd_context_t workctx;

    if (mpd_isspecial(a)) {
        /* NaN operands propagate; -Infinity is an invalid operand. */
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        /* positive infinity */
        _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
        *status |= MPD_Clamped;
        return;
    }
    if (mpd_iszero(a)) {
        /* 1/sqrt(+-0) == infinity with the sign of a. */
        mpd_setspecial(result, mpd_sign(a), MPD_INF);
        *status |= MPD_Division_by_zero;
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* Compute with two extra digits, then round to the caller's context. */
    workctx = *ctx;
    workctx.prec += 2;
    workctx.round = MPD_ROUND_HALF_EVEN;
    _mpd_qinvroot(result, a, &workctx, status);
    mpd_qfinalize(result, ctx, status);
}
  6976. /* END LIBMPDEC_ONLY */
/* Algorithm from decimal.py: integer square root of a scaled coefficient,
 * computed with Newton's method, then adjusted so that the final
 * half-even rounding in the caller's precision is correct. */
static void
_mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
           uint32_t *status)
{
    mpd_context_t maxcontext;
    MPD_NEW_STATIC(c,0,0,0,0);     /* scaled coefficient of a */
    MPD_NEW_STATIC(q,0,0,0,0);     /* quotient c / result */
    MPD_NEW_STATIC(r,0,0,0,0);     /* remainder (also reused as a temp) */
    MPD_NEW_CONST(two,0,0,1,1,1,2);
    mpd_ssize_t prec, ideal_exp;
    mpd_ssize_t l, shift;
    int exact = 0;

    /* Ideal exponent: half of a's exponent, made even. */
    ideal_exp = (a->exp - (a->exp & 1)) / 2;

    if (mpd_isspecial(a)) {
        /* NaN propagates; sqrt of a negative special is invalid;
           sqrt(+Infinity) == +Infinity. */
        if (mpd_qcheck_nan(result, a, ctx, status)) {
            return;
        }
        if (mpd_isnegative(a)) {
            mpd_seterror(result, MPD_Invalid_operation, status);
            return;
        }
        mpd_setspecial(result, MPD_POS, MPD_INF);
        return;
    }
    if (mpd_iszero(a)) {
        /* sqrt(+-0) == +-0 with the ideal exponent. */
        _settriple(result, mpd_sign(a), 0, ideal_exp);
        mpd_qfinalize(result, ctx, status);
        return;
    }
    if (mpd_isnegative(a)) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    mpd_maxcontext(&maxcontext);
    prec = ctx->prec + 1;

    /* c := coefficient of a, scaled by an even power of ten so that
       floor(sqrt(c)) has roughly prec digits. 'exact' tracks whether
       the scaling lost any digits. */
    if (!mpd_qcopy(&c, a, status)) {
        goto malloc_error;
    }
    c.exp = 0;
    if (a->exp & 1) {
        /* Odd exponent: absorb one power of ten into the coefficient. */
        if (!mpd_qshiftl(&c, &c, 1, status)) {
            goto malloc_error;
        }
        l = (a->digits >> 1) + 1;
    }
    else {
        l = (a->digits + 1) >> 1;
    }
    shift = prec - l;
    if (shift >= 0) {
        if (!mpd_qshiftl(&c, &c, 2*shift, status)) {
            goto malloc_error;
        }
        exact = 1;
    }
    else {
        /* Right shift: exact only if no nonzero digits were dropped. */
        exact = !mpd_qshiftr_inplace(&c, -2*shift);
    }

    ideal_exp -= shift;

    /* find result = floor(sqrt(c)) using Newton's method */
    /* Initial guess: 10**prec, an upper bound for floor(sqrt(c)). */
    if (!mpd_qshiftl(result, &one, prec, status)) {
        goto malloc_error;
    }

    while (1) {
        _mpd_qdivmod(&q, &r, &c, result, &maxcontext, &maxcontext.status);
        if (mpd_isspecial(result) || mpd_isspecial(&q)) {
            mpd_seterror(result, maxcontext.status&MPD_Errors, status);
            goto out;
        }
        /* Converged: result <= c/result implies result == floor(sqrt(c)). */
        if (_mpd_cmp(result, &q) <= 0) {
            break;
        }
        /* Newton step: result := (result + c/result) / 2 */
        _mpd_qadd_exact(result, result, &q, &maxcontext, &maxcontext.status);
        if (mpd_isspecial(result)) {
            mpd_seterror(result, maxcontext.status&MPD_Errors, status);
            goto out;
        }
        _mpd_qdivmod(result, &r, result, &two, &maxcontext, &maxcontext.status);
    }

    if (exact) {
        /* The scaling was exact: test whether the root itself is exact,
           i.e. result**2 == c. */
        _mpd_qmul_exact(&r, result, result, &maxcontext, &maxcontext.status);
        if (mpd_isspecial(&r)) {
            mpd_seterror(result, maxcontext.status&MPD_Errors, status);
            goto out;
        }
        exact = (_mpd_cmp(&r, &c) == 0);
    }

    if (exact) {
        /* Exact result: undo the scaling shift. */
        if (shift >= 0) {
            mpd_qshiftr_inplace(result, shift);
        }
        else {
            if (!mpd_qshiftl(result, result, -shift, status)) {
                goto malloc_error;
            }
        }
        ideal_exp += shift;
    }
    else {
        /* Inexact result: nudge a last digit of 0 or 5 to avoid a
           spurious halfway case in the final half-even rounding. */
        int lsd = (int)mpd_lsd(result->data[0]);
        if (lsd == 0 || lsd == 5) {
            result->data[0] += 1;
        }
    }

    result->exp = ideal_exp;

out:
    mpd_del(&c);
    mpd_del(&q);
    mpd_del(&r);
    /* Always finalize with half-even rounding at the caller's precision. */
    maxcontext = *ctx;
    maxcontext.round = MPD_ROUND_HALF_EVEN;
    mpd_qfinalize(result, &maxcontext, status);
    return;

malloc_error:
    mpd_seterror(result, MPD_Malloc_error, status);
    goto out;
}
/* Quiet square root. Rounding is always half-even (see _mpd_qsqrt),
 * independently of ctx->round. */
void
mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
          uint32_t *status)
{
    MPD_NEW_STATIC(aa,0,0,0,0);
    uint32_t xstatus = 0;   /* status of the first attempt */

    /* result may alias a: work on a copy so _mpd_qsqrt can write to
       result freely. */
    if (result == a) {
        if (!mpd_qcopy(&aa, a, status)) {
            mpd_seterror(result, MPD_Malloc_error, status);
            goto out;
        }
        a = &aa;
    }

    _mpd_qsqrt(result, a, ctx, &xstatus);

    if (xstatus & (MPD_Malloc_error|MPD_Division_impossible)) {
        /* The above conditions can occur at very high context precisions
         * if intermediate values get too large. Retry the operation with
         * a lower context precision in case the result is exact.
         *
         * If the result is exact, an upper bound for the number of digits
         * is the number of digits in the input.
         *
         * NOTE: sqrt(40e9) = 2.0e+5 /\ digits(40e9) = digits(2.0e+5) = 2
         */
        uint32_t ystatus = 0;
        mpd_context_t workctx = *ctx;
        workctx.prec = a->digits;

        if (workctx.prec >= ctx->prec) {
            *status |= (xstatus|MPD_Errors);
            goto out; /* No point in repeating this, keep the original error. */
        }

        _mpd_qsqrt(result, a, &workctx, &ystatus);
        if (ystatus != 0) {
            /* The retry was not clean either: merge the error flags of
               both attempts and set the result to NaN. */
            ystatus = *status | ((xstatus|ystatus)&MPD_Errors);
            mpd_seterror(result, ystatus, status);
        }
    }
    else {
        *status |= xstatus;
    }

out:
    mpd_del(&aa);
}
  7138. /******************************************************************************/
  7139. /* Base conversions */
  7140. /******************************************************************************/
/* Space needed to represent an integer mpd_t in base 'base'.
 * Returns SIZE_MAX if the result cannot be represented (the digit count
 * would exceed the range that is safe for double arithmetic). */
size_t
mpd_sizeinbase(const mpd_t *a, uint32_t base)
{
    double x;
    size_t digits;
    double upper_bound;

    assert(mpd_isinteger(a));
    assert(base >= 2);

    if (mpd_iszero(a)) {
        return 1;
    }

    /* Number of decimal digits of the integer value, including the
       trailing zeros implied by a positive exponent. */
    digits = a->digits+a->exp;

#ifdef CONFIG_64
    /* ceil(2711437152599294 / log10(2)) + 4 == 2**53 */
    if (digits > 2711437152599294ULL) {
        return SIZE_MAX;
    }

    upper_bound = (double)((1ULL<<53)-1);
#else
    upper_bound = (double)(SIZE_MAX-1);
#endif

    /* digits / log10(base) == log_base(10**digits); add one for safety. */
    x = (double)digits / log10(base);
    return (x > upper_bound) ? SIZE_MAX : (size_t)x + 1;
}
/* Space needed to import a base 'base' integer of length 'srclen'.
 * The result is the number of mpd_uint_t words (each holding MPD_RDIGITS
 * decimal digits); MPD_SSIZE_MAX signals that the input is too large. */
static mpd_ssize_t
_mpd_importsize(size_t srclen, uint32_t base)
{
    double x;
    double upper_bound;

    assert(srclen > 0);
    assert(base >= 2);

#if SIZE_MAX == UINT64_MAX
    /* Above 2**53 the double computation below would lose precision. */
    if (srclen > (1ULL<<53)) {
        return MPD_SSIZE_MAX;
    }

    assert((1ULL<<53) <= MPD_MAXIMPORT);
    upper_bound = (double)((1ULL<<53)-1);
#else
    upper_bound = MPD_MAXIMPORT-1;
#endif

    /* srclen * log10(base) decimal digits, packed MPD_RDIGITS per word. */
    x = (double)srclen * (log10(base)/MPD_RDIGITS);
    return (x > upper_bound) ? MPD_SSIZE_MAX : (mpd_ssize_t)x + 1;
}
  7186. static uint8_t
  7187. mpd_resize_u16(uint16_t **w, size_t nmemb)
  7188. {
  7189. uint8_t err = 0;
  7190. *w = mpd_realloc(*w, nmemb, sizeof **w, &err);
  7191. return !err;
  7192. }
  7193. static uint8_t
  7194. mpd_resize_u32(uint32_t **w, size_t nmemb)
  7195. {
  7196. uint8_t err = 0;
  7197. *w = mpd_realloc(*w, nmemb, sizeof **w, &err);
  7198. return !err;
  7199. }
/* Convert the multiprecision integer u (ulen words, base MPD_RADIX) to
 * base 'wbase' (<= 2**16) digits in *w, least significant first.
 * u is destroyed in the process. *w is grown on demand; the return value
 * is the number of words written, or SIZE_MAX on allocation failure. */
static size_t
_baseconv_to_u16(uint16_t **w, size_t wlen, mpd_uint_t wbase,
                 mpd_uint_t *u, mpd_ssize_t ulen)
{
    size_t n = 0;

    assert(wlen > 0 && ulen > 0);
    assert(wbase <= (1U<<16));

    /* Repeated short division: each step peels off u % wbase as the next
       output word and replaces u with u / wbase. */
    do {
        if (n >= wlen) {
            if (!mpd_resize_u16(w, n+1)) {
                return SIZE_MAX;
            }
            wlen = n+1;
        }
        (*w)[n++] = (uint16_t)_mpd_shortdiv(u, u, ulen, wbase);
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
        ulen = _mpd_real_size(u, ulen);
    } while (u[ulen-1] != 0);

    return n;
}
/* Build the coefficient of w from the base-'ubase' (<= 2**16) digit array
 * u (ulen words, least significant first), using Horner's scheme:
 * w = (...(u[ulen-1]*ubase + u[ulen-2])*ubase + ...) + u[0].
 * w->data is grown on demand; return the number of words written, or
 * SIZE_MAX on allocation failure. */
static size_t
_coeff_from_u16(mpd_t *w, mpd_ssize_t wlen,
                const mpd_uint_t *u, size_t ulen, uint32_t ubase,
                uint32_t *status)
{
    mpd_ssize_t n = 0;
    mpd_uint_t carry;

    assert(wlen > 0 && ulen > 0);
    assert(ubase <= (1U<<16));

    w->data[n++] = u[--ulen];
    /* ulen is a size_t: decrementing past zero wraps to SIZE_MAX, which
       terminates the loop after u[0] has been processed. */
    while (--ulen != SIZE_MAX) {
        /* w := w * ubase */
        carry = _mpd_shortmul_c(w->data, w->data, n, ubase);
        if (carry) {
            if (n >= wlen) {
                if (!mpd_qresize(w, n+1, status)) {
                    return SIZE_MAX;
                }
                wlen = n+1;
            }
            w->data[n++] = carry;
        }
        /* w := w + u[ulen] */
        carry = _mpd_shortadd(w->data, n, u[ulen]);
        if (carry) {
            if (n >= wlen) {
                if (!mpd_qresize(w, n+1, status)) {
                    return SIZE_MAX;
                }
                wlen = n+1;
            }
            w->data[n++] = carry;
        }
    }

    return n;
}
/* target base wbase < source base ubase */
/* Convert u (ulen words in base 'ubase') to base 'wbase' digits in *w,
 * least significant first. u is destroyed. *w is grown on demand; return
 * the number of words written, or SIZE_MAX on allocation failure. */
static size_t
_baseconv_to_smaller(uint32_t **w, size_t wlen, uint32_t wbase,
                     mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase)
{
    size_t n = 0;

    assert(wlen > 0 && ulen > 0);
    assert(wbase < ubase);

    /* Repeated short division by wbase, remainders become output words. */
    do {
        if (n >= wlen) {
            if (!mpd_resize_u32(w, n+1)) {
                return SIZE_MAX;
            }
            wlen = n+1;
        }
        (*w)[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
        ulen = _mpd_real_size(u, ulen);
    } while (u[ulen-1] != 0);

    return n;
}
  7275. #ifdef CONFIG_32
  7276. /* target base 'wbase' == source base 'ubase' */
  7277. static size_t
  7278. _copy_equal_base(uint32_t **w, size_t wlen,
  7279. const uint32_t *u, size_t ulen)
  7280. {
  7281. if (wlen < ulen) {
  7282. if (!mpd_resize_u32(w, ulen)) {
  7283. return SIZE_MAX;
  7284. }
  7285. }
  7286. memcpy(*w, u, ulen * (sizeof **w));
  7287. return ulen;
  7288. }
/* target base 'wbase' > source base 'ubase' */
/* Convert u (ulen words in base 'ubase', least significant first) to base
 * 'wbase' words in *w using Horner's scheme. *w is grown on demand;
 * return the number of words written, or SIZE_MAX on allocation failure. */
static size_t
_baseconv_to_larger(uint32_t **w, size_t wlen, mpd_uint_t wbase,
                    const mpd_uint_t *u, size_t ulen, mpd_uint_t ubase)
{
    size_t n = 0;
    mpd_uint_t carry;

    assert(wlen > 0 && ulen > 0);
    assert(ubase < wbase);

    (*w)[n++] = u[--ulen];
    /* size_t wrap-around: --0 == SIZE_MAX terminates after u[0]. */
    while (--ulen != SIZE_MAX) {
        /* w := w * ubase (in base wbase) */
        carry = _mpd_shortmul_b(*w, *w, n, ubase, wbase);
        if (carry) {
            if (n >= wlen) {
                if (!mpd_resize_u32(w, n+1)) {
                    return SIZE_MAX;
                }
                wlen = n+1;
            }
            (*w)[n++] = carry;
        }
        /* w := w + u[ulen] */
        carry = _mpd_shortadd_b(*w, n, u[ulen], wbase);
        if (carry) {
            if (n >= wlen) {
                if (!mpd_resize_u32(w, n+1)) {
                    return SIZE_MAX;
                }
                wlen = n+1;
            }
            (*w)[n++] = carry;
        }
    }

    return n;
}
/* target base wbase < source base ubase */
/* Build the coefficient of w from u (ulen words in base 'ubase') by
 * repeated short division. u is destroyed. w->data is grown on demand;
 * return the number of words written, or SIZE_MAX on allocation failure. */
static size_t
_coeff_from_larger_base(mpd_t *w, size_t wlen, mpd_uint_t wbase,
                        mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase,
                        uint32_t *status)
{
    size_t n = 0;

    assert(wlen > 0 && ulen > 0);
    assert(wbase < ubase);

    do {
        if (n >= wlen) {
            if (!mpd_qresize(w, n+1, status)) {
                return SIZE_MAX;
            }
            wlen = n+1;
        }
        w->data[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
        ulen = _mpd_real_size(u, ulen);
    } while (u[ulen-1] != 0);

    return n;
}
  7345. #endif
/* target base 'wbase' > source base 'ubase' */
/* Build the coefficient of w from u (ulen words in base 'ubase', least
 * significant first) using Horner's scheme in base 'wbase'. w->data is
 * grown on demand; return the number of words written, or SIZE_MAX on
 * allocation failure. */
static size_t
_coeff_from_smaller_base(mpd_t *w, mpd_ssize_t wlen, mpd_uint_t wbase,
                         const uint32_t *u, size_t ulen, mpd_uint_t ubase,
                         uint32_t *status)
{
    mpd_ssize_t n = 0;
    mpd_uint_t carry;

    assert(wlen > 0 && ulen > 0);
    assert(wbase > ubase);

    w->data[n++] = u[--ulen];
    /* size_t wrap-around: --0 == SIZE_MAX terminates after u[0]. */
    while (--ulen != SIZE_MAX) {
        /* w := w * ubase (in base wbase) */
        carry = _mpd_shortmul_b(w->data, w->data, n, ubase, wbase);
        if (carry) {
            if (n >= wlen) {
                if (!mpd_qresize(w, n+1, status)) {
                    return SIZE_MAX;
                }
                wlen = n+1;
            }
            w->data[n++] = carry;
        }
        /* w := w + u[ulen] */
        carry = _mpd_shortadd_b(w->data, n, u[ulen], wbase);
        if (carry) {
            if (n >= wlen) {
                if (!mpd_qresize(w, n+1, status)) {
                    return SIZE_MAX;
                }
                wlen = n+1;
            }
            w->data[n++] = carry;
        }
    }

    return n;
}
/*
 * Convert an integer mpd_t to a multiprecision integer with base <= 2**16.
 * The least significant word of the result is (*rdata)[0].
 *
 * If rdata is NULL, space is allocated by the function and rlen is irrelevant.
 * In case of an error any allocated storage is freed and rdata is set back to
 * NULL.
 *
 * If rdata is non-NULL, it MUST be allocated by one of libmpdec's allocation
 * functions and rlen MUST be correct. If necessary, the function will resize
 * rdata. In case of an error the caller must free rdata.
 *
 * Return value: In case of success, the exact length of rdata, SIZE_MAX
 * otherwise.
 */
size_t
mpd_qexport_u16(uint16_t **rdata, size_t rlen, uint32_t rbase,
                const mpd_t *src, uint32_t *status)
{
    MPD_NEW_STATIC(tsrc,0,0,0,0);  /* src with the exponent folded in */
    int alloc = 0; /* rdata == NULL */
    size_t n;

    assert(rbase <= (1U<<16));

    /* Only finite integers can be exported. */
    if (mpd_isspecial(src) || !_mpd_isint(src)) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }

    if (*rdata == NULL) {
        /* Allocate an estimate of the required space ourselves. */
        rlen = mpd_sizeinbase(src, rbase);
        if (rlen == SIZE_MAX) {
            *status |= MPD_Invalid_operation;
            return SIZE_MAX;
        }
        *rdata = mpd_alloc(rlen, sizeof **rdata);
        if (*rdata == NULL) {
            goto malloc_error;
        }
        alloc = 1;
    }

    if (mpd_iszero(src)) {
        **rdata = 0;
        return 1;
    }

    /* tsrc := the integer value of src with exponent 0 (shift the
       coefficient left for a positive exponent, right for a negative
       one; src is an integer, so the right shift drops only zeros). */
    if (src->exp >= 0) {
        if (!mpd_qshiftl(&tsrc, src, src->exp, status)) {
            goto malloc_error;
        }
    }
    else {
        if (mpd_qshiftr(&tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
            goto malloc_error;
        }
    }

    n = _baseconv_to_u16(rdata, rlen, rbase, tsrc.data, tsrc.len);
    if (n == SIZE_MAX) {
        goto malloc_error;
    }

out:
    mpd_del(&tsrc);
    return n;

malloc_error:
    /* Only free storage that this function allocated itself. */
    if (alloc) {
        mpd_free(*rdata);
        *rdata = NULL;
    }
    n = SIZE_MAX;
    *status |= MPD_Malloc_error;
    goto out;
}
/*
 * Convert an integer mpd_t to a multiprecision integer with base<=UINT32_MAX.
 * The least significant word of the result is (*rdata)[0].
 *
 * If rdata is NULL, space is allocated by the function and rlen is irrelevant.
 * In case of an error any allocated storage is freed and rdata is set back to
 * NULL.
 *
 * If rdata is non-NULL, it MUST be allocated by one of libmpdec's allocation
 * functions and rlen MUST be correct. If necessary, the function will resize
 * rdata. In case of an error the caller must free rdata.
 *
 * Return value: In case of success, the exact length of rdata, SIZE_MAX
 * otherwise.
 */
size_t
mpd_qexport_u32(uint32_t **rdata, size_t rlen, uint32_t rbase,
                const mpd_t *src, uint32_t *status)
{
    MPD_NEW_STATIC(tsrc,0,0,0,0);  /* src with the exponent folded in */
    int alloc = 0; /* rdata == NULL */
    size_t n;

    /* Only finite integers can be exported. */
    if (mpd_isspecial(src) || !_mpd_isint(src)) {
        *status |= MPD_Invalid_operation;
        return SIZE_MAX;
    }

    if (*rdata == NULL) {
        /* Allocate an estimate of the required space ourselves. */
        rlen = mpd_sizeinbase(src, rbase);
        if (rlen == SIZE_MAX) {
            *status |= MPD_Invalid_operation;
            return SIZE_MAX;
        }
        *rdata = mpd_alloc(rlen, sizeof **rdata);
        if (*rdata == NULL) {
            goto malloc_error;
        }
        alloc = 1;
    }

    if (mpd_iszero(src)) {
        **rdata = 0;
        return 1;
    }

    /* tsrc := the integer value of src with exponent 0. */
    if (src->exp >= 0) {
        if (!mpd_qshiftl(&tsrc, src, src->exp, status)) {
            goto malloc_error;
        }
    }
    else {
        if (mpd_qshiftr(&tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
            goto malloc_error;
        }
    }

#ifdef CONFIG_64
    /* 64-bit: MPD_RADIX (10**19) is always larger than any 32-bit base. */
    n = _baseconv_to_smaller(rdata, rlen, rbase,
                             tsrc.data, tsrc.len, MPD_RADIX);
#else
    /* 32-bit: choose the conversion direction relative to MPD_RADIX. */
    if (rbase == MPD_RADIX) {
        n = _copy_equal_base(rdata, rlen, tsrc.data, tsrc.len);
    }
    else if (rbase < MPD_RADIX) {
        n = _baseconv_to_smaller(rdata, rlen, rbase,
                                 tsrc.data, tsrc.len, MPD_RADIX);
    }
    else {
        n = _baseconv_to_larger(rdata, rlen, rbase,
                                tsrc.data, tsrc.len, MPD_RADIX);
    }
#endif

    if (n == SIZE_MAX) {
        goto malloc_error;
    }

out:
    mpd_del(&tsrc);
    return n;

malloc_error:
    /* Only free storage that this function allocated itself. */
    if (alloc) {
        mpd_free(*rdata);
        *rdata = NULL;
    }
    n = SIZE_MAX;
    *status |= MPD_Malloc_error;
    goto out;
}
/*
 * Converts a multiprecision integer with base <= UINT16_MAX+1 to an mpd_t.
 * The least significant word of the source is srcdata[0].
 */
void
mpd_qimport_u16(mpd_t *result,
                const uint16_t *srcdata, size_t srclen,
                uint8_t srcsign, uint32_t srcbase,
                const mpd_context_t *ctx, uint32_t *status)
{
    mpd_uint_t *usrc; /* uint16_t src copied to an mpd_uint_t array */
    mpd_ssize_t rlen; /* length of the result */
    size_t n;

    assert(srclen > 0);
    assert(srcbase <= (1U<<16));

    /* Upper bound on the number of result words; reject oversize input. */
    rlen = _mpd_importsize(srclen, srcbase);
    if (rlen == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    /* Widen the 16-bit source words to mpd_uint_t. */
    usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc);
    if (usrc == NULL) {
        mpd_seterror(result, MPD_Malloc_error, status);
        return;
    }
    for (n = 0; n < srclen; n++) {
        usrc[n] = srcdata[n];
    }

    if (!mpd_qresize(result, rlen, status)) {
        goto finish;
    }

    n = _coeff_from_u16(result, rlen, usrc, srclen, srcbase, status);
    if (n == SIZE_MAX) {
        goto finish;
    }

    /* Set sign and exponent, then trim to the actual length and round
       into the caller's context. */
    mpd_set_flags(result, srcsign);
    result->exp = 0;
    result->len = n;
    mpd_setdigits(result);

    mpd_qresize(result, result->len, status);
    mpd_qfinalize(result, ctx, status);

finish:
    mpd_free(usrc);
}
/*
 * Converts a multiprecision integer with base <= UINT32_MAX to an mpd_t.
 * The least significant word of the source is srcdata[0]. 'srcsign'
 * supplies the sign flags; the result is finalized against 'ctx'.
 */
void
mpd_qimport_u32(mpd_t *result,
                const uint32_t *srcdata, size_t srclen,
                uint8_t srcsign, uint32_t srcbase,
                const mpd_context_t *ctx, uint32_t *status)
{
    mpd_ssize_t rlen; /* length of the result */
    size_t n;

    assert(srclen > 0);

    /* Upper bound on the number of mpd_uint_t words needed for the result. */
    rlen = _mpd_importsize(srclen, srcbase);
    if (rlen == MPD_SSIZE_MAX) {
        mpd_seterror(result, MPD_Invalid_operation, status);
        return;
    }

    if (!mpd_qresize(result, rlen, status)) {
        return;
    }

#ifdef CONFIG_64
    /* 64-bit build: MPD_RADIX > UINT32_MAX, so srcbase is always smaller. */
    n = _coeff_from_smaller_base(result, rlen, MPD_RADIX,
                                 srcdata, srclen, srcbase,
                                 status);
#else
    if (srcbase == MPD_RADIX) {
        /* Same base: the source words can be copied verbatim. */
        if (!mpd_qresize(result, srclen, status)) {
            return;
        }
        memcpy(result->data, srcdata, srclen * (sizeof *srcdata));
        n = srclen;
    }
    else if (srcbase < MPD_RADIX) {
        n = _coeff_from_smaller_base(result, rlen, MPD_RADIX,
                                     srcdata, srclen, srcbase,
                                     status);
    }
    else {
        /* srcbase > MPD_RADIX: widen the words to mpd_uint_t first. */
        mpd_uint_t *usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc);
        if (usrc == NULL) {
            mpd_seterror(result, MPD_Malloc_error, status);
            return;
        }
        for (n = 0; n < srclen; n++) {
            usrc[n] = srcdata[n];
        }
        n = _coeff_from_larger_base(result, rlen, MPD_RADIX,
                                    usrc, (mpd_ssize_t)srclen, srcbase,
                                    status);
        mpd_free(usrc);
    }
#endif

    if (n == SIZE_MAX) {
        /* Conversion failed; 'status' has been set by the helper. */
        return;
    }

    mpd_set_flags(result, srcsign);
    result->exp = 0;
    result->len = n;
    mpd_setdigits(result);

    mpd_qresize(result, result->len, status); /* trim excess capacity */
    mpd_qfinalize(result, ctx, status);
}
  7640. /******************************************************************************/
  7641. /* From triple */
  7642. /******************************************************************************/
  7643. #if defined(CONFIG_64) && defined(__SIZEOF_INT128__)
  7644. static mpd_ssize_t
  7645. _set_coeff(uint64_t data[3], uint64_t hi, uint64_t lo)
  7646. {
  7647. __uint128_t d = ((__uint128_t)hi << 64) + lo;
  7648. __uint128_t q, r;
  7649. q = d / MPD_RADIX;
  7650. r = d % MPD_RADIX;
  7651. data[0] = (uint64_t)r;
  7652. d = q;
  7653. q = d / MPD_RADIX;
  7654. r = d % MPD_RADIX;
  7655. data[1] = (uint64_t)r;
  7656. d = q;
  7657. q = d / MPD_RADIX;
  7658. r = d % MPD_RADIX;
  7659. data[2] = (uint64_t)r;
  7660. if (q != 0) {
  7661. abort(); /* GCOV_NOT_REACHED */
  7662. }
  7663. return data[2] != 0 ? 3 : (data[1] != 0 ? 2 : 1);
  7664. }
  7665. #else
  7666. static size_t
  7667. _uint_from_u16(mpd_uint_t *w, mpd_ssize_t wlen, const uint16_t *u, size_t ulen)
  7668. {
  7669. const mpd_uint_t ubase = 1U<<16;
  7670. mpd_ssize_t n = 0;
  7671. mpd_uint_t carry;
  7672. assert(wlen > 0 && ulen > 0);
  7673. w[n++] = u[--ulen];
  7674. while (--ulen != SIZE_MAX) {
  7675. carry = _mpd_shortmul_c(w, w, n, ubase);
  7676. if (carry) {
  7677. if (n >= wlen) {
  7678. abort(); /* GCOV_NOT_REACHED */
  7679. }
  7680. w[n++] = carry;
  7681. }
  7682. carry = _mpd_shortadd(w, n, u[ulen]);
  7683. if (carry) {
  7684. if (n >= wlen) {
  7685. abort(); /* GCOV_NOT_REACHED */
  7686. }
  7687. w[n++] = carry;
  7688. }
  7689. }
  7690. return n;
  7691. }
  7692. static mpd_ssize_t
  7693. _set_coeff(mpd_uint_t *data, mpd_ssize_t len, uint64_t hi, uint64_t lo)
  7694. {
  7695. uint16_t u16[8] = {0};
  7696. u16[7] = (uint16_t)((hi & 0xFFFF000000000000ULL) >> 48);
  7697. u16[6] = (uint16_t)((hi & 0x0000FFFF00000000ULL) >> 32);
  7698. u16[5] = (uint16_t)((hi & 0x00000000FFFF0000ULL) >> 16);
  7699. u16[4] = (uint16_t) (hi & 0x000000000000FFFFULL);
  7700. u16[3] = (uint16_t)((lo & 0xFFFF000000000000ULL) >> 48);
  7701. u16[2] = (uint16_t)((lo & 0x0000FFFF00000000ULL) >> 32);
  7702. u16[1] = (uint16_t)((lo & 0x00000000FFFF0000ULL) >> 16);
  7703. u16[0] = (uint16_t) (lo & 0x000000000000FFFFULL);
  7704. return (mpd_ssize_t)_uint_from_u16(data, len, u16, 8);
  7705. }
  7706. #endif
  7707. static int
  7708. _set_uint128_coeff_exp(mpd_t *result, uint64_t hi, uint64_t lo, mpd_ssize_t exp)
  7709. {
  7710. mpd_uint_t data[5] = {0};
  7711. uint32_t status = 0;
  7712. mpd_ssize_t len;
  7713. #if defined(CONFIG_64) && defined(__SIZEOF_INT128__)
  7714. len = _set_coeff(data, hi, lo);
  7715. #else
  7716. len = _set_coeff(data, 5, hi, lo);
  7717. #endif
  7718. if (!mpd_qresize(result, len, &status)) {
  7719. return -1;
  7720. }
  7721. for (mpd_ssize_t i = 0; i < len; i++) {
  7722. result->data[i] = data[i];
  7723. }
  7724. result->exp = exp;
  7725. result->len = len;
  7726. mpd_setdigits(result);
  7727. return 0;
  7728. }
/*
 * Construct an mpd_t from a (tag, sign, hi, lo, exp) triple. The
 * coefficient is the 128-bit value hi * 2**64 + lo. Returns 0 on
 * success and -1 on error, setting MPD_Conversion_syntax for invalid
 * triples and MPD_Malloc_error for allocation failures.
 */
int
mpd_from_uint128_triple(mpd_t *result, const mpd_uint128_triple_t *triple, uint32_t *status)
{
    /* Widest possible settings: used below to detect triples whose
       coefficient/exponent combination is not exactly representable. */
    static const mpd_context_t maxcontext = {
     .prec=MPD_MAX_PREC,
     .emax=MPD_MAX_EMAX,
     .emin=MPD_MIN_EMIN,
     .round=MPD_ROUND_HALF_EVEN,
     .traps=MPD_Traps,
     .status=0,
     .newtrap=0,
     .clamp=0,
     .allcr=1,
    };
    const enum mpd_triple_class tag = triple->tag;
    const uint8_t sign = triple->sign;
    const uint64_t hi = triple->hi;
    const uint64_t lo = triple->lo;
    mpd_ssize_t exp;

#ifdef CONFIG_32
    /* On 32-bit builds mpd_ssize_t is narrower than triple->exp. */
    if (triple->exp < MPD_SSIZE_MIN || triple->exp > MPD_SSIZE_MAX) {
        goto conversion_error;
    }
#endif
    exp = (mpd_ssize_t)triple->exp;

    switch (tag) {
    case MPD_TRIPLE_QNAN: case MPD_TRIPLE_SNAN: {
        /* NaNs must have a zero exponent; nonzero hi/lo form the payload. */
        if (sign > 1 || exp != 0) {
            goto conversion_error;
        }

        const uint8_t flags = tag == MPD_TRIPLE_QNAN ? MPD_NAN : MPD_SNAN;
        mpd_setspecial(result, sign, flags);

        if (hi == 0 && lo == 0) { /* no payload */
            return 0;
        }

        if (_set_uint128_coeff_exp(result, hi, lo, exp) < 0) {
            goto malloc_error;
        }

        return 0;
    }

    case MPD_TRIPLE_INF: {
        /* Infinities carry no coefficient or exponent. */
        if (sign > 1 || hi != 0 || lo != 0 || exp != 0) {
            goto conversion_error;
        }

        mpd_setspecial(result, sign, MPD_INF);

        return 0;
    }

    case MPD_TRIPLE_NORMAL: {
        if (sign > 1) {
            goto conversion_error;
        }

        const uint8_t flags = sign ? MPD_NEG : MPD_POS;
        mpd_set_flags(result, flags);

        /* Clamp out-of-range exponents so that the finalize step below
           reports them via MPD_Inexact/MPD_Rounded/MPD_Clamped. */
        if (exp > MPD_EXP_INF) {
            exp = MPD_EXP_INF;
        }
        if (exp == MPD_SSIZE_MIN) {
            exp = MPD_SSIZE_MIN+1;
        }

        if (_set_uint128_coeff_exp(result, hi, lo, exp) < 0) {
            goto malloc_error;
        }

        uint32_t workstatus = 0;
        mpd_qfinalize(result, &maxcontext, &workstatus);
        if (workstatus & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) {
            /* The triple was not exactly representable. */
            goto conversion_error;
        }

        return 0;
    }

    default:
        goto conversion_error;
    }

conversion_error:
    mpd_seterror(result, MPD_Conversion_syntax, status);
    return -1;

malloc_error:
    mpd_seterror(result, MPD_Malloc_error, status);
    return -1;
}
  7808. /******************************************************************************/
  7809. /* As triple */
  7810. /******************************************************************************/
  7811. #if defined(CONFIG_64) && defined(__SIZEOF_INT128__)
  7812. static void
  7813. _get_coeff(uint64_t *hi, uint64_t *lo, const mpd_t *a)
  7814. {
  7815. __uint128_t u128 = 0;
  7816. switch (a->len) {
  7817. case 3:
  7818. u128 = a->data[2]; /* fall through */
  7819. case 2:
  7820. u128 = u128 * MPD_RADIX + a->data[1]; /* fall through */
  7821. case 1:
  7822. u128 = u128 * MPD_RADIX + a->data[0];
  7823. break;
  7824. default:
  7825. abort(); /* GCOV_NOT_REACHED */
  7826. }
  7827. *hi = u128 >> 64;
  7828. *lo = (uint64_t)u128;
  7829. }
  7830. #else
/*
 * Convert the multiprecision integer 'u' (ulen base-MPD_RADIX words) to an
 * array of up to 8 base-2**16 words in 'w' (least significant first).
 * Returns the number of words written. 'u' is destroyed in the process.
 * Aborts if the value needs more than 8 output words.
 */
static size_t
_uint_to_u16(uint16_t w[8], mpd_uint_t *u, mpd_ssize_t ulen)
{
    const mpd_uint_t wbase = 1U<<16;
    size_t n = 0;

    assert(ulen > 0);

    do {
        if (n >= 8) {
            abort(); /* GCOV_NOT_REACHED */
        }
        /* Divide u by 2**16 in place; the remainder is the next output word. */
        w[n++] = (uint16_t)_mpd_shortdiv(u, u, ulen, wbase);
        /* ulen is at least 1. u[ulen-1] can only be zero if ulen == 1. */
        ulen = _mpd_real_size(u, ulen);
    } while (u[ulen-1] != 0);

    return n;
}
  7847. static void
  7848. _get_coeff(uint64_t *hi, uint64_t *lo, const mpd_t *a)
  7849. {
  7850. uint16_t u16[8] = {0};
  7851. mpd_uint_t data[5] = {0};
  7852. switch (a->len) {
  7853. case 5:
  7854. data[4] = a->data[4]; /* fall through */
  7855. case 4:
  7856. data[3] = a->data[3]; /* fall through */
  7857. case 3:
  7858. data[2] = a->data[2]; /* fall through */
  7859. case 2:
  7860. data[1] = a->data[1]; /* fall through */
  7861. case 1:
  7862. data[0] = a->data[0];
  7863. break;
  7864. default:
  7865. abort(); /* GCOV_NOT_REACHED */
  7866. }
  7867. _uint_to_u16(u16, data, a->len);
  7868. *hi = (uint64_t)u16[7] << 48;
  7869. *hi |= (uint64_t)u16[6] << 32;
  7870. *hi |= (uint64_t)u16[5] << 16;
  7871. *hi |= (uint64_t)u16[4];
  7872. *lo = (uint64_t)u16[3] << 48;
  7873. *lo |= (uint64_t)u16[2] << 32;
  7874. *lo |= (uint64_t)u16[1] << 16;
  7875. *lo |= (uint64_t)u16[0];
  7876. }
  7877. #endif
/*
 * Classify 'a' and, unless it is an infinity or a zero, store its
 * coefficient as the 128-bit value *hi * 2**64 + *lo. Returns the
 * triple class of 'a', or MPD_TRIPLE_ERROR if the coefficient does
 * not fit in 128 bits.
 */
static enum mpd_triple_class
_coeff_as_uint128(uint64_t *hi, uint64_t *lo, const mpd_t *a)
{
    /* Constant mpd_t for 2**128-1 = 340282366920938463463374607431768211455
       (39 digits), spelled in base-MPD_RADIX words, least significant first. */
#ifdef CONFIG_64
    static mpd_uint_t uint128_max_data[3] = { 3374607431768211455ULL, 4028236692093846346ULL, 3ULL };
    static const mpd_t uint128_max = { MPD_STATIC|MPD_CONST_DATA, 0, 39, 3, 3, uint128_max_data };
#else
    static mpd_uint_t uint128_max_data[5] = { 768211455U, 374607431U, 938463463U, 282366920U, 340U };
    static const mpd_t uint128_max = { MPD_STATIC|MPD_CONST_DATA, 0, 39, 5, 5, uint128_max_data };
#endif
    enum mpd_triple_class ret = MPD_TRIPLE_NORMAL;
    uint32_t status = 0;
    mpd_t coeff;

    *hi = *lo = 0ULL;

    if (mpd_isspecial(a)) {
        if (mpd_isinfinite(a)) {
            return MPD_TRIPLE_INF;
        }

        ret = mpd_isqnan(a) ? MPD_TRIPLE_QNAN : MPD_TRIPLE_SNAN;
        if (a->len == 0) { /* no payload */
            return ret;
        }
    }
    else if (mpd_iszero(a)) {
        return ret;
    }

    /* View the coefficient of 'a' as a positive integer with exponent 0. */
    _mpd_copy_shared(&coeff, a);
    mpd_set_flags(&coeff, 0);
    coeff.exp = 0;

    if (mpd_qcmp(&coeff, &uint128_max, &status) > 0) {
        return MPD_TRIPLE_ERROR;
    }

    _get_coeff(hi, lo, &coeff);
    return ret;
}
  7913. mpd_uint128_triple_t
  7914. mpd_as_uint128_triple(const mpd_t *a)
  7915. {
  7916. mpd_uint128_triple_t triple = { MPD_TRIPLE_ERROR, 0, 0, 0, 0 };
  7917. triple.tag = _coeff_as_uint128(&triple.hi, &triple.lo, a);
  7918. if (triple.tag == MPD_TRIPLE_ERROR) {
  7919. return triple;
  7920. }
  7921. triple.sign = !!mpd_isnegative(a);
  7922. if (triple.tag == MPD_TRIPLE_NORMAL) {
  7923. triple.exp = a->exp;
  7924. }
  7925. return triple;
  7926. }