imath.c 70 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780
  1. /*
  2. Name: imath.c
  3. Purpose: Arbitrary precision integer arithmetic routines.
  4. Author: M. J. Fromberger
  5. Copyright (C) 2002-2007 Michael J. Fromberger, All Rights Reserved.
  6. Permission is hereby granted, free of charge, to any person obtaining a copy
  7. of this software and associated documentation files (the "Software"), to deal
  8. in the Software without restriction, including without limitation the rights
  9. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. copies of the Software, and to permit persons to whom the Software is
  11. furnished to do so, subject to the following conditions:
  12. The above copyright notice and this permission notice shall be included in
  13. all copies or substantial portions of the Software.
  14. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  17. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  19. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  20. SOFTWARE.
  21. */
  22. #include "imath.h"
  23. #include <assert.h>
  24. #include <ctype.h>
  25. #include <stdlib.h>
  26. #include <string.h>
  27. const mp_result MP_OK = 0; /* no error, all is well */
  28. const mp_result MP_FALSE = 0; /* boolean false */
  29. const mp_result MP_TRUE = -1; /* boolean true */
  30. const mp_result MP_MEMORY = -2; /* out of memory */
  31. const mp_result MP_RANGE = -3; /* argument out of range */
  32. const mp_result MP_UNDEF = -4; /* result undefined */
  33. const mp_result MP_TRUNC = -5; /* output truncated */
  34. const mp_result MP_BADARG = -6; /* invalid null argument */
  35. const mp_result MP_MINERR = -6;
  36. const mp_sign MP_NEG = 1; /* value is strictly negative */
  37. const mp_sign MP_ZPOS = 0; /* value is non-negative */
  38. static const char *s_unknown_err = "unknown result code";
  39. static const char *s_error_msg[] = {"error code 0", "boolean true",
  40. "out of memory", "argument out of range",
  41. "result undefined", "output truncated",
  42. "invalid argument", NULL};
  43. /* The ith entry of this table gives the value of log_i(2).
  44. An integer value n requires ceil(log_i(n)) digits to be represented
  45. in base i. Since it is easy to compute lg(n), by counting bits, we
  46. can compute log_i(n) = lg(n) * log_i(2).
  47. The use of this table eliminates a dependency upon linkage against
  48. the standard math libraries.
  49. If MP_MAX_RADIX is increased, this table should be expanded too.
  50. */
  51. static const double s_log2[] = {
  52. 0.000000000, 0.000000000, 1.000000000, 0.630929754, /* (D)(D) 2 3 */
  53. 0.500000000, 0.430676558, 0.386852807, 0.356207187, /* 4 5 6 7 */
  54. 0.333333333, 0.315464877, 0.301029996, 0.289064826, /* 8 9 10 11 */
  55. 0.278942946, 0.270238154, 0.262649535, 0.255958025, /* 12 13 14 15 */
  56. 0.250000000, 0.244650542, 0.239812467, 0.235408913, /* 16 17 18 19 */
  57. 0.231378213, 0.227670249, 0.224243824, 0.221064729, /* 20 21 22 23 */
  58. 0.218104292, 0.215338279, 0.212746054, 0.210309918, /* 24 25 26 27 */
  59. 0.208014598, 0.205846832, 0.203795047, 0.201849087, /* 28 29 30 31 */
  60. 0.200000000, 0.198239863, 0.196561632, 0.194959022, /* 32 33 34 35 */
  61. 0.193426404, /* 36 */
  62. };
  63. /* Return the number of digits needed to represent a static value */
  64. #define MP_VALUE_DIGITS(V) \
  65. ((sizeof(V) + (sizeof(mp_digit) - 1)) / sizeof(mp_digit))
  66. /* Round precision P to nearest word boundary */
  67. static inline mp_size s_round_prec(mp_size P) { return 2 * ((P + 1) / 2); }
  68. /* Set array P of S digits to zero */
  69. static inline void ZERO(mp_digit *P, mp_size S) {
  70. mp_size i__ = S * sizeof(mp_digit);
  71. mp_digit *p__ = P;
  72. memset(p__, 0, i__);
  73. }
  74. /* Copy S digits from array P to array Q */
  75. static inline void COPY(mp_digit *P, mp_digit *Q, mp_size S) {
  76. mp_size i__ = S * sizeof(mp_digit);
  77. mp_digit *p__ = P;
  78. mp_digit *q__ = Q;
  79. memcpy(q__, p__, i__);
  80. }
  81. /* Reverse N elements of unsigned char in A. */
  82. static inline void REV(unsigned char *A, int N) {
  83. unsigned char *u_ = A;
  84. unsigned char *v_ = u_ + N - 1;
  85. while (u_ < v_) {
  86. unsigned char xch = *u_;
  87. *u_++ = *v_;
  88. *v_-- = xch;
  89. }
  90. }
  91. /* Strip leading zeroes from z_ in-place. */
  92. static inline void CLAMP(mp_int z_) {
  93. mp_size uz_ = MP_USED(z_);
  94. mp_digit *dz_ = MP_DIGITS(z_) + uz_ - 1;
  95. while (uz_ > 1 && (*dz_-- == 0)) --uz_;
  96. z_->used = uz_;
  97. }
  98. /* Select min/max. */
  99. static inline int MIN(int A, int B) { return (B < A ? B : A); }
  100. static inline mp_size MAX(mp_size A, mp_size B) { return (B > A ? B : A); }
  101. /* Exchange lvalues A and B of type T, e.g.
  102. SWAP(int, x, y) where x and y are variables of type int. */
  103. #define SWAP(T, A, B) \
  104. do { \
  105. T t_ = (A); \
  106. A = (B); \
  107. B = t_; \
  108. } while (0)
  109. /* Declare a block of N temporary mpz_t values.
  110. These values are initialized to zero.
  111. You must add CLEANUP_TEMP() at the end of the function.
  112. Use TEMP(i) to access a pointer to the ith value.
  113. */
  114. #define DECLARE_TEMP(N) \
  115. struct { \
  116. mpz_t value[(N)]; \
  117. int len; \
  118. mp_result err; \
  119. } temp_ = { \
  120. .len = (N), \
  121. .err = MP_OK, \
  122. }; \
  123. do { \
  124. for (int i = 0; i < temp_.len; i++) { \
  125. mp_int_init(TEMP(i)); \
  126. } \
  127. } while (0)
  128. /* Clear all allocated temp values. */
  129. #define CLEANUP_TEMP() \
  130. CLEANUP: \
  131. do { \
  132. for (int i = 0; i < temp_.len; i++) { \
  133. mp_int_clear(TEMP(i)); \
  134. } \
  135. if (temp_.err != MP_OK) { \
  136. return temp_.err; \
  137. } \
  138. } while (0)
  139. /* A pointer to the kth temp value. */
  140. #define TEMP(K) (temp_.value + (K))
  141. /* Evaluate E, an expression of type mp_result expected to return MP_OK. If
  142. the value is not MP_OK, the error is cached and control resumes at the
  143. cleanup handler, which returns it.
  144. */
  145. #define REQUIRE(E) \
  146. do { \
  147. temp_.err = (E); \
  148. if (temp_.err != MP_OK) goto CLEANUP; \
  149. } while (0)
  150. /* Compare value to zero. */
  151. static inline int CMPZ(mp_int Z) {
  152. if (Z->used == 1 && Z->digits[0] == 0) return 0;
  153. return (Z->sign == MP_NEG) ? -1 : 1;
  154. }
  155. static inline mp_word UPPER_HALF(mp_word W) { return (W >> MP_DIGIT_BIT); }
  156. static inline mp_digit LOWER_HALF(mp_word W) { return (mp_digit)(W); }
  157. /* Report whether the highest-order bit of W is 1. */
  158. static inline bool HIGH_BIT_SET(mp_word W) {
  159. return (W >> (MP_WORD_BIT - 1)) != 0;
  160. }
  161. /* Report whether adding W + V will carry out. */
  162. static inline bool ADD_WILL_OVERFLOW(mp_word W, mp_word V) {
  163. return ((MP_WORD_MAX - V) < W);
  164. }
  165. /* Default number of digits allocated to a new mp_int */
  166. static mp_size default_precision = 8;
  167. void mp_int_default_precision(mp_size size) {
  168. assert(size > 0);
  169. default_precision = size;
  170. }
  171. /* Minimum number of digits to invoke recursive multiply */
  172. static mp_size multiply_threshold = 32;
  173. void mp_int_multiply_threshold(mp_size thresh) {
  174. assert(thresh >= sizeof(mp_word));
  175. multiply_threshold = thresh;
  176. }
  177. /* Allocate a buffer of (at least) num digits, or return
  178. NULL if that couldn't be done. */
  179. static mp_digit *s_alloc(mp_size num);
  180. /* Release a buffer of digits allocated by s_alloc(). */
  181. static void s_free(void *ptr);
  182. /* Insure that z has at least min digits allocated, resizing if
  183. necessary. Returns true if successful, false if out of memory. */
  184. static bool s_pad(mp_int z, mp_size min);
  185. /* Ensure Z has at least N digits allocated. */
  186. static inline mp_result GROW(mp_int Z, mp_size N) {
  187. return s_pad(Z, N) ? MP_OK : MP_MEMORY;
  188. }
  189. /* Fill in a "fake" mp_int on the stack with a given value */
  190. static void s_fake(mp_int z, mp_small value, mp_digit vbuf[]);
  191. static void s_ufake(mp_int z, mp_usmall value, mp_digit vbuf[]);
  192. /* Compare two runs of digits of given length, returns <0, 0, >0 */
  193. static int s_cdig(mp_digit *da, mp_digit *db, mp_size len);
  194. /* Pack the unsigned digits of v into array t */
  195. static int s_uvpack(mp_usmall v, mp_digit t[]);
  196. /* Compare magnitudes of a and b, returns <0, 0, >0 */
  197. static int s_ucmp(mp_int a, mp_int b);
  198. /* Compare magnitudes of a and v, returns <0, 0, >0 */
  199. static int s_vcmp(mp_int a, mp_small v);
  200. static int s_uvcmp(mp_int a, mp_usmall uv);
  201. /* Unsigned magnitude addition; assumes dc is big enough.
  202. Carry out is returned (no memory allocated). */
  203. static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  204. mp_size size_b);
  205. /* Unsigned magnitude subtraction. Assumes dc is big enough. */
  206. static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  207. mp_size size_b);
  208. /* Unsigned recursive multiplication. Assumes dc is big enough. */
  209. static int s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  210. mp_size size_b);
  211. /* Unsigned magnitude multiplication. Assumes dc is big enough. */
  212. static void s_umul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  213. mp_size size_b);
  214. /* Unsigned recursive squaring. Assumes dc is big enough. */
  215. static int s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a);
  216. /* Unsigned magnitude squaring. Assumes dc is big enough. */
  217. static void s_usqr(mp_digit *da, mp_digit *dc, mp_size size_a);
  218. /* Single digit addition. Assumes a is big enough. */
  219. static void s_dadd(mp_int a, mp_digit b);
  220. /* Single digit multiplication. Assumes a is big enough. */
  221. static void s_dmul(mp_int a, mp_digit b);
  222. /* Single digit multiplication on buffers; assumes dc is big enough. */
  223. static void s_dbmul(mp_digit *da, mp_digit b, mp_digit *dc, mp_size size_a);
  224. /* Single digit division. Replaces a with the quotient,
  225. returns the remainder. */
  226. static mp_digit s_ddiv(mp_int a, mp_digit b);
  227. /* Quick division by a power of 2, replaces z (no allocation) */
  228. static void s_qdiv(mp_int z, mp_size p2);
  229. /* Quick remainder by a power of 2, replaces z (no allocation) */
  230. static void s_qmod(mp_int z, mp_size p2);
  231. /* Quick multiplication by a power of 2, replaces z.
  232. Allocates if necessary; returns false in case this fails. */
  233. static int s_qmul(mp_int z, mp_size p2);
  234. /* Quick subtraction from a power of 2, replaces z.
  235. Allocates if necessary; returns false in case this fails. */
  236. static int s_qsub(mp_int z, mp_size p2);
  237. /* Return maximum k such that 2^k divides z. */
  238. static int s_dp2k(mp_int z);
  239. /* Return k >= 0 such that z = 2^k, or -1 if there is no such k. */
  240. static int s_isp2(mp_int z);
  241. /* Set z to 2^k. May allocate; returns false in case this fails. */
  242. static int s_2expt(mp_int z, mp_small k);
  243. /* Normalize a and b for division, returns normalization constant */
  244. static int s_norm(mp_int a, mp_int b);
  245. /* Compute constant mu for Barrett reduction, given modulus m, result
  246. replaces z, m is untouched. */
  247. static mp_result s_brmu(mp_int z, mp_int m);
  248. /* Reduce a modulo m, using Barrett's algorithm. */
  249. static int s_reduce(mp_int x, mp_int m, mp_int mu, mp_int q1, mp_int q2);
  250. /* Modular exponentiation, using Barrett reduction */
  251. static mp_result s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c);
  252. /* Unsigned magnitude division. Assumes |a| > |b|. Allocates temporaries;
  253. overwrites a with quotient, b with remainder. */
  254. static mp_result s_udiv_knuth(mp_int a, mp_int b);
  255. /* Compute the number of digits in radix r required to represent the given
  256. value. Does not account for sign flags, terminators, etc. */
  257. static int s_outlen(mp_int z, mp_size r);
  258. /* Guess how many digits of precision will be needed to represent a radix r
  259. value of the specified number of digits. Returns a value guaranteed to be
  260. no smaller than the actual number required. */
  261. static mp_size s_inlen(int len, mp_size r);
  262. /* Convert a character to a digit value in radix r, or
  263. -1 if out of range */
  264. static int s_ch2val(char c, int r);
  265. /* Convert a digit value to a character */
  266. static char s_val2ch(int v, int caps);
  267. /* Take 2's complement of a buffer in place */
  268. static void s_2comp(unsigned char *buf, int len);
  269. /* Convert a value to binary, ignoring sign. On input, *limpos is the bound on
  270. how many bytes should be written to buf; on output, *limpos is set to the
  271. number of bytes actually written. */
  272. static mp_result s_tobin(mp_int z, unsigned char *buf, int *limpos, int pad);
  273. /* Multiply X by Y into Z, ignoring signs. Requires that Z have enough storage
  274. preallocated to hold the result. */
  275. static inline void UMUL(mp_int X, mp_int Y, mp_int Z) {
  276. mp_size ua_ = MP_USED(X);
  277. mp_size ub_ = MP_USED(Y);
  278. mp_size o_ = ua_ + ub_;
  279. ZERO(MP_DIGITS(Z), o_);
  280. (void)s_kmul(MP_DIGITS(X), MP_DIGITS(Y), MP_DIGITS(Z), ua_, ub_);
  281. Z->used = o_;
  282. CLAMP(Z);
  283. }
  284. /* Square X into Z. Requires that Z have enough storage to hold the result. */
  285. static inline void USQR(mp_int X, mp_int Z) {
  286. mp_size ua_ = MP_USED(X);
  287. mp_size o_ = ua_ + ua_;
  288. ZERO(MP_DIGITS(Z), o_);
  289. (void)s_ksqr(MP_DIGITS(X), MP_DIGITS(Z), ua_);
  290. Z->used = o_;
  291. CLAMP(Z);
  292. }
  293. mp_result mp_int_init(mp_int z) {
  294. if (z == NULL) return MP_BADARG;
  295. z->single = 0;
  296. z->digits = &(z->single);
  297. z->alloc = 1;
  298. z->used = 1;
  299. z->sign = MP_ZPOS;
  300. return MP_OK;
  301. }
  302. mp_int mp_int_alloc(void) {
  303. mp_int out = malloc(sizeof(mpz_t));
  304. if (out != NULL) mp_int_init(out);
  305. return out;
  306. }
  307. mp_result mp_int_init_size(mp_int z, mp_size prec) {
  308. assert(z != NULL);
  309. if (prec == 0) {
  310. prec = default_precision;
  311. } else if (prec == 1) {
  312. return mp_int_init(z);
  313. } else {
  314. prec = s_round_prec(prec);
  315. }
  316. z->digits = s_alloc(prec);
  317. if (MP_DIGITS(z) == NULL) return MP_MEMORY;
  318. z->digits[0] = 0;
  319. z->used = 1;
  320. z->alloc = prec;
  321. z->sign = MP_ZPOS;
  322. return MP_OK;
  323. }
  324. mp_result mp_int_init_copy(mp_int z, mp_int old) {
  325. assert(z != NULL && old != NULL);
  326. mp_size uold = MP_USED(old);
  327. if (uold == 1) {
  328. mp_int_init(z);
  329. } else {
  330. mp_size target = MAX(uold, default_precision);
  331. mp_result res = mp_int_init_size(z, target);
  332. if (res != MP_OK) return res;
  333. }
  334. z->used = uold;
  335. z->sign = old->sign;
  336. COPY(MP_DIGITS(old), MP_DIGITS(z), uold);
  337. return MP_OK;
  338. }
  339. mp_result mp_int_init_value(mp_int z, mp_small value) {
  340. mpz_t vtmp;
  341. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  342. s_fake(&vtmp, value, vbuf);
  343. return mp_int_init_copy(z, &vtmp);
  344. }
  345. mp_result mp_int_init_uvalue(mp_int z, mp_usmall uvalue) {
  346. mpz_t vtmp;
  347. mp_digit vbuf[MP_VALUE_DIGITS(uvalue)];
  348. s_ufake(&vtmp, uvalue, vbuf);
  349. return mp_int_init_copy(z, &vtmp);
  350. }
  351. mp_result mp_int_set_value(mp_int z, mp_small value) {
  352. mpz_t vtmp;
  353. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  354. s_fake(&vtmp, value, vbuf);
  355. return mp_int_copy(&vtmp, z);
  356. }
  357. mp_result mp_int_set_uvalue(mp_int z, mp_usmall uvalue) {
  358. mpz_t vtmp;
  359. mp_digit vbuf[MP_VALUE_DIGITS(uvalue)];
  360. s_ufake(&vtmp, uvalue, vbuf);
  361. return mp_int_copy(&vtmp, z);
  362. }
  363. void mp_int_clear(mp_int z) {
  364. if (z == NULL) return;
  365. if (MP_DIGITS(z) != NULL) {
  366. if (MP_DIGITS(z) != &(z->single)) s_free(MP_DIGITS(z));
  367. z->digits = NULL;
  368. }
  369. }
  370. void mp_int_free(mp_int z) {
  371. assert(z != NULL);
  372. mp_int_clear(z);
  373. free(z); /* note: NOT s_free() */
  374. }
  375. mp_result mp_int_copy(mp_int a, mp_int c) {
  376. assert(a != NULL && c != NULL);
  377. if (a != c) {
  378. mp_size ua = MP_USED(a);
  379. mp_digit *da, *dc;
  380. if (!s_pad(c, ua)) return MP_MEMORY;
  381. da = MP_DIGITS(a);
  382. dc = MP_DIGITS(c);
  383. COPY(da, dc, ua);
  384. c->used = ua;
  385. c->sign = a->sign;
  386. }
  387. return MP_OK;
  388. }
  389. void mp_int_swap(mp_int a, mp_int c) {
  390. if (a != c) {
  391. mpz_t tmp = *a;
  392. *a = *c;
  393. *c = tmp;
  394. if (MP_DIGITS(a) == &(c->single)) a->digits = &(a->single);
  395. if (MP_DIGITS(c) == &(a->single)) c->digits = &(c->single);
  396. }
  397. }
  398. void mp_int_zero(mp_int z) {
  399. assert(z != NULL);
  400. z->digits[0] = 0;
  401. z->used = 1;
  402. z->sign = MP_ZPOS;
  403. }
  404. mp_result mp_int_abs(mp_int a, mp_int c) {
  405. assert(a != NULL && c != NULL);
  406. mp_result res;
  407. if ((res = mp_int_copy(a, c)) != MP_OK) return res;
  408. c->sign = MP_ZPOS;
  409. return MP_OK;
  410. }
  411. mp_result mp_int_neg(mp_int a, mp_int c) {
  412. assert(a != NULL && c != NULL);
  413. mp_result res;
  414. if ((res = mp_int_copy(a, c)) != MP_OK) return res;
  415. if (CMPZ(c) != 0) c->sign = 1 - MP_SIGN(a);
  416. return MP_OK;
  417. }
  418. mp_result mp_int_add(mp_int a, mp_int b, mp_int c) {
  419. assert(a != NULL && b != NULL && c != NULL);
  420. mp_size ua = MP_USED(a);
  421. mp_size ub = MP_USED(b);
  422. mp_size max = MAX(ua, ub);
  423. if (MP_SIGN(a) == MP_SIGN(b)) {
  424. /* Same sign -- add magnitudes, preserve sign of addends */
  425. if (!s_pad(c, max)) return MP_MEMORY;
  426. mp_digit carry = s_uadd(MP_DIGITS(a), MP_DIGITS(b), MP_DIGITS(c), ua, ub);
  427. mp_size uc = max;
  428. if (carry) {
  429. if (!s_pad(c, max + 1)) return MP_MEMORY;
  430. c->digits[max] = carry;
  431. ++uc;
  432. }
  433. c->used = uc;
  434. c->sign = a->sign;
  435. } else {
  436. /* Different signs -- subtract magnitudes, preserve sign of greater */
  437. int cmp = s_ucmp(a, b); /* magnitude comparison, sign ignored */
  438. /* Set x to max(a, b), y to min(a, b) to simplify later code.
  439. A special case yields zero for equal magnitudes.
  440. */
  441. mp_int x, y;
  442. if (cmp == 0) {
  443. mp_int_zero(c);
  444. return MP_OK;
  445. } else if (cmp < 0) {
  446. x = b;
  447. y = a;
  448. } else {
  449. x = a;
  450. y = b;
  451. }
  452. if (!s_pad(c, MP_USED(x))) return MP_MEMORY;
  453. /* Subtract smaller from larger */
  454. s_usub(MP_DIGITS(x), MP_DIGITS(y), MP_DIGITS(c), MP_USED(x), MP_USED(y));
  455. c->used = x->used;
  456. CLAMP(c);
  457. /* Give result the sign of the larger */
  458. c->sign = x->sign;
  459. }
  460. return MP_OK;
  461. }
  462. mp_result mp_int_add_value(mp_int a, mp_small value, mp_int c) {
  463. mpz_t vtmp;
  464. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  465. s_fake(&vtmp, value, vbuf);
  466. return mp_int_add(a, &vtmp, c);
  467. }
  468. mp_result mp_int_sub(mp_int a, mp_int b, mp_int c) {
  469. assert(a != NULL && b != NULL && c != NULL);
  470. mp_size ua = MP_USED(a);
  471. mp_size ub = MP_USED(b);
  472. mp_size max = MAX(ua, ub);
  473. if (MP_SIGN(a) != MP_SIGN(b)) {
  474. /* Different signs -- add magnitudes and keep sign of a */
  475. if (!s_pad(c, max)) return MP_MEMORY;
  476. mp_digit carry = s_uadd(MP_DIGITS(a), MP_DIGITS(b), MP_DIGITS(c), ua, ub);
  477. mp_size uc = max;
  478. if (carry) {
  479. if (!s_pad(c, max + 1)) return MP_MEMORY;
  480. c->digits[max] = carry;
  481. ++uc;
  482. }
  483. c->used = uc;
  484. c->sign = a->sign;
  485. } else {
  486. /* Same signs -- subtract magnitudes */
  487. if (!s_pad(c, max)) return MP_MEMORY;
  488. mp_int x, y;
  489. mp_sign osign;
  490. int cmp = s_ucmp(a, b);
  491. if (cmp >= 0) {
  492. x = a;
  493. y = b;
  494. osign = MP_ZPOS;
  495. } else {
  496. x = b;
  497. y = a;
  498. osign = MP_NEG;
  499. }
  500. if (MP_SIGN(a) == MP_NEG && cmp != 0) osign = 1 - osign;
  501. s_usub(MP_DIGITS(x), MP_DIGITS(y), MP_DIGITS(c), MP_USED(x), MP_USED(y));
  502. c->used = x->used;
  503. CLAMP(c);
  504. c->sign = osign;
  505. }
  506. return MP_OK;
  507. }
  508. mp_result mp_int_sub_value(mp_int a, mp_small value, mp_int c) {
  509. mpz_t vtmp;
  510. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  511. s_fake(&vtmp, value, vbuf);
  512. return mp_int_sub(a, &vtmp, c);
  513. }
  514. mp_result mp_int_mul(mp_int a, mp_int b, mp_int c) {
  515. assert(a != NULL && b != NULL && c != NULL);
  516. /* If either input is zero, we can shortcut multiplication */
  517. if (mp_int_compare_zero(a) == 0 || mp_int_compare_zero(b) == 0) {
  518. mp_int_zero(c);
  519. return MP_OK;
  520. }
  521. /* Output is positive if inputs have same sign, otherwise negative */
  522. mp_sign osign = (MP_SIGN(a) == MP_SIGN(b)) ? MP_ZPOS : MP_NEG;
  523. /* If the output is not identical to any of the inputs, we'll write the
  524. results directly; otherwise, allocate a temporary space. */
  525. mp_size ua = MP_USED(a);
  526. mp_size ub = MP_USED(b);
  527. mp_size osize = MAX(ua, ub);
  528. osize = 4 * ((osize + 1) / 2);
  529. mp_digit *out;
  530. mp_size p = 0;
  531. if (c == a || c == b) {
  532. p = MAX(s_round_prec(osize), default_precision);
  533. if ((out = s_alloc(p)) == NULL) return MP_MEMORY;
  534. } else {
  535. if (!s_pad(c, osize)) return MP_MEMORY;
  536. out = MP_DIGITS(c);
  537. }
  538. ZERO(out, osize);
  539. if (!s_kmul(MP_DIGITS(a), MP_DIGITS(b), out, ua, ub)) return MP_MEMORY;
  540. /* If we allocated a new buffer, get rid of whatever memory c was already
  541. using, and fix up its fields to reflect that.
  542. */
  543. if (out != MP_DIGITS(c)) {
  544. if ((void *)MP_DIGITS(c) != (void *)c) s_free(MP_DIGITS(c));
  545. c->digits = out;
  546. c->alloc = p;
  547. }
  548. c->used = osize; /* might not be true, but we'll fix it ... */
  549. CLAMP(c); /* ... right here */
  550. c->sign = osign;
  551. return MP_OK;
  552. }
  553. mp_result mp_int_mul_value(mp_int a, mp_small value, mp_int c) {
  554. mpz_t vtmp;
  555. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  556. s_fake(&vtmp, value, vbuf);
  557. return mp_int_mul(a, &vtmp, c);
  558. }
  559. mp_result mp_int_mul_pow2(mp_int a, mp_small p2, mp_int c) {
  560. assert(a != NULL && c != NULL && p2 >= 0);
  561. mp_result res = mp_int_copy(a, c);
  562. if (res != MP_OK) return res;
  563. if (s_qmul(c, (mp_size)p2)) {
  564. return MP_OK;
  565. } else {
  566. return MP_MEMORY;
  567. }
  568. }
  569. mp_result mp_int_sqr(mp_int a, mp_int c) {
  570. assert(a != NULL && c != NULL);
  571. /* Get a temporary buffer big enough to hold the result */
  572. mp_size osize = (mp_size)4 * ((MP_USED(a) + 1) / 2);
  573. mp_size p = 0;
  574. mp_digit *out;
  575. if (a == c) {
  576. p = s_round_prec(osize);
  577. p = MAX(p, default_precision);
  578. if ((out = s_alloc(p)) == NULL) return MP_MEMORY;
  579. } else {
  580. if (!s_pad(c, osize)) return MP_MEMORY;
  581. out = MP_DIGITS(c);
  582. }
  583. ZERO(out, osize);
  584. s_ksqr(MP_DIGITS(a), out, MP_USED(a));
  585. /* Get rid of whatever memory c was already using, and fix up its fields to
  586. reflect the new digit array it's using
  587. */
  588. if (out != MP_DIGITS(c)) {
  589. if ((void *)MP_DIGITS(c) != (void *)c) s_free(MP_DIGITS(c));
  590. c->digits = out;
  591. c->alloc = p;
  592. }
  593. c->used = osize; /* might not be true, but we'll fix it ... */
  594. CLAMP(c); /* ... right here */
  595. c->sign = MP_ZPOS;
  596. return MP_OK;
  597. }
  598. mp_result mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r) {
  599. assert(a != NULL && b != NULL && q != r);
  600. int cmp;
  601. mp_result res = MP_OK;
  602. mp_int qout, rout;
  603. mp_sign sa = MP_SIGN(a);
  604. mp_sign sb = MP_SIGN(b);
  605. if (CMPZ(b) == 0) {
  606. return MP_UNDEF;
  607. } else if ((cmp = s_ucmp(a, b)) < 0) {
  608. /* If |a| < |b|, no division is required:
  609. q = 0, r = a
  610. */
  611. if (r && (res = mp_int_copy(a, r)) != MP_OK) return res;
  612. if (q) mp_int_zero(q);
  613. return MP_OK;
  614. } else if (cmp == 0) {
  615. /* If |a| = |b|, no division is required:
  616. q = 1 or -1, r = 0
  617. */
  618. if (r) mp_int_zero(r);
  619. if (q) {
  620. mp_int_zero(q);
  621. q->digits[0] = 1;
  622. if (sa != sb) q->sign = MP_NEG;
  623. }
  624. return MP_OK;
  625. }
  626. /* When |a| > |b|, real division is required. We need someplace to store
  627. quotient and remainder, but q and r are allowed to be NULL or to overlap
  628. with the inputs.
  629. */
  630. DECLARE_TEMP(2);
  631. int lg;
  632. if ((lg = s_isp2(b)) < 0) {
  633. if (q && b != q) {
  634. REQUIRE(mp_int_copy(a, q));
  635. qout = q;
  636. } else {
  637. REQUIRE(mp_int_copy(a, TEMP(0)));
  638. qout = TEMP(0);
  639. }
  640. if (r && a != r) {
  641. REQUIRE(mp_int_copy(b, r));
  642. rout = r;
  643. } else {
  644. REQUIRE(mp_int_copy(b, TEMP(1)));
  645. rout = TEMP(1);
  646. }
  647. REQUIRE(s_udiv_knuth(qout, rout));
  648. } else {
  649. if (q) REQUIRE(mp_int_copy(a, q));
  650. if (r) REQUIRE(mp_int_copy(a, r));
  651. if (q) s_qdiv(q, (mp_size)lg);
  652. qout = q;
  653. if (r) s_qmod(r, (mp_size)lg);
  654. rout = r;
  655. }
  656. /* Recompute signs for output */
  657. if (rout) {
  658. rout->sign = sa;
  659. if (CMPZ(rout) == 0) rout->sign = MP_ZPOS;
  660. }
  661. if (qout) {
  662. qout->sign = (sa == sb) ? MP_ZPOS : MP_NEG;
  663. if (CMPZ(qout) == 0) qout->sign = MP_ZPOS;
  664. }
  665. if (q) REQUIRE(mp_int_copy(qout, q));
  666. if (r) REQUIRE(mp_int_copy(rout, r));
  667. CLEANUP_TEMP();
  668. return res;
  669. }
  670. mp_result mp_int_mod(mp_int a, mp_int m, mp_int c) {
  671. DECLARE_TEMP(1);
  672. mp_int out = (m == c) ? TEMP(0) : c;
  673. REQUIRE(mp_int_div(a, m, NULL, out));
  674. if (CMPZ(out) < 0) {
  675. REQUIRE(mp_int_add(out, m, c));
  676. } else {
  677. REQUIRE(mp_int_copy(out, c));
  678. }
  679. CLEANUP_TEMP();
  680. return MP_OK;
  681. }
  682. mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r) {
  683. mpz_t vtmp;
  684. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  685. s_fake(&vtmp, value, vbuf);
  686. DECLARE_TEMP(1);
  687. REQUIRE(mp_int_div(a, &vtmp, q, TEMP(0)));
  688. if (r) (void)mp_int_to_int(TEMP(0), r); /* can't fail */
  689. CLEANUP_TEMP();
  690. return MP_OK;
  691. }
  692. mp_result mp_int_div_pow2(mp_int a, mp_small p2, mp_int q, mp_int r) {
  693. assert(a != NULL && p2 >= 0 && q != r);
  694. mp_result res = MP_OK;
  695. if (q != NULL && (res = mp_int_copy(a, q)) == MP_OK) {
  696. s_qdiv(q, (mp_size)p2);
  697. }
  698. if (res == MP_OK && r != NULL && (res = mp_int_copy(a, r)) == MP_OK) {
  699. s_qmod(r, (mp_size)p2);
  700. }
  701. return res;
  702. }
  703. mp_result mp_int_expt(mp_int a, mp_small b, mp_int c) {
  704. assert(c != NULL);
  705. if (b < 0) return MP_RANGE;
  706. DECLARE_TEMP(1);
  707. REQUIRE(mp_int_copy(a, TEMP(0)));
  708. (void)mp_int_set_value(c, 1);
  709. unsigned int v = labs(b);
  710. while (v != 0) {
  711. if (v & 1) {
  712. REQUIRE(mp_int_mul(c, TEMP(0), c));
  713. }
  714. v >>= 1;
  715. if (v == 0) break;
  716. REQUIRE(mp_int_sqr(TEMP(0), TEMP(0)));
  717. }
  718. CLEANUP_TEMP();
  719. return MP_OK;
  720. }
  721. mp_result mp_int_expt_value(mp_small a, mp_small b, mp_int c) {
  722. assert(c != NULL);
  723. if (b < 0) return MP_RANGE;
  724. DECLARE_TEMP(1);
  725. REQUIRE(mp_int_set_value(TEMP(0), a));
  726. (void)mp_int_set_value(c, 1);
  727. unsigned int v = labs(b);
  728. while (v != 0) {
  729. if (v & 1) {
  730. REQUIRE(mp_int_mul(c, TEMP(0), c));
  731. }
  732. v >>= 1;
  733. if (v == 0) break;
  734. REQUIRE(mp_int_sqr(TEMP(0), TEMP(0)));
  735. }
  736. CLEANUP_TEMP();
  737. return MP_OK;
  738. }
  739. mp_result mp_int_expt_full(mp_int a, mp_int b, mp_int c) {
  740. assert(a != NULL && b != NULL && c != NULL);
  741. if (MP_SIGN(b) == MP_NEG) return MP_RANGE;
  742. DECLARE_TEMP(1);
  743. REQUIRE(mp_int_copy(a, TEMP(0)));
  744. (void)mp_int_set_value(c, 1);
  745. for (unsigned ix = 0; ix < MP_USED(b); ++ix) {
  746. mp_digit d = b->digits[ix];
  747. for (unsigned jx = 0; jx < MP_DIGIT_BIT; ++jx) {
  748. if (d & 1) {
  749. REQUIRE(mp_int_mul(c, TEMP(0), c));
  750. }
  751. d >>= 1;
  752. if (d == 0 && ix + 1 == MP_USED(b)) break;
  753. REQUIRE(mp_int_sqr(TEMP(0), TEMP(0)));
  754. }
  755. }
  756. CLEANUP_TEMP();
  757. return MP_OK;
  758. }
  759. int mp_int_compare(mp_int a, mp_int b) {
  760. assert(a != NULL && b != NULL);
  761. mp_sign sa = MP_SIGN(a);
  762. if (sa == MP_SIGN(b)) {
  763. int cmp = s_ucmp(a, b);
  764. /* If they're both zero or positive, the normal comparison applies; if both
  765. negative, the sense is reversed. */
  766. if (sa == MP_ZPOS) {
  767. return cmp;
  768. } else {
  769. return -cmp;
  770. }
  771. } else if (sa == MP_ZPOS) {
  772. return 1;
  773. } else {
  774. return -1;
  775. }
  776. }
  777. int mp_int_compare_unsigned(mp_int a, mp_int b) {
  778. assert(a != NULL && b != NULL);
  779. return s_ucmp(a, b);
  780. }
  781. int mp_int_compare_zero(mp_int z) {
  782. assert(z != NULL);
  783. if (MP_USED(z) == 1 && z->digits[0] == 0) {
  784. return 0;
  785. } else if (MP_SIGN(z) == MP_ZPOS) {
  786. return 1;
  787. } else {
  788. return -1;
  789. }
  790. }
  791. int mp_int_compare_value(mp_int z, mp_small value) {
  792. assert(z != NULL);
  793. mp_sign vsign = (value < 0) ? MP_NEG : MP_ZPOS;
  794. if (vsign == MP_SIGN(z)) {
  795. int cmp = s_vcmp(z, value);
  796. return (vsign == MP_ZPOS) ? cmp : -cmp;
  797. } else {
  798. return (value < 0) ? 1 : -1;
  799. }
  800. }
  801. int mp_int_compare_uvalue(mp_int z, mp_usmall uv) {
  802. assert(z != NULL);
  803. if (MP_SIGN(z) == MP_NEG) {
  804. return -1;
  805. } else {
  806. return s_uvcmp(z, uv);
  807. }
  808. }
  809. mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m, mp_int c) {
  810. assert(a != NULL && b != NULL && c != NULL && m != NULL);
  811. /* Zero moduli and negative exponents are not considered. */
  812. if (CMPZ(m) == 0) return MP_UNDEF;
  813. if (CMPZ(b) < 0) return MP_RANGE;
  814. mp_size um = MP_USED(m);
  815. DECLARE_TEMP(3);
  816. REQUIRE(GROW(TEMP(0), 2 * um));
  817. REQUIRE(GROW(TEMP(1), 2 * um));
  818. mp_int s;
  819. if (c == b || c == m) {
  820. REQUIRE(GROW(TEMP(2), 2 * um));
  821. s = TEMP(2);
  822. } else {
  823. s = c;
  824. }
  825. REQUIRE(mp_int_mod(a, m, TEMP(0)));
  826. REQUIRE(s_brmu(TEMP(1), m));
  827. REQUIRE(s_embar(TEMP(0), b, m, TEMP(1), s));
  828. REQUIRE(mp_int_copy(s, c));
  829. CLEANUP_TEMP();
  830. return MP_OK;
  831. }
  832. mp_result mp_int_exptmod_evalue(mp_int a, mp_small value, mp_int m, mp_int c) {
  833. mpz_t vtmp;
  834. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  835. s_fake(&vtmp, value, vbuf);
  836. return mp_int_exptmod(a, &vtmp, m, c);
  837. }
  838. mp_result mp_int_exptmod_bvalue(mp_small value, mp_int b, mp_int m, mp_int c) {
  839. mpz_t vtmp;
  840. mp_digit vbuf[MP_VALUE_DIGITS(value)];
  841. s_fake(&vtmp, value, vbuf);
  842. return mp_int_exptmod(&vtmp, b, m, c);
  843. }
  844. mp_result mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu,
  845. mp_int c) {
  846. assert(a && b && m && c);
  847. /* Zero moduli and negative exponents are not considered. */
  848. if (CMPZ(m) == 0) return MP_UNDEF;
  849. if (CMPZ(b) < 0) return MP_RANGE;
  850. DECLARE_TEMP(2);
  851. mp_size um = MP_USED(m);
  852. REQUIRE(GROW(TEMP(0), 2 * um));
  853. mp_int s;
  854. if (c == b || c == m) {
  855. REQUIRE(GROW(TEMP(1), 2 * um));
  856. s = TEMP(1);
  857. } else {
  858. s = c;
  859. }
  860. REQUIRE(mp_int_mod(a, m, TEMP(0)));
  861. REQUIRE(s_embar(TEMP(0), b, m, mu, s));
  862. REQUIRE(mp_int_copy(s, c));
  863. CLEANUP_TEMP();
  864. return MP_OK;
  865. }
  866. mp_result mp_int_redux_const(mp_int m, mp_int c) {
  867. assert(m != NULL && c != NULL && m != c);
  868. return s_brmu(c, m);
  869. }
  870. mp_result mp_int_invmod(mp_int a, mp_int m, mp_int c) {
  871. assert(a != NULL && m != NULL && c != NULL);
  872. if (CMPZ(a) == 0 || CMPZ(m) <= 0) return MP_RANGE;
  873. DECLARE_TEMP(2);
  874. REQUIRE(mp_int_egcd(a, m, TEMP(0), TEMP(1), NULL));
  875. if (mp_int_compare_value(TEMP(0), 1) != 0) {
  876. REQUIRE(MP_UNDEF);
  877. }
  878. /* It is first necessary to constrain the value to the proper range */
  879. REQUIRE(mp_int_mod(TEMP(1), m, TEMP(1)));
  880. /* Now, if 'a' was originally negative, the value we have is actually the
  881. magnitude of the negative representative; to get the positive value we
  882. have to subtract from the modulus. Otherwise, the value is okay as it
  883. stands.
  884. */
  885. if (MP_SIGN(a) == MP_NEG) {
  886. REQUIRE(mp_int_sub(m, TEMP(1), c));
  887. } else {
  888. REQUIRE(mp_int_copy(TEMP(1), c));
  889. }
  890. CLEANUP_TEMP();
  891. return MP_OK;
  892. }
  893. /* Binary GCD algorithm due to Josef Stein, 1961 */
  894. mp_result mp_int_gcd(mp_int a, mp_int b, mp_int c) {
  895. assert(a != NULL && b != NULL && c != NULL);
  896. int ca = CMPZ(a);
  897. int cb = CMPZ(b);
  898. if (ca == 0 && cb == 0) {
  899. return MP_UNDEF;
  900. } else if (ca == 0) {
  901. return mp_int_abs(b, c);
  902. } else if (cb == 0) {
  903. return mp_int_abs(a, c);
  904. }
  905. DECLARE_TEMP(3);
  906. REQUIRE(mp_int_copy(a, TEMP(0)));
  907. REQUIRE(mp_int_copy(b, TEMP(1)));
  908. TEMP(0)->sign = MP_ZPOS;
  909. TEMP(1)->sign = MP_ZPOS;
  910. int k = 0;
  911. { /* Divide out common factors of 2 from u and v */
  912. int div2_u = s_dp2k(TEMP(0));
  913. int div2_v = s_dp2k(TEMP(1));
  914. k = MIN(div2_u, div2_v);
  915. s_qdiv(TEMP(0), (mp_size)k);
  916. s_qdiv(TEMP(1), (mp_size)k);
  917. }
  918. if (mp_int_is_odd(TEMP(0))) {
  919. REQUIRE(mp_int_neg(TEMP(1), TEMP(2)));
  920. } else {
  921. REQUIRE(mp_int_copy(TEMP(0), TEMP(2)));
  922. }
  923. for (;;) {
  924. s_qdiv(TEMP(2), s_dp2k(TEMP(2)));
  925. if (CMPZ(TEMP(2)) > 0) {
  926. REQUIRE(mp_int_copy(TEMP(2), TEMP(0)));
  927. } else {
  928. REQUIRE(mp_int_neg(TEMP(2), TEMP(1)));
  929. }
  930. REQUIRE(mp_int_sub(TEMP(0), TEMP(1), TEMP(2)));
  931. if (CMPZ(TEMP(2)) == 0) break;
  932. }
  933. REQUIRE(mp_int_abs(TEMP(0), c));
  934. if (!s_qmul(c, (mp_size)k)) REQUIRE(MP_MEMORY);
  935. CLEANUP_TEMP();
  936. return MP_OK;
  937. }
  938. /* This is the binary GCD algorithm again, but this time we keep track of the
  939. elementary matrix operations as we go, so we can get values x and y
  940. satisfying c = ax + by.
  941. */
  942. mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c, mp_int x, mp_int y) {
  943. assert(a != NULL && b != NULL && c != NULL && (x != NULL || y != NULL));
  944. mp_result res = MP_OK;
  945. int ca = CMPZ(a);
  946. int cb = CMPZ(b);
  947. if (ca == 0 && cb == 0) {
  948. return MP_UNDEF;
  949. } else if (ca == 0) {
  950. if ((res = mp_int_abs(b, c)) != MP_OK) return res;
  951. mp_int_zero(x);
  952. (void)mp_int_set_value(y, 1);
  953. return MP_OK;
  954. } else if (cb == 0) {
  955. if ((res = mp_int_abs(a, c)) != MP_OK) return res;
  956. (void)mp_int_set_value(x, 1);
  957. mp_int_zero(y);
  958. return MP_OK;
  959. }
  960. /* Initialize temporaries:
  961. A:0, B:1, C:2, D:3, u:4, v:5, ou:6, ov:7 */
  962. DECLARE_TEMP(8);
  963. REQUIRE(mp_int_set_value(TEMP(0), 1));
  964. REQUIRE(mp_int_set_value(TEMP(3), 1));
  965. REQUIRE(mp_int_copy(a, TEMP(4)));
  966. REQUIRE(mp_int_copy(b, TEMP(5)));
  967. /* We will work with absolute values here */
  968. TEMP(4)->sign = MP_ZPOS;
  969. TEMP(5)->sign = MP_ZPOS;
  970. int k = 0;
  971. { /* Divide out common factors of 2 from u and v */
  972. int div2_u = s_dp2k(TEMP(4)), div2_v = s_dp2k(TEMP(5));
  973. k = MIN(div2_u, div2_v);
  974. s_qdiv(TEMP(4), k);
  975. s_qdiv(TEMP(5), k);
  976. }
  977. REQUIRE(mp_int_copy(TEMP(4), TEMP(6)));
  978. REQUIRE(mp_int_copy(TEMP(5), TEMP(7)));
  979. for (;;) {
  980. while (mp_int_is_even(TEMP(4))) {
  981. s_qdiv(TEMP(4), 1);
  982. if (mp_int_is_odd(TEMP(0)) || mp_int_is_odd(TEMP(1))) {
  983. REQUIRE(mp_int_add(TEMP(0), TEMP(7), TEMP(0)));
  984. REQUIRE(mp_int_sub(TEMP(1), TEMP(6), TEMP(1)));
  985. }
  986. s_qdiv(TEMP(0), 1);
  987. s_qdiv(TEMP(1), 1);
  988. }
  989. while (mp_int_is_even(TEMP(5))) {
  990. s_qdiv(TEMP(5), 1);
  991. if (mp_int_is_odd(TEMP(2)) || mp_int_is_odd(TEMP(3))) {
  992. REQUIRE(mp_int_add(TEMP(2), TEMP(7), TEMP(2)));
  993. REQUIRE(mp_int_sub(TEMP(3), TEMP(6), TEMP(3)));
  994. }
  995. s_qdiv(TEMP(2), 1);
  996. s_qdiv(TEMP(3), 1);
  997. }
  998. if (mp_int_compare(TEMP(4), TEMP(5)) >= 0) {
  999. REQUIRE(mp_int_sub(TEMP(4), TEMP(5), TEMP(4)));
  1000. REQUIRE(mp_int_sub(TEMP(0), TEMP(2), TEMP(0)));
  1001. REQUIRE(mp_int_sub(TEMP(1), TEMP(3), TEMP(1)));
  1002. } else {
  1003. REQUIRE(mp_int_sub(TEMP(5), TEMP(4), TEMP(5)));
  1004. REQUIRE(mp_int_sub(TEMP(2), TEMP(0), TEMP(2)));
  1005. REQUIRE(mp_int_sub(TEMP(3), TEMP(1), TEMP(3)));
  1006. }
  1007. if (CMPZ(TEMP(4)) == 0) {
  1008. if (x) REQUIRE(mp_int_copy(TEMP(2), x));
  1009. if (y) REQUIRE(mp_int_copy(TEMP(3), y));
  1010. if (c) {
  1011. if (!s_qmul(TEMP(5), k)) {
  1012. REQUIRE(MP_MEMORY);
  1013. }
  1014. REQUIRE(mp_int_copy(TEMP(5), c));
  1015. }
  1016. break;
  1017. }
  1018. }
  1019. CLEANUP_TEMP();
  1020. return MP_OK;
  1021. }
  1022. mp_result mp_int_lcm(mp_int a, mp_int b, mp_int c) {
  1023. assert(a != NULL && b != NULL && c != NULL);
  1024. /* Since a * b = gcd(a, b) * lcm(a, b), we can compute
  1025. lcm(a, b) = (a / gcd(a, b)) * b.
  1026. This formulation insures everything works even if the input
  1027. variables share space.
  1028. */
  1029. DECLARE_TEMP(1);
  1030. REQUIRE(mp_int_gcd(a, b, TEMP(0)));
  1031. REQUIRE(mp_int_div(a, TEMP(0), TEMP(0), NULL));
  1032. REQUIRE(mp_int_mul(TEMP(0), b, TEMP(0)));
  1033. REQUIRE(mp_int_copy(TEMP(0), c));
  1034. CLEANUP_TEMP();
  1035. return MP_OK;
  1036. }
  1037. bool mp_int_divisible_value(mp_int a, mp_small v) {
  1038. mp_small rem = 0;
  1039. if (mp_int_div_value(a, v, NULL, &rem) != MP_OK) {
  1040. return false;
  1041. }
  1042. return rem == 0;
  1043. }
  1044. int mp_int_is_pow2(mp_int z) {
  1045. assert(z != NULL);
  1046. return s_isp2(z);
  1047. }
  1048. /* Implementation of Newton's root finding method, based loosely on a patch
  1049. contributed by Hal Finkel <half@halssoftware.com>
  1050. modified by M. J. Fromberger.
  1051. */
  1052. mp_result mp_int_root(mp_int a, mp_small b, mp_int c) {
  1053. assert(a != NULL && c != NULL && b > 0);
  1054. if (b == 1) {
  1055. return mp_int_copy(a, c);
  1056. }
  1057. bool flips = false;
  1058. if (MP_SIGN(a) == MP_NEG) {
  1059. if (b % 2 == 0) {
  1060. return MP_UNDEF; /* root does not exist for negative a with even b */
  1061. } else {
  1062. flips = true;
  1063. }
  1064. }
  1065. DECLARE_TEMP(5);
  1066. REQUIRE(mp_int_copy(a, TEMP(0)));
  1067. REQUIRE(mp_int_copy(a, TEMP(1)));
  1068. TEMP(0)->sign = MP_ZPOS;
  1069. TEMP(1)->sign = MP_ZPOS;
  1070. for (;;) {
  1071. REQUIRE(mp_int_expt(TEMP(1), b, TEMP(2)));
  1072. if (mp_int_compare_unsigned(TEMP(2), TEMP(0)) <= 0) break;
  1073. REQUIRE(mp_int_sub(TEMP(2), TEMP(0), TEMP(2)));
  1074. REQUIRE(mp_int_expt(TEMP(1), b - 1, TEMP(3)));
  1075. REQUIRE(mp_int_mul_value(TEMP(3), b, TEMP(3)));
  1076. REQUIRE(mp_int_div(TEMP(2), TEMP(3), TEMP(4), NULL));
  1077. REQUIRE(mp_int_sub(TEMP(1), TEMP(4), TEMP(4)));
  1078. if (mp_int_compare_unsigned(TEMP(1), TEMP(4)) == 0) {
  1079. REQUIRE(mp_int_sub_value(TEMP(4), 1, TEMP(4)));
  1080. }
  1081. REQUIRE(mp_int_copy(TEMP(4), TEMP(1)));
  1082. }
  1083. REQUIRE(mp_int_copy(TEMP(1), c));
  1084. /* If the original value of a was negative, flip the output sign. */
  1085. if (flips) (void)mp_int_neg(c, c); /* cannot fail */
  1086. CLEANUP_TEMP();
  1087. return MP_OK;
  1088. }
  1089. mp_result mp_int_to_int(mp_int z, mp_small *out) {
  1090. assert(z != NULL);
  1091. /* Make sure the value is representable as a small integer */
  1092. mp_sign sz = MP_SIGN(z);
  1093. if ((sz == MP_ZPOS && mp_int_compare_value(z, MP_SMALL_MAX) > 0) ||
  1094. mp_int_compare_value(z, MP_SMALL_MIN) < 0) {
  1095. return MP_RANGE;
  1096. }
  1097. mp_usmall uz = MP_USED(z);
  1098. mp_digit *dz = MP_DIGITS(z) + uz - 1;
  1099. mp_small uv = 0;
  1100. while (uz > 0) {
  1101. uv <<= MP_DIGIT_BIT / 2;
  1102. uv = (uv << (MP_DIGIT_BIT / 2)) | *dz--;
  1103. --uz;
  1104. }
  1105. if (out) *out = (mp_small)((sz == MP_NEG) ? -uv : uv);
  1106. return MP_OK;
  1107. }
  1108. mp_result mp_int_to_uint(mp_int z, mp_usmall *out) {
  1109. assert(z != NULL);
  1110. /* Make sure the value is representable as an unsigned small integer */
  1111. mp_size sz = MP_SIGN(z);
  1112. if (sz == MP_NEG || mp_int_compare_uvalue(z, MP_USMALL_MAX) > 0) {
  1113. return MP_RANGE;
  1114. }
  1115. mp_size uz = MP_USED(z);
  1116. mp_digit *dz = MP_DIGITS(z) + uz - 1;
  1117. mp_usmall uv = 0;
  1118. while (uz > 0) {
  1119. uv <<= MP_DIGIT_BIT / 2;
  1120. uv = (uv << (MP_DIGIT_BIT / 2)) | *dz--;
  1121. --uz;
  1122. }
  1123. if (out) *out = uv;
  1124. return MP_OK;
  1125. }
  1126. mp_result mp_int_to_string(mp_int z, mp_size radix, char *str, int limit) {
  1127. assert(z != NULL && str != NULL && limit >= 2);
  1128. assert(radix >= MP_MIN_RADIX && radix <= MP_MAX_RADIX);
  1129. int cmp = 0;
  1130. if (CMPZ(z) == 0) {
  1131. *str++ = s_val2ch(0, 1);
  1132. } else {
  1133. mp_result res;
  1134. mpz_t tmp;
  1135. char *h, *t;
  1136. if ((res = mp_int_init_copy(&tmp, z)) != MP_OK) return res;
  1137. if (MP_SIGN(z) == MP_NEG) {
  1138. *str++ = '-';
  1139. --limit;
  1140. }
  1141. h = str;
  1142. /* Generate digits in reverse order until finished or limit reached */
  1143. for (/* */; limit > 0; --limit) {
  1144. mp_digit d;
  1145. if ((cmp = CMPZ(&tmp)) == 0) break;
  1146. d = s_ddiv(&tmp, (mp_digit)radix);
  1147. *str++ = s_val2ch(d, 1);
  1148. }
  1149. t = str - 1;
  1150. /* Put digits back in correct output order */
  1151. while (h < t) {
  1152. char tc = *h;
  1153. *h++ = *t;
  1154. *t-- = tc;
  1155. }
  1156. mp_int_clear(&tmp);
  1157. }
  1158. *str = '\0';
  1159. if (cmp == 0) {
  1160. return MP_OK;
  1161. } else {
  1162. return MP_TRUNC;
  1163. }
  1164. }
  1165. mp_result mp_int_string_len(mp_int z, mp_size radix) {
  1166. assert(z != NULL);
  1167. assert(radix >= MP_MIN_RADIX && radix <= MP_MAX_RADIX);
  1168. int len = s_outlen(z, radix) + 1; /* for terminator */
  1169. /* Allow for sign marker on negatives */
  1170. if (MP_SIGN(z) == MP_NEG) len += 1;
  1171. return len;
  1172. }
  1173. /* Read zero-terminated string into z */
  1174. mp_result mp_int_read_string(mp_int z, mp_size radix, const char *str) {
  1175. return mp_int_read_cstring(z, radix, str, NULL);
  1176. }
  1177. mp_result mp_int_read_cstring(mp_int z, mp_size radix, const char *str,
  1178. char **end) {
  1179. assert(z != NULL && str != NULL);
  1180. assert(radix >= MP_MIN_RADIX && radix <= MP_MAX_RADIX);
  1181. /* Skip leading whitespace */
  1182. while (isspace((unsigned char)*str)) ++str;
  1183. /* Handle leading sign tag (+/-, positive default) */
  1184. switch (*str) {
  1185. case '-':
  1186. z->sign = MP_NEG;
  1187. ++str;
  1188. break;
  1189. case '+':
  1190. ++str; /* fallthrough */
  1191. default:
  1192. z->sign = MP_ZPOS;
  1193. break;
  1194. }
  1195. /* Skip leading zeroes */
  1196. int ch;
  1197. while ((ch = s_ch2val(*str, radix)) == 0) ++str;
  1198. /* Make sure there is enough space for the value */
  1199. if (!s_pad(z, s_inlen(strlen(str), radix))) return MP_MEMORY;
  1200. z->used = 1;
  1201. z->digits[0] = 0;
  1202. while (*str != '\0' && ((ch = s_ch2val(*str, radix)) >= 0)) {
  1203. s_dmul(z, (mp_digit)radix);
  1204. s_dadd(z, (mp_digit)ch);
  1205. ++str;
  1206. }
  1207. CLAMP(z);
  1208. /* Override sign for zero, even if negative specified. */
  1209. if (CMPZ(z) == 0) z->sign = MP_ZPOS;
  1210. if (end != NULL) *end = (char *)str;
  1211. /* Return a truncation error if the string has unprocessed characters
  1212. remaining, so the caller can tell if the whole string was done */
  1213. if (*str != '\0') {
  1214. return MP_TRUNC;
  1215. } else {
  1216. return MP_OK;
  1217. }
  1218. }
  1219. mp_result mp_int_count_bits(mp_int z) {
  1220. assert(z != NULL);
  1221. mp_size uz = MP_USED(z);
  1222. if (uz == 1 && z->digits[0] == 0) return 1;
  1223. --uz;
  1224. mp_size nbits = uz * MP_DIGIT_BIT;
  1225. mp_digit d = z->digits[uz];
  1226. while (d != 0) {
  1227. d >>= 1;
  1228. ++nbits;
  1229. }
  1230. return nbits;
  1231. }
  1232. mp_result mp_int_to_binary(mp_int z, unsigned char *buf, int limit) {
  1233. static const int PAD_FOR_2C = 1;
  1234. assert(z != NULL && buf != NULL);
  1235. int limpos = limit;
  1236. mp_result res = s_tobin(z, buf, &limpos, PAD_FOR_2C);
  1237. if (MP_SIGN(z) == MP_NEG) s_2comp(buf, limpos);
  1238. return res;
  1239. }
  1240. mp_result mp_int_read_binary(mp_int z, unsigned char *buf, int len) {
  1241. assert(z != NULL && buf != NULL && len > 0);
  1242. /* Figure out how many digits are needed to represent this value */
  1243. mp_size need = ((len * CHAR_BIT) + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT;
  1244. if (!s_pad(z, need)) return MP_MEMORY;
  1245. mp_int_zero(z);
  1246. /* If the high-order bit is set, take the 2's complement before reading the
  1247. value (it will be restored afterward) */
  1248. if (buf[0] >> (CHAR_BIT - 1)) {
  1249. z->sign = MP_NEG;
  1250. s_2comp(buf, len);
  1251. }
  1252. mp_digit *dz = MP_DIGITS(z);
  1253. unsigned char *tmp = buf;
  1254. for (int i = len; i > 0; --i, ++tmp) {
  1255. s_qmul(z, (mp_size)CHAR_BIT);
  1256. *dz |= *tmp;
  1257. }
  1258. /* Restore 2's complement if we took it before */
  1259. if (MP_SIGN(z) == MP_NEG) s_2comp(buf, len);
  1260. return MP_OK;
  1261. }
  1262. mp_result mp_int_binary_len(mp_int z) {
  1263. mp_result res = mp_int_count_bits(z);
  1264. if (res <= 0) return res;
  1265. int bytes = mp_int_unsigned_len(z);
  1266. /* If the highest-order bit falls exactly on a byte boundary, we need to pad
  1267. with an extra byte so that the sign will be read correctly when reading it
  1268. back in. */
  1269. if (bytes * CHAR_BIT == res) ++bytes;
  1270. return bytes;
  1271. }
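/* Illustrative round trip through the two's complement encoding; a sketch,
   not library code, assuming the public API from imath.h:

     mpz_t x, y;
     unsigned char buf[8];
     mp_int_init(&x);
     mp_int_init(&y);
     mp_int_set_value(&x, -5);
     int need = mp_int_binary_len(&x);  // 1: -5 encodes as the single byte 0xFB
     if (need > 0 && need <= (int)sizeof(buf) &&
         mp_int_to_binary(&x, buf, need) == MP_OK &&
         mp_int_read_binary(&y, buf, need) == MP_OK) {
       // x and y now compare equal
     }
     mp_int_clear(&x);
     mp_int_clear(&y);
*/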
  1272. mp_result mp_int_to_unsigned(mp_int z, unsigned char *buf, int limit) {
  1273. static const int NO_PADDING = 0;
  1274. assert(z != NULL && buf != NULL);
  1275. return s_tobin(z, buf, &limit, NO_PADDING);
  1276. }
  1277. mp_result mp_int_read_unsigned(mp_int z, unsigned char *buf, int len) {
  1278. assert(z != NULL && buf != NULL && len > 0);
  1279. /* Figure out how many digits are needed to represent this value */
  1280. mp_size need = ((len * CHAR_BIT) + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT;
  1281. if (!s_pad(z, need)) return MP_MEMORY;
  1282. mp_int_zero(z);
  1283. unsigned char *tmp = buf;
  1284. for (int i = len; i > 0; --i, ++tmp) {
  1285. (void)s_qmul(z, CHAR_BIT);
  1286. *MP_DIGITS(z) |= *tmp;
  1287. }
  1288. return MP_OK;
  1289. }
  1290. mp_result mp_int_unsigned_len(mp_int z) {
  1291. mp_result res = mp_int_count_bits(z);
  1292. if (res <= 0) return res;
  1293. int bytes = (res + (CHAR_BIT - 1)) / CHAR_BIT;
  1294. return bytes;
  1295. }
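/* Informal check: 255 occupies 8 bits, so mp_int_unsigned_len() reports 1
   byte, while mp_int_binary_len() reports 2 for the same value because the
   high bit of 0xFF would otherwise read back as a sign bit. */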
  1296. const char *mp_error_string(mp_result res) {
  1297. if (res > 0) return s_unknown_err;
  1298. res = -res;
  1299. int ix;
  1300. for (ix = 0; ix < res && s_error_msg[ix] != NULL; ++ix)
  1301. ;
  1302. if (s_error_msg[ix] != NULL) {
  1303. return s_error_msg[ix];
  1304. } else {
  1305. return s_unknown_err;
  1306. }
  1307. }
  1308. /*------------------------------------------------------------------------*/
  1309. /* Private functions for internal use. These make assumptions. */
  1310. #if DEBUG
  1311. static const mp_digit fill = (mp_digit)0xdeadbeefabad1dea;
  1312. #endif
  1313. static mp_digit *s_alloc(mp_size num) {
  1314. mp_digit *out = malloc(num * sizeof(mp_digit));
  1315. assert(out != NULL);
  1316. #if DEBUG
  1317. for (mp_size ix = 0; ix < num; ++ix) out[ix] = fill;
  1318. #endif
  1319. return out;
  1320. }
  1321. static mp_digit *s_realloc(mp_digit *old, mp_size osize, mp_size nsize) {
  1322. #if DEBUG
  1323. mp_digit *new = s_alloc(nsize);
  1324. assert(new != NULL);
  1325. for (mp_size ix = 0; ix < nsize; ++ix) new[ix] = fill;
1326. memcpy(new, old, osize * sizeof(mp_digit));
free(old); /* the realloc() in the non-DEBUG branch releases old for us */
  1327. #else
  1328. mp_digit *new = realloc(old, nsize * sizeof(mp_digit));
  1329. assert(new != NULL);
  1330. #endif
  1331. return new;
  1332. }
  1333. static void s_free(void *ptr) { free(ptr); }
  1334. static bool s_pad(mp_int z, mp_size min) {
  1335. if (MP_ALLOC(z) < min) {
  1336. mp_size nsize = s_round_prec(min);
  1337. mp_digit *tmp;
  1338. if (z->digits == &(z->single)) {
  1339. if ((tmp = s_alloc(nsize)) == NULL) return false;
  1340. tmp[0] = z->single;
  1341. } else if ((tmp = s_realloc(MP_DIGITS(z), MP_ALLOC(z), nsize)) == NULL) {
  1342. return false;
  1343. }
  1344. z->digits = tmp;
  1345. z->alloc = nsize;
  1346. }
  1347. return true;
  1348. }
  1349. /* Note: This will not work correctly when value == MP_SMALL_MIN */
  1350. static void s_fake(mp_int z, mp_small value, mp_digit vbuf[]) {
1351. mp_usmall uv = (value < 0) ? -(mp_usmall)value : (mp_usmall)value;
  1352. s_ufake(z, uv, vbuf);
  1353. if (value < 0) z->sign = MP_NEG;
  1354. }
  1355. static void s_ufake(mp_int z, mp_usmall value, mp_digit vbuf[]) {
  1356. mp_size ndig = (mp_size)s_uvpack(value, vbuf);
  1357. z->used = ndig;
  1358. z->alloc = MP_VALUE_DIGITS(value);
  1359. z->sign = MP_ZPOS;
  1360. z->digits = vbuf;
  1361. }
  1362. static int s_cdig(mp_digit *da, mp_digit *db, mp_size len) {
  1363. mp_digit *dat = da + len - 1, *dbt = db + len - 1;
  1364. for (/* */; len != 0; --len, --dat, --dbt) {
  1365. if (*dat > *dbt) {
  1366. return 1;
  1367. } else if (*dat < *dbt) {
  1368. return -1;
  1369. }
  1370. }
  1371. return 0;
  1372. }
  1373. static int s_uvpack(mp_usmall uv, mp_digit t[]) {
  1374. int ndig = 0;
  1375. if (uv == 0)
  1376. t[ndig++] = 0;
  1377. else {
  1378. while (uv != 0) {
  1379. t[ndig++] = (mp_digit)uv;
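/* The shift below is split into two half-digit steps so that it remains well
   defined even if mp_usmall is no wider than mp_digit, in which case a single
   shift by MP_DIGIT_BIT would be undefined behavior. */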
  1380. uv >>= MP_DIGIT_BIT / 2;
  1381. uv >>= MP_DIGIT_BIT / 2;
  1382. }
  1383. }
  1384. return ndig;
  1385. }
  1386. static int s_ucmp(mp_int a, mp_int b) {
  1387. mp_size ua = MP_USED(a), ub = MP_USED(b);
  1388. if (ua > ub) {
  1389. return 1;
  1390. } else if (ub > ua) {
  1391. return -1;
  1392. } else {
  1393. return s_cdig(MP_DIGITS(a), MP_DIGITS(b), ua);
  1394. }
  1395. }
  1396. static int s_vcmp(mp_int a, mp_small v) {
  1397. mp_usmall uv = (v < 0) ? -(mp_usmall)v : (mp_usmall)v;
  1398. return s_uvcmp(a, uv);
  1399. }
  1400. static int s_uvcmp(mp_int a, mp_usmall uv) {
  1401. mpz_t vtmp;
  1402. mp_digit vdig[MP_VALUE_DIGITS(uv)];
  1403. s_ufake(&vtmp, uv, vdig);
  1404. return s_ucmp(a, &vtmp);
  1405. }
  1406. static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  1407. mp_size size_b) {
  1408. mp_size pos;
  1409. mp_word w = 0;
1410. /* Ensure that da is the longer of the two to simplify later code */
  1411. if (size_b > size_a) {
  1412. SWAP(mp_digit *, da, db);
  1413. SWAP(mp_size, size_a, size_b);
  1414. }
  1415. /* Add corresponding digits until the shorter number runs out */
  1416. for (pos = 0; pos < size_b; ++pos, ++da, ++db, ++dc) {
  1417. w = w + (mp_word)*da + (mp_word)*db;
  1418. *dc = LOWER_HALF(w);
  1419. w = UPPER_HALF(w);
  1420. }
  1421. /* Propagate carries as far as necessary */
  1422. for (/* */; pos < size_a; ++pos, ++da, ++dc) {
  1423. w = w + *da;
  1424. *dc = LOWER_HALF(w);
  1425. w = UPPER_HALF(w);
  1426. }
  1427. /* Return carry out */
  1428. return (mp_digit)w;
  1429. }
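/* Small worked example, assuming 32-bit digits: adding da = {0xFFFFFFFF} to
   db = {0x00000001} stores dc = {0x00000000} and returns a carry of 1, which
   the caller is responsible for placing in the next digit position. */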
  1430. static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  1431. mp_size size_b) {
  1432. mp_size pos;
  1433. mp_word w = 0;
  1434. /* We assume that |a| >= |b| so this should definitely hold */
  1435. assert(size_a >= size_b);
  1436. /* Subtract corresponding digits and propagate borrow */
  1437. for (pos = 0; pos < size_b; ++pos, ++da, ++db, ++dc) {
  1438. w = ((mp_word)MP_DIGIT_MAX + 1 + /* MP_RADIX */
  1439. (mp_word)*da) -
  1440. w - (mp_word)*db;
  1441. *dc = LOWER_HALF(w);
  1442. w = (UPPER_HALF(w) == 0);
  1443. }
  1444. /* Finish the subtraction for remaining upper digits of da */
  1445. for (/* */; pos < size_a; ++pos, ++da, ++dc) {
  1446. w = ((mp_word)MP_DIGIT_MAX + 1 + /* MP_RADIX */
  1447. (mp_word)*da) -
  1448. w;
  1449. *dc = LOWER_HALF(w);
  1450. w = (UPPER_HALF(w) == 0);
  1451. }
  1452. /* If there is a borrow out at the end, it violates the precondition */
  1453. assert(w == 0);
  1454. }
  1455. static int s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  1456. mp_size size_b) {
  1457. mp_size bot_size;
  1458. /* Make sure b is the smaller of the two input values */
  1459. if (size_b > size_a) {
  1460. SWAP(mp_digit *, da, db);
  1461. SWAP(mp_size, size_a, size_b);
  1462. }
1463. /* Ensure that the bottom is the larger half in an odd-length split; the code
  1464. below relies on this being true.
  1465. */
  1466. bot_size = (size_a + 1) / 2;
  1467. /* If the values are big enough to bother with recursion, use the Karatsuba
  1468. algorithm to compute the product; otherwise use the normal multiplication
  1469. algorithm
  1470. */
  1471. if (multiply_threshold && size_a >= multiply_threshold && size_b > bot_size) {
  1472. mp_digit *t1, *t2, *t3, carry;
  1473. mp_digit *a_top = da + bot_size;
  1474. mp_digit *b_top = db + bot_size;
  1475. mp_size at_size = size_a - bot_size;
  1476. mp_size bt_size = size_b - bot_size;
  1477. mp_size buf_size = 2 * bot_size;
  1478. /* Do a single allocation for all three temporary buffers needed; each
  1479. buffer must be big enough to hold the product of two bottom halves, and
  1480. one buffer needs space for the completed product; twice the space is
  1481. plenty.
  1482. */
  1483. if ((t1 = s_alloc(4 * buf_size)) == NULL) return 0;
  1484. t2 = t1 + buf_size;
  1485. t3 = t2 + buf_size;
  1486. ZERO(t1, 4 * buf_size);
  1487. /* t1 and t2 are initially used as temporaries to compute the inner product
  1488. (a1 + a0)(b1 + b0) = a1b1 + a1b0 + a0b1 + a0b0
  1489. */
  1490. carry = s_uadd(da, a_top, t1, bot_size, at_size); /* t1 = a1 + a0 */
  1491. t1[bot_size] = carry;
  1492. carry = s_uadd(db, b_top, t2, bot_size, bt_size); /* t2 = b1 + b0 */
  1493. t2[bot_size] = carry;
  1494. (void)s_kmul(t1, t2, t3, bot_size + 1, bot_size + 1); /* t3 = t1 * t2 */
  1495. /* Now we'll get t1 = a0b0 and t2 = a1b1, and subtract them out so that
  1496. we're left with only the pieces we want: t3 = a1b0 + a0b1
  1497. */
  1498. ZERO(t1, buf_size);
  1499. ZERO(t2, buf_size);
  1500. (void)s_kmul(da, db, t1, bot_size, bot_size); /* t1 = a0 * b0 */
  1501. (void)s_kmul(a_top, b_top, t2, at_size, bt_size); /* t2 = a1 * b1 */
  1502. /* Subtract out t1 and t2 to get the inner product */
  1503. s_usub(t3, t1, t3, buf_size + 2, buf_size);
  1504. s_usub(t3, t2, t3, buf_size + 2, buf_size);
  1505. /* Assemble the output value */
  1506. COPY(t1, dc, buf_size);
  1507. carry = s_uadd(t3, dc + bot_size, dc + bot_size, buf_size + 1, buf_size);
  1508. assert(carry == 0);
  1509. carry =
  1510. s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, buf_size, buf_size);
  1511. assert(carry == 0);
  1512. s_free(t1); /* note t2 and t3 are just internal pointers to t1 */
  1513. } else {
  1514. s_umul(da, db, dc, size_a, size_b);
  1515. }
  1516. return 1;
  1517. }
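/* The recombination above follows the Karatsuba identity. Writing
   a = a1*B^h + a0 and b = b1*B^h + b0, with B = 2^MP_DIGIT_BIT and
   h = bot_size:
     a*b = a1*b1*B^(2h) + ((a1+a0)*(b1+b0) - a1*b1 - a0*b0)*B^h + a0*b0
   so three half-sized products (t1 = a0*b0, t2 = a1*b1, and the inner product
   left in t3) do the work of the four required by the schoolbook method. */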
  1518. static void s_umul(mp_digit *da, mp_digit *db, mp_digit *dc, mp_size size_a,
  1519. mp_size size_b) {
  1520. mp_size a, b;
  1521. mp_word w;
  1522. for (a = 0; a < size_a; ++a, ++dc, ++da) {
  1523. mp_digit *dct = dc;
  1524. mp_digit *dbt = db;
  1525. if (*da == 0) continue;
  1526. w = 0;
  1527. for (b = 0; b < size_b; ++b, ++dbt, ++dct) {
  1528. w = (mp_word)*da * (mp_word)*dbt + w + (mp_word)*dct;
  1529. *dct = LOWER_HALF(w);
  1530. w = UPPER_HALF(w);
  1531. }
  1532. *dct = (mp_digit)w;
  1533. }
  1534. }
  1535. static int s_ksqr(mp_digit *da, mp_digit *dc, mp_size size_a) {
  1536. if (multiply_threshold && size_a > multiply_threshold) {
  1537. mp_size bot_size = (size_a + 1) / 2;
  1538. mp_digit *a_top = da + bot_size;
  1539. mp_digit *t1, *t2, *t3, carry;
  1540. mp_size at_size = size_a - bot_size;
  1541. mp_size buf_size = 2 * bot_size;
  1542. if ((t1 = s_alloc(4 * buf_size)) == NULL) return 0;
  1543. t2 = t1 + buf_size;
  1544. t3 = t2 + buf_size;
  1545. ZERO(t1, 4 * buf_size);
  1546. (void)s_ksqr(da, t1, bot_size); /* t1 = a0 ^ 2 */
  1547. (void)s_ksqr(a_top, t2, at_size); /* t2 = a1 ^ 2 */
  1548. (void)s_kmul(da, a_top, t3, bot_size, at_size); /* t3 = a0 * a1 */
  1549. /* Quick multiply t3 by 2, shifting left (can't overflow) */
  1550. {
  1551. int i, top = bot_size + at_size;
  1552. mp_word w, save = 0;
  1553. for (i = 0; i < top; ++i) {
  1554. w = t3[i];
  1555. w = (w << 1) | save;
  1556. t3[i] = LOWER_HALF(w);
  1557. save = UPPER_HALF(w);
  1558. }
  1559. t3[i] = LOWER_HALF(save);
  1560. }
  1561. /* Assemble the output value */
  1562. COPY(t1, dc, 2 * bot_size);
  1563. carry = s_uadd(t3, dc + bot_size, dc + bot_size, buf_size + 1, buf_size);
  1564. assert(carry == 0);
  1565. carry =
  1566. s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, buf_size, buf_size);
  1567. assert(carry == 0);
1568. s_free(t1); /* note that t2 and t3 are internal pointers only */
  1569. } else {
  1570. s_usqr(da, dc, size_a);
  1571. }
  1572. return 1;
  1573. }
  1574. static void s_usqr(mp_digit *da, mp_digit *dc, mp_size size_a) {
  1575. mp_size i, j;
  1576. mp_word w;
  1577. for (i = 0; i < size_a; ++i, dc += 2, ++da) {
  1578. mp_digit *dct = dc, *dat = da;
  1579. if (*da == 0) continue;
  1580. /* Take care of the first digit, no rollover */
  1581. w = (mp_word)*dat * (mp_word)*dat + (mp_word)*dct;
  1582. *dct = LOWER_HALF(w);
  1583. w = UPPER_HALF(w);
  1584. ++dat;
  1585. ++dct;
  1586. for (j = i + 1; j < size_a; ++j, ++dat, ++dct) {
  1587. mp_word t = (mp_word)*da * (mp_word)*dat;
  1588. mp_word u = w + (mp_word)*dct, ov = 0;
  1589. /* Check if doubling t will overflow a word */
  1590. if (HIGH_BIT_SET(t)) ov = 1;
  1591. w = t + t;
  1592. /* Check if adding u to w will overflow a word */
  1593. if (ADD_WILL_OVERFLOW(w, u)) ov = 1;
  1594. w += u;
  1595. *dct = LOWER_HALF(w);
  1596. w = UPPER_HALF(w);
  1597. if (ov) {
  1598. w += MP_DIGIT_MAX; /* MP_RADIX */
  1599. ++w;
  1600. }
  1601. }
  1602. w = w + *dct;
  1603. *dct = (mp_digit)w;
  1604. while ((w = UPPER_HALF(w)) != 0) {
  1605. ++dct;
  1606. w = w + *dct;
  1607. *dct = LOWER_HALF(w);
  1608. }
  1609. assert(w == 0);
  1610. }
  1611. }
  1612. static void s_dadd(mp_int a, mp_digit b) {
  1613. mp_word w = 0;
  1614. mp_digit *da = MP_DIGITS(a);
  1615. mp_size ua = MP_USED(a);
  1616. w = (mp_word)*da + b;
  1617. *da++ = LOWER_HALF(w);
  1618. w = UPPER_HALF(w);
  1619. for (ua -= 1; ua > 0; --ua, ++da) {
  1620. w = (mp_word)*da + w;
  1621. *da = LOWER_HALF(w);
  1622. w = UPPER_HALF(w);
  1623. }
  1624. if (w) {
  1625. *da = (mp_digit)w;
  1626. a->used += 1;
  1627. }
  1628. }
  1629. static void s_dmul(mp_int a, mp_digit b) {
  1630. mp_word w = 0;
  1631. mp_digit *da = MP_DIGITS(a);
  1632. mp_size ua = MP_USED(a);
  1633. while (ua > 0) {
  1634. w = (mp_word)*da * b + w;
  1635. *da++ = LOWER_HALF(w);
  1636. w = UPPER_HALF(w);
  1637. --ua;
  1638. }
  1639. if (w) {
  1640. *da = (mp_digit)w;
  1641. a->used += 1;
  1642. }
  1643. }
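/* Together, s_dadd() and s_dmul() give mp_int_read_cstring() its Horner-style
   update: after each character, z becomes z * radix + digit. */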
  1644. static void s_dbmul(mp_digit *da, mp_digit b, mp_digit *dc, mp_size size_a) {
  1645. mp_word w = 0;
  1646. while (size_a > 0) {
  1647. w = (mp_word)*da++ * (mp_word)b + w;
  1648. *dc++ = LOWER_HALF(w);
  1649. w = UPPER_HALF(w);
  1650. --size_a;
  1651. }
  1652. if (w) *dc = LOWER_HALF(w);
  1653. }
  1654. static mp_digit s_ddiv(mp_int a, mp_digit b) {
  1655. mp_word w = 0, qdigit;
  1656. mp_size ua = MP_USED(a);
  1657. mp_digit *da = MP_DIGITS(a) + ua - 1;
  1658. for (/* */; ua > 0; --ua, --da) {
  1659. w = (w << MP_DIGIT_BIT) | *da;
  1660. if (w >= b) {
  1661. qdigit = w / b;
  1662. w = w % b;
  1663. } else {
  1664. qdigit = 0;
  1665. }
  1666. *da = (mp_digit)qdigit;
  1667. }
  1668. CLAMP(a);
  1669. return (mp_digit)w;
  1670. }
  1671. static void s_qdiv(mp_int z, mp_size p2) {
  1672. mp_size ndig = p2 / MP_DIGIT_BIT, nbits = p2 % MP_DIGIT_BIT;
  1673. mp_size uz = MP_USED(z);
  1674. if (ndig) {
  1675. mp_size mark;
  1676. mp_digit *to, *from;
  1677. if (ndig >= uz) {
  1678. mp_int_zero(z);
  1679. return;
  1680. }
  1681. to = MP_DIGITS(z);
  1682. from = to + ndig;
  1683. for (mark = ndig; mark < uz; ++mark) {
  1684. *to++ = *from++;
  1685. }
  1686. z->used = uz - ndig;
  1687. }
  1688. if (nbits) {
  1689. mp_digit d = 0, *dz, save;
  1690. mp_size up = MP_DIGIT_BIT - nbits;
  1691. uz = MP_USED(z);
  1692. dz = MP_DIGITS(z) + uz - 1;
  1693. for (/* */; uz > 0; --uz, --dz) {
  1694. save = *dz;
  1695. *dz = (*dz >> nbits) | (d << up);
  1696. d = save;
  1697. }
  1698. CLAMP(z);
  1699. }
  1700. if (MP_USED(z) == 1 && z->digits[0] == 0) z->sign = MP_ZPOS;
  1701. }
  1702. static void s_qmod(mp_int z, mp_size p2) {
  1703. mp_size start = p2 / MP_DIGIT_BIT + 1, rest = p2 % MP_DIGIT_BIT;
  1704. mp_size uz = MP_USED(z);
  1705. mp_digit mask = (1u << rest) - 1;
  1706. if (start <= uz) {
  1707. z->used = start;
  1708. z->digits[start - 1] &= mask;
  1709. CLAMP(z);
  1710. }
  1711. }
  1712. static int s_qmul(mp_int z, mp_size p2) {
  1713. mp_size uz, need, rest, extra, i;
  1714. mp_digit *from, *to, d;
  1715. if (p2 == 0) return 1;
  1716. uz = MP_USED(z);
  1717. need = p2 / MP_DIGIT_BIT;
  1718. rest = p2 % MP_DIGIT_BIT;
  1719. /* Figure out if we need an extra digit at the top end; this occurs if the
  1720. topmost `rest' bits of the high-order digit of z are not zero, meaning
  1721. they will be shifted off the end if not preserved */
  1722. extra = 0;
  1723. if (rest != 0) {
  1724. mp_digit *dz = MP_DIGITS(z) + uz - 1;
  1725. if ((*dz >> (MP_DIGIT_BIT - rest)) != 0) extra = 1;
  1726. }
  1727. if (!s_pad(z, uz + need + extra)) return 0;
1728. /* If we need to shift by whole digits, do that in one pass, then
1729. go back and shift by partial digits.
  1730. */
  1731. if (need > 0) {
  1732. from = MP_DIGITS(z) + uz - 1;
  1733. to = from + need;
  1734. for (i = 0; i < uz; ++i) *to-- = *from--;
  1735. ZERO(MP_DIGITS(z), need);
  1736. uz += need;
  1737. }
  1738. if (rest) {
  1739. d = 0;
  1740. for (i = need, from = MP_DIGITS(z) + need; i < uz; ++i, ++from) {
  1741. mp_digit save = *from;
  1742. *from = (*from << rest) | (d >> (MP_DIGIT_BIT - rest));
  1743. d = save;
  1744. }
  1745. d >>= (MP_DIGIT_BIT - rest);
  1746. if (d != 0) {
  1747. *from = d;
  1748. uz += extra;
  1749. }
  1750. }
  1751. z->used = uz;
  1752. CLAMP(z);
  1753. return 1;
  1754. }
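/* Example, assuming 32-bit digits: s_qmul(z, 40) first moves every digit up
   by one whole digit position (32 bits) and zero-fills the vacated low digit,
   then shifts the result left by the remaining 8 bits, growing z by one extra
   digit only if the top 8 bits of the old high-order digit were nonzero. */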
  1755. /* Compute z = 2^p2 - |z|; requires that 2^p2 >= |z|
  1756. The sign of the result is always zero/positive.
  1757. */
  1758. static int s_qsub(mp_int z, mp_size p2) {
  1759. mp_digit hi = (1u << (p2 % MP_DIGIT_BIT)), *zp;
  1760. mp_size tdig = (p2 / MP_DIGIT_BIT), pos;
  1761. mp_word w = 0;
  1762. if (!s_pad(z, tdig + 1)) return 0;
  1763. for (pos = 0, zp = MP_DIGITS(z); pos < tdig; ++pos, ++zp) {
  1764. w = ((mp_word)MP_DIGIT_MAX + 1) - w - (mp_word)*zp;
  1765. *zp = LOWER_HALF(w);
  1766. w = UPPER_HALF(w) ? 0 : 1;
  1767. }
  1768. w = ((mp_word)MP_DIGIT_MAX + 1 + hi) - w - (mp_word)*zp;
  1769. *zp = LOWER_HALF(w);
  1770. assert(UPPER_HALF(w) != 0); /* no borrow out should be possible */
  1771. z->sign = MP_ZPOS;
  1772. CLAMP(z);
  1773. return 1;
  1774. }
  1775. static int s_dp2k(mp_int z) {
  1776. int k = 0;
  1777. mp_digit *dp = MP_DIGITS(z), d;
  1778. if (MP_USED(z) == 1 && *dp == 0) return 1;
  1779. while (*dp == 0) {
  1780. k += MP_DIGIT_BIT;
  1781. ++dp;
  1782. }
  1783. d = *dp;
  1784. while ((d & 1) == 0) {
  1785. d >>= 1;
  1786. ++k;
  1787. }
  1788. return k;
  1789. }
  1790. static int s_isp2(mp_int z) {
  1791. mp_size uz = MP_USED(z), k = 0;
  1792. mp_digit *dz = MP_DIGITS(z), d;
  1793. while (uz > 1) {
  1794. if (*dz++ != 0) return -1;
  1795. k += MP_DIGIT_BIT;
  1796. --uz;
  1797. }
  1798. d = *dz;
  1799. while (d > 1) {
  1800. if (d & 1) return -1;
  1801. ++k;
  1802. d >>= 1;
  1803. }
  1804. return (int)k;
  1805. }
  1806. static int s_2expt(mp_int z, mp_small k) {
  1807. mp_size ndig, rest;
  1808. mp_digit *dz;
  1809. ndig = (k + MP_DIGIT_BIT) / MP_DIGIT_BIT;
  1810. rest = k % MP_DIGIT_BIT;
  1811. if (!s_pad(z, ndig)) return 0;
  1812. dz = MP_DIGITS(z);
  1813. ZERO(dz, ndig);
  1814. *(dz + ndig - 1) = (1u << rest);
  1815. z->used = ndig;
  1816. return 1;
  1817. }
  1818. static int s_norm(mp_int a, mp_int b) {
  1819. mp_digit d = b->digits[MP_USED(b) - 1];
  1820. int k = 0;
  1821. while (d < (1u << (mp_digit)(MP_DIGIT_BIT - 1))) { /* d < (MP_RADIX / 2) */
  1822. d <<= 1;
  1823. ++k;
  1824. }
  1825. /* These multiplications can't fail */
  1826. if (k != 0) {
  1827. (void)s_qmul(a, (mp_size)k);
  1828. (void)s_qmul(b, (mp_size)k);
  1829. }
  1830. return k;
  1831. }
  1832. static mp_result s_brmu(mp_int z, mp_int m) {
  1833. mp_size um = MP_USED(m) * 2;
  1834. if (!s_pad(z, um)) return MP_MEMORY;
  1835. s_2expt(z, MP_DIGIT_BIT * um);
  1836. return mp_int_div(z, m, z, NULL);
  1837. }
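/* In Barrett's terms, s_brmu() computes mu = floor(b^(2k) / m), where
   b = 2^MP_DIGIT_BIT and k = MP_USED(m); s_reduce() below uses this constant
   to estimate quotients by multiplication instead of division. */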
  1838. static int s_reduce(mp_int x, mp_int m, mp_int mu, mp_int q1, mp_int q2) {
  1839. mp_size um = MP_USED(m), umb_p1, umb_m1;
  1840. umb_p1 = (um + 1) * MP_DIGIT_BIT;
  1841. umb_m1 = (um - 1) * MP_DIGIT_BIT;
  1842. if (mp_int_copy(x, q1) != MP_OK) return 0;
  1843. /* Compute q2 = floor((floor(x / b^(k-1)) * mu) / b^(k+1)) */
  1844. s_qdiv(q1, umb_m1);
  1845. UMUL(q1, mu, q2);
  1846. s_qdiv(q2, umb_p1);
  1847. /* Set x = x mod b^(k+1) */
  1848. s_qmod(x, umb_p1);
1849. /* Now, q is a guess for the quotient x / m.
  1850. Compute x - q * m mod b^(k+1), replacing x. This may be off
  1851. by a factor of 2m, but no more than that.
  1852. */
  1853. UMUL(q2, m, q1);
  1854. s_qmod(q1, umb_p1);
  1855. (void)mp_int_sub(x, q1, x); /* can't fail */
  1856. /* The result may be < 0; if it is, add b^(k+1) to pin it in the proper
  1857. range. */
  1858. if ((CMPZ(x) < 0) && !s_qsub(x, umb_p1)) return 0;
  1859. /* If x > m, we need to back it off until it is in range. This will be
  1860. required at most twice. */
  1861. if (mp_int_compare(x, m) >= 0) {
  1862. (void)mp_int_sub(x, m, x);
  1863. if (mp_int_compare(x, m) >= 0) {
  1864. (void)mp_int_sub(x, m, x);
  1865. }
  1866. }
  1867. /* At this point, x has been properly reduced. */
  1868. return 1;
  1869. }
  1870. /* Perform modular exponentiation using Barrett's method, where mu is the
  1871. reduction constant for m. Assumes a < m, b > 0. */
  1872. static mp_result s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c) {
  1873. mp_digit umu = MP_USED(mu);
  1874. mp_digit *db = MP_DIGITS(b);
  1875. mp_digit *dbt = db + MP_USED(b) - 1;
  1876. DECLARE_TEMP(3);
  1877. REQUIRE(GROW(TEMP(0), 4 * umu));
  1878. REQUIRE(GROW(TEMP(1), 4 * umu));
  1879. REQUIRE(GROW(TEMP(2), 4 * umu));
  1880. ZERO(TEMP(0)->digits, TEMP(0)->alloc);
  1881. ZERO(TEMP(1)->digits, TEMP(1)->alloc);
  1882. ZERO(TEMP(2)->digits, TEMP(2)->alloc);
  1883. (void)mp_int_set_value(c, 1);
  1884. /* Take care of low-order digits */
  1885. while (db < dbt) {
  1886. mp_digit d = *db;
  1887. for (int i = MP_DIGIT_BIT; i > 0; --i, d >>= 1) {
  1888. if (d & 1) {
  1889. /* The use of a second temporary avoids allocation */
  1890. UMUL(c, a, TEMP(0));
  1891. if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2))) {
  1892. REQUIRE(MP_MEMORY);
  1893. }
  1894. mp_int_copy(TEMP(0), c);
  1895. }
  1896. USQR(a, TEMP(0));
  1897. assert(MP_SIGN(TEMP(0)) == MP_ZPOS);
  1898. if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2))) {
  1899. REQUIRE(MP_MEMORY);
  1900. }
  1901. assert(MP_SIGN(TEMP(0)) == MP_ZPOS);
  1902. mp_int_copy(TEMP(0), a);
  1903. }
  1904. ++db;
  1905. }
  1906. /* Take care of highest-order digit */
  1907. mp_digit d = *dbt;
  1908. for (;;) {
  1909. if (d & 1) {
  1910. UMUL(c, a, TEMP(0));
  1911. if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2))) {
  1912. REQUIRE(MP_MEMORY);
  1913. }
  1914. mp_int_copy(TEMP(0), c);
  1915. }
  1916. d >>= 1;
  1917. if (!d) break;
  1918. USQR(a, TEMP(0));
  1919. if (!s_reduce(TEMP(0), m, mu, TEMP(1), TEMP(2))) {
  1920. REQUIRE(MP_MEMORY);
  1921. }
  1922. (void)mp_int_copy(TEMP(0), a);
  1923. }
  1924. CLEANUP_TEMP();
  1925. return MP_OK;
  1926. }
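/* s_embar() is binary (square-and-multiply) exponentiation, scanning the bits
   of b from least to most significant: c accumulates a^(2^i) for every set
   bit i, and each product and square is reduced mod m via s_reduce(). For
   b = 6 (binary 110), for example, c picks up a^2 and then a^4, leaving
   a^6 mod m. */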
  1927. /* Division of nonnegative integers
1928. This function implements the division algorithm for unsigned multi-precision
  1929. integers. The algorithm is based on Algorithm D from Knuth's "The Art of
  1930. Computer Programming", 3rd ed. 1998, pg 272-273.
  1931. We diverge from Knuth's algorithm in that we do not perform the subtraction
  1932. from the remainder until we have determined that we have the correct
1933. quotient digit. This makes our algorithm less efficient than Knuth's because
  1934. we might have to perform multiple multiplication and comparison steps before
  1935. the subtraction. The advantage is that it is easy to implement and ensure
  1936. correctness without worrying about underflow from the subtraction.
  1937. inputs: u a n+m digit integer in base b (b is 2^MP_DIGIT_BIT)
  1938. v a n digit integer in base b (b is 2^MP_DIGIT_BIT)
  1939. n >= 1
  1940. m >= 0
  1941. outputs: u / v stored in u
  1942. u % v stored in v
  1943. */
  1944. static mp_result s_udiv_knuth(mp_int u, mp_int v) {
  1945. /* Force signs to positive */
  1946. u->sign = MP_ZPOS;
  1947. v->sign = MP_ZPOS;
  1948. /* Use simple division algorithm when v is only one digit long */
  1949. if (MP_USED(v) == 1) {
  1950. mp_digit d, rem;
  1951. d = v->digits[0];
  1952. rem = s_ddiv(u, d);
  1953. mp_int_set_value(v, rem);
  1954. return MP_OK;
  1955. }
  1956. /* Algorithm D
  1957. The n and m variables are defined as used by Knuth.
1958. v is an n digit number with digits v_{n-1}..v_0.
1959. u is an n+m digit number with digits u_{m+n-1}..u_0.
  1960. We require that n > 1 and m >= 0
  1961. */
  1962. mp_size n = MP_USED(v);
  1963. mp_size m = MP_USED(u) - n;
  1964. assert(n > 1);
  1965. /* assert(m >= 0) follows because m is unsigned. */
  1966. /* D1: Normalize.
  1967. The normalization step provides the necessary condition for Theorem B,
  1968. which states that the quotient estimate for q_j, call it qhat
  1969. qhat = u_{j+n}u_{j+n-1} / v_{n-1}
  1970. is bounded by
  1971. qhat - 2 <= q_j <= qhat.
1972. That is, qhat is never less than the actual quotient digit q_j,
  1973. and it is never more than two larger than the actual quotient digit.
  1974. */
  1975. int k = s_norm(u, v);
  1976. /* Extend size of u by one if needed.
  1977. The algorithm begins with a value of u that has one more digit of input.
  1978. The normalization step sets u_{m+n}..u_0 = 2^k * u_{m+n-1}..u_0. If the
  1979. multiplication did not increase the number of digits of u, we need to add
  1980. a leading zero here.
  1981. */
  1982. if (k == 0 || MP_USED(u) != m + n + 1) {
  1983. if (!s_pad(u, m + n + 1)) return MP_MEMORY;
  1984. u->digits[m + n] = 0;
  1985. u->used = m + n + 1;
  1986. }
  1987. /* Add a leading 0 to v.
  1988. The multiplication in step D4 multiplies qhat * 0v_{n-1}..v_0. We need to
  1989. add the leading zero to v here to ensure that the multiplication will
  1990. produce the full n+1 digit result.
  1991. */
  1992. if (!s_pad(v, n + 1)) return MP_MEMORY;
  1993. v->digits[n] = 0;
  1994. /* Initialize temporary variables q and t.
  1995. q allocates space for m+1 digits to store the quotient digits
  1996. t allocates space for n+1 digits to hold the result of q_j*v
  1997. */
  1998. DECLARE_TEMP(2);
  1999. REQUIRE(GROW(TEMP(0), m + 1));
  2000. REQUIRE(GROW(TEMP(1), n + 1));
  2001. /* D2: Initialize j */
  2002. int j = m;
  2003. mpz_t r;
  2004. r.digits = MP_DIGITS(u) + j; /* The contents of r are shared with u */
  2005. r.used = n + 1;
  2006. r.sign = MP_ZPOS;
  2007. r.alloc = MP_ALLOC(u);
  2008. ZERO(TEMP(1)->digits, TEMP(1)->alloc);
  2009. /* Calculate the m+1 digits of the quotient result */
  2010. for (; j >= 0; j--) {
  2011. /* D3: Calculate q' */
  2012. /* r->digits is aligned to position j of the number u */
  2013. mp_word pfx, qhat;
  2014. pfx = r.digits[n];
  2015. pfx <<= MP_DIGIT_BIT / 2;
  2016. pfx <<= MP_DIGIT_BIT / 2;
  2017. pfx |= r.digits[n - 1]; /* pfx = u_{j+n}{j+n-1} */
  2018. qhat = pfx / v->digits[n - 1];
2019. /* Check to see if qhat >= b, and decrease qhat if so.
2020. Theorem B guarantees that qhat is at most 2 larger than the
  2021. actual value, so it is possible that qhat is greater than
  2022. the maximum value that will fit in a digit */
  2023. if (qhat > MP_DIGIT_MAX) qhat = MP_DIGIT_MAX;
  2024. /* D4,D5,D6: Multiply qhat * v and test for a correct value of q
2025. We proceed a bit differently from the way described by Knuth. This way is
2026. simpler but less efficient. Instead of doing the multiply and subtract
  2027. then checking for underflow, we first do the multiply of qhat * v and
  2028. see if it is larger than the current remainder r. If it is larger, we
  2029. decrease qhat by one and try again. We may need to decrease qhat one
  2030. more time before we get a value that is smaller than r.
2031. This way is less efficient than Knuth's because we do more multiplies, but
  2032. we do not need to worry about underflow this way.
  2033. */
  2034. /* t = qhat * v */
  2035. s_dbmul(MP_DIGITS(v), (mp_digit)qhat, TEMP(1)->digits, n + 1);
  2036. TEMP(1)->used = n + 1;
  2037. CLAMP(TEMP(1));
  2038. /* Clamp r for the comparison. Comparisons do not like leading zeros. */
  2039. CLAMP(&r);
  2040. if (s_ucmp(TEMP(1), &r) > 0) { /* would the remainder be negative? */
  2041. qhat -= 1; /* try a smaller q */
  2042. s_dbmul(MP_DIGITS(v), (mp_digit)qhat, TEMP(1)->digits, n + 1);
  2043. TEMP(1)->used = n + 1;
  2044. CLAMP(TEMP(1));
  2045. if (s_ucmp(TEMP(1), &r) > 0) { /* would the remainder be negative? */
  2046. assert(qhat > 0);
  2047. qhat -= 1; /* try a smaller q */
  2048. s_dbmul(MP_DIGITS(v), (mp_digit)qhat, TEMP(1)->digits, n + 1);
  2049. TEMP(1)->used = n + 1;
  2050. CLAMP(TEMP(1));
  2051. }
  2052. assert(s_ucmp(TEMP(1), &r) <= 0 && "The mathematics failed us.");
  2053. }
  2054. /* Unclamp r. The D algorithm expects r = u_{j+n}..u_j to always be n+1
  2055. digits long. */
  2056. r.used = n + 1;
  2057. /* D4: Multiply and subtract
  2058. Note: The multiply was completed above so we only need to subtract here.
  2059. */
  2060. s_usub(r.digits, TEMP(1)->digits, r.digits, r.used, TEMP(1)->used);
  2061. /* D5: Test remainder
  2062. Note: Not needed because we always check that qhat is the correct value
  2063. before performing the subtract. Value cast to mp_digit to prevent
  2064. warning, qhat has been clamped to MP_DIGIT_MAX
  2065. */
  2066. TEMP(0)->digits[j] = (mp_digit)qhat;
  2067. /* D6: Add back
  2068. Note: Not needed because we always check that qhat is the correct value
  2069. before performing the subtract.
  2070. */
  2071. /* D7: Loop on j */
  2072. r.digits--;
  2073. ZERO(TEMP(1)->digits, TEMP(1)->alloc);
  2074. }
  2075. /* Get rid of leading zeros in q */
  2076. TEMP(0)->used = m + 1;
  2077. CLAMP(TEMP(0));
  2078. /* Denormalize the remainder */
  2079. CLAMP(u); /* use u here because the r.digits pointer is off-by-one */
  2080. if (k != 0) s_qdiv(u, k);
  2081. mp_int_copy(u, v); /* ok: 0 <= r < v */
  2082. mp_int_copy(TEMP(0), u); /* ok: q <= u */
  2083. CLEANUP_TEMP();
  2084. return MP_OK;
  2085. }
  2086. static int s_outlen(mp_int z, mp_size r) {
  2087. assert(r >= MP_MIN_RADIX && r <= MP_MAX_RADIX);
  2088. mp_result bits = mp_int_count_bits(z);
  2089. double raw = (double)bits * s_log2[r];
  2090. return (int)(raw + 0.999999);
  2091. }
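/* Worked example: s_log2[r] (defined earlier in this file) holds roughly
   log base r of 2, about 0.30103 for r = 10, so a 10-bit value needs at most
   ceil(10 * 0.30103) = 4 characters in radix 10; the 0.999999 term above
   rounds the product up without calling ceil(). */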
  2092. static mp_size s_inlen(int len, mp_size r) {
  2093. double raw = (double)len / s_log2[r];
  2094. mp_size bits = (mp_size)(raw + 0.5);
  2095. return (mp_size)((bits + (MP_DIGIT_BIT - 1)) / MP_DIGIT_BIT) + 1;
  2096. }
  2097. static int s_ch2val(char c, int r) {
  2098. int out;
  2099. /*
  2100. * In some locales, isalpha() accepts characters outside the range A-Z,
  2101. * producing out<0 or out>=36. The "out >= r" check will always catch
  2102. * out>=36. Though nothing explicitly catches out<0, our caller reacts the
  2103. * same way to every negative return value.
  2104. */
  2105. if (isdigit((unsigned char)c))
  2106. out = c - '0';
  2107. else if (r > 10 && isalpha((unsigned char)c))
  2108. out = toupper((unsigned char)c) - 'A' + 10;
  2109. else
  2110. return -1;
  2111. return (out >= r) ? -1 : out;
  2112. }
  2113. static char s_val2ch(int v, int caps) {
  2114. assert(v >= 0);
  2115. if (v < 10) {
  2116. return v + '0';
  2117. } else {
  2118. char out = (v - 10) + 'a';
  2119. if (caps) {
  2120. return toupper((unsigned char)out);
  2121. } else {
  2122. return out;
  2123. }
  2124. }
  2125. }
  2126. static void s_2comp(unsigned char *buf, int len) {
  2127. unsigned short s = 1;
  2128. for (int i = len - 1; i >= 0; --i) {
  2129. unsigned char c = ~buf[i];
  2130. s = c + s;
  2131. c = s & UCHAR_MAX;
  2132. s >>= CHAR_BIT;
  2133. buf[i] = c;
  2134. }
  2135. /* last carry out is ignored */
  2136. }
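/* Worked example: the two-byte buffer {0x00, 0x05} (big-endian 5) becomes
   {0xFF, 0xFB}, which is -5 as a 16-bit two's complement value; applying
   s_2comp() a second time restores the original bytes. */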
  2137. static mp_result s_tobin(mp_int z, unsigned char *buf, int *limpos, int pad) {
  2138. int pos = 0, limit = *limpos;
  2139. mp_size uz = MP_USED(z);
  2140. mp_digit *dz = MP_DIGITS(z);
  2141. while (uz > 0 && pos < limit) {
  2142. mp_digit d = *dz++;
  2143. int i;
  2144. for (i = sizeof(mp_digit); i > 0 && pos < limit; --i) {
  2145. buf[pos++] = (unsigned char)d;
  2146. d >>= CHAR_BIT;
  2147. /* Don't write leading zeroes */
  2148. if (d == 0 && uz == 1) i = 0; /* exit loop without signaling truncation */
  2149. }
  2150. /* Detect truncation (loop exited with pos >= limit) */
  2151. if (i > 0) break;
  2152. --uz;
  2153. }
  2154. if (pad != 0 && (buf[pos - 1] >> (CHAR_BIT - 1))) {
  2155. if (pos < limit) {
  2156. buf[pos++] = 0;
  2157. } else {
  2158. uz = 1;
  2159. }
  2160. }
  2161. /* Digits are in reverse order, fix that */
  2162. REV(buf, pos);
  2163. /* Return the number of bytes actually written */
  2164. *limpos = pos;
  2165. return (uz == 0) ? MP_OK : MP_TRUNC;
  2166. }
  2167. /* Here there be dragons */