ge25519_p1p1_to_p2.S

  1. # qhasm: int64 rp
  2. # qhasm: int64 pp
  3. # qhasm: input rp
  4. # qhasm: input pp
  5. # qhasm: int64 caller1
  6. # qhasm: int64 caller2
  7. # qhasm: int64 caller3
  8. # qhasm: int64 caller4
  9. # qhasm: int64 caller5
  10. # qhasm: int64 caller6
  11. # qhasm: int64 caller7
  12. # qhasm: caller caller1
  13. # qhasm: caller caller2
  14. # qhasm: caller caller3
  15. # qhasm: caller caller4
  16. # qhasm: caller caller5
  17. # qhasm: caller caller6
  18. # qhasm: caller caller7
  19. # qhasm: stack64 caller1_stack
  20. # qhasm: stack64 caller2_stack
  21. # qhasm: stack64 caller3_stack
  22. # qhasm: stack64 caller4_stack
  23. # qhasm: stack64 caller5_stack
  24. # qhasm: stack64 caller6_stack
  25. # qhasm: stack64 caller7_stack
  26. # qhasm: int64 rx0
  27. # qhasm: int64 rx1
  28. # qhasm: int64 rx2
  29. # qhasm: int64 rx3
  30. # qhasm: int64 ry0
  31. # qhasm: int64 ry1
  32. # qhasm: int64 ry2
  33. # qhasm: int64 ry3
  34. # qhasm: int64 rz0
  35. # qhasm: int64 rz1
  36. # qhasm: int64 rz2
  37. # qhasm: int64 rz3
  38. # qhasm: int64 mulr4
  39. # qhasm: int64 mulr5
  40. # qhasm: int64 mulr6
  41. # qhasm: int64 mulr7
  42. # qhasm: int64 mulr8
  43. # qhasm: int64 mulrax
  44. # qhasm: int64 mulrdx
  45. # qhasm: int64 mulx0
  46. # qhasm: int64 mulx1
  47. # qhasm: int64 mulx2
  48. # qhasm: int64 mulx3
  49. # qhasm: int64 mulc
  50. # qhasm: int64 mulzero
  51. # qhasm: int64 muli38
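# This routine converts a point from the "completed" P1P1 representation
# ((X:Z),(Y:T)), with x = X/Z and y = Y/T, to the projective P2
# representation (X2:Y2:Z2) using three multiplications in GF(2^255-19):
#     X2 = X*T,   Y2 = Y*Z,   Z2 = Z*T.
# Each field element is four 64-bit limbs (32 bytes); judging by the operand
# offsets used below, the input appears to store X, Z, Y, T at pp+0, pp+32,
# pp+64, pp+96, and the results are written to rp+0, rp+32, rp+64.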
  52. # qhasm: enter CRYPTO_NAMESPACE(ge25519_p1p1_to_p2)
  53. .text
  54. .p2align 5
  55. .globl _CRYPTO_NAMESPACE(ge25519_p1p1_to_p2)
  56. .globl CRYPTO_NAMESPACE(ge25519_p1p1_to_p2)
  57. _CRYPTO_NAMESPACE(ge25519_p1p1_to_p2):
  58. CRYPTO_NAMESPACE(ge25519_p1p1_to_p2):
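# Prologue: align %rsp down to a 32-byte boundary with 64 bytes of scratch,
# then save the adjustment (held in %r11) and the callee-saved registers
# %r12-%r15, %rbx, %rbp to the stack.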
  59. mov %rsp,%r11
  60. and $31,%r11
  61. add $64,%r11
  62. sub %r11,%rsp
  63. # qhasm: caller1_stack = caller1
  64. # asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
  65. # asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
  66. movq %r11,0(%rsp)
  67. # qhasm: caller2_stack = caller2
  68. # asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
  69. # asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
  70. movq %r12,8(%rsp)
  71. # qhasm: caller3_stack = caller3
  72. # asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
  73. # asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
  74. movq %r13,16(%rsp)
  75. # qhasm: caller4_stack = caller4
  76. # asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
  77. # asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
  78. movq %r14,24(%rsp)
  79. # qhasm: caller5_stack = caller5
  80. # asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
  81. # asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
  82. movq %r15,32(%rsp)
  83. # qhasm: caller6_stack = caller6
  84. # asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
  85. # asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
  86. movq %rbx,40(%rsp)
  87. # qhasm: caller7_stack = caller7
  88. # asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
  89. # asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
  90. movq %rbp,48(%rsp)
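# First multiplication (X*T, by the offsets used): schoolbook 4x4-limb
# multiply of the field element at pp+0..24 by the one at pp+96..120,
# accumulating the 8-limb (512-bit) product in rx0..rx3 and mulr4..mulr7.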
  91. # qhasm: mulr4 = 0
  92. # asm 1: mov $0,>mulr4=int64#4
  93. # asm 2: mov $0,>mulr4=%rcx
  94. mov $0,%rcx
  95. # qhasm: mulr5 = 0
  96. # asm 1: mov $0,>mulr5=int64#5
  97. # asm 2: mov $0,>mulr5=%r8
  98. mov $0,%r8
  99. # qhasm: mulr6 = 0
  100. # asm 1: mov $0,>mulr6=int64#6
  101. # asm 2: mov $0,>mulr6=%r9
  102. mov $0,%r9
  103. # qhasm: mulr7 = 0
  104. # asm 1: mov $0,>mulr7=int64#8
  105. # asm 2: mov $0,>mulr7=%r10
  106. mov $0,%r10
  107. # qhasm: mulx0 = *(uint64 *)(pp + 0)
  108. # asm 1: movq 0(<pp=int64#2),>mulx0=int64#9
  109. # asm 2: movq 0(<pp=%rsi),>mulx0=%r11
  110. movq 0(%rsi),%r11
  111. # qhasm: mulrax = *(uint64 *)(pp + 96)
  112. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  113. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  114. movq 96(%rsi),%rax
  115. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  116. # asm 1: mul <mulx0=int64#9
  117. # asm 2: mul <mulx0=%r11
  118. mul %r11
  119. # qhasm: rx0 = mulrax
  120. # asm 1: mov <mulrax=int64#7,>rx0=int64#10
  121. # asm 2: mov <mulrax=%rax,>rx0=%r12
  122. mov %rax,%r12
  123. # qhasm: rx1 = mulrdx
  124. # asm 1: mov <mulrdx=int64#3,>rx1=int64#11
  125. # asm 2: mov <mulrdx=%rdx,>rx1=%r13
  126. mov %rdx,%r13
  127. # qhasm: mulrax = *(uint64 *)(pp + 104)
  128. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  129. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  130. movq 104(%rsi),%rax
  131. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  132. # asm 1: mul <mulx0=int64#9
  133. # asm 2: mul <mulx0=%r11
  134. mul %r11
  135. # qhasm: carry? rx1 += mulrax
  136. # asm 1: add <mulrax=int64#7,<rx1=int64#11
  137. # asm 2: add <mulrax=%rax,<rx1=%r13
  138. add %rax,%r13
  139. # qhasm: rx2 = 0
  140. # asm 1: mov $0,>rx2=int64#12
  141. # asm 2: mov $0,>rx2=%r14
  142. mov $0,%r14
  143. # qhasm: rx2 += mulrdx + carry
  144. # asm 1: adc <mulrdx=int64#3,<rx2=int64#12
  145. # asm 2: adc <mulrdx=%rdx,<rx2=%r14
  146. adc %rdx,%r14
  147. # qhasm: mulrax = *(uint64 *)(pp + 112)
  148. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
  149. # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
  150. movq 112(%rsi),%rax
  151. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  152. # asm 1: mul <mulx0=int64#9
  153. # asm 2: mul <mulx0=%r11
  154. mul %r11
  155. # qhasm: carry? rx2 += mulrax
  156. # asm 1: add <mulrax=int64#7,<rx2=int64#12
  157. # asm 2: add <mulrax=%rax,<rx2=%r14
  158. add %rax,%r14
  159. # qhasm: rx3 = 0
  160. # asm 1: mov $0,>rx3=int64#13
  161. # asm 2: mov $0,>rx3=%r15
  162. mov $0,%r15
  163. # qhasm: rx3 += mulrdx + carry
  164. # asm 1: adc <mulrdx=int64#3,<rx3=int64#13
  165. # asm 2: adc <mulrdx=%rdx,<rx3=%r15
  166. adc %rdx,%r15
  167. # qhasm: mulrax = *(uint64 *)(pp + 120)
  168. # asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
  169. # asm 2: movq 120(<pp=%rsi),>mulrax=%rax
  170. movq 120(%rsi),%rax
  171. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  172. # asm 1: mul <mulx0=int64#9
  173. # asm 2: mul <mulx0=%r11
  174. mul %r11
  175. # qhasm: carry? rx3 += mulrax
  176. # asm 1: add <mulrax=int64#7,<rx3=int64#13
  177. # asm 2: add <mulrax=%rax,<rx3=%r15
  178. add %rax,%r15
  179. # qhasm: mulr4 += mulrdx + carry
  180. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
  181. # asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
  182. adc %rdx,%rcx
  183. # qhasm: mulx1 = *(uint64 *)(pp + 8)
  184. # asm 1: movq 8(<pp=int64#2),>mulx1=int64#9
  185. # asm 2: movq 8(<pp=%rsi),>mulx1=%r11
  186. movq 8(%rsi),%r11
  187. # qhasm: mulrax = *(uint64 *)(pp + 96)
  188. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  189. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  190. movq 96(%rsi),%rax
  191. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  192. # asm 1: mul <mulx1=int64#9
  193. # asm 2: mul <mulx1=%r11
  194. mul %r11
  195. # qhasm: carry? rx1 += mulrax
  196. # asm 1: add <mulrax=int64#7,<rx1=int64#11
  197. # asm 2: add <mulrax=%rax,<rx1=%r13
  198. add %rax,%r13
  199. # qhasm: mulc = 0
  200. # asm 1: mov $0,>mulc=int64#14
  201. # asm 2: mov $0,>mulc=%rbx
  202. mov $0,%rbx
  203. # qhasm: mulc += mulrdx + carry
  204. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  205. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  206. adc %rdx,%rbx
  207. # qhasm: mulrax = *(uint64 *)(pp + 104)
  208. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  209. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  210. movq 104(%rsi),%rax
  211. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  212. # asm 1: mul <mulx1=int64#9
  213. # asm 2: mul <mulx1=%r11
  214. mul %r11
  215. # qhasm: carry? rx2 += mulrax
  216. # asm 1: add <mulrax=int64#7,<rx2=int64#12
  217. # asm 2: add <mulrax=%rax,<rx2=%r14
  218. add %rax,%r14
  219. # qhasm: mulrdx += 0 + carry
  220. # asm 1: adc $0,<mulrdx=int64#3
  221. # asm 2: adc $0,<mulrdx=%rdx
  222. adc $0,%rdx
  223. # qhasm: carry? rx2 += mulc
  224. # asm 1: add <mulc=int64#14,<rx2=int64#12
  225. # asm 2: add <mulc=%rbx,<rx2=%r14
  226. add %rbx,%r14
  227. # qhasm: mulc = 0
  228. # asm 1: mov $0,>mulc=int64#14
  229. # asm 2: mov $0,>mulc=%rbx
  230. mov $0,%rbx
  231. # qhasm: mulc += mulrdx + carry
  232. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  233. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  234. adc %rdx,%rbx
  235. # qhasm: mulrax = *(uint64 *)(pp + 112)
  236. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
  237. # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
  238. movq 112(%rsi),%rax
  239. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  240. # asm 1: mul <mulx1=int64#9
  241. # asm 2: mul <mulx1=%r11
  242. mul %r11
  243. # qhasm: carry? rx3 += mulrax
  244. # asm 1: add <mulrax=int64#7,<rx3=int64#13
  245. # asm 2: add <mulrax=%rax,<rx3=%r15
  246. add %rax,%r15
  247. # qhasm: mulrdx += 0 + carry
  248. # asm 1: adc $0,<mulrdx=int64#3
  249. # asm 2: adc $0,<mulrdx=%rdx
  250. adc $0,%rdx
  251. # qhasm: carry? rx3 += mulc
  252. # asm 1: add <mulc=int64#14,<rx3=int64#13
  253. # asm 2: add <mulc=%rbx,<rx3=%r15
  254. add %rbx,%r15
  255. # qhasm: mulc = 0
  256. # asm 1: mov $0,>mulc=int64#14
  257. # asm 2: mov $0,>mulc=%rbx
  258. mov $0,%rbx
  259. # qhasm: mulc += mulrdx + carry
  260. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  261. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  262. adc %rdx,%rbx
  263. # qhasm: mulrax = *(uint64 *)(pp + 120)
  264. # asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
  265. # asm 2: movq 120(<pp=%rsi),>mulrax=%rax
  266. movq 120(%rsi),%rax
  267. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  268. # asm 1: mul <mulx1=int64#9
  269. # asm 2: mul <mulx1=%r11
  270. mul %r11
  271. # qhasm: carry? mulr4 += mulrax
  272. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  273. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  274. add %rax,%rcx
  275. # qhasm: mulrdx += 0 + carry
  276. # asm 1: adc $0,<mulrdx=int64#3
  277. # asm 2: adc $0,<mulrdx=%rdx
  278. adc $0,%rdx
  279. # qhasm: carry? mulr4 += mulc
  280. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  281. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  282. add %rbx,%rcx
  283. # qhasm: mulr5 += mulrdx + carry
  284. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
  285. # asm 2: adc <mulrdx=%rdx,<mulr5=%r8
  286. adc %rdx,%r8
  287. # qhasm: mulx2 = *(uint64 *)(pp + 16)
  288. # asm 1: movq 16(<pp=int64#2),>mulx2=int64#9
  289. # asm 2: movq 16(<pp=%rsi),>mulx2=%r11
  290. movq 16(%rsi),%r11
  291. # qhasm: mulrax = *(uint64 *)(pp + 96)
  292. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  293. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  294. movq 96(%rsi),%rax
  295. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  296. # asm 1: mul <mulx2=int64#9
  297. # asm 2: mul <mulx2=%r11
  298. mul %r11
  299. # qhasm: carry? rx2 += mulrax
  300. # asm 1: add <mulrax=int64#7,<rx2=int64#12
  301. # asm 2: add <mulrax=%rax,<rx2=%r14
  302. add %rax,%r14
  303. # qhasm: mulc = 0
  304. # asm 1: mov $0,>mulc=int64#14
  305. # asm 2: mov $0,>mulc=%rbx
  306. mov $0,%rbx
  307. # qhasm: mulc += mulrdx + carry
  308. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  309. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  310. adc %rdx,%rbx
  311. # qhasm: mulrax = *(uint64 *)(pp + 104)
  312. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  313. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  314. movq 104(%rsi),%rax
  315. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  316. # asm 1: mul <mulx2=int64#9
  317. # asm 2: mul <mulx2=%r11
  318. mul %r11
  319. # qhasm: carry? rx3 += mulrax
  320. # asm 1: add <mulrax=int64#7,<rx3=int64#13
  321. # asm 2: add <mulrax=%rax,<rx3=%r15
  322. add %rax,%r15
  323. # qhasm: mulrdx += 0 + carry
  324. # asm 1: adc $0,<mulrdx=int64#3
  325. # asm 2: adc $0,<mulrdx=%rdx
  326. adc $0,%rdx
  327. # qhasm: carry? rx3 += mulc
  328. # asm 1: add <mulc=int64#14,<rx3=int64#13
  329. # asm 2: add <mulc=%rbx,<rx3=%r15
  330. add %rbx,%r15
  331. # qhasm: mulc = 0
  332. # asm 1: mov $0,>mulc=int64#14
  333. # asm 2: mov $0,>mulc=%rbx
  334. mov $0,%rbx
  335. # qhasm: mulc += mulrdx + carry
  336. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  337. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  338. adc %rdx,%rbx
  339. # qhasm: mulrax = *(uint64 *)(pp + 112)
  340. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
  341. # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
  342. movq 112(%rsi),%rax
  343. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  344. # asm 1: mul <mulx2=int64#9
  345. # asm 2: mul <mulx2=%r11
  346. mul %r11
  347. # qhasm: carry? mulr4 += mulrax
  348. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  349. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  350. add %rax,%rcx
  351. # qhasm: mulrdx += 0 + carry
  352. # asm 1: adc $0,<mulrdx=int64#3
  353. # asm 2: adc $0,<mulrdx=%rdx
  354. adc $0,%rdx
  355. # qhasm: carry? mulr4 += mulc
  356. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  357. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  358. add %rbx,%rcx
  359. # qhasm: mulc = 0
  360. # asm 1: mov $0,>mulc=int64#14
  361. # asm 2: mov $0,>mulc=%rbx
  362. mov $0,%rbx
  363. # qhasm: mulc += mulrdx + carry
  364. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  365. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  366. adc %rdx,%rbx
  367. # qhasm: mulrax = *(uint64 *)(pp + 120)
  368. # asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
  369. # asm 2: movq 120(<pp=%rsi),>mulrax=%rax
  370. movq 120(%rsi),%rax
  371. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  372. # asm 1: mul <mulx2=int64#9
  373. # asm 2: mul <mulx2=%r11
  374. mul %r11
  375. # qhasm: carry? mulr5 += mulrax
  376. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  377. # asm 2: add <mulrax=%rax,<mulr5=%r8
  378. add %rax,%r8
  379. # qhasm: mulrdx += 0 + carry
  380. # asm 1: adc $0,<mulrdx=int64#3
  381. # asm 2: adc $0,<mulrdx=%rdx
  382. adc $0,%rdx
  383. # qhasm: carry? mulr5 += mulc
  384. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  385. # asm 2: add <mulc=%rbx,<mulr5=%r8
  386. add %rbx,%r8
  387. # qhasm: mulr6 += mulrdx + carry
  388. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  389. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  390. adc %rdx,%r9
  391. # qhasm: mulx3 = *(uint64 *)(pp + 24)
  392. # asm 1: movq 24(<pp=int64#2),>mulx3=int64#9
  393. # asm 2: movq 24(<pp=%rsi),>mulx3=%r11
  394. movq 24(%rsi),%r11
  395. # qhasm: mulrax = *(uint64 *)(pp + 96)
  396. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  397. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  398. movq 96(%rsi),%rax
  399. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  400. # asm 1: mul <mulx3=int64#9
  401. # asm 2: mul <mulx3=%r11
  402. mul %r11
  403. # qhasm: carry? rx3 += mulrax
  404. # asm 1: add <mulrax=int64#7,<rx3=int64#13
  405. # asm 2: add <mulrax=%rax,<rx3=%r15
  406. add %rax,%r15
  407. # qhasm: mulc = 0
  408. # asm 1: mov $0,>mulc=int64#14
  409. # asm 2: mov $0,>mulc=%rbx
  410. mov $0,%rbx
  411. # qhasm: mulc += mulrdx + carry
  412. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  413. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  414. adc %rdx,%rbx
  415. # qhasm: mulrax = *(uint64 *)(pp + 104)
  416. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  417. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  418. movq 104(%rsi),%rax
  419. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  420. # asm 1: mul <mulx3=int64#9
  421. # asm 2: mul <mulx3=%r11
  422. mul %r11
  423. # qhasm: carry? mulr4 += mulrax
  424. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  425. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  426. add %rax,%rcx
  427. # qhasm: mulrdx += 0 + carry
  428. # asm 1: adc $0,<mulrdx=int64#3
  429. # asm 2: adc $0,<mulrdx=%rdx
  430. adc $0,%rdx
  431. # qhasm: carry? mulr4 += mulc
  432. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  433. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  434. add %rbx,%rcx
  435. # qhasm: mulc = 0
  436. # asm 1: mov $0,>mulc=int64#14
  437. # asm 2: mov $0,>mulc=%rbx
  438. mov $0,%rbx
  439. # qhasm: mulc += mulrdx + carry
  440. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  441. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  442. adc %rdx,%rbx
  443. # qhasm: mulrax = *(uint64 *)(pp + 112)
  444. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
  445. # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
  446. movq 112(%rsi),%rax
  447. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  448. # asm 1: mul <mulx3=int64#9
  449. # asm 2: mul <mulx3=%r11
  450. mul %r11
  451. # qhasm: carry? mulr5 += mulrax
  452. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  453. # asm 2: add <mulrax=%rax,<mulr5=%r8
  454. add %rax,%r8
  455. # qhasm: mulrdx += 0 + carry
  456. # asm 1: adc $0,<mulrdx=int64#3
  457. # asm 2: adc $0,<mulrdx=%rdx
  458. adc $0,%rdx
  459. # qhasm: carry? mulr5 += mulc
  460. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  461. # asm 2: add <mulc=%rbx,<mulr5=%r8
  462. add %rbx,%r8
  463. # qhasm: mulc = 0
  464. # asm 1: mov $0,>mulc=int64#14
  465. # asm 2: mov $0,>mulc=%rbx
  466. mov $0,%rbx
  467. # qhasm: mulc += mulrdx + carry
  468. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  469. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  470. adc %rdx,%rbx
  471. # qhasm: mulrax = *(uint64 *)(pp + 120)
  472. # asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
  473. # asm 2: movq 120(<pp=%rsi),>mulrax=%rax
  474. movq 120(%rsi),%rax
  475. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  476. # asm 1: mul <mulx3=int64#9
  477. # asm 2: mul <mulx3=%r11
  478. mul %r11
  479. # qhasm: carry? mulr6 += mulrax
  480. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  481. # asm 2: add <mulrax=%rax,<mulr6=%r9
  482. add %rax,%r9
  483. # qhasm: mulrdx += 0 + carry
  484. # asm 1: adc $0,<mulrdx=int64#3
  485. # asm 2: adc $0,<mulrdx=%rdx
  486. adc $0,%rdx
  487. # qhasm: carry? mulr6 += mulc
  488. # asm 1: add <mulc=int64#14,<mulr6=int64#6
  489. # asm 2: add <mulc=%rbx,<mulr6=%r9
  490. add %rbx,%r9
  491. # qhasm: mulr7 += mulrdx + carry
  492. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  493. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  494. adc %rdx,%r10
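# Reduce the 8-limb product modulo 2^255-19: since 2^256 == 38 (mod 2^255-19),
# the high limbs mulr4..mulr7 are multiplied by 38 and folded back into
# rx0..rx3; the final overflow is folded in the same way before the result
# is stored to rp+0..24.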
  495. # qhasm: mulrax = mulr4
  496. # asm 1: mov <mulr4=int64#4,>mulrax=int64#7
  497. # asm 2: mov <mulr4=%rcx,>mulrax=%rax
  498. mov %rcx,%rax
  499. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  500. mulq CRYPTO_NAMESPACE(38)(%rip)
  501. # qhasm: mulr4 = mulrax
  502. # asm 1: mov <mulrax=int64#7,>mulr4=int64#4
  503. # asm 2: mov <mulrax=%rax,>mulr4=%rcx
  504. mov %rax,%rcx
  505. # qhasm: mulrax = mulr5
  506. # asm 1: mov <mulr5=int64#5,>mulrax=int64#7
  507. # asm 2: mov <mulr5=%r8,>mulrax=%rax
  508. mov %r8,%rax
  509. # qhasm: mulr5 = mulrdx
  510. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#5
  511. # asm 2: mov <mulrdx=%rdx,>mulr5=%r8
  512. mov %rdx,%r8
  513. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  514. mulq CRYPTO_NAMESPACE(38)(%rip)
  515. # qhasm: carry? mulr5 += mulrax
  516. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  517. # asm 2: add <mulrax=%rax,<mulr5=%r8
  518. add %rax,%r8
  519. # qhasm: mulrax = mulr6
  520. # asm 1: mov <mulr6=int64#6,>mulrax=int64#7
  521. # asm 2: mov <mulr6=%r9,>mulrax=%rax
  522. mov %r9,%rax
  523. # qhasm: mulr6 = 0
  524. # asm 1: mov $0,>mulr6=int64#6
  525. # asm 2: mov $0,>mulr6=%r9
  526. mov $0,%r9
  527. # qhasm: mulr6 += mulrdx + carry
  528. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  529. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  530. adc %rdx,%r9
  531. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  532. mulq CRYPTO_NAMESPACE(38)(%rip)
  533. # qhasm: carry? mulr6 += mulrax
  534. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  535. # asm 2: add <mulrax=%rax,<mulr6=%r9
  536. add %rax,%r9
  537. # qhasm: mulrax = mulr7
  538. # asm 1: mov <mulr7=int64#8,>mulrax=int64#7
  539. # asm 2: mov <mulr7=%r10,>mulrax=%rax
  540. mov %r10,%rax
  541. # qhasm: mulr7 = 0
  542. # asm 1: mov $0,>mulr7=int64#8
  543. # asm 2: mov $0,>mulr7=%r10
  544. mov $0,%r10
  545. # qhasm: mulr7 += mulrdx + carry
  546. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  547. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  548. adc %rdx,%r10
  549. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  550. mulq CRYPTO_NAMESPACE(38)(%rip)
  551. # qhasm: carry? mulr7 += mulrax
  552. # asm 1: add <mulrax=int64#7,<mulr7=int64#8
  553. # asm 2: add <mulrax=%rax,<mulr7=%r10
  554. add %rax,%r10
  555. # qhasm: mulr8 = 0
  556. # asm 1: mov $0,>mulr8=int64#7
  557. # asm 2: mov $0,>mulr8=%rax
  558. mov $0,%rax
  559. # qhasm: mulr8 += mulrdx + carry
  560. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  561. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  562. adc %rdx,%rax
  563. # qhasm: carry? rx0 += mulr4
  564. # asm 1: add <mulr4=int64#4,<rx0=int64#10
  565. # asm 2: add <mulr4=%rcx,<rx0=%r12
  566. add %rcx,%r12
  567. # qhasm: carry? rx1 += mulr5 + carry
  568. # asm 1: adc <mulr5=int64#5,<rx1=int64#11
  569. # asm 2: adc <mulr5=%r8,<rx1=%r13
  570. adc %r8,%r13
  571. # qhasm: carry? rx2 += mulr6 + carry
  572. # asm 1: adc <mulr6=int64#6,<rx2=int64#12
  573. # asm 2: adc <mulr6=%r9,<rx2=%r14
  574. adc %r9,%r14
  575. # qhasm: carry? rx3 += mulr7 + carry
  576. # asm 1: adc <mulr7=int64#8,<rx3=int64#13
  577. # asm 2: adc <mulr7=%r10,<rx3=%r15
  578. adc %r10,%r15
  579. # qhasm: mulzero = 0
  580. # asm 1: mov $0,>mulzero=int64#3
  581. # asm 2: mov $0,>mulzero=%rdx
  582. mov $0,%rdx
  583. # qhasm: mulr8 += mulzero + carry
  584. # asm 1: adc <mulzero=int64#3,<mulr8=int64#7
  585. # asm 2: adc <mulzero=%rdx,<mulr8=%rax
  586. adc %rdx,%rax
  587. # qhasm: mulr8 *= 38
  588. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4
  589. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx
  590. imulq $38,%rax,%rcx
  591. # qhasm: carry? rx0 += mulr8
  592. # asm 1: add <mulr8=int64#4,<rx0=int64#10
  593. # asm 2: add <mulr8=%rcx,<rx0=%r12
  594. add %rcx,%r12
  595. # qhasm: carry? rx1 += mulzero + carry
  596. # asm 1: adc <mulzero=int64#3,<rx1=int64#11
  597. # asm 2: adc <mulzero=%rdx,<rx1=%r13
  598. adc %rdx,%r13
  599. # qhasm: carry? rx2 += mulzero + carry
  600. # asm 1: adc <mulzero=int64#3,<rx2=int64#12
  601. # asm 2: adc <mulzero=%rdx,<rx2=%r14
  602. adc %rdx,%r14
  603. # qhasm: carry? rx3 += mulzero + carry
  604. # asm 1: adc <mulzero=int64#3,<rx3=int64#13
  605. # asm 2: adc <mulzero=%rdx,<rx3=%r15
  606. adc %rdx,%r15
  607. # qhasm: mulzero += mulzero + carry
  608. # asm 1: adc <mulzero=int64#3,<mulzero=int64#3
  609. # asm 2: adc <mulzero=%rdx,<mulzero=%rdx
  610. adc %rdx,%rdx
  611. # qhasm: mulzero *= 38
  612. # asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
  613. # asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
  614. imulq $38,%rdx,%rdx
  615. # qhasm: rx0 += mulzero
  616. # asm 1: add <mulzero=int64#3,<rx0=int64#10
  617. # asm 2: add <mulzero=%rdx,<rx0=%r12
  618. add %rdx,%r12
  619. # qhasm: *(uint64 *)(rp + 0) = rx0
  620. # asm 1: movq <rx0=int64#10,0(<rp=int64#1)
  621. # asm 2: movq <rx0=%r12,0(<rp=%rdi)
  622. movq %r12,0(%rdi)
  623. # qhasm: *(uint64 *)(rp + 8) = rx1
  624. # asm 1: movq <rx1=int64#11,8(<rp=int64#1)
  625. # asm 2: movq <rx1=%r13,8(<rp=%rdi)
  626. movq %r13,8(%rdi)
  627. # qhasm: *(uint64 *)(rp + 16) = rx2
  628. # asm 1: movq <rx2=int64#12,16(<rp=int64#1)
  629. # asm 2: movq <rx2=%r14,16(<rp=%rdi)
  630. movq %r14,16(%rdi)
  631. # qhasm: *(uint64 *)(rp + 24) = rx3
  632. # asm 1: movq <rx3=int64#13,24(<rp=int64#1)
  633. # asm 2: movq <rx3=%r15,24(<rp=%rdi)
  634. movq %r15,24(%rdi)
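# Second multiplication (Y*Z, by the offsets used): the field element at
# pp+32..56 times the one at pp+64..88, same schoolbook pattern, accumulated
# in ry0..ry3 and mulr4..mulr7; the reduced result is stored to rp+32..56.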
  635. # qhasm: mulr4 = 0
  636. # asm 1: mov $0,>mulr4=int64#4
  637. # asm 2: mov $0,>mulr4=%rcx
  638. mov $0,%rcx
  639. # qhasm: mulr5 = 0
  640. # asm 1: mov $0,>mulr5=int64#5
  641. # asm 2: mov $0,>mulr5=%r8
  642. mov $0,%r8
  643. # qhasm: mulr6 = 0
  644. # asm 1: mov $0,>mulr6=int64#6
  645. # asm 2: mov $0,>mulr6=%r9
  646. mov $0,%r9
  647. # qhasm: mulr7 = 0
  648. # asm 1: mov $0,>mulr7=int64#8
  649. # asm 2: mov $0,>mulr7=%r10
  650. mov $0,%r10
  651. # qhasm: mulx0 = *(uint64 *)(pp + 64)
  652. # asm 1: movq 64(<pp=int64#2),>mulx0=int64#9
  653. # asm 2: movq 64(<pp=%rsi),>mulx0=%r11
  654. movq 64(%rsi),%r11
  655. # qhasm: mulrax = *(uint64 *)(pp + 32)
  656. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
  657. # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
  658. movq 32(%rsi),%rax
  659. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  660. # asm 1: mul <mulx0=int64#9
  661. # asm 2: mul <mulx0=%r11
  662. mul %r11
  663. # qhasm: ry0 = mulrax
  664. # asm 1: mov <mulrax=int64#7,>ry0=int64#10
  665. # asm 2: mov <mulrax=%rax,>ry0=%r12
  666. mov %rax,%r12
  667. # qhasm: ry1 = mulrdx
  668. # asm 1: mov <mulrdx=int64#3,>ry1=int64#11
  669. # asm 2: mov <mulrdx=%rdx,>ry1=%r13
  670. mov %rdx,%r13
  671. # qhasm: mulrax = *(uint64 *)(pp + 40)
  672. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  673. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  674. movq 40(%rsi),%rax
  675. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  676. # asm 1: mul <mulx0=int64#9
  677. # asm 2: mul <mulx0=%r11
  678. mul %r11
  679. # qhasm: carry? ry1 += mulrax
  680. # asm 1: add <mulrax=int64#7,<ry1=int64#11
  681. # asm 2: add <mulrax=%rax,<ry1=%r13
  682. add %rax,%r13
  683. # qhasm: ry2 = 0
  684. # asm 1: mov $0,>ry2=int64#12
  685. # asm 2: mov $0,>ry2=%r14
  686. mov $0,%r14
  687. # qhasm: ry2 += mulrdx + carry
  688. # asm 1: adc <mulrdx=int64#3,<ry2=int64#12
  689. # asm 2: adc <mulrdx=%rdx,<ry2=%r14
  690. adc %rdx,%r14
  691. # qhasm: mulrax = *(uint64 *)(pp + 48)
  692. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  693. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  694. movq 48(%rsi),%rax
  695. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  696. # asm 1: mul <mulx0=int64#9
  697. # asm 2: mul <mulx0=%r11
  698. mul %r11
  699. # qhasm: carry? ry2 += mulrax
  700. # asm 1: add <mulrax=int64#7,<ry2=int64#12
  701. # asm 2: add <mulrax=%rax,<ry2=%r14
  702. add %rax,%r14
  703. # qhasm: ry3 = 0
  704. # asm 1: mov $0,>ry3=int64#13
  705. # asm 2: mov $0,>ry3=%r15
  706. mov $0,%r15
  707. # qhasm: ry3 += mulrdx + carry
  708. # asm 1: adc <mulrdx=int64#3,<ry3=int64#13
  709. # asm 2: adc <mulrdx=%rdx,<ry3=%r15
  710. adc %rdx,%r15
  711. # qhasm: mulrax = *(uint64 *)(pp + 56)
  712. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  713. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  714. movq 56(%rsi),%rax
  715. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  716. # asm 1: mul <mulx0=int64#9
  717. # asm 2: mul <mulx0=%r11
  718. mul %r11
  719. # qhasm: carry? ry3 += mulrax
  720. # asm 1: add <mulrax=int64#7,<ry3=int64#13
  721. # asm 2: add <mulrax=%rax,<ry3=%r15
  722. add %rax,%r15
  723. # qhasm: mulr4 += mulrdx + carry
  724. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
  725. # asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
  726. adc %rdx,%rcx
  727. # qhasm: mulx1 = *(uint64 *)(pp + 72)
  728. # asm 1: movq 72(<pp=int64#2),>mulx1=int64#9
  729. # asm 2: movq 72(<pp=%rsi),>mulx1=%r11
  730. movq 72(%rsi),%r11
  731. # qhasm: mulrax = *(uint64 *)(pp + 32)
  732. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
  733. # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
  734. movq 32(%rsi),%rax
  735. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  736. # asm 1: mul <mulx1=int64#9
  737. # asm 2: mul <mulx1=%r11
  738. mul %r11
  739. # qhasm: carry? ry1 += mulrax
  740. # asm 1: add <mulrax=int64#7,<ry1=int64#11
  741. # asm 2: add <mulrax=%rax,<ry1=%r13
  742. add %rax,%r13
  743. # qhasm: mulc = 0
  744. # asm 1: mov $0,>mulc=int64#14
  745. # asm 2: mov $0,>mulc=%rbx
  746. mov $0,%rbx
  747. # qhasm: mulc += mulrdx + carry
  748. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  749. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  750. adc %rdx,%rbx
  751. # qhasm: mulrax = *(uint64 *)(pp + 40)
  752. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  753. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  754. movq 40(%rsi),%rax
  755. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  756. # asm 1: mul <mulx1=int64#9
  757. # asm 2: mul <mulx1=%r11
  758. mul %r11
  759. # qhasm: carry? ry2 += mulrax
  760. # asm 1: add <mulrax=int64#7,<ry2=int64#12
  761. # asm 2: add <mulrax=%rax,<ry2=%r14
  762. add %rax,%r14
  763. # qhasm: mulrdx += 0 + carry
  764. # asm 1: adc $0,<mulrdx=int64#3
  765. # asm 2: adc $0,<mulrdx=%rdx
  766. adc $0,%rdx
  767. # qhasm: carry? ry2 += mulc
  768. # asm 1: add <mulc=int64#14,<ry2=int64#12
  769. # asm 2: add <mulc=%rbx,<ry2=%r14
  770. add %rbx,%r14
  771. # qhasm: mulc = 0
  772. # asm 1: mov $0,>mulc=int64#14
  773. # asm 2: mov $0,>mulc=%rbx
  774. mov $0,%rbx
  775. # qhasm: mulc += mulrdx + carry
  776. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  777. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  778. adc %rdx,%rbx
  779. # qhasm: mulrax = *(uint64 *)(pp + 48)
  780. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  781. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  782. movq 48(%rsi),%rax
  783. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  784. # asm 1: mul <mulx1=int64#9
  785. # asm 2: mul <mulx1=%r11
  786. mul %r11
  787. # qhasm: carry? ry3 += mulrax
  788. # asm 1: add <mulrax=int64#7,<ry3=int64#13
  789. # asm 2: add <mulrax=%rax,<ry3=%r15
  790. add %rax,%r15
  791. # qhasm: mulrdx += 0 + carry
  792. # asm 1: adc $0,<mulrdx=int64#3
  793. # asm 2: adc $0,<mulrdx=%rdx
  794. adc $0,%rdx
  795. # qhasm: carry? ry3 += mulc
  796. # asm 1: add <mulc=int64#14,<ry3=int64#13
  797. # asm 2: add <mulc=%rbx,<ry3=%r15
  798. add %rbx,%r15
  799. # qhasm: mulc = 0
  800. # asm 1: mov $0,>mulc=int64#14
  801. # asm 2: mov $0,>mulc=%rbx
  802. mov $0,%rbx
  803. # qhasm: mulc += mulrdx + carry
  804. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  805. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  806. adc %rdx,%rbx
  807. # qhasm: mulrax = *(uint64 *)(pp + 56)
  808. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  809. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  810. movq 56(%rsi),%rax
  811. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  812. # asm 1: mul <mulx1=int64#9
  813. # asm 2: mul <mulx1=%r11
  814. mul %r11
  815. # qhasm: carry? mulr4 += mulrax
  816. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  817. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  818. add %rax,%rcx
  819. # qhasm: mulrdx += 0 + carry
  820. # asm 1: adc $0,<mulrdx=int64#3
  821. # asm 2: adc $0,<mulrdx=%rdx
  822. adc $0,%rdx
  823. # qhasm: carry? mulr4 += mulc
  824. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  825. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  826. add %rbx,%rcx
  827. # qhasm: mulr5 += mulrdx + carry
  828. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
  829. # asm 2: adc <mulrdx=%rdx,<mulr5=%r8
  830. adc %rdx,%r8
  831. # qhasm: mulx2 = *(uint64 *)(pp + 80)
  832. # asm 1: movq 80(<pp=int64#2),>mulx2=int64#9
  833. # asm 2: movq 80(<pp=%rsi),>mulx2=%r11
  834. movq 80(%rsi),%r11
  835. # qhasm: mulrax = *(uint64 *)(pp + 32)
  836. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
  837. # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
  838. movq 32(%rsi),%rax
  839. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  840. # asm 1: mul <mulx2=int64#9
  841. # asm 2: mul <mulx2=%r11
  842. mul %r11
  843. # qhasm: carry? ry2 += mulrax
  844. # asm 1: add <mulrax=int64#7,<ry2=int64#12
  845. # asm 2: add <mulrax=%rax,<ry2=%r14
  846. add %rax,%r14
  847. # qhasm: mulc = 0
  848. # asm 1: mov $0,>mulc=int64#14
  849. # asm 2: mov $0,>mulc=%rbx
  850. mov $0,%rbx
  851. # qhasm: mulc += mulrdx + carry
  852. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  853. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  854. adc %rdx,%rbx
  855. # qhasm: mulrax = *(uint64 *)(pp + 40)
  856. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  857. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  858. movq 40(%rsi),%rax
  859. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  860. # asm 1: mul <mulx2=int64#9
  861. # asm 2: mul <mulx2=%r11
  862. mul %r11
  863. # qhasm: carry? ry3 += mulrax
  864. # asm 1: add <mulrax=int64#7,<ry3=int64#13
  865. # asm 2: add <mulrax=%rax,<ry3=%r15
  866. add %rax,%r15
  867. # qhasm: mulrdx += 0 + carry
  868. # asm 1: adc $0,<mulrdx=int64#3
  869. # asm 2: adc $0,<mulrdx=%rdx
  870. adc $0,%rdx
  871. # qhasm: carry? ry3 += mulc
  872. # asm 1: add <mulc=int64#14,<ry3=int64#13
  873. # asm 2: add <mulc=%rbx,<ry3=%r15
  874. add %rbx,%r15
  875. # qhasm: mulc = 0
  876. # asm 1: mov $0,>mulc=int64#14
  877. # asm 2: mov $0,>mulc=%rbx
  878. mov $0,%rbx
  879. # qhasm: mulc += mulrdx + carry
  880. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  881. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  882. adc %rdx,%rbx
  883. # qhasm: mulrax = *(uint64 *)(pp + 48)
  884. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  885. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  886. movq 48(%rsi),%rax
  887. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  888. # asm 1: mul <mulx2=int64#9
  889. # asm 2: mul <mulx2=%r11
  890. mul %r11
  891. # qhasm: carry? mulr4 += mulrax
  892. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  893. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  894. add %rax,%rcx
  895. # qhasm: mulrdx += 0 + carry
  896. # asm 1: adc $0,<mulrdx=int64#3
  897. # asm 2: adc $0,<mulrdx=%rdx
  898. adc $0,%rdx
  899. # qhasm: carry? mulr4 += mulc
  900. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  901. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  902. add %rbx,%rcx
  903. # qhasm: mulc = 0
  904. # asm 1: mov $0,>mulc=int64#14
  905. # asm 2: mov $0,>mulc=%rbx
  906. mov $0,%rbx
  907. # qhasm: mulc += mulrdx + carry
  908. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  909. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  910. adc %rdx,%rbx
  911. # qhasm: mulrax = *(uint64 *)(pp + 56)
  912. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  913. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  914. movq 56(%rsi),%rax
  915. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  916. # asm 1: mul <mulx2=int64#9
  917. # asm 2: mul <mulx2=%r11
  918. mul %r11
  919. # qhasm: carry? mulr5 += mulrax
  920. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  921. # asm 2: add <mulrax=%rax,<mulr5=%r8
  922. add %rax,%r8
  923. # qhasm: mulrdx += 0 + carry
  924. # asm 1: adc $0,<mulrdx=int64#3
  925. # asm 2: adc $0,<mulrdx=%rdx
  926. adc $0,%rdx
  927. # qhasm: carry? mulr5 += mulc
  928. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  929. # asm 2: add <mulc=%rbx,<mulr5=%r8
  930. add %rbx,%r8
  931. # qhasm: mulr6 += mulrdx + carry
  932. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  933. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  934. adc %rdx,%r9
  935. # qhasm: mulx3 = *(uint64 *)(pp + 88)
  936. # asm 1: movq 88(<pp=int64#2),>mulx3=int64#9
  937. # asm 2: movq 88(<pp=%rsi),>mulx3=%r11
  938. movq 88(%rsi),%r11
  939. # qhasm: mulrax = *(uint64 *)(pp + 32)
  940. # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
  941. # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
  942. movq 32(%rsi),%rax
  943. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  944. # asm 1: mul <mulx3=int64#9
  945. # asm 2: mul <mulx3=%r11
  946. mul %r11
  947. # qhasm: carry? ry3 += mulrax
  948. # asm 1: add <mulrax=int64#7,<ry3=int64#13
  949. # asm 2: add <mulrax=%rax,<ry3=%r15
  950. add %rax,%r15
  951. # qhasm: mulc = 0
  952. # asm 1: mov $0,>mulc=int64#14
  953. # asm 2: mov $0,>mulc=%rbx
  954. mov $0,%rbx
  955. # qhasm: mulc += mulrdx + carry
  956. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  957. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  958. adc %rdx,%rbx
  959. # qhasm: mulrax = *(uint64 *)(pp + 40)
  960. # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
  961. # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
  962. movq 40(%rsi),%rax
  963. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  964. # asm 1: mul <mulx3=int64#9
  965. # asm 2: mul <mulx3=%r11
  966. mul %r11
  967. # qhasm: carry? mulr4 += mulrax
  968. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  969. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  970. add %rax,%rcx
  971. # qhasm: mulrdx += 0 + carry
  972. # asm 1: adc $0,<mulrdx=int64#3
  973. # asm 2: adc $0,<mulrdx=%rdx
  974. adc $0,%rdx
  975. # qhasm: carry? mulr4 += mulc
  976. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  977. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  978. add %rbx,%rcx
  979. # qhasm: mulc = 0
  980. # asm 1: mov $0,>mulc=int64#14
  981. # asm 2: mov $0,>mulc=%rbx
  982. mov $0,%rbx
  983. # qhasm: mulc += mulrdx + carry
  984. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  985. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  986. adc %rdx,%rbx
  987. # qhasm: mulrax = *(uint64 *)(pp + 48)
  988. # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
  989. # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
  990. movq 48(%rsi),%rax
  991. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  992. # asm 1: mul <mulx3=int64#9
  993. # asm 2: mul <mulx3=%r11
  994. mul %r11
  995. # qhasm: carry? mulr5 += mulrax
  996. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  997. # asm 2: add <mulrax=%rax,<mulr5=%r8
  998. add %rax,%r8
  999. # qhasm: mulrdx += 0 + carry
  1000. # asm 1: adc $0,<mulrdx=int64#3
  1001. # asm 2: adc $0,<mulrdx=%rdx
  1002. adc $0,%rdx
  1003. # qhasm: carry? mulr5 += mulc
  1004. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  1005. # asm 2: add <mulc=%rbx,<mulr5=%r8
  1006. add %rbx,%r8
  1007. # qhasm: mulc = 0
  1008. # asm 1: mov $0,>mulc=int64#14
  1009. # asm 2: mov $0,>mulc=%rbx
  1010. mov $0,%rbx
  1011. # qhasm: mulc += mulrdx + carry
  1012. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1013. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1014. adc %rdx,%rbx
  1015. # qhasm: mulrax = *(uint64 *)(pp + 56)
  1016. # asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
  1017. # asm 2: movq 56(<pp=%rsi),>mulrax=%rax
  1018. movq 56(%rsi),%rax
  1019. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1020. # asm 1: mul <mulx3=int64#9
  1021. # asm 2: mul <mulx3=%r11
  1022. mul %r11
  1023. # qhasm: carry? mulr6 += mulrax
  1024. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  1025. # asm 2: add <mulrax=%rax,<mulr6=%r9
  1026. add %rax,%r9
  1027. # qhasm: mulrdx += 0 + carry
  1028. # asm 1: adc $0,<mulrdx=int64#3
  1029. # asm 2: adc $0,<mulrdx=%rdx
  1030. adc $0,%rdx
  1031. # qhasm: carry? mulr6 += mulc
  1032. # asm 1: add <mulc=int64#14,<mulr6=int64#6
  1033. # asm 2: add <mulc=%rbx,<mulr6=%r9
  1034. add %rbx,%r9
  1035. # qhasm: mulr7 += mulrdx + carry
  1036. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  1037. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  1038. adc %rdx,%r10
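# Same multiply-by-38 reduction modulo 2^255-19 as above, folding
# mulr4..mulr7 back into ry0..ry3.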
  1039. # qhasm: mulrax = mulr4
  1040. # asm 1: mov <mulr4=int64#4,>mulrax=int64#7
  1041. # asm 2: mov <mulr4=%rcx,>mulrax=%rax
  1042. mov %rcx,%rax
  1043. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1044. mulq CRYPTO_NAMESPACE(38)(%rip)
  1045. # qhasm: mulr4 = mulrax
  1046. # asm 1: mov <mulrax=int64#7,>mulr4=int64#4
  1047. # asm 2: mov <mulrax=%rax,>mulr4=%rcx
  1048. mov %rax,%rcx
  1049. # qhasm: mulrax = mulr5
  1050. # asm 1: mov <mulr5=int64#5,>mulrax=int64#7
  1051. # asm 2: mov <mulr5=%r8,>mulrax=%rax
  1052. mov %r8,%rax
  1053. # qhasm: mulr5 = mulrdx
  1054. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#5
  1055. # asm 2: mov <mulrdx=%rdx,>mulr5=%r8
  1056. mov %rdx,%r8
  1057. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1058. mulq CRYPTO_NAMESPACE(38)(%rip)
  1059. # qhasm: carry? mulr5 += mulrax
  1060. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  1061. # asm 2: add <mulrax=%rax,<mulr5=%r8
  1062. add %rax,%r8
  1063. # qhasm: mulrax = mulr6
  1064. # asm 1: mov <mulr6=int64#6,>mulrax=int64#7
  1065. # asm 2: mov <mulr6=%r9,>mulrax=%rax
  1066. mov %r9,%rax
  1067. # qhasm: mulr6 = 0
  1068. # asm 1: mov $0,>mulr6=int64#6
  1069. # asm 2: mov $0,>mulr6=%r9
  1070. mov $0,%r9
  1071. # qhasm: mulr6 += mulrdx + carry
  1072. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  1073. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  1074. adc %rdx,%r9
  1075. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1076. mulq CRYPTO_NAMESPACE(38)(%rip)
  1077. # qhasm: carry? mulr6 += mulrax
  1078. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  1079. # asm 2: add <mulrax=%rax,<mulr6=%r9
  1080. add %rax,%r9
  1081. # qhasm: mulrax = mulr7
  1082. # asm 1: mov <mulr7=int64#8,>mulrax=int64#7
  1083. # asm 2: mov <mulr7=%r10,>mulrax=%rax
  1084. mov %r10,%rax
  1085. # qhasm: mulr7 = 0
  1086. # asm 1: mov $0,>mulr7=int64#8
  1087. # asm 2: mov $0,>mulr7=%r10
  1088. mov $0,%r10
  1089. # qhasm: mulr7 += mulrdx + carry
  1090. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  1091. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  1092. adc %rdx,%r10
  1093. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1094. mulq CRYPTO_NAMESPACE(38)(%rip)
  1095. # qhasm: carry? mulr7 += mulrax
  1096. # asm 1: add <mulrax=int64#7,<mulr7=int64#8
  1097. # asm 2: add <mulrax=%rax,<mulr7=%r10
  1098. add %rax,%r10
  1099. # qhasm: mulr8 = 0
  1100. # asm 1: mov $0,>mulr8=int64#7
  1101. # asm 2: mov $0,>mulr8=%rax
  1102. mov $0,%rax
  1103. # qhasm: mulr8 += mulrdx + carry
  1104. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  1105. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  1106. adc %rdx,%rax
  1107. # qhasm: carry? ry0 += mulr4
  1108. # asm 1: add <mulr4=int64#4,<ry0=int64#10
  1109. # asm 2: add <mulr4=%rcx,<ry0=%r12
  1110. add %rcx,%r12
  1111. # qhasm: carry? ry1 += mulr5 + carry
  1112. # asm 1: adc <mulr5=int64#5,<ry1=int64#11
  1113. # asm 2: adc <mulr5=%r8,<ry1=%r13
  1114. adc %r8,%r13
  1115. # qhasm: carry? ry2 += mulr6 + carry
  1116. # asm 1: adc <mulr6=int64#6,<ry2=int64#12
  1117. # asm 2: adc <mulr6=%r9,<ry2=%r14
  1118. adc %r9,%r14
  1119. # qhasm: carry? ry3 += mulr7 + carry
  1120. # asm 1: adc <mulr7=int64#8,<ry3=int64#13
  1121. # asm 2: adc <mulr7=%r10,<ry3=%r15
  1122. adc %r10,%r15
  1123. # qhasm: mulzero = 0
  1124. # asm 1: mov $0,>mulzero=int64#3
  1125. # asm 2: mov $0,>mulzero=%rdx
  1126. mov $0,%rdx
  1127. # qhasm: mulr8 += mulzero + carry
  1128. # asm 1: adc <mulzero=int64#3,<mulr8=int64#7
  1129. # asm 2: adc <mulzero=%rdx,<mulr8=%rax
  1130. adc %rdx,%rax
  1131. # qhasm: mulr8 *= 38
  1132. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4
  1133. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx
  1134. imulq $38,%rax,%rcx
  1135. # qhasm: carry? ry0 += mulr8
  1136. # asm 1: add <mulr8=int64#4,<ry0=int64#10
  1137. # asm 2: add <mulr8=%rcx,<ry0=%r12
  1138. add %rcx,%r12
  1139. # qhasm: carry? ry1 += mulzero + carry
  1140. # asm 1: adc <mulzero=int64#3,<ry1=int64#11
  1141. # asm 2: adc <mulzero=%rdx,<ry1=%r13
  1142. adc %rdx,%r13
  1143. # qhasm: carry? ry2 += mulzero + carry
  1144. # asm 1: adc <mulzero=int64#3,<ry2=int64#12
  1145. # asm 2: adc <mulzero=%rdx,<ry2=%r14
  1146. adc %rdx,%r14
  1147. # qhasm: carry? ry3 += mulzero + carry
  1148. # asm 1: adc <mulzero=int64#3,<ry3=int64#13
  1149. # asm 2: adc <mulzero=%rdx,<ry3=%r15
  1150. adc %rdx,%r15
  1151. # qhasm: mulzero += mulzero + carry
  1152. # asm 1: adc <mulzero=int64#3,<mulzero=int64#3
  1153. # asm 2: adc <mulzero=%rdx,<mulzero=%rdx
  1154. adc %rdx,%rdx
  1155. # qhasm: mulzero *= 38
  1156. # asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
  1157. # asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
  1158. imulq $38,%rdx,%rdx
  1159. # qhasm: ry0 += mulzero
  1160. # asm 1: add <mulzero=int64#3,<ry0=int64#10
  1161. # asm 2: add <mulzero=%rdx,<ry0=%r12
  1162. add %rdx,%r12
  1163. # qhasm: *(uint64 *)(rp + 32) = ry0
  1164. # asm 1: movq <ry0=int64#10,32(<rp=int64#1)
  1165. # asm 2: movq <ry0=%r12,32(<rp=%rdi)
  1166. movq %r12,32(%rdi)
  1167. # qhasm: *(uint64 *)(rp + 40) = ry1
  1168. # asm 1: movq <ry1=int64#11,40(<rp=int64#1)
  1169. # asm 2: movq <ry1=%r13,40(<rp=%rdi)
  1170. movq %r13,40(%rdi)
  1171. # qhasm: *(uint64 *)(rp + 48) = ry2
  1172. # asm 1: movq <ry2=int64#12,48(<rp=int64#1)
  1173. # asm 2: movq <ry2=%r14,48(<rp=%rdi)
  1174. movq %r14,48(%rdi)
  1175. # qhasm: *(uint64 *)(rp + 56) = ry3
  1176. # asm 1: movq <ry3=int64#13,56(<rp=int64#1)
  1177. # asm 2: movq <ry3=%r15,56(<rp=%rdi)
  1178. movq %r15,56(%rdi)
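# Third multiplication (Z*T, by the offsets used): the field element at
# pp+32..56 times the one at pp+96..120, accumulated in rz0..rz3 and
# mulr4..mulr7 following the same schoolbook pattern.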
  1179. # qhasm: mulr4 = 0
  1180. # asm 1: mov $0,>mulr4=int64#4
  1181. # asm 2: mov $0,>mulr4=%rcx
  1182. mov $0,%rcx
  1183. # qhasm: mulr5 = 0
  1184. # asm 1: mov $0,>mulr5=int64#5
  1185. # asm 2: mov $0,>mulr5=%r8
  1186. mov $0,%r8
  1187. # qhasm: mulr6 = 0
  1188. # asm 1: mov $0,>mulr6=int64#6
  1189. # asm 2: mov $0,>mulr6=%r9
  1190. mov $0,%r9
  1191. # qhasm: mulr7 = 0
  1192. # asm 1: mov $0,>mulr7=int64#8
  1193. # asm 2: mov $0,>mulr7=%r10
  1194. mov $0,%r10
  1195. # qhasm: mulx0 = *(uint64 *)(pp + 32)
  1196. # asm 1: movq 32(<pp=int64#2),>mulx0=int64#9
  1197. # asm 2: movq 32(<pp=%rsi),>mulx0=%r11
  1198. movq 32(%rsi),%r11
  1199. # qhasm: mulrax = *(uint64 *)(pp + 96)
  1200. # asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
  1201. # asm 2: movq 96(<pp=%rsi),>mulrax=%rax
  1202. movq 96(%rsi),%rax
  1203. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1204. # asm 1: mul <mulx0=int64#9
  1205. # asm 2: mul <mulx0=%r11
  1206. mul %r11
  1207. # qhasm: rz0 = mulrax
  1208. # asm 1: mov <mulrax=int64#7,>rz0=int64#10
  1209. # asm 2: mov <mulrax=%rax,>rz0=%r12
  1210. mov %rax,%r12
  1211. # qhasm: rz1 = mulrdx
  1212. # asm 1: mov <mulrdx=int64#3,>rz1=int64#11
  1213. # asm 2: mov <mulrdx=%rdx,>rz1=%r13
  1214. mov %rdx,%r13
  1215. # qhasm: mulrax = *(uint64 *)(pp + 104)
  1216. # asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
  1217. # asm 2: movq 104(<pp=%rsi),>mulrax=%rax
  1218. movq 104(%rsi),%rax
  1219. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1220. # asm 1: mul <mulx0=int64#9
  1221. # asm 2: mul <mulx0=%r11
  1222. mul %r11
  1223. # qhasm: carry? rz1 += mulrax
  1224. # asm 1: add <mulrax=int64#7,<rz1=int64#11
  1225. # asm 2: add <mulrax=%rax,<rz1=%r13
  1226. add %rax,%r13
  1227. # qhasm: rz2 = 0
  1228. # asm 1: mov $0,>rz2=int64#12
  1229. # asm 2: mov $0,>rz2=%r14
  1230. mov $0,%r14
  1231. # qhasm: rz2 += mulrdx + carry
  1232. # asm 1: adc <mulrdx=int64#3,<rz2=int64#12
  1233. # asm 2: adc <mulrdx=%rdx,<rz2=%r14
  1234. adc %rdx,%r14
  1235. # qhasm: mulrax = *(uint64 *)(pp + 112)
  1236. # asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
  1237. # asm 2: movq 112(<pp=%rsi),>mulrax=%rax
  1238. movq 112(%rsi),%rax
  1239. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1240. # asm 1: mul <mulx0=int64#9
  1241. # asm 2: mul <mulx0=%r11
  1242. mul %r11
  1243. # qhasm: carry? rz2 += mulrax
  1244. # asm 1: add <mulrax=int64#7,<rz2=int64#12
  1245. # asm 2: add <mulrax=%rax,<rz2=%r14
  1246. add %rax,%r14
  1247. # qhasm: rz3 = 0
  1248. # asm 1: mov $0,>rz3=int64#13
  1249. # asm 2: mov $0,>rz3=%r15
  1250. mov $0,%r15
  1251. # qhasm: rz3 += mulrdx + carry
  1252. # asm 1: adc <mulrdx=int64#3,<rz3=int64#13
  1253. # asm 2: adc <mulrdx=%rdx,<rz3=%r15
  1254. adc %rdx,%r15
  1255. # qhasm: mulrax = *(uint64 *)(pp + 120)
  1256. # asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
  1257. # asm 2: movq 120(<pp=%rsi),>mulrax=%rax
  1258. movq 120(%rsi),%rax
  1259. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1260. # asm 1: mul <mulx0=int64#9
  1261. # asm 2: mul <mulx0=%r11
  1262. mul %r11
  1263. # qhasm: carry? rz3 += mulrax
  1264. # asm 1: add <mulrax=int64#7,<rz3=int64#13
  1265. # asm 2: add <mulrax=%rax,<rz3=%r15
  1266. add %rax,%r15
  1267. # qhasm: mulr4 += mulrdx + carry
  1268. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
  1269. # asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
  1270. adc %rdx,%rcx
# qhasm: mulx1 = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulx1=int64#9
# asm 2: movq 40(<pp=%rsi),>mulx1=%r11
movq 40(%rsi),%r11

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
movq 96(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11

# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#11
# asm 2: add <mulrax=%rax,<rz1=%r13
add %rax,%r13

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11

# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#12
# asm 2: add <mulrax=%rax,<rz2=%r14
add %rax,%r14

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? rz2 += mulc
# asm 1: add <mulc=int64#14,<rz2=int64#12
# asm 2: add <mulc=%rbx,<rz2=%r14
add %rbx,%r14

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 112(<pp=%rsi),>mulrax=%rax
movq 112(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11

# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#13
# asm 2: add <mulrax=%rax,<rz3=%r15
add %rax,%r15

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? rz3 += mulc
# asm 1: add <mulc=int64#14,<rz3=int64#13
# asm 2: add <mulc=%rbx,<rz3=%r15
add %rbx,%r15

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 120)
# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 120(<pp=%rsi),>mulrax=%rax
movq 120(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11

# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#4
# asm 2: add <mulrax=%rax,<mulr4=%rcx
add %rax,%rcx

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#14,<mulr4=int64#4
# asm 2: add <mulc=%rbx,<mulr4=%rcx
add %rbx,%rcx

# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8
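# note (annotation): third row: the limb at pp+48 (mulx2) times the four
# limbs at pp+96..120, accumulated into rz2, rz3, mulr4 and mulr5, with a
# final carry into mulr6.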
# qhasm: mulx2 = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulx2=int64#9
# asm 2: movq 48(<pp=%rsi),>mulx2=%r11
movq 48(%rsi),%r11

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
movq 96(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11

# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#12
# asm 2: add <mulrax=%rax,<rz2=%r14
add %rax,%r14

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11

# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#13
# asm 2: add <mulrax=%rax,<rz3=%r15
add %rax,%r15

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? rz3 += mulc
# asm 1: add <mulc=int64#14,<rz3=int64#13
# asm 2: add <mulc=%rbx,<rz3=%r15
add %rbx,%r15

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 112(<pp=%rsi),>mulrax=%rax
movq 112(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11

# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#4
# asm 2: add <mulrax=%rax,<mulr4=%rcx
add %rax,%rcx

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#14,<mulr4=int64#4
# asm 2: add <mulc=%rbx,<mulr4=%rcx
add %rbx,%rcx

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 120)
# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 120(<pp=%rsi),>mulrax=%rax
movq 120(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11

# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
# asm 2: add <mulrax=%rax,<mulr5=%r8
add %rax,%r8

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#14,<mulr5=int64#5
# asm 2: add <mulc=%rbx,<mulr5=%r8
add %rbx,%r8

# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9
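# note (annotation): fourth and last row: the limb at pp+56 (mulx3) times
# the four limbs at pp+96..120, accumulated into rz3 and mulr4..mulr7.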
# qhasm: mulx3 = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulx3=int64#9
# asm 2: movq 56(<pp=%rsi),>mulx3=%r11
movq 56(%rsi),%r11

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
movq 96(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11

# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#13
# asm 2: add <mulrax=%rax,<rz3=%r15
add %rax,%r15

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11

# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#4
# asm 2: add <mulrax=%rax,<mulr4=%rcx
add %rax,%rcx

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#14,<mulr4=int64#4
# asm 2: add <mulc=%rbx,<mulr4=%rcx
add %rbx,%rcx

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 112(<pp=%rsi),>mulrax=%rax
movq 112(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11

# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
# asm 2: add <mulrax=%rax,<mulr5=%r8
add %rax,%r8

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#14,<mulr5=int64#5
# asm 2: add <mulc=%rbx,<mulr5=%r8
add %rbx,%r8

# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx

# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 120)
# asm 1: movq 120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 120(<pp=%rsi),>mulrax=%rax
movq 120(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11

# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
# asm 2: add <mulrax=%rax,<mulr6=%r9
add %rax,%r9

# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#14,<mulr6=int64#6
# asm 2: add <mulc=%rbx,<mulr6=%r9
add %rbx,%r9

# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10
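# note (annotation): the full 512-bit product now sits in rz0..rz3 (low
# half) and mulr4..mulr7 (high half). Since 2^256 = 38 (mod 2^255-19),
# the block below multiplies each high limb by the 64-bit constant
# CRYPTO_NAMESPACE(38); the folded limbs are added back into rz0..rz3
# afterwards.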
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov <mulr4=%rcx,>mulrax=%rax
mov %rcx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)

# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
# asm 2: mov <mulrax=%rax,>mulr4=%rsi
mov %rax,%rsi

# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov <mulr5=%r8,>mulrax=%rax
mov %r8,%rax

# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
mov %rdx,%rcx

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)

# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
# asm 2: add <mulrax=%rax,<mulr5=%rcx
add %rax,%rcx

# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov <mulr6=%r9,>mulrax=%rax
mov %r9,%rax

# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#5
# asm 2: mov $0,>mulr6=%r8
mov $0,%r8

# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)

# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
# asm 2: add <mulrax=%rax,<mulr6=%r8
add %rax,%r8

# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov <mulr7=%r10,>mulrax=%rax
mov %r10,%rax

# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#6
# asm 2: mov $0,>mulr7=%r9
mov $0,%r9

# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)

# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
# asm 2: add <mulrax=%rax,<mulr7=%r9
add %rax,%r9

# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax

# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
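# note (annotation): add the 38-folded high limbs into the low half; any
# carry out of rz3 is absorbed into mulr8 just below.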
# qhasm: carry? rz0 += mulr4
# asm 1: add <mulr4=int64#2,<rz0=int64#10
# asm 2: add <mulr4=%rsi,<rz0=%r12
add %rsi,%r12

# qhasm: carry? rz1 += mulr5 + carry
# asm 1: adc <mulr5=int64#4,<rz1=int64#11
# asm 2: adc <mulr5=%rcx,<rz1=%r13
adc %rcx,%r13

# qhasm: carry? rz2 += mulr6 + carry
# asm 1: adc <mulr6=int64#5,<rz2=int64#12
# asm 2: adc <mulr6=%r8,<rz2=%r14
adc %r8,%r14

# qhasm: carry? rz3 += mulr7 + carry
# asm 1: adc <mulr7=int64#6,<rz3=int64#13
# asm 2: adc <mulr7=%r9,<rz3=%r15
adc %r9,%r15
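# note (annotation): fold the remaining carry: mulr8 (the top carry word)
# is multiplied by 38 and added to rz0 with carries rippling through rz3;
# a possible final carry is caught in mulzero, again scaled by 38 and
# added to rz0. The result fits in four 64-bit limbs and is congruent to
# the product mod 2^255-19, though not necessarily fully reduced below
# the prime.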
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#2
# asm 2: mov $0,>mulzero=%rsi
mov $0,%rsi

# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
# asm 2: adc <mulzero=%rsi,<mulr8=%rax
adc %rsi,%rax

# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
imulq $38,%rax,%rdx

# qhasm: carry? rz0 += mulr8
# asm 1: add <mulr8=int64#3,<rz0=int64#10
# asm 2: add <mulr8=%rdx,<rz0=%r12
add %rdx,%r12

# qhasm: carry? rz1 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rz1=int64#11
# asm 2: adc <mulzero=%rsi,<rz1=%r13
adc %rsi,%r13

# qhasm: carry? rz2 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rz2=int64#12
# asm 2: adc <mulzero=%rsi,<rz2=%r14
adc %rsi,%r14

# qhasm: carry? rz3 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rz3=int64#13
# asm 2: adc <mulzero=%rsi,<rz3=%r15
adc %rsi,%r15

# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
adc %rsi,%rsi

# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
imulq $38,%rsi,%rsi

# qhasm: rz0 += mulzero
# asm 1: add <mulzero=int64#2,<rz0=int64#10
# asm 2: add <mulzero=%rsi,<rz0=%r12
add %rsi,%r12
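# note (annotation): store the reduced z limbs of the output point at
# rp+64..rp+88.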
# qhasm: *(uint64 *)(rp + 64) = rz0
# asm 1: movq <rz0=int64#10,64(<rp=int64#1)
# asm 2: movq <rz0=%r12,64(<rp=%rdi)
movq %r12,64(%rdi)

# qhasm: *(uint64 *)(rp + 72) = rz1
# asm 1: movq <rz1=int64#11,72(<rp=int64#1)
# asm 2: movq <rz1=%r13,72(<rp=%rdi)
movq %r13,72(%rdi)

# qhasm: *(uint64 *)(rp + 80) = rz2
# asm 1: movq <rz2=int64#12,80(<rp=int64#1)
# asm 2: movq <rz2=%r14,80(<rp=%rdi)
movq %r14,80(%rdi)

# qhasm: *(uint64 *)(rp + 88) = rz3
# asm 1: movq <rz3=int64#13,88(<rp=int64#1)
# asm 2: movq <rz3=%r15,88(<rp=%rdi)
movq %r15,88(%rdi)
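# note (annotation): epilogue: reload the registers that were spilled in
# the prologue (r12..r15, rbx, rbp). In the usual qhasm prologue for
# these files, the slot restored into %r11 holds the stack-frame
# adjustment, which the leave sequence below uses to rewind %rsp.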
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
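# note (annotation): the qhasm "leave" sequence: adding %r11 back to %rsp
# is assumed to undo the prologue's frame adjustment, and the moves into
# %rax/%rdx are qhasm's standard return boilerplate (the return values
# are unused here).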