ge25519_nielsadd2.S

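# ge25519_nielsadd2: r := r + q for an extended point r = (X:Y:Z:T) and a
# precomputed point q, assumed to be stored in Niels form (ysubx = y-x,
# xaddy = y+x, t2d = 2*d*x*y) as laid out by the companion ge25519 code.
# Field elements are four 64-bit limbs; reduction mod 2^255-19 relies on the
# identity 2^256 == 38 (mod 2^255-19). The "# qhasm:" and "# asm" lines are
# annotations emitted by the qhasm translator; the summary comments added at
# the phase boundaries below are descriptive only.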
# qhasm: int64 rp
# qhasm: int64 qp
# qhasm: input rp
# qhasm: input qp
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: int64 a0
# qhasm: int64 a1
# qhasm: int64 a2
# qhasm: int64 a3
# qhasm: stack64 a0_stack
# qhasm: stack64 a1_stack
# qhasm: stack64 a2_stack
# qhasm: stack64 a3_stack
# qhasm: int64 b0
# qhasm: int64 b1
# qhasm: int64 b2
# qhasm: int64 b3
# qhasm: stack64 b0_stack
# qhasm: stack64 b1_stack
# qhasm: stack64 b2_stack
# qhasm: stack64 b3_stack
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: stack64 c0_stack
# qhasm: stack64 c1_stack
# qhasm: stack64 c2_stack
# qhasm: stack64 c3_stack
# qhasm: int64 d0
# qhasm: int64 d1
# qhasm: int64 d2
# qhasm: int64 d3
# qhasm: stack64 d0_stack
# qhasm: stack64 d1_stack
# qhasm: stack64 d2_stack
# qhasm: stack64 d3_stack
# qhasm: int64 e0
# qhasm: int64 e1
# qhasm: int64 e2
# qhasm: int64 e3
# qhasm: stack64 e0_stack
# qhasm: stack64 e1_stack
# qhasm: stack64 e2_stack
# qhasm: stack64 e3_stack
# qhasm: int64 f0
# qhasm: int64 f1
# qhasm: int64 f2
# qhasm: int64 f3
# qhasm: stack64 f0_stack
# qhasm: stack64 f1_stack
# qhasm: stack64 f2_stack
# qhasm: stack64 f3_stack
# qhasm: int64 g0
# qhasm: int64 g1
# qhasm: int64 g2
# qhasm: int64 g3
# qhasm: stack64 g0_stack
# qhasm: stack64 g1_stack
# qhasm: stack64 g2_stack
# qhasm: stack64 g3_stack
# qhasm: int64 h0
# qhasm: int64 h1
# qhasm: int64 h2
# qhasm: int64 h3
# qhasm: stack64 h0_stack
# qhasm: stack64 h1_stack
# qhasm: stack64 h2_stack
# qhasm: stack64 h3_stack
# qhasm: int64 qt0
# qhasm: int64 qt1
# qhasm: int64 qt2
# qhasm: int64 qt3
# qhasm: stack64 qt0_stack
# qhasm: stack64 qt1_stack
# qhasm: stack64 qt2_stack
# qhasm: stack64 qt3_stack
# qhasm: int64 t10
# qhasm: int64 t11
# qhasm: int64 t12
# qhasm: int64 t13
# qhasm: stack64 t10_stack
# qhasm: stack64 t11_stack
# qhasm: stack64 t12_stack
# qhasm: stack64 t13_stack
# qhasm: int64 t20
# qhasm: int64 t21
# qhasm: int64 t22
# qhasm: int64 t23
# qhasm: stack64 t20_stack
# qhasm: stack64 t21_stack
# qhasm: stack64 t22_stack
# qhasm: stack64 t23_stack
# qhasm: int64 rx0
# qhasm: int64 rx1
# qhasm: int64 rx2
# qhasm: int64 rx3
# qhasm: int64 ry0
# qhasm: int64 ry1
# qhasm: int64 ry2
# qhasm: int64 ry3
# qhasm: int64 rz0
# qhasm: int64 rz1
# qhasm: int64 rz2
# qhasm: int64 rz3
# qhasm: int64 rt0
# qhasm: int64 rt1
# qhasm: int64 rt2
# qhasm: int64 rt3
# qhasm: int64 mulr4
# qhasm: int64 mulr5
# qhasm: int64 mulr6
# qhasm: int64 mulr7
# qhasm: int64 mulr8
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mulx0
# qhasm: int64 mulx1
# qhasm: int64 mulx2
# qhasm: int64 mulx3
# qhasm: int64 mulc
# qhasm: int64 mulzero
# qhasm: int64 muli38
# qhasm: int64 addt0
# qhasm: int64 addt1
# qhasm: int64 subt0
# qhasm: int64 subt1
# qhasm: enter CRYPTO_NAMESPACE(ge25519_nielsadd2)
.text
.p2align 5
.globl _CRYPTO_NAMESPACE(ge25519_nielsadd2)
.globl CRYPTO_NAMESPACE(ge25519_nielsadd2)
_CRYPTO_NAMESPACE(ge25519_nielsadd2):
CRYPTO_NAMESPACE(ge25519_nielsadd2):
mov %rsp,%r11
and $31,%r11
add $192,%r11
sub %r11,%rsp
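# Prologue: reserve at least 192 bytes of scratch space for the stack64 slots
# while leaving %rsp 32-byte aligned; %r11 keeps the adjustment so the original
# stack pointer can be restored on exit.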
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
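# The callee-saved registers %r12-%r15, %rbx and %rbp are spilled to the first
# scratch slots; the slot at 0(%rsp) holds the frame adjustment kept in %r11.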
# qhasm: a0 = *(uint64 *)(rp + 32)
# asm 1: movq 32(<rp=int64#1),>a0=int64#3
# asm 2: movq 32(<rp=%rdi),>a0=%rdx
movq 32(%rdi),%rdx
# qhasm: a1 = *(uint64 *)(rp + 40)
# asm 1: movq 40(<rp=int64#1),>a1=int64#4
# asm 2: movq 40(<rp=%rdi),>a1=%rcx
movq 40(%rdi),%rcx
# qhasm: a2 = *(uint64 *)(rp + 48)
# asm 1: movq 48(<rp=int64#1),>a2=int64#5
# asm 2: movq 48(<rp=%rdi),>a2=%r8
movq 48(%rdi),%r8
# qhasm: a3 = *(uint64 *)(rp + 56)
# asm 1: movq 56(<rp=int64#1),>a3=int64#6
# asm 2: movq 56(<rp=%rdi),>a3=%r9
movq 56(%rdi),%r9
# qhasm: b0 = a0
# asm 1: mov <a0=int64#3,>b0=int64#7
# asm 2: mov <a0=%rdx,>b0=%rax
mov %rdx,%rax
# qhasm: b1 = a1
# asm 1: mov <a1=int64#4,>b1=int64#8
# asm 2: mov <a1=%rcx,>b1=%r10
mov %rcx,%r10
# qhasm: b2 = a2
# asm 1: mov <a2=int64#5,>b2=int64#9
# asm 2: mov <a2=%r8,>b2=%r11
mov %r8,%r11
# qhasm: b3 = a3
# asm 1: mov <a3=int64#6,>b3=int64#10
# asm 2: mov <a3=%r9,>b3=%r12
mov %r9,%r12
# qhasm: carry? a0 -= *(uint64 *) (rp + 0)
# asm 1: subq 0(<rp=int64#1),<a0=int64#3
# asm 2: subq 0(<rp=%rdi),<a0=%rdx
subq 0(%rdi),%rdx
# qhasm: carry? a1 -= *(uint64 *) (rp + 8) - carry
# asm 1: sbbq 8(<rp=int64#1),<a1=int64#4
# asm 2: sbbq 8(<rp=%rdi),<a1=%rcx
sbbq 8(%rdi),%rcx
# qhasm: carry? a2 -= *(uint64 *) (rp + 16) - carry
# asm 1: sbbq 16(<rp=int64#1),<a2=int64#5
# asm 2: sbbq 16(<rp=%rdi),<a2=%r8
sbbq 16(%rdi),%r8
# qhasm: carry? a3 -= *(uint64 *) (rp + 24) - carry
# asm 1: sbbq 24(<rp=int64#1),<a3=int64#6
# asm 2: sbbq 24(<rp=%rdi),<a3=%r9
sbbq 24(%rdi),%r9
# qhasm: subt0 = 0
# asm 1: mov $0,>subt0=int64#11
# asm 2: mov $0,>subt0=%r13
mov $0,%r13
# qhasm: subt1 = 38
# asm 1: mov $38,>subt1=int64#12
# asm 2: mov $38,>subt1=%r14
mov $38,%r14
# qhasm: subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#11,<subt1=int64#12
# asm 2: cmovae <subt0=%r13,<subt1=%r14
cmovae %r13,%r14
# qhasm: carry? a0 -= subt1
# asm 1: sub <subt1=int64#12,<a0=int64#3
# asm 2: sub <subt1=%r14,<a0=%rdx
sub %r14,%rdx
# qhasm: carry? a1 -= subt0 - carry
# asm 1: sbb <subt0=int64#11,<a1=int64#4
# asm 2: sbb <subt0=%r13,<a1=%rcx
sbb %r13,%rcx
# qhasm: carry? a2 -= subt0 - carry
# asm 1: sbb <subt0=int64#11,<a2=int64#5
# asm 2: sbb <subt0=%r13,<a2=%r8
sbb %r13,%r8
# qhasm: carry? a3 -= subt0 - carry
# asm 1: sbb <subt0=int64#11,<a3=int64#6
# asm 2: sbb <subt0=%r13,<a3=%r9
sbb %r13,%r9
# qhasm: subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#12,<subt0=int64#11
# asm 2: cmovc <subt1=%r14,<subt0=%r13
cmovc %r14,%r13
# qhasm: a0 -= subt0
# asm 1: sub <subt0=int64#11,<a0=int64#3
# asm 2: sub <subt0=%r13,<a0=%rdx
sub %r13,%rdx
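# a = y(r) - x(r): a borrow out of the top limb is folded back by subtracting
# 38 (equivalently adding 2^256 - 38, a multiple of 2^255 - 19); a second
# conditional subtract handles the rare borrow produced by that correction.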
# qhasm: carry? b0 += *(uint64 *) (rp + 0)
# asm 1: addq 0(<rp=int64#1),<b0=int64#7
# asm 2: addq 0(<rp=%rdi),<b0=%rax
addq 0(%rdi),%rax
# qhasm: carry? b1 += *(uint64 *) (rp + 8) + carry
# asm 1: adcq 8(<rp=int64#1),<b1=int64#8
# asm 2: adcq 8(<rp=%rdi),<b1=%r10
adcq 8(%rdi),%r10
# qhasm: carry? b2 += *(uint64 *) (rp + 16) + carry
# asm 1: adcq 16(<rp=int64#1),<b2=int64#9
# asm 2: adcq 16(<rp=%rdi),<b2=%r11
adcq 16(%rdi),%r11
# qhasm: carry? b3 += *(uint64 *) (rp + 24) + carry
# asm 1: adcq 24(<rp=int64#1),<b3=int64#10
# asm 2: adcq 24(<rp=%rdi),<b3=%r12
adcq 24(%rdi),%r12
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#11
# asm 2: mov $0,>addt0=%r13
mov $0,%r13
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#12
# asm 2: mov $38,>addt1=%r14
mov $38,%r14
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#11,<addt1=int64#12
# asm 2: cmovae <addt0=%r13,<addt1=%r14
cmovae %r13,%r14
# qhasm: carry? b0 += addt1
# asm 1: add <addt1=int64#12,<b0=int64#7
# asm 2: add <addt1=%r14,<b0=%rax
add %r14,%rax
# qhasm: carry? b1 += addt0 + carry
# asm 1: adc <addt0=int64#11,<b1=int64#8
# asm 2: adc <addt0=%r13,<b1=%r10
adc %r13,%r10
# qhasm: carry? b2 += addt0 + carry
# asm 1: adc <addt0=int64#11,<b2=int64#9
# asm 2: adc <addt0=%r13,<b2=%r11
adc %r13,%r11
# qhasm: carry? b3 += addt0 + carry
# asm 1: adc <addt0=int64#11,<b3=int64#10
# asm 2: adc <addt0=%r13,<b3=%r12
adc %r13,%r12
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#12,<addt0=int64#11
# asm 2: cmovc <addt1=%r14,<addt0=%r13
cmovc %r14,%r13
# qhasm: b0 += addt0
# asm 1: add <addt0=int64#11,<b0=int64#7
# asm 2: add <addt0=%r13,<b0=%rax
add %r13,%rax
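# b = y(r) + x(r): a carry out of the top limb is folded back by adding 38
# (2^256 == 38 mod 2^255-19), again with a second conditional add in case the
# correction itself carries.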
# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#3,>a0_stack=stack64#8
# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
movq %rdx,56(%rsp)
# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#4,>a1_stack=stack64#9
# asm 2: movq <a1=%rcx,>a1_stack=64(%rsp)
movq %rcx,64(%rsp)
# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#5,>a2_stack=stack64#10
# asm 2: movq <a2=%r8,>a2_stack=72(%rsp)
movq %r8,72(%rsp)
# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#6,>a3_stack=stack64#11
# asm 2: movq <a3=%r9,>a3_stack=80(%rsp)
movq %r9,80(%rsp)
# qhasm: b0_stack = b0
# asm 1: movq <b0=int64#7,>b0_stack=stack64#12
# asm 2: movq <b0=%rax,>b0_stack=88(%rsp)
movq %rax,88(%rsp)
# qhasm: b1_stack = b1
# asm 1: movq <b1=int64#8,>b1_stack=stack64#13
# asm 2: movq <b1=%r10,>b1_stack=96(%rsp)
movq %r10,96(%rsp)
# qhasm: b2_stack = b2
# asm 1: movq <b2=int64#9,>b2_stack=stack64#14
# asm 2: movq <b2=%r11,>b2_stack=104(%rsp)
movq %r11,104(%rsp)
# qhasm: b3_stack = b3
# asm 1: movq <b3=int64#10,>b3_stack=stack64#15
# asm 2: movq <b3=%r12,>b3_stack=112(%rsp)
movq %r12,112(%rsp)
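# a and b are spilled to the scratch slots. Next, a is multiplied by the field
# element at qp+0..24 (the ysubx component under the assumed Niels layout)
# using a 4x4 schoolbook multiplication that builds the 8-limb product in
# a0..a3 and mulr4..mulr7.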
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#4
# asm 2: mov $0,>mulr4=%rcx
mov $0,%rcx
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#5
# asm 2: mov $0,>mulr5=%r8
mov $0,%r8
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#6
# asm 2: mov $0,>mulr6=%r9
mov $0,%r9
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#8
# asm 2: mov $0,>mulr7=%r10
mov $0,%r10
# qhasm: mulx0 = a0_stack
# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#9
# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r11
movq 56(%rsp),%r11
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<qp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: a0 = mulrax
# asm 1: mov <mulrax=int64#7,>a0=int64#10
# asm 2: mov <mulrax=%rax,>a0=%r12
mov %rax,%r12
# qhasm: a1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>a1=int64#11
# asm 2: mov <mulrdx=%rdx,>a1=%r13
mov %rdx,%r13
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<qp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: carry? a1 += mulrax
# asm 1: add <mulrax=int64#7,<a1=int64#11
# asm 2: add <mulrax=%rax,<a1=%r13
add %rax,%r13
# qhasm: a2 = 0
# asm 1: mov $0,>a2=int64#12
# asm 2: mov $0,>a2=%r14
mov $0,%r14
# qhasm: a2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a2=int64#12
# asm 2: adc <mulrdx=%rdx,<a2=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<qp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#12
# asm 2: add <mulrax=%rax,<a2=%r14
add %rax,%r14
# qhasm: a3 = 0
# asm 1: mov $0,>a3=int64#13
# asm 2: mov $0,>a3=%r15
mov $0,%r15
# qhasm: a3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a3=int64#13
# asm 2: adc <mulrdx=%rdx,<a3=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<qp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#13
# asm 2: add <mulrax=%rax,<a3=%r15
add %rax,%r15
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx
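# The remaining three passes (mulx1..mulx3) accumulate the cross products into
# the running limbs, using mulc to carry each column into the next without
# losing the CPU carry flag between the two additions.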
# qhasm: mulx1 = a1_stack
# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#9
# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r11
movq 64(%rsp),%r11
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<qp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? a1 += mulrax
# asm 1: add <mulrax=int64#7,<a1=int64#11
# asm 2: add <mulrax=%rax,<a1=%r13
add %rax,%r13
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<qp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#12
# asm 2: add <mulrax=%rax,<a2=%r14
add %rax,%r14
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a2 += mulc
# asm 1: add <mulc=int64#14,<a2=int64#12
# asm 2: add <mulc=%rbx,<a2=%r14
add %rbx,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<qp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#13
# asm 2: add <mulrax=%rax,<a3=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a3 += mulc
# asm 1: add <mulc=int64#14,<a3=int64#13
# asm 2: add <mulc=%rbx,<a3=%r15
add %rbx,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<qp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#4
# asm 2: add <mulrax=%rax,<mulr4=%rcx
add %rax,%rcx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#14,<mulr4=int64#4
# asm 2: add <mulc=%rbx,<mulr4=%rcx
add %rbx,%rcx
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8
# qhasm: mulx2 = a2_stack
# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#9
# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r11
movq 72(%rsp),%r11
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<qp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11
# qhasm: carry? a2 += mulrax
# asm 1: add <mulrax=int64#7,<a2=int64#12
# asm 2: add <mulrax=%rax,<a2=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<qp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#13
# asm 2: add <mulrax=%rax,<a3=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? a3 += mulc
# asm 1: add <mulc=int64#14,<a3=int64#13
# asm 2: add <mulc=%rbx,<a3=%r15
add %rbx,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<qp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#4
# asm 2: add <mulrax=%rax,<mulr4=%rcx
add %rax,%rcx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#14,<mulr4=int64#4
# asm 2: add <mulc=%rbx,<mulr4=%rcx
add %rbx,%rcx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<qp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#9
# asm 2: mul <mulx2=%r11
mul %r11
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
# asm 2: add <mulrax=%rax,<mulr5=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#14,<mulr5=int64#5
# asm 2: add <mulc=%rbx,<mulr5=%r8
add %rbx,%r8
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9
# qhasm: mulx3 = a3_stack
# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#9
# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r11
movq 80(%rsp),%r11
# qhasm: mulrax = *(uint64 *)(qp + 0)
# asm 1: movq 0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<qp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11
# qhasm: carry? a3 += mulrax
# asm 1: add <mulrax=int64#7,<a3=int64#13
# asm 2: add <mulrax=%rax,<a3=%r15
add %rax,%r15
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 8)
# asm 1: movq 8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<qp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#4
# asm 2: add <mulrax=%rax,<mulr4=%rcx
add %rax,%rcx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#14,<mulr4=int64#4
# asm 2: add <mulc=%rbx,<mulr4=%rcx
add %rbx,%rcx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 16)
# asm 1: movq 16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<qp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
# asm 2: add <mulrax=%rax,<mulr5=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#14,<mulr5=int64#5
# asm 2: add <mulc=%rbx,<mulr5=%r8
add %rbx,%r8
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 24)
# asm 1: movq 24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<qp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#9
# asm 2: mul <mulx3=%r11
mul %r11
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
# asm 2: add <mulrax=%rax,<mulr6=%r9
add %rax,%r9
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#14,<mulr6=int64#6
# asm 2: add <mulc=%rbx,<mulr6=%r9
add %rbx,%r9
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10
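# Reduce the 512-bit product: the high limbs mulr4..mulr7 are each multiplied
# by the constant 38 (2^256 == 38 mod 2^255-19) and added into a0..a3, with
# the overflow collected in mulr8.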
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov <mulr4=%rcx,>mulrax=%rax
mov %rcx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov <mulrax=%rax,>mulr4=%rcx
mov %rax,%rcx
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov <mulr5=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr5=%r8
mov %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#5
# asm 2: add <mulrax=%rax,<mulr5=%r8
add %rax,%r8
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov <mulr6=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#6
# asm 2: mov $0,>mulr6=%r9
mov $0,%r9
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#6
# asm 2: add <mulrax=%rax,<mulr6=%r9
add %rax,%r9
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov <mulr7=%r10,>mulrax=%rax
mov %r10,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#8
# asm 2: mov $0,>mulr7=%r10
mov $0,%r10
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#8
# asm 2: add <mulrax=%rax,<mulr7=%r10
add %rax,%r10
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: carry? a0 += mulr4
# asm 1: add <mulr4=int64#4,<a0=int64#10
# asm 2: add <mulr4=%rcx,<a0=%r12
add %rcx,%r12
# qhasm: carry? a1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<a1=int64#11
# asm 2: adc <mulr5=%r8,<a1=%r13
adc %r8,%r13
# qhasm: carry? a2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<a2=int64#12
# asm 2: adc <mulr6=%r9,<a2=%r14
adc %r9,%r14
# qhasm: carry? a3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<a3=int64#13
# asm 2: adc <mulr7=%r10,<a3=%r15
adc %r10,%r15
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#3
# asm 2: mov $0,>mulzero=%rdx
mov $0,%rdx
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx
imulq $38,%rax,%rcx
# qhasm: carry? a0 += mulr8
# asm 1: add <mulr8=int64#4,<a0=int64#10
# asm 2: add <mulr8=%rcx,<a0=%r12
add %rcx,%r12
# qhasm: carry? a1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a1=int64#11
# asm 2: adc <mulzero=%rdx,<a1=%r13
adc %rdx,%r13
# qhasm: carry? a2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a2=int64#12
# asm 2: adc <mulzero=%rdx,<a2=%r14
adc %rdx,%r14
# qhasm: carry? a3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a3=int64#13
# asm 2: adc <mulzero=%rdx,<a3=%r15
adc %rdx,%r15
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
imulq $38,%rdx,%rdx
# qhasm: a0 += mulzero
# asm 1: add <mulzero=int64#3,<a0=int64#10
# asm 2: add <mulzero=%rdx,<a0=%r12
add %rdx,%r12
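# Any carry out of that addition is multiplied by 38 and folded in as well,
# leaving a partially reduced 4-limb result in a0..a3.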
# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#10,>a0_stack=stack64#8
# asm 2: movq <a0=%r12,>a0_stack=56(%rsp)
movq %r12,56(%rsp)
# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#11,>a1_stack=stack64#9
# asm 2: movq <a1=%r13,>a1_stack=64(%rsp)
movq %r13,64(%rsp)
# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#12,>a2_stack=stack64#10
# asm 2: movq <a2=%r14,>a2_stack=72(%rsp)
movq %r14,72(%rsp)
# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#13,>a3_stack=stack64#11
# asm 2: movq <a3=%r15,>a3_stack=80(%rsp)
movq %r15,80(%rsp)
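# a now holds (y(r) - x(r)) * q.ysubx (under the assumed q layout) and is
# stored back to the scratch slots. The same multiply-and-reduce pattern is
# applied next to b and the field element at qp+32..56 (the assumed xaddy
# component), building the product in e0..e3 and mulr4..mulr7.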
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#4
# asm 2: mov $0,>mulr4=%rcx
mov $0,%rcx
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#5
# asm 2: mov $0,>mulr5=%r8
mov $0,%r8
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#6
# asm 2: mov $0,>mulr6=%r9
mov $0,%r9
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#8
# asm 2: mov $0,>mulr7=%r10
mov $0,%r10
# qhasm: mulx0 = b0_stack
# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#9
# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r11
movq 88(%rsp),%r11
# qhasm: mulrax = *(uint64 *)(qp + 32)
# asm 1: movq 32(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<qp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: e0 = mulrax
# asm 1: mov <mulrax=int64#7,>e0=int64#10
# asm 2: mov <mulrax=%rax,>e0=%r12
mov %rax,%r12
# qhasm: e1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>e1=int64#11
# asm 2: mov <mulrdx=%rdx,>e1=%r13
mov %rdx,%r13
# qhasm: mulrax = *(uint64 *)(qp + 40)
# asm 1: movq 40(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<qp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: carry? e1 += mulrax
# asm 1: add <mulrax=int64#7,<e1=int64#11
# asm 2: add <mulrax=%rax,<e1=%r13
add %rax,%r13
# qhasm: e2 = 0
# asm 1: mov $0,>e2=int64#12
# asm 2: mov $0,>e2=%r14
mov $0,%r14
# qhasm: e2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<e2=int64#12
# asm 2: adc <mulrdx=%rdx,<e2=%r14
adc %rdx,%r14
# qhasm: mulrax = *(uint64 *)(qp + 48)
# asm 1: movq 48(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<qp=%rsi),>mulrax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: carry? e2 += mulrax
# asm 1: add <mulrax=int64#7,<e2=int64#12
# asm 2: add <mulrax=%rax,<e2=%r14
add %rax,%r14
# qhasm: e3 = 0
# asm 1: mov $0,>e3=int64#13
# asm 2: mov $0,>e3=%r15
mov $0,%r15
# qhasm: e3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<e3=int64#13
# asm 2: adc <mulrdx=%rdx,<e3=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(qp + 56)
# asm 1: movq 56(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<qp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#9
# asm 2: mul <mulx0=%r11
mul %r11
# qhasm: carry? e3 += mulrax
# asm 1: add <mulrax=int64#7,<e3=int64#13
# asm 2: add <mulrax=%rax,<e3=%r15
add %rax,%r15
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx
# qhasm: mulx1 = b1_stack
# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#9
# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r11
movq 96(%rsp),%r11
# qhasm: mulrax = *(uint64 *)(qp + 32)
# asm 1: movq 32(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<qp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? e1 += mulrax
# asm 1: add <mulrax=int64#7,<e1=int64#11
# asm 2: add <mulrax=%rax,<e1=%r13
add %rax,%r13
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 40)
# asm 1: movq 40(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<qp=%rsi),>mulrax=%rax
movq 40(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? e2 += mulrax
# asm 1: add <mulrax=int64#7,<e2=int64#12
# asm 2: add <mulrax=%rax,<e2=%r14
add %rax,%r14
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? e2 += mulc
# asm 1: add <mulc=int64#14,<e2=int64#12
# asm 2: add <mulc=%rbx,<e2=%r14
add %rbx,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#14
# asm 2: mov $0,>mulc=%rbx
mov $0,%rbx
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx
# qhasm: mulrax = *(uint64 *)(qp + 48)
# asm 1: movq 48(<qp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<qp=%rsi),>mulrax=%rax
movq 48(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#9
# asm 2: mul <mulx1=%r11
mul %r11
# qhasm: carry? e3 += mulrax
# asm 1: add <mulrax=int64#7,<e3=int64#13
# asm 2: add <mulrax=%rax,<e3=%r15
add %rax,%r15
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? e3 += mulc
# asm 1: add <mulc=int64#14,<e3=int64#13
# asm 2: add <mulc=%rbx,<e3=%r15
  1062. add %rbx,%r15
  1063. # qhasm: mulc = 0
  1064. # asm 1: mov $0,>mulc=int64#14
  1065. # asm 2: mov $0,>mulc=%rbx
  1066. mov $0,%rbx
  1067. # qhasm: mulc += mulrdx + carry
  1068. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1069. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1070. adc %rdx,%rbx
  1071. # qhasm: mulrax = *(uint64 *)(qp + 56)
  1072. # asm 1: movq 56(<qp=int64#2),>mulrax=int64#7
  1073. # asm 2: movq 56(<qp=%rsi),>mulrax=%rax
  1074. movq 56(%rsi),%rax
  1075. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1076. # asm 1: mul <mulx1=int64#9
  1077. # asm 2: mul <mulx1=%r11
  1078. mul %r11
  1079. # qhasm: carry? mulr4 += mulrax
  1080. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  1081. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  1082. add %rax,%rcx
  1083. # qhasm: mulrdx += 0 + carry
  1084. # asm 1: adc $0,<mulrdx=int64#3
  1085. # asm 2: adc $0,<mulrdx=%rdx
  1086. adc $0,%rdx
  1087. # qhasm: carry? mulr4 += mulc
  1088. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  1089. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  1090. add %rbx,%rcx
  1091. # qhasm: mulr5 += mulrdx + carry
  1092. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
  1093. # asm 2: adc <mulrdx=%rdx,<mulr5=%r8
  1094. adc %rdx,%r8
  1095. # qhasm: mulx2 = b2_stack
  1096. # asm 1: movq <b2_stack=stack64#14,>mulx2=int64#9
  1097. # asm 2: movq <b2_stack=104(%rsp),>mulx2=%r11
  1098. movq 104(%rsp),%r11
  1099. # qhasm: mulrax = *(uint64 *)(qp + 32)
  1100. # asm 1: movq 32(<qp=int64#2),>mulrax=int64#7
  1101. # asm 2: movq 32(<qp=%rsi),>mulrax=%rax
  1102. movq 32(%rsi),%rax
  1103. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1104. # asm 1: mul <mulx2=int64#9
  1105. # asm 2: mul <mulx2=%r11
  1106. mul %r11
  1107. # qhasm: carry? e2 += mulrax
  1108. # asm 1: add <mulrax=int64#7,<e2=int64#12
  1109. # asm 2: add <mulrax=%rax,<e2=%r14
  1110. add %rax,%r14
  1111. # qhasm: mulc = 0
  1112. # asm 1: mov $0,>mulc=int64#14
  1113. # asm 2: mov $0,>mulc=%rbx
  1114. mov $0,%rbx
  1115. # qhasm: mulc += mulrdx + carry
  1116. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1117. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1118. adc %rdx,%rbx
  1119. # qhasm: mulrax = *(uint64 *)(qp + 40)
  1120. # asm 1: movq 40(<qp=int64#2),>mulrax=int64#7
  1121. # asm 2: movq 40(<qp=%rsi),>mulrax=%rax
  1122. movq 40(%rsi),%rax
  1123. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1124. # asm 1: mul <mulx2=int64#9
  1125. # asm 2: mul <mulx2=%r11
  1126. mul %r11
  1127. # qhasm: carry? e3 += mulrax
  1128. # asm 1: add <mulrax=int64#7,<e3=int64#13
  1129. # asm 2: add <mulrax=%rax,<e3=%r15
  1130. add %rax,%r15
  1131. # qhasm: mulrdx += 0 + carry
  1132. # asm 1: adc $0,<mulrdx=int64#3
  1133. # asm 2: adc $0,<mulrdx=%rdx
  1134. adc $0,%rdx
  1135. # qhasm: carry? e3 += mulc
  1136. # asm 1: add <mulc=int64#14,<e3=int64#13
  1137. # asm 2: add <mulc=%rbx,<e3=%r15
  1138. add %rbx,%r15
  1139. # qhasm: mulc = 0
  1140. # asm 1: mov $0,>mulc=int64#14
  1141. # asm 2: mov $0,>mulc=%rbx
  1142. mov $0,%rbx
  1143. # qhasm: mulc += mulrdx + carry
  1144. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1145. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1146. adc %rdx,%rbx
  1147. # qhasm: mulrax = *(uint64 *)(qp + 48)
  1148. # asm 1: movq 48(<qp=int64#2),>mulrax=int64#7
  1149. # asm 2: movq 48(<qp=%rsi),>mulrax=%rax
  1150. movq 48(%rsi),%rax
  1151. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1152. # asm 1: mul <mulx2=int64#9
  1153. # asm 2: mul <mulx2=%r11
  1154. mul %r11
  1155. # qhasm: carry? mulr4 += mulrax
  1156. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  1157. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  1158. add %rax,%rcx
  1159. # qhasm: mulrdx += 0 + carry
  1160. # asm 1: adc $0,<mulrdx=int64#3
  1161. # asm 2: adc $0,<mulrdx=%rdx
  1162. adc $0,%rdx
  1163. # qhasm: carry? mulr4 += mulc
  1164. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  1165. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  1166. add %rbx,%rcx
  1167. # qhasm: mulc = 0
  1168. # asm 1: mov $0,>mulc=int64#14
  1169. # asm 2: mov $0,>mulc=%rbx
  1170. mov $0,%rbx
  1171. # qhasm: mulc += mulrdx + carry
  1172. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1173. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1174. adc %rdx,%rbx
  1175. # qhasm: mulrax = *(uint64 *)(qp + 56)
  1176. # asm 1: movq 56(<qp=int64#2),>mulrax=int64#7
  1177. # asm 2: movq 56(<qp=%rsi),>mulrax=%rax
  1178. movq 56(%rsi),%rax
  1179. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1180. # asm 1: mul <mulx2=int64#9
  1181. # asm 2: mul <mulx2=%r11
  1182. mul %r11
  1183. # qhasm: carry? mulr5 += mulrax
  1184. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  1185. # asm 2: add <mulrax=%rax,<mulr5=%r8
  1186. add %rax,%r8
  1187. # qhasm: mulrdx += 0 + carry
  1188. # asm 1: adc $0,<mulrdx=int64#3
  1189. # asm 2: adc $0,<mulrdx=%rdx
  1190. adc $0,%rdx
  1191. # qhasm: carry? mulr5 += mulc
  1192. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  1193. # asm 2: add <mulc=%rbx,<mulr5=%r8
  1194. add %rbx,%r8
  1195. # qhasm: mulr6 += mulrdx + carry
  1196. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  1197. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  1198. adc %rdx,%r9
  1199. # qhasm: mulx3 = b3_stack
  1200. # asm 1: movq <b3_stack=stack64#15,>mulx3=int64#9
  1201. # asm 2: movq <b3_stack=112(%rsp),>mulx3=%r11
  1202. movq 112(%rsp),%r11
  1203. # qhasm: mulrax = *(uint64 *)(qp + 32)
  1204. # asm 1: movq 32(<qp=int64#2),>mulrax=int64#7
  1205. # asm 2: movq 32(<qp=%rsi),>mulrax=%rax
  1206. movq 32(%rsi),%rax
  1207. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1208. # asm 1: mul <mulx3=int64#9
  1209. # asm 2: mul <mulx3=%r11
  1210. mul %r11
  1211. # qhasm: carry? e3 += mulrax
  1212. # asm 1: add <mulrax=int64#7,<e3=int64#13
  1213. # asm 2: add <mulrax=%rax,<e3=%r15
  1214. add %rax,%r15
  1215. # qhasm: mulc = 0
  1216. # asm 1: mov $0,>mulc=int64#14
  1217. # asm 2: mov $0,>mulc=%rbx
  1218. mov $0,%rbx
  1219. # qhasm: mulc += mulrdx + carry
  1220. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1221. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1222. adc %rdx,%rbx
  1223. # qhasm: mulrax = *(uint64 *)(qp + 40)
  1224. # asm 1: movq 40(<qp=int64#2),>mulrax=int64#7
  1225. # asm 2: movq 40(<qp=%rsi),>mulrax=%rax
  1226. movq 40(%rsi),%rax
  1227. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1228. # asm 1: mul <mulx3=int64#9
  1229. # asm 2: mul <mulx3=%r11
  1230. mul %r11
  1231. # qhasm: carry? mulr4 += mulrax
  1232. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  1233. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  1234. add %rax,%rcx
  1235. # qhasm: mulrdx += 0 + carry
  1236. # asm 1: adc $0,<mulrdx=int64#3
  1237. # asm 2: adc $0,<mulrdx=%rdx
  1238. adc $0,%rdx
  1239. # qhasm: carry? mulr4 += mulc
  1240. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  1241. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  1242. add %rbx,%rcx
  1243. # qhasm: mulc = 0
  1244. # asm 1: mov $0,>mulc=int64#14
  1245. # asm 2: mov $0,>mulc=%rbx
  1246. mov $0,%rbx
  1247. # qhasm: mulc += mulrdx + carry
  1248. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1249. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1250. adc %rdx,%rbx
  1251. # qhasm: mulrax = *(uint64 *)(qp + 48)
  1252. # asm 1: movq 48(<qp=int64#2),>mulrax=int64#7
  1253. # asm 2: movq 48(<qp=%rsi),>mulrax=%rax
  1254. movq 48(%rsi),%rax
  1255. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1256. # asm 1: mul <mulx3=int64#9
  1257. # asm 2: mul <mulx3=%r11
  1258. mul %r11
  1259. # qhasm: carry? mulr5 += mulrax
  1260. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  1261. # asm 2: add <mulrax=%rax,<mulr5=%r8
  1262. add %rax,%r8
  1263. # qhasm: mulrdx += 0 + carry
  1264. # asm 1: adc $0,<mulrdx=int64#3
  1265. # asm 2: adc $0,<mulrdx=%rdx
  1266. adc $0,%rdx
  1267. # qhasm: carry? mulr5 += mulc
  1268. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  1269. # asm 2: add <mulc=%rbx,<mulr5=%r8
  1270. add %rbx,%r8
  1271. # qhasm: mulc = 0
  1272. # asm 1: mov $0,>mulc=int64#14
  1273. # asm 2: mov $0,>mulc=%rbx
  1274. mov $0,%rbx
  1275. # qhasm: mulc += mulrdx + carry
  1276. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1277. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1278. adc %rdx,%rbx
  1279. # qhasm: mulrax = *(uint64 *)(qp + 56)
  1280. # asm 1: movq 56(<qp=int64#2),>mulrax=int64#7
  1281. # asm 2: movq 56(<qp=%rsi),>mulrax=%rax
  1282. movq 56(%rsi),%rax
  1283. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1284. # asm 1: mul <mulx3=int64#9
  1285. # asm 2: mul <mulx3=%r11
  1286. mul %r11
  1287. # qhasm: carry? mulr6 += mulrax
  1288. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  1289. # asm 2: add <mulrax=%rax,<mulr6=%r9
  1290. add %rax,%r9
  1291. # qhasm: mulrdx += 0 + carry
  1292. # asm 1: adc $0,<mulrdx=int64#3
  1293. # asm 2: adc $0,<mulrdx=%rdx
  1294. adc $0,%rdx
  1295. # qhasm: carry? mulr6 += mulc
  1296. # asm 1: add <mulc=int64#14,<mulr6=int64#6
  1297. # asm 2: add <mulc=%rbx,<mulr6=%r9
  1298. add %rbx,%r9
  1299. # qhasm: mulr7 += mulrdx + carry
  1300. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  1301. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  1302. adc %rdx,%r10
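# Reduce the 8-limb product modulo 2^255-19: since 2^256 == 38 (mod 2^255-19),
# the high limbs mulr4..mulr7 are multiplied by the 64-bit constant
# CRYPTO_NAMESPACE(38) and added into e0..e3; the carry out is collected in
# mulr8, folded in again as mulr8*38, and a last conditional 38 (mulzero)
# absorbs any remaining carry, leaving the result in the four limbs e0..e3.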
  1303. # qhasm: mulrax = mulr4
  1304. # asm 1: mov <mulr4=int64#4,>mulrax=int64#7
  1305. # asm 2: mov <mulr4=%rcx,>mulrax=%rax
  1306. mov %rcx,%rax
  1307. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1308. mulq CRYPTO_NAMESPACE(38)(%rip)
  1309. # qhasm: mulr4 = mulrax
  1310. # asm 1: mov <mulrax=int64#7,>mulr4=int64#4
  1311. # asm 2: mov <mulrax=%rax,>mulr4=%rcx
  1312. mov %rax,%rcx
  1313. # qhasm: mulrax = mulr5
  1314. # asm 1: mov <mulr5=int64#5,>mulrax=int64#7
  1315. # asm 2: mov <mulr5=%r8,>mulrax=%rax
  1316. mov %r8,%rax
  1317. # qhasm: mulr5 = mulrdx
  1318. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#5
  1319. # asm 2: mov <mulrdx=%rdx,>mulr5=%r8
  1320. mov %rdx,%r8
  1321. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1322. mulq CRYPTO_NAMESPACE(38)(%rip)
  1323. # qhasm: carry? mulr5 += mulrax
  1324. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  1325. # asm 2: add <mulrax=%rax,<mulr5=%r8
  1326. add %rax,%r8
  1327. # qhasm: mulrax = mulr6
  1328. # asm 1: mov <mulr6=int64#6,>mulrax=int64#7
  1329. # asm 2: mov <mulr6=%r9,>mulrax=%rax
  1330. mov %r9,%rax
  1331. # qhasm: mulr6 = 0
  1332. # asm 1: mov $0,>mulr6=int64#6
  1333. # asm 2: mov $0,>mulr6=%r9
  1334. mov $0,%r9
  1335. # qhasm: mulr6 += mulrdx + carry
  1336. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  1337. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  1338. adc %rdx,%r9
  1339. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1340. mulq CRYPTO_NAMESPACE(38)(%rip)
  1341. # qhasm: carry? mulr6 += mulrax
  1342. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  1343. # asm 2: add <mulrax=%rax,<mulr6=%r9
  1344. add %rax,%r9
  1345. # qhasm: mulrax = mulr7
  1346. # asm 1: mov <mulr7=int64#8,>mulrax=int64#7
  1347. # asm 2: mov <mulr7=%r10,>mulrax=%rax
  1348. mov %r10,%rax
  1349. # qhasm: mulr7 = 0
  1350. # asm 1: mov $0,>mulr7=int64#8
  1351. # asm 2: mov $0,>mulr7=%r10
  1352. mov $0,%r10
  1353. # qhasm: mulr7 += mulrdx + carry
  1354. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  1355. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  1356. adc %rdx,%r10
  1357. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1358. mulq CRYPTO_NAMESPACE(38)(%rip)
  1359. # qhasm: carry? mulr7 += mulrax
  1360. # asm 1: add <mulrax=int64#7,<mulr7=int64#8
  1361. # asm 2: add <mulrax=%rax,<mulr7=%r10
  1362. add %rax,%r10
  1363. # qhasm: mulr8 = 0
  1364. # asm 1: mov $0,>mulr8=int64#7
  1365. # asm 2: mov $0,>mulr8=%rax
  1366. mov $0,%rax
  1367. # qhasm: mulr8 += mulrdx + carry
  1368. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  1369. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  1370. adc %rdx,%rax
  1371. # qhasm: carry? e0 += mulr4
  1372. # asm 1: add <mulr4=int64#4,<e0=int64#10
  1373. # asm 2: add <mulr4=%rcx,<e0=%r12
  1374. add %rcx,%r12
  1375. # qhasm: carry? e1 += mulr5 + carry
  1376. # asm 1: adc <mulr5=int64#5,<e1=int64#11
  1377. # asm 2: adc <mulr5=%r8,<e1=%r13
  1378. adc %r8,%r13
  1379. # qhasm: carry? e2 += mulr6 + carry
  1380. # asm 1: adc <mulr6=int64#6,<e2=int64#12
  1381. # asm 2: adc <mulr6=%r9,<e2=%r14
  1382. adc %r9,%r14
  1383. # qhasm: carry? e3 += mulr7 + carry
  1384. # asm 1: adc <mulr7=int64#8,<e3=int64#13
  1385. # asm 2: adc <mulr7=%r10,<e3=%r15
  1386. adc %r10,%r15
  1387. # qhasm: mulzero = 0
  1388. # asm 1: mov $0,>mulzero=int64#3
  1389. # asm 2: mov $0,>mulzero=%rdx
  1390. mov $0,%rdx
  1391. # qhasm: mulr8 += mulzero + carry
  1392. # asm 1: adc <mulzero=int64#3,<mulr8=int64#7
  1393. # asm 2: adc <mulzero=%rdx,<mulr8=%rax
  1394. adc %rdx,%rax
  1395. # qhasm: mulr8 *= 38
  1396. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#4
  1397. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rcx
  1398. imulq $38,%rax,%rcx
  1399. # qhasm: carry? e0 += mulr8
  1400. # asm 1: add <mulr8=int64#4,<e0=int64#10
  1401. # asm 2: add <mulr8=%rcx,<e0=%r12
  1402. add %rcx,%r12
  1403. # qhasm: carry? e1 += mulzero + carry
  1404. # asm 1: adc <mulzero=int64#3,<e1=int64#11
  1405. # asm 2: adc <mulzero=%rdx,<e1=%r13
  1406. adc %rdx,%r13
  1407. # qhasm: carry? e2 += mulzero + carry
  1408. # asm 1: adc <mulzero=int64#3,<e2=int64#12
  1409. # asm 2: adc <mulzero=%rdx,<e2=%r14
  1410. adc %rdx,%r14
  1411. # qhasm: carry? e3 += mulzero + carry
  1412. # asm 1: adc <mulzero=int64#3,<e3=int64#13
  1413. # asm 2: adc <mulzero=%rdx,<e3=%r15
  1414. adc %rdx,%r15
  1415. # qhasm: mulzero += mulzero + carry
  1416. # asm 1: adc <mulzero=int64#3,<mulzero=int64#3
  1417. # asm 2: adc <mulzero=%rdx,<mulzero=%rdx
  1418. adc %rdx,%rdx
  1419. # qhasm: mulzero *= 38
  1420. # asm 1: imulq $38,<mulzero=int64#3,>mulzero=int64#3
  1421. # asm 2: imulq $38,<mulzero=%rdx,>mulzero=%rdx
  1422. imulq $38,%rdx,%rdx
  1423. # qhasm: e0 += mulzero
  1424. # asm 1: add <mulzero=int64#3,<e0=int64#10
  1425. # asm 2: add <mulzero=%rdx,<e0=%r12
  1426. add %rdx,%r12
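# e now holds the reduced product in four limbs.  h0..h3 takes a copy of it so
# that, below, e can become e - a (the value spilled at 56..80(%rsp)) while h
# becomes e + a.  Under the nielsadd2 reading above this would be E = B - A and
# H = B + A; only the limb arithmetic itself is guaranteed by the code.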
  1427. # qhasm: h0 = e0
  1428. # asm 1: mov <e0=int64#10,>h0=int64#3
  1429. # asm 2: mov <e0=%r12,>h0=%rdx
  1430. mov %r12,%rdx
  1431. # qhasm: h1 = e1
  1432. # asm 1: mov <e1=int64#11,>h1=int64#4
  1433. # asm 2: mov <e1=%r13,>h1=%rcx
  1434. mov %r13,%rcx
  1435. # qhasm: h2 = e2
  1436. # asm 1: mov <e2=int64#12,>h2=int64#5
  1437. # asm 2: mov <e2=%r14,>h2=%r8
  1438. mov %r14,%r8
  1439. # qhasm: h3 = e3
  1440. # asm 1: mov <e3=int64#13,>h3=int64#6
  1441. # asm 2: mov <e3=%r15,>h3=%r9
  1442. mov %r15,%r9
  1443. # qhasm: carry? e0 -= a0_stack
  1444. # asm 1: subq <a0_stack=stack64#8,<e0=int64#10
  1445. # asm 2: subq <a0_stack=56(%rsp),<e0=%r12
  1446. subq 56(%rsp),%r12
  1447. # qhasm: carry? e1 -= a1_stack - carry
  1448. # asm 1: sbbq <a1_stack=stack64#9,<e1=int64#11
  1449. # asm 2: sbbq <a1_stack=64(%rsp),<e1=%r13
  1450. sbbq 64(%rsp),%r13
  1451. # qhasm: carry? e2 -= a2_stack - carry
  1452. # asm 1: sbbq <a2_stack=stack64#10,<e2=int64#12
  1453. # asm 2: sbbq <a2_stack=72(%rsp),<e2=%r14
  1454. sbbq 72(%rsp),%r14
  1455. # qhasm: carry? e3 -= a3_stack - carry
  1456. # asm 1: sbbq <a3_stack=stack64#11,<e3=int64#13
  1457. # asm 2: sbbq <a3_stack=80(%rsp),<e3=%r15
  1458. sbbq 80(%rsp),%r15
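# Branch-free fix-up for the subtraction above: if the sbb chain borrowed out
# of the top limb, the register result is 2^256 too large, i.e. 38 too large
# modulo 2^255-19, so 38 is subtracted once more.  cmovae makes subt1 equal 38
# on borrow and 0 otherwise, the subtraction is propagated through all four
# limbs, and a final cmovc/sub pair handles the rare second borrow, all
# without data-dependent branches.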
  1459. # qhasm: subt0 = 0
  1460. # asm 1: mov $0,>subt0=int64#7
  1461. # asm 2: mov $0,>subt0=%rax
  1462. mov $0,%rax
  1463. # qhasm: subt1 = 38
  1464. # asm 1: mov $38,>subt1=int64#8
  1465. # asm 2: mov $38,>subt1=%r10
  1466. mov $38,%r10
  1467. # qhasm: subt1 = subt0 if !carry
  1468. # asm 1: cmovae <subt0=int64#7,<subt1=int64#8
  1469. # asm 2: cmovae <subt0=%rax,<subt1=%r10
  1470. cmovae %rax,%r10
  1471. # qhasm: carry? e0 -= subt1
  1472. # asm 1: sub <subt1=int64#8,<e0=int64#10
  1473. # asm 2: sub <subt1=%r10,<e0=%r12
  1474. sub %r10,%r12
  1475. # qhasm: carry? e1 -= subt0 - carry
  1476. # asm 1: sbb <subt0=int64#7,<e1=int64#11
  1477. # asm 2: sbb <subt0=%rax,<e1=%r13
  1478. sbb %rax,%r13
  1479. # qhasm: carry? e2 -= subt0 - carry
  1480. # asm 1: sbb <subt0=int64#7,<e2=int64#12
  1481. # asm 2: sbb <subt0=%rax,<e2=%r14
  1482. sbb %rax,%r14
  1483. # qhasm: carry? e3 -= subt0 - carry
  1484. # asm 1: sbb <subt0=int64#7,<e3=int64#13
  1485. # asm 2: sbb <subt0=%rax,<e3=%r15
  1486. sbb %rax,%r15
  1487. # qhasm: subt0 = subt1 if carry
  1488. # asm 1: cmovc <subt1=int64#8,<subt0=int64#7
  1489. # asm 2: cmovc <subt1=%r10,<subt0=%rax
  1490. cmovc %r10,%rax
  1491. # qhasm: e0 -= subt0
  1492. # asm 1: sub <subt0=int64#7,<e0=int64#10
  1493. # asm 2: sub <subt0=%rax,<e0=%r12
  1494. sub %rax,%r12
  1495. # qhasm: carry? h0 += a0_stack
  1496. # asm 1: addq <a0_stack=stack64#8,<h0=int64#3
  1497. # asm 2: addq <a0_stack=56(%rsp),<h0=%rdx
  1498. addq 56(%rsp),%rdx
  1499. # qhasm: carry? h1 += a1_stack + carry
  1500. # asm 1: adcq <a1_stack=stack64#9,<h1=int64#4
  1501. # asm 2: adcq <a1_stack=64(%rsp),<h1=%rcx
  1502. adcq 64(%rsp),%rcx
  1503. # qhasm: carry? h2 += a2_stack + carry
  1504. # asm 1: adcq <a2_stack=stack64#10,<h2=int64#5
  1505. # asm 2: adcq <a2_stack=72(%rsp),<h2=%r8
  1506. adcq 72(%rsp),%r8
  1507. # qhasm: carry? h3 += a3_stack + carry
  1508. # asm 1: adcq <a3_stack=stack64#11,<h3=int64#6
  1509. # asm 2: adcq <a3_stack=80(%rsp),<h3=%r9
  1510. adcq 80(%rsp),%r9
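# Same branch-free trick in the other direction for h = e_old + a: a carry out
# of the top limb means the sum wrapped past 2^256, so 38 is added back
# (addt1 is 38 on carry, 0 otherwise), with one more conditional add if that
# correction itself carries.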
  1511. # qhasm: addt0 = 0
  1512. # asm 1: mov $0,>addt0=int64#7
  1513. # asm 2: mov $0,>addt0=%rax
  1514. mov $0,%rax
  1515. # qhasm: addt1 = 38
  1516. # asm 1: mov $38,>addt1=int64#8
  1517. # asm 2: mov $38,>addt1=%r10
  1518. mov $38,%r10
  1519. # qhasm: addt1 = addt0 if !carry
  1520. # asm 1: cmovae <addt0=int64#7,<addt1=int64#8
  1521. # asm 2: cmovae <addt0=%rax,<addt1=%r10
  1522. cmovae %rax,%r10
  1523. # qhasm: carry? h0 += addt1
  1524. # asm 1: add <addt1=int64#8,<h0=int64#3
  1525. # asm 2: add <addt1=%r10,<h0=%rdx
  1526. add %r10,%rdx
  1527. # qhasm: carry? h1 += addt0 + carry
  1528. # asm 1: adc <addt0=int64#7,<h1=int64#4
  1529. # asm 2: adc <addt0=%rax,<h1=%rcx
  1530. adc %rax,%rcx
  1531. # qhasm: carry? h2 += addt0 + carry
  1532. # asm 1: adc <addt0=int64#7,<h2=int64#5
  1533. # asm 2: adc <addt0=%rax,<h2=%r8
  1534. adc %rax,%r8
  1535. # qhasm: carry? h3 += addt0 + carry
  1536. # asm 1: adc <addt0=int64#7,<h3=int64#6
  1537. # asm 2: adc <addt0=%rax,<h3=%r9
  1538. adc %rax,%r9
  1539. # qhasm: addt0 = addt1 if carry
  1540. # asm 1: cmovc <addt1=int64#8,<addt0=int64#7
  1541. # asm 2: cmovc <addt1=%r10,<addt0=%rax
  1542. cmovc %r10,%rax
  1543. # qhasm: h0 += addt0
  1544. # asm 1: add <addt0=int64#7,<h0=int64#3
  1545. # asm 2: add <addt0=%rax,<h0=%rdx
  1546. add %rax,%rdx
  1547. # qhasm: h0_stack = h0
  1548. # asm 1: movq <h0=int64#3,>h0_stack=stack64#8
  1549. # asm 2: movq <h0=%rdx,>h0_stack=56(%rsp)
  1550. movq %rdx,56(%rsp)
  1551. # qhasm: h1_stack = h1
  1552. # asm 1: movq <h1=int64#4,>h1_stack=stack64#9
  1553. # asm 2: movq <h1=%rcx,>h1_stack=64(%rsp)
  1554. movq %rcx,64(%rsp)
  1555. # qhasm: h2_stack = h2
  1556. # asm 1: movq <h2=int64#5,>h2_stack=stack64#10
  1557. # asm 2: movq <h2=%r8,>h2_stack=72(%rsp)
  1558. movq %r8,72(%rsp)
  1559. # qhasm: h3_stack = h3
  1560. # asm 1: movq <h3=int64#6,>h3_stack=stack64#11
  1561. # asm 2: movq <h3=%r9,>h3_stack=80(%rsp)
  1562. movq %r9,80(%rsp)
  1563. # qhasm: e0_stack = e0
  1564. # asm 1: movq <e0=int64#10,>e0_stack=stack64#12
  1565. # asm 2: movq <e0=%r12,>e0_stack=88(%rsp)
  1566. movq %r12,88(%rsp)
  1567. # qhasm: e1_stack = e1
  1568. # asm 1: movq <e1=int64#11,>e1_stack=stack64#13
  1569. # asm 2: movq <e1=%r13,>e1_stack=96(%rsp)
  1570. movq %r13,96(%rsp)
  1571. # qhasm: e2_stack = e2
  1572. # asm 1: movq <e2=int64#12,>e2_stack=stack64#14
  1573. # asm 2: movq <e2=%r14,>e2_stack=104(%rsp)
  1574. movq %r14,104(%rsp)
  1575. # qhasm: e3_stack = e3
  1576. # asm 1: movq <e3=int64#13,>e3_stack=stack64#15
  1577. # asm 2: movq <e3=%r15,>e3_stack=112(%rsp)
  1578. movq %r15,112(%rsp)
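# The difference (e) and the sum (h) are parked at 88..112(%rsp) and
# 56..80(%rsp).  The registers are reused for the next 4x4 product,
# rp[96..120] times qp[64..88], accumulated into c0..c3 and mulr4..mulr7 with
# the same schoolbook/carry-chain pattern as above.  Assuming rp is an
# extended point with T at offset 96 and qp+64 holds the niels point's t2d,
# this computes C = T1*t2d, but that naming is an inference from the offsets.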
  1579. # qhasm: mulr4 = 0
  1580. # asm 1: mov $0,>mulr4=int64#4
  1581. # asm 2: mov $0,>mulr4=%rcx
  1582. mov $0,%rcx
  1583. # qhasm: mulr5 = 0
  1584. # asm 1: mov $0,>mulr5=int64#5
  1585. # asm 2: mov $0,>mulr5=%r8
  1586. mov $0,%r8
  1587. # qhasm: mulr6 = 0
  1588. # asm 1: mov $0,>mulr6=int64#6
  1589. # asm 2: mov $0,>mulr6=%r9
  1590. mov $0,%r9
  1591. # qhasm: mulr7 = 0
  1592. # asm 1: mov $0,>mulr7=int64#8
  1593. # asm 2: mov $0,>mulr7=%r10
  1594. mov $0,%r10
  1595. # qhasm: mulx0 = *(uint64 *)(rp + 96)
  1596. # asm 1: movq 96(<rp=int64#1),>mulx0=int64#9
  1597. # asm 2: movq 96(<rp=%rdi),>mulx0=%r11
  1598. movq 96(%rdi),%r11
  1599. # qhasm: mulrax = *(uint64 *)(qp + 64)
  1600. # asm 1: movq 64(<qp=int64#2),>mulrax=int64#7
  1601. # asm 2: movq 64(<qp=%rsi),>mulrax=%rax
  1602. movq 64(%rsi),%rax
  1603. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1604. # asm 1: mul <mulx0=int64#9
  1605. # asm 2: mul <mulx0=%r11
  1606. mul %r11
  1607. # qhasm: c0 = mulrax
  1608. # asm 1: mov <mulrax=int64#7,>c0=int64#10
  1609. # asm 2: mov <mulrax=%rax,>c0=%r12
  1610. mov %rax,%r12
  1611. # qhasm: c1 = mulrdx
  1612. # asm 1: mov <mulrdx=int64#3,>c1=int64#11
  1613. # asm 2: mov <mulrdx=%rdx,>c1=%r13
  1614. mov %rdx,%r13
  1615. # qhasm: mulrax = *(uint64 *)(qp + 72)
  1616. # asm 1: movq 72(<qp=int64#2),>mulrax=int64#7
  1617. # asm 2: movq 72(<qp=%rsi),>mulrax=%rax
  1618. movq 72(%rsi),%rax
  1619. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1620. # asm 1: mul <mulx0=int64#9
  1621. # asm 2: mul <mulx0=%r11
  1622. mul %r11
  1623. # qhasm: carry? c1 += mulrax
  1624. # asm 1: add <mulrax=int64#7,<c1=int64#11
  1625. # asm 2: add <mulrax=%rax,<c1=%r13
  1626. add %rax,%r13
  1627. # qhasm: c2 = 0
  1628. # asm 1: mov $0,>c2=int64#12
  1629. # asm 2: mov $0,>c2=%r14
  1630. mov $0,%r14
  1631. # qhasm: c2 += mulrdx + carry
  1632. # asm 1: adc <mulrdx=int64#3,<c2=int64#12
  1633. # asm 2: adc <mulrdx=%rdx,<c2=%r14
  1634. adc %rdx,%r14
  1635. # qhasm: mulrax = *(uint64 *)(qp + 80)
  1636. # asm 1: movq 80(<qp=int64#2),>mulrax=int64#7
  1637. # asm 2: movq 80(<qp=%rsi),>mulrax=%rax
  1638. movq 80(%rsi),%rax
  1639. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1640. # asm 1: mul <mulx0=int64#9
  1641. # asm 2: mul <mulx0=%r11
  1642. mul %r11
  1643. # qhasm: carry? c2 += mulrax
  1644. # asm 1: add <mulrax=int64#7,<c2=int64#12
  1645. # asm 2: add <mulrax=%rax,<c2=%r14
  1646. add %rax,%r14
  1647. # qhasm: c3 = 0
  1648. # asm 1: mov $0,>c3=int64#13
  1649. # asm 2: mov $0,>c3=%r15
  1650. mov $0,%r15
  1651. # qhasm: c3 += mulrdx + carry
  1652. # asm 1: adc <mulrdx=int64#3,<c3=int64#13
  1653. # asm 2: adc <mulrdx=%rdx,<c3=%r15
  1654. adc %rdx,%r15
  1655. # qhasm: mulrax = *(uint64 *)(qp + 88)
  1656. # asm 1: movq 88(<qp=int64#2),>mulrax=int64#7
  1657. # asm 2: movq 88(<qp=%rsi),>mulrax=%rax
  1658. movq 88(%rsi),%rax
  1659. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  1660. # asm 1: mul <mulx0=int64#9
  1661. # asm 2: mul <mulx0=%r11
  1662. mul %r11
  1663. # qhasm: carry? c3 += mulrax
  1664. # asm 1: add <mulrax=int64#7,<c3=int64#13
  1665. # asm 2: add <mulrax=%rax,<c3=%r15
  1666. add %rax,%r15
  1667. # qhasm: mulr4 += mulrdx + carry
  1668. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
  1669. # asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
  1670. adc %rdx,%rcx
  1671. # qhasm: mulx1 = *(uint64 *)(rp + 104)
  1672. # asm 1: movq 104(<rp=int64#1),>mulx1=int64#9
  1673. # asm 2: movq 104(<rp=%rdi),>mulx1=%r11
  1674. movq 104(%rdi),%r11
  1675. # qhasm: mulrax = *(uint64 *)(qp + 64)
  1676. # asm 1: movq 64(<qp=int64#2),>mulrax=int64#7
  1677. # asm 2: movq 64(<qp=%rsi),>mulrax=%rax
  1678. movq 64(%rsi),%rax
  1679. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1680. # asm 1: mul <mulx1=int64#9
  1681. # asm 2: mul <mulx1=%r11
  1682. mul %r11
  1683. # qhasm: carry? c1 += mulrax
  1684. # asm 1: add <mulrax=int64#7,<c1=int64#11
  1685. # asm 2: add <mulrax=%rax,<c1=%r13
  1686. add %rax,%r13
  1687. # qhasm: mulc = 0
  1688. # asm 1: mov $0,>mulc=int64#14
  1689. # asm 2: mov $0,>mulc=%rbx
  1690. mov $0,%rbx
  1691. # qhasm: mulc += mulrdx + carry
  1692. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1693. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1694. adc %rdx,%rbx
  1695. # qhasm: mulrax = *(uint64 *)(qp + 72)
  1696. # asm 1: movq 72(<qp=int64#2),>mulrax=int64#7
  1697. # asm 2: movq 72(<qp=%rsi),>mulrax=%rax
  1698. movq 72(%rsi),%rax
  1699. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1700. # asm 1: mul <mulx1=int64#9
  1701. # asm 2: mul <mulx1=%r11
  1702. mul %r11
  1703. # qhasm: carry? c2 += mulrax
  1704. # asm 1: add <mulrax=int64#7,<c2=int64#12
  1705. # asm 2: add <mulrax=%rax,<c2=%r14
  1706. add %rax,%r14
  1707. # qhasm: mulrdx += 0 + carry
  1708. # asm 1: adc $0,<mulrdx=int64#3
  1709. # asm 2: adc $0,<mulrdx=%rdx
  1710. adc $0,%rdx
  1711. # qhasm: carry? c2 += mulc
  1712. # asm 1: add <mulc=int64#14,<c2=int64#12
  1713. # asm 2: add <mulc=%rbx,<c2=%r14
  1714. add %rbx,%r14
  1715. # qhasm: mulc = 0
  1716. # asm 1: mov $0,>mulc=int64#14
  1717. # asm 2: mov $0,>mulc=%rbx
  1718. mov $0,%rbx
  1719. # qhasm: mulc += mulrdx + carry
  1720. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1721. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1722. adc %rdx,%rbx
  1723. # qhasm: mulrax = *(uint64 *)(qp + 80)
  1724. # asm 1: movq 80(<qp=int64#2),>mulrax=int64#7
  1725. # asm 2: movq 80(<qp=%rsi),>mulrax=%rax
  1726. movq 80(%rsi),%rax
  1727. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1728. # asm 1: mul <mulx1=int64#9
  1729. # asm 2: mul <mulx1=%r11
  1730. mul %r11
  1731. # qhasm: carry? c3 += mulrax
  1732. # asm 1: add <mulrax=int64#7,<c3=int64#13
  1733. # asm 2: add <mulrax=%rax,<c3=%r15
  1734. add %rax,%r15
  1735. # qhasm: mulrdx += 0 + carry
  1736. # asm 1: adc $0,<mulrdx=int64#3
  1737. # asm 2: adc $0,<mulrdx=%rdx
  1738. adc $0,%rdx
  1739. # qhasm: carry? c3 += mulc
  1740. # asm 1: add <mulc=int64#14,<c3=int64#13
  1741. # asm 2: add <mulc=%rbx,<c3=%r15
  1742. add %rbx,%r15
  1743. # qhasm: mulc = 0
  1744. # asm 1: mov $0,>mulc=int64#14
  1745. # asm 2: mov $0,>mulc=%rbx
  1746. mov $0,%rbx
  1747. # qhasm: mulc += mulrdx + carry
  1748. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1749. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1750. adc %rdx,%rbx
  1751. # qhasm: mulrax = *(uint64 *)(qp + 88)
  1752. # asm 1: movq 88(<qp=int64#2),>mulrax=int64#7
  1753. # asm 2: movq 88(<qp=%rsi),>mulrax=%rax
  1754. movq 88(%rsi),%rax
  1755. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  1756. # asm 1: mul <mulx1=int64#9
  1757. # asm 2: mul <mulx1=%r11
  1758. mul %r11
  1759. # qhasm: carry? mulr4 += mulrax
  1760. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  1761. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  1762. add %rax,%rcx
  1763. # qhasm: mulrdx += 0 + carry
  1764. # asm 1: adc $0,<mulrdx=int64#3
  1765. # asm 2: adc $0,<mulrdx=%rdx
  1766. adc $0,%rdx
  1767. # qhasm: carry? mulr4 += mulc
  1768. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  1769. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  1770. add %rbx,%rcx
  1771. # qhasm: mulr5 += mulrdx + carry
  1772. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
  1773. # asm 2: adc <mulrdx=%rdx,<mulr5=%r8
  1774. adc %rdx,%r8
  1775. # qhasm: mulx2 = *(uint64 *)(rp + 112)
  1776. # asm 1: movq 112(<rp=int64#1),>mulx2=int64#9
  1777. # asm 2: movq 112(<rp=%rdi),>mulx2=%r11
  1778. movq 112(%rdi),%r11
  1779. # qhasm: mulrax = *(uint64 *)(qp + 64)
  1780. # asm 1: movq 64(<qp=int64#2),>mulrax=int64#7
  1781. # asm 2: movq 64(<qp=%rsi),>mulrax=%rax
  1782. movq 64(%rsi),%rax
  1783. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1784. # asm 1: mul <mulx2=int64#9
  1785. # asm 2: mul <mulx2=%r11
  1786. mul %r11
  1787. # qhasm: carry? c2 += mulrax
  1788. # asm 1: add <mulrax=int64#7,<c2=int64#12
  1789. # asm 2: add <mulrax=%rax,<c2=%r14
  1790. add %rax,%r14
  1791. # qhasm: mulc = 0
  1792. # asm 1: mov $0,>mulc=int64#14
  1793. # asm 2: mov $0,>mulc=%rbx
  1794. mov $0,%rbx
  1795. # qhasm: mulc += mulrdx + carry
  1796. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1797. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1798. adc %rdx,%rbx
  1799. # qhasm: mulrax = *(uint64 *)(qp + 72)
  1800. # asm 1: movq 72(<qp=int64#2),>mulrax=int64#7
  1801. # asm 2: movq 72(<qp=%rsi),>mulrax=%rax
  1802. movq 72(%rsi),%rax
  1803. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1804. # asm 1: mul <mulx2=int64#9
  1805. # asm 2: mul <mulx2=%r11
  1806. mul %r11
  1807. # qhasm: carry? c3 += mulrax
  1808. # asm 1: add <mulrax=int64#7,<c3=int64#13
  1809. # asm 2: add <mulrax=%rax,<c3=%r15
  1810. add %rax,%r15
  1811. # qhasm: mulrdx += 0 + carry
  1812. # asm 1: adc $0,<mulrdx=int64#3
  1813. # asm 2: adc $0,<mulrdx=%rdx
  1814. adc $0,%rdx
  1815. # qhasm: carry? c3 += mulc
  1816. # asm 1: add <mulc=int64#14,<c3=int64#13
  1817. # asm 2: add <mulc=%rbx,<c3=%r15
  1818. add %rbx,%r15
  1819. # qhasm: mulc = 0
  1820. # asm 1: mov $0,>mulc=int64#14
  1821. # asm 2: mov $0,>mulc=%rbx
  1822. mov $0,%rbx
  1823. # qhasm: mulc += mulrdx + carry
  1824. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1825. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1826. adc %rdx,%rbx
  1827. # qhasm: mulrax = *(uint64 *)(qp + 80)
  1828. # asm 1: movq 80(<qp=int64#2),>mulrax=int64#7
  1829. # asm 2: movq 80(<qp=%rsi),>mulrax=%rax
  1830. movq 80(%rsi),%rax
  1831. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1832. # asm 1: mul <mulx2=int64#9
  1833. # asm 2: mul <mulx2=%r11
  1834. mul %r11
  1835. # qhasm: carry? mulr4 += mulrax
  1836. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  1837. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  1838. add %rax,%rcx
  1839. # qhasm: mulrdx += 0 + carry
  1840. # asm 1: adc $0,<mulrdx=int64#3
  1841. # asm 2: adc $0,<mulrdx=%rdx
  1842. adc $0,%rdx
  1843. # qhasm: carry? mulr4 += mulc
  1844. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  1845. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  1846. add %rbx,%rcx
  1847. # qhasm: mulc = 0
  1848. # asm 1: mov $0,>mulc=int64#14
  1849. # asm 2: mov $0,>mulc=%rbx
  1850. mov $0,%rbx
  1851. # qhasm: mulc += mulrdx + carry
  1852. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1853. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1854. adc %rdx,%rbx
  1855. # qhasm: mulrax = *(uint64 *)(qp + 88)
  1856. # asm 1: movq 88(<qp=int64#2),>mulrax=int64#7
  1857. # asm 2: movq 88(<qp=%rsi),>mulrax=%rax
  1858. movq 88(%rsi),%rax
  1859. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  1860. # asm 1: mul <mulx2=int64#9
  1861. # asm 2: mul <mulx2=%r11
  1862. mul %r11
  1863. # qhasm: carry? mulr5 += mulrax
  1864. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  1865. # asm 2: add <mulrax=%rax,<mulr5=%r8
  1866. add %rax,%r8
  1867. # qhasm: mulrdx += 0 + carry
  1868. # asm 1: adc $0,<mulrdx=int64#3
  1869. # asm 2: adc $0,<mulrdx=%rdx
  1870. adc $0,%rdx
  1871. # qhasm: carry? mulr5 += mulc
  1872. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  1873. # asm 2: add <mulc=%rbx,<mulr5=%r8
  1874. add %rbx,%r8
  1875. # qhasm: mulr6 += mulrdx + carry
  1876. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
  1877. # asm 2: adc <mulrdx=%rdx,<mulr6=%r9
  1878. adc %rdx,%r9
  1879. # qhasm: mulx3 = *(uint64 *)(rp + 120)
  1880. # asm 1: movq 120(<rp=int64#1),>mulx3=int64#9
  1881. # asm 2: movq 120(<rp=%rdi),>mulx3=%r11
  1882. movq 120(%rdi),%r11
  1883. # qhasm: mulrax = *(uint64 *)(qp + 64)
  1884. # asm 1: movq 64(<qp=int64#2),>mulrax=int64#7
  1885. # asm 2: movq 64(<qp=%rsi),>mulrax=%rax
  1886. movq 64(%rsi),%rax
  1887. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1888. # asm 1: mul <mulx3=int64#9
  1889. # asm 2: mul <mulx3=%r11
  1890. mul %r11
  1891. # qhasm: carry? c3 += mulrax
  1892. # asm 1: add <mulrax=int64#7,<c3=int64#13
  1893. # asm 2: add <mulrax=%rax,<c3=%r15
  1894. add %rax,%r15
  1895. # qhasm: mulc = 0
  1896. # asm 1: mov $0,>mulc=int64#14
  1897. # asm 2: mov $0,>mulc=%rbx
  1898. mov $0,%rbx
  1899. # qhasm: mulc += mulrdx + carry
  1900. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1901. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1902. adc %rdx,%rbx
  1903. # qhasm: mulrax = *(uint64 *)(qp + 72)
  1904. # asm 1: movq 72(<qp=int64#2),>mulrax=int64#7
  1905. # asm 2: movq 72(<qp=%rsi),>mulrax=%rax
  1906. movq 72(%rsi),%rax
  1907. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1908. # asm 1: mul <mulx3=int64#9
  1909. # asm 2: mul <mulx3=%r11
  1910. mul %r11
  1911. # qhasm: carry? mulr4 += mulrax
  1912. # asm 1: add <mulrax=int64#7,<mulr4=int64#4
  1913. # asm 2: add <mulrax=%rax,<mulr4=%rcx
  1914. add %rax,%rcx
  1915. # qhasm: mulrdx += 0 + carry
  1916. # asm 1: adc $0,<mulrdx=int64#3
  1917. # asm 2: adc $0,<mulrdx=%rdx
  1918. adc $0,%rdx
  1919. # qhasm: carry? mulr4 += mulc
  1920. # asm 1: add <mulc=int64#14,<mulr4=int64#4
  1921. # asm 2: add <mulc=%rbx,<mulr4=%rcx
  1922. add %rbx,%rcx
  1923. # qhasm: mulc = 0
  1924. # asm 1: mov $0,>mulc=int64#14
  1925. # asm 2: mov $0,>mulc=%rbx
  1926. mov $0,%rbx
  1927. # qhasm: mulc += mulrdx + carry
  1928. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1929. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1930. adc %rdx,%rbx
  1931. # qhasm: mulrax = *(uint64 *)(qp + 80)
  1932. # asm 1: movq 80(<qp=int64#2),>mulrax=int64#7
  1933. # asm 2: movq 80(<qp=%rsi),>mulrax=%rax
  1934. movq 80(%rsi),%rax
  1935. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1936. # asm 1: mul <mulx3=int64#9
  1937. # asm 2: mul <mulx3=%r11
  1938. mul %r11
  1939. # qhasm: carry? mulr5 += mulrax
  1940. # asm 1: add <mulrax=int64#7,<mulr5=int64#5
  1941. # asm 2: add <mulrax=%rax,<mulr5=%r8
  1942. add %rax,%r8
  1943. # qhasm: mulrdx += 0 + carry
  1944. # asm 1: adc $0,<mulrdx=int64#3
  1945. # asm 2: adc $0,<mulrdx=%rdx
  1946. adc $0,%rdx
  1947. # qhasm: carry? mulr5 += mulc
  1948. # asm 1: add <mulc=int64#14,<mulr5=int64#5
  1949. # asm 2: add <mulc=%rbx,<mulr5=%r8
  1950. add %rbx,%r8
  1951. # qhasm: mulc = 0
  1952. # asm 1: mov $0,>mulc=int64#14
  1953. # asm 2: mov $0,>mulc=%rbx
  1954. mov $0,%rbx
  1955. # qhasm: mulc += mulrdx + carry
  1956. # asm 1: adc <mulrdx=int64#3,<mulc=int64#14
  1957. # asm 2: adc <mulrdx=%rdx,<mulc=%rbx
  1958. adc %rdx,%rbx
  1959. # qhasm: mulrax = *(uint64 *)(qp + 88)
  1960. # asm 1: movq 88(<qp=int64#2),>mulrax=int64#7
  1961. # asm 2: movq 88(<qp=%rsi),>mulrax=%rax
  1962. movq 88(%rsi),%rax
  1963. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  1964. # asm 1: mul <mulx3=int64#9
  1965. # asm 2: mul <mulx3=%r11
  1966. mul %r11
  1967. # qhasm: carry? mulr6 += mulrax
  1968. # asm 1: add <mulrax=int64#7,<mulr6=int64#6
  1969. # asm 2: add <mulrax=%rax,<mulr6=%r9
  1970. add %rax,%r9
  1971. # qhasm: mulrdx += 0 + carry
  1972. # asm 1: adc $0,<mulrdx=int64#3
  1973. # asm 2: adc $0,<mulrdx=%rdx
  1974. adc $0,%rdx
  1975. # qhasm: carry? mulr6 += mulc
  1976. # asm 1: add <mulc=int64#14,<mulr6=int64#6
  1977. # asm 2: add <mulc=%rbx,<mulr6=%r9
  1978. add %rbx,%r9
  1979. # qhasm: mulr7 += mulrdx + carry
  1980. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
  1981. # asm 2: adc <mulrdx=%rdx,<mulr7=%r10
  1982. adc %rdx,%r10
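# Fold the high half of this product back in exactly as before: mulr4..mulr7
# are multiplied by 38 and added into c0..c3, and the residual carry is folded
# twice more, leaving c0..c3 as the reduced four-limb result.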
  1983. # qhasm: mulrax = mulr4
  1984. # asm 1: mov <mulr4=int64#4,>mulrax=int64#7
  1985. # asm 2: mov <mulr4=%rcx,>mulrax=%rax
  1986. mov %rcx,%rax
  1987. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  1988. mulq CRYPTO_NAMESPACE(38)(%rip)
  1989. # qhasm: mulr4 = mulrax
  1990. # asm 1: mov <mulrax=int64#7,>mulr4=int64#2
  1991. # asm 2: mov <mulrax=%rax,>mulr4=%rsi
  1992. mov %rax,%rsi
  1993. # qhasm: mulrax = mulr5
  1994. # asm 1: mov <mulr5=int64#5,>mulrax=int64#7
  1995. # asm 2: mov <mulr5=%r8,>mulrax=%rax
  1996. mov %r8,%rax
  1997. # qhasm: mulr5 = mulrdx
  1998. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
  1999. # asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
  2000. mov %rdx,%rcx
  2001. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2002. mulq CRYPTO_NAMESPACE(38)(%rip)
  2003. # qhasm: carry? mulr5 += mulrax
  2004. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  2005. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  2006. add %rax,%rcx
  2007. # qhasm: mulrax = mulr6
  2008. # asm 1: mov <mulr6=int64#6,>mulrax=int64#7
  2009. # asm 2: mov <mulr6=%r9,>mulrax=%rax
  2010. mov %r9,%rax
  2011. # qhasm: mulr6 = 0
  2012. # asm 1: mov $0,>mulr6=int64#5
  2013. # asm 2: mov $0,>mulr6=%r8
  2014. mov $0,%r8
  2015. # qhasm: mulr6 += mulrdx + carry
  2016. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  2017. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  2018. adc %rdx,%r8
  2019. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2020. mulq CRYPTO_NAMESPACE(38)(%rip)
  2021. # qhasm: carry? mulr6 += mulrax
  2022. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  2023. # asm 2: add <mulrax=%rax,<mulr6=%r8
  2024. add %rax,%r8
  2025. # qhasm: mulrax = mulr7
  2026. # asm 1: mov <mulr7=int64#8,>mulrax=int64#7
  2027. # asm 2: mov <mulr7=%r10,>mulrax=%rax
  2028. mov %r10,%rax
  2029. # qhasm: mulr7 = 0
  2030. # asm 1: mov $0,>mulr7=int64#6
  2031. # asm 2: mov $0,>mulr7=%r9
  2032. mov $0,%r9
  2033. # qhasm: mulr7 += mulrdx + carry
  2034. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  2035. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  2036. adc %rdx,%r9
  2037. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2038. mulq CRYPTO_NAMESPACE(38)(%rip)
  2039. # qhasm: carry? mulr7 += mulrax
  2040. # asm 1: add <mulrax=int64#7,<mulr7=int64#6
  2041. # asm 2: add <mulrax=%rax,<mulr7=%r9
  2042. add %rax,%r9
  2043. # qhasm: mulr8 = 0
  2044. # asm 1: mov $0,>mulr8=int64#7
  2045. # asm 2: mov $0,>mulr8=%rax
  2046. mov $0,%rax
  2047. # qhasm: mulr8 += mulrdx + carry
  2048. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  2049. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  2050. adc %rdx,%rax
  2051. # qhasm: carry? c0 += mulr4
  2052. # asm 1: add <mulr4=int64#2,<c0=int64#10
  2053. # asm 2: add <mulr4=%rsi,<c0=%r12
  2054. add %rsi,%r12
  2055. # qhasm: carry? c1 += mulr5 + carry
  2056. # asm 1: adc <mulr5=int64#4,<c1=int64#11
  2057. # asm 2: adc <mulr5=%rcx,<c1=%r13
  2058. adc %rcx,%r13
  2059. # qhasm: carry? c2 += mulr6 + carry
  2060. # asm 1: adc <mulr6=int64#5,<c2=int64#12
  2061. # asm 2: adc <mulr6=%r8,<c2=%r14
  2062. adc %r8,%r14
  2063. # qhasm: carry? c3 += mulr7 + carry
  2064. # asm 1: adc <mulr7=int64#6,<c3=int64#13
  2065. # asm 2: adc <mulr7=%r9,<c3=%r15
  2066. adc %r9,%r15
  2067. # qhasm: mulzero = 0
  2068. # asm 1: mov $0,>mulzero=int64#2
  2069. # asm 2: mov $0,>mulzero=%rsi
  2070. mov $0,%rsi
  2071. # qhasm: mulr8 += mulzero + carry
  2072. # asm 1: adc <mulzero=int64#2,<mulr8=int64#7
  2073. # asm 2: adc <mulzero=%rsi,<mulr8=%rax
  2074. adc %rsi,%rax
  2075. # qhasm: mulr8 *= 38
  2076. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
  2077. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
  2078. imulq $38,%rax,%rdx
  2079. # qhasm: carry? c0 += mulr8
  2080. # asm 1: add <mulr8=int64#3,<c0=int64#10
  2081. # asm 2: add <mulr8=%rdx,<c0=%r12
  2082. add %rdx,%r12
  2083. # qhasm: carry? c1 += mulzero + carry
  2084. # asm 1: adc <mulzero=int64#2,<c1=int64#11
  2085. # asm 2: adc <mulzero=%rsi,<c1=%r13
  2086. adc %rsi,%r13
  2087. # qhasm: carry? c2 += mulzero + carry
  2088. # asm 1: adc <mulzero=int64#2,<c2=int64#12
  2089. # asm 2: adc <mulzero=%rsi,<c2=%r14
  2090. adc %rsi,%r14
  2091. # qhasm: carry? c3 += mulzero + carry
  2092. # asm 1: adc <mulzero=int64#2,<c3=int64#13
  2093. # asm 2: adc <mulzero=%rsi,<c3=%r15
  2094. adc %rsi,%r15
  2095. # qhasm: mulzero += mulzero + carry
  2096. # asm 1: adc <mulzero=int64#2,<mulzero=int64#2
  2097. # asm 2: adc <mulzero=%rsi,<mulzero=%rsi
  2098. adc %rsi,%rsi
  2099. # qhasm: mulzero *= 38
  2100. # asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
  2101. # asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
  2102. imulq $38,%rsi,%rsi
  2103. # qhasm: c0 += mulzero
  2104. # asm 1: add <mulzero=int64#2,<c0=int64#10
  2105. # asm 2: add <mulzero=%rsi,<c0=%r12
  2106. add %rsi,%r12
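# Load the four limbs at rp+64..rp+88 (presumably Z1) into f0..f3 and double
# them in place with an add/adc chain, compensating a carry out of the top
# limb by conditionally adding 38.  The doubled value is then combined with c
# both ways (g = 2*Z + c and f = 2*Z - c), each with its own branch-free
# +/-38 fix-up, before g and f are spilled to the stack.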
  2107. # qhasm: f0 = *(uint64 *)(rp + 64)
  2108. # asm 1: movq 64(<rp=int64#1),>f0=int64#2
  2109. # asm 2: movq 64(<rp=%rdi),>f0=%rsi
  2110. movq 64(%rdi),%rsi
  2111. # qhasm: f1 = *(uint64 *)(rp + 72)
  2112. # asm 1: movq 72(<rp=int64#1),>f1=int64#3
  2113. # asm 2: movq 72(<rp=%rdi),>f1=%rdx
  2114. movq 72(%rdi),%rdx
  2115. # qhasm: f2 = *(uint64 *)(rp + 80)
  2116. # asm 1: movq 80(<rp=int64#1),>f2=int64#4
  2117. # asm 2: movq 80(<rp=%rdi),>f2=%rcx
  2118. movq 80(%rdi),%rcx
  2119. # qhasm: f3 = *(uint64 *)(rp + 88)
  2120. # asm 1: movq 88(<rp=int64#1),>f3=int64#5
  2121. # asm 2: movq 88(<rp=%rdi),>f3=%r8
  2122. movq 88(%rdi),%r8
  2123. # qhasm: carry? f0 += f0
  2124. # asm 1: add <f0=int64#2,<f0=int64#2
  2125. # asm 2: add <f0=%rsi,<f0=%rsi
  2126. add %rsi,%rsi
  2127. # qhasm: carry? f1 += f1 + carry
  2128. # asm 1: adc <f1=int64#3,<f1=int64#3
  2129. # asm 2: adc <f1=%rdx,<f1=%rdx
  2130. adc %rdx,%rdx
  2131. # qhasm: carry? f2 += f2 + carry
  2132. # asm 1: adc <f2=int64#4,<f2=int64#4
  2133. # asm 2: adc <f2=%rcx,<f2=%rcx
  2134. adc %rcx,%rcx
  2135. # qhasm: carry? f3 += f3 + carry
  2136. # asm 1: adc <f3=int64#5,<f3=int64#5
  2137. # asm 2: adc <f3=%r8,<f3=%r8
  2138. adc %r8,%r8
  2139. # qhasm: addt0 = 0
  2140. # asm 1: mov $0,>addt0=int64#6
  2141. # asm 2: mov $0,>addt0=%r9
  2142. mov $0,%r9
  2143. # qhasm: addt1 = 38
  2144. # asm 1: mov $38,>addt1=int64#7
  2145. # asm 2: mov $38,>addt1=%rax
  2146. mov $38,%rax
  2147. # qhasm: addt1 = addt0 if !carry
  2148. # asm 1: cmovae <addt0=int64#6,<addt1=int64#7
  2149. # asm 2: cmovae <addt0=%r9,<addt1=%rax
  2150. cmovae %r9,%rax
  2151. # qhasm: carry? f0 += addt1
  2152. # asm 1: add <addt1=int64#7,<f0=int64#2
  2153. # asm 2: add <addt1=%rax,<f0=%rsi
  2154. add %rax,%rsi
  2155. # qhasm: carry? f1 += addt0 + carry
  2156. # asm 1: adc <addt0=int64#6,<f1=int64#3
  2157. # asm 2: adc <addt0=%r9,<f1=%rdx
  2158. adc %r9,%rdx
  2159. # qhasm: carry? f2 += addt0 + carry
  2160. # asm 1: adc <addt0=int64#6,<f2=int64#4
  2161. # asm 2: adc <addt0=%r9,<f2=%rcx
  2162. adc %r9,%rcx
  2163. # qhasm: carry? f3 += addt0 + carry
  2164. # asm 1: adc <addt0=int64#6,<f3=int64#5
  2165. # asm 2: adc <addt0=%r9,<f3=%r8
  2166. adc %r9,%r8
  2167. # qhasm: addt0 = addt1 if carry
  2168. # asm 1: cmovc <addt1=int64#7,<addt0=int64#6
  2169. # asm 2: cmovc <addt1=%rax,<addt0=%r9
  2170. cmovc %rax,%r9
  2171. # qhasm: f0 += addt0
  2172. # asm 1: add <addt0=int64#6,<f0=int64#2
  2173. # asm 2: add <addt0=%r9,<f0=%rsi
  2174. add %r9,%rsi
  2175. # qhasm: g0 = f0
  2176. # asm 1: mov <f0=int64#2,>g0=int64#6
  2177. # asm 2: mov <f0=%rsi,>g0=%r9
  2178. mov %rsi,%r9
  2179. # qhasm: g1 = f1
  2180. # asm 1: mov <f1=int64#3,>g1=int64#7
  2181. # asm 2: mov <f1=%rdx,>g1=%rax
  2182. mov %rdx,%rax
  2183. # qhasm: g2 = f2
  2184. # asm 1: mov <f2=int64#4,>g2=int64#8
  2185. # asm 2: mov <f2=%rcx,>g2=%r10
  2186. mov %rcx,%r10
  2187. # qhasm: g3 = f3
  2188. # asm 1: mov <f3=int64#5,>g3=int64#9
  2189. # asm 2: mov <f3=%r8,>g3=%r11
  2190. mov %r8,%r11
  2191. # qhasm: carry? f0 -= c0
  2192. # asm 1: sub <c0=int64#10,<f0=int64#2
  2193. # asm 2: sub <c0=%r12,<f0=%rsi
  2194. sub %r12,%rsi
  2195. # qhasm: carry? f1 -= c1 - carry
  2196. # asm 1: sbb <c1=int64#11,<f1=int64#3
  2197. # asm 2: sbb <c1=%r13,<f1=%rdx
  2198. sbb %r13,%rdx
  2199. # qhasm: carry? f2 -= c2 - carry
  2200. # asm 1: sbb <c2=int64#12,<f2=int64#4
  2201. # asm 2: sbb <c2=%r14,<f2=%rcx
  2202. sbb %r14,%rcx
  2203. # qhasm: carry? f3 -= c3 - carry
  2204. # asm 1: sbb <c3=int64#13,<f3=int64#5
  2205. # asm 2: sbb <c3=%r15,<f3=%r8
  2206. sbb %r15,%r8
  2207. # qhasm: subt0 = 0
  2208. # asm 1: mov $0,>subt0=int64#14
  2209. # asm 2: mov $0,>subt0=%rbx
  2210. mov $0,%rbx
  2211. # qhasm: subt1 = 38
  2212. # asm 1: mov $38,>subt1=int64#15
  2213. # asm 2: mov $38,>subt1=%rbp
  2214. mov $38,%rbp
  2215. # qhasm: subt1 = subt0 if !carry
  2216. # asm 1: cmovae <subt0=int64#14,<subt1=int64#15
  2217. # asm 2: cmovae <subt0=%rbx,<subt1=%rbp
  2218. cmovae %rbx,%rbp
  2219. # qhasm: carry? f0 -= subt1
  2220. # asm 1: sub <subt1=int64#15,<f0=int64#2
  2221. # asm 2: sub <subt1=%rbp,<f0=%rsi
  2222. sub %rbp,%rsi
  2223. # qhasm: carry? f1 -= subt0 - carry
  2224. # asm 1: sbb <subt0=int64#14,<f1=int64#3
  2225. # asm 2: sbb <subt0=%rbx,<f1=%rdx
  2226. sbb %rbx,%rdx
  2227. # qhasm: carry? f2 -= subt0 - carry
  2228. # asm 1: sbb <subt0=int64#14,<f2=int64#4
  2229. # asm 2: sbb <subt0=%rbx,<f2=%rcx
  2230. sbb %rbx,%rcx
  2231. # qhasm: carry? f3 -= subt0 - carry
  2232. # asm 1: sbb <subt0=int64#14,<f3=int64#5
  2233. # asm 2: sbb <subt0=%rbx,<f3=%r8
  2234. sbb %rbx,%r8
  2235. # qhasm: subt0 = subt1 if carry
  2236. # asm 1: cmovc <subt1=int64#15,<subt0=int64#14
  2237. # asm 2: cmovc <subt1=%rbp,<subt0=%rbx
  2238. cmovc %rbp,%rbx
  2239. # qhasm: f0 -= subt0
  2240. # asm 1: sub <subt0=int64#14,<f0=int64#2
  2241. # asm 2: sub <subt0=%rbx,<f0=%rsi
  2242. sub %rbx,%rsi
  2243. # qhasm: carry? g0 += c0
  2244. # asm 1: add <c0=int64#10,<g0=int64#6
  2245. # asm 2: add <c0=%r12,<g0=%r9
  2246. add %r12,%r9
  2247. # qhasm: carry? g1 += c1 + carry
  2248. # asm 1: adc <c1=int64#11,<g1=int64#7
  2249. # asm 2: adc <c1=%r13,<g1=%rax
  2250. adc %r13,%rax
  2251. # qhasm: carry? g2 += c2 + carry
  2252. # asm 1: adc <c2=int64#12,<g2=int64#8
  2253. # asm 2: adc <c2=%r14,<g2=%r10
  2254. adc %r14,%r10
  2255. # qhasm: carry? g3 += c3 + carry
  2256. # asm 1: adc <c3=int64#13,<g3=int64#9
  2257. # asm 2: adc <c3=%r15,<g3=%r11
  2258. adc %r15,%r11
  2259. # qhasm: addt0 = 0
  2260. # asm 1: mov $0,>addt0=int64#10
  2261. # asm 2: mov $0,>addt0=%r12
  2262. mov $0,%r12
  2263. # qhasm: addt1 = 38
  2264. # asm 1: mov $38,>addt1=int64#11
  2265. # asm 2: mov $38,>addt1=%r13
  2266. mov $38,%r13
  2267. # qhasm: addt1 = addt0 if !carry
  2268. # asm 1: cmovae <addt0=int64#10,<addt1=int64#11
  2269. # asm 2: cmovae <addt0=%r12,<addt1=%r13
  2270. cmovae %r12,%r13
  2271. # qhasm: carry? g0 += addt1
  2272. # asm 1: add <addt1=int64#11,<g0=int64#6
  2273. # asm 2: add <addt1=%r13,<g0=%r9
  2274. add %r13,%r9
  2275. # qhasm: carry? g1 += addt0 + carry
  2276. # asm 1: adc <addt0=int64#10,<g1=int64#7
  2277. # asm 2: adc <addt0=%r12,<g1=%rax
  2278. adc %r12,%rax
  2279. # qhasm: carry? g2 += addt0 + carry
  2280. # asm 1: adc <addt0=int64#10,<g2=int64#8
  2281. # asm 2: adc <addt0=%r12,<g2=%r10
  2282. adc %r12,%r10
  2283. # qhasm: carry? g3 += addt0 + carry
  2284. # asm 1: adc <addt0=int64#10,<g3=int64#9
  2285. # asm 2: adc <addt0=%r12,<g3=%r11
  2286. adc %r12,%r11
  2287. # qhasm: addt0 = addt1 if carry
  2288. # asm 1: cmovc <addt1=int64#11,<addt0=int64#10
  2289. # asm 2: cmovc <addt1=%r13,<addt0=%r12
  2290. cmovc %r13,%r12
  2291. # qhasm: g0 += addt0
  2292. # asm 1: add <addt0=int64#10,<g0=int64#6
  2293. # asm 2: add <addt0=%r12,<g0=%r9
  2294. add %r12,%r9
  2295. # qhasm: g0_stack = g0
  2296. # asm 1: movq <g0=int64#6,>g0_stack=stack64#16
  2297. # asm 2: movq <g0=%r9,>g0_stack=120(%rsp)
  2298. movq %r9,120(%rsp)
  2299. # qhasm: g1_stack = g1
  2300. # asm 1: movq <g1=int64#7,>g1_stack=stack64#17
  2301. # asm 2: movq <g1=%rax,>g1_stack=128(%rsp)
  2302. movq %rax,128(%rsp)
  2303. # qhasm: g2_stack = g2
  2304. # asm 1: movq <g2=int64#8,>g2_stack=stack64#18
  2305. # asm 2: movq <g2=%r10,>g2_stack=136(%rsp)
  2306. movq %r10,136(%rsp)
  2307. # qhasm: g3_stack = g3
  2308. # asm 1: movq <g3=int64#9,>g3_stack=stack64#19
  2309. # asm 2: movq <g3=%r11,>g3_stack=144(%rsp)
  2310. movq %r11,144(%rsp)
  2311. # qhasm: f0_stack = f0
  2312. # asm 1: movq <f0=int64#2,>f0_stack=stack64#20
  2313. # asm 2: movq <f0=%rsi,>f0_stack=152(%rsp)
  2314. movq %rsi,152(%rsp)
  2315. # qhasm: f1_stack = f1
  2316. # asm 1: movq <f1=int64#3,>f1_stack=stack64#21
  2317. # asm 2: movq <f1=%rdx,>f1_stack=160(%rsp)
  2318. movq %rdx,160(%rsp)
  2319. # qhasm: f2_stack = f2
  2320. # asm 1: movq <f2=int64#4,>f2_stack=stack64#22
  2321. # asm 2: movq <f2=%rcx,>f2_stack=168(%rsp)
  2322. movq %rcx,168(%rsp)
  2323. # qhasm: f3_stack = f3
  2324. # asm 1: movq <f3=int64#5,>f3_stack=stack64#23
  2325. # asm 2: movq <f3=%r8,>f3_stack=176(%rsp)
  2326. movq %r8,176(%rsp)
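# Last multiplication started in this section: mulx0..mulx3 are loaded from
# e at 88..112(%rsp) and multiplied limb by limb against f at 152..176(%rsp),
# accumulating into rx0..rx3 and mulr4..mulr7.  In the usual completed
# coordinate formulas this would be the new X coordinate, X3 = E*F, but that
# label is inferred; the code below only performs the schoolbook multiply.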
  2327. # qhasm: mulr4 = 0
  2328. # asm 1: mov $0,>mulr4=int64#2
  2329. # asm 2: mov $0,>mulr4=%rsi
  2330. mov $0,%rsi
  2331. # qhasm: mulr5 = 0
  2332. # asm 1: mov $0,>mulr5=int64#4
  2333. # asm 2: mov $0,>mulr5=%rcx
  2334. mov $0,%rcx
  2335. # qhasm: mulr6 = 0
  2336. # asm 1: mov $0,>mulr6=int64#5
  2337. # asm 2: mov $0,>mulr6=%r8
  2338. mov $0,%r8
  2339. # qhasm: mulr7 = 0
  2340. # asm 1: mov $0,>mulr7=int64#6
  2341. # asm 2: mov $0,>mulr7=%r9
  2342. mov $0,%r9
  2343. # qhasm: mulx0 = e0_stack
  2344. # asm 1: movq <e0_stack=stack64#12,>mulx0=int64#8
  2345. # asm 2: movq <e0_stack=88(%rsp),>mulx0=%r10
  2346. movq 88(%rsp),%r10
  2347. # qhasm: mulrax = f0_stack
  2348. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  2349. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  2350. movq 152(%rsp),%rax
  2351. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2352. # asm 1: mul <mulx0=int64#8
  2353. # asm 2: mul <mulx0=%r10
  2354. mul %r10
  2355. # qhasm: rx0 = mulrax
  2356. # asm 1: mov <mulrax=int64#7,>rx0=int64#9
  2357. # asm 2: mov <mulrax=%rax,>rx0=%r11
  2358. mov %rax,%r11
  2359. # qhasm: rx1 = mulrdx
  2360. # asm 1: mov <mulrdx=int64#3,>rx1=int64#10
  2361. # asm 2: mov <mulrdx=%rdx,>rx1=%r12
  2362. mov %rdx,%r12
  2363. # qhasm: mulrax = f1_stack
  2364. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  2365. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  2366. movq 160(%rsp),%rax
  2367. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2368. # asm 1: mul <mulx0=int64#8
  2369. # asm 2: mul <mulx0=%r10
  2370. mul %r10
  2371. # qhasm: carry? rx1 += mulrax
  2372. # asm 1: add <mulrax=int64#7,<rx1=int64#10
  2373. # asm 2: add <mulrax=%rax,<rx1=%r12
  2374. add %rax,%r12
  2375. # qhasm: rx2 = 0
  2376. # asm 1: mov $0,>rx2=int64#11
  2377. # asm 2: mov $0,>rx2=%r13
  2378. mov $0,%r13
  2379. # qhasm: rx2 += mulrdx + carry
  2380. # asm 1: adc <mulrdx=int64#3,<rx2=int64#11
  2381. # asm 2: adc <mulrdx=%rdx,<rx2=%r13
  2382. adc %rdx,%r13
  2383. # qhasm: mulrax = f2_stack
  2384. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  2385. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  2386. movq 168(%rsp),%rax
  2387. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2388. # asm 1: mul <mulx0=int64#8
  2389. # asm 2: mul <mulx0=%r10
  2390. mul %r10
  2391. # qhasm: carry? rx2 += mulrax
  2392. # asm 1: add <mulrax=int64#7,<rx2=int64#11
  2393. # asm 2: add <mulrax=%rax,<rx2=%r13
  2394. add %rax,%r13
  2395. # qhasm: rx3 = 0
  2396. # asm 1: mov $0,>rx3=int64#12
  2397. # asm 2: mov $0,>rx3=%r14
  2398. mov $0,%r14
  2399. # qhasm: rx3 += mulrdx + carry
  2400. # asm 1: adc <mulrdx=int64#3,<rx3=int64#12
  2401. # asm 2: adc <mulrdx=%rdx,<rx3=%r14
  2402. adc %rdx,%r14
  2403. # qhasm: mulrax = f3_stack
  2404. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  2405. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  2406. movq 176(%rsp),%rax
  2407. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2408. # asm 1: mul <mulx0=int64#8
  2409. # asm 2: mul <mulx0=%r10
  2410. mul %r10
  2411. # qhasm: carry? rx3 += mulrax
  2412. # asm 1: add <mulrax=int64#7,<rx3=int64#12
  2413. # asm 2: add <mulrax=%rax,<rx3=%r14
  2414. add %rax,%r14
  2415. # qhasm: mulr4 += mulrdx + carry
  2416. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
  2417. # asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
  2418. adc %rdx,%rsi
  2419. # qhasm: mulx1 = e1_stack
  2420. # asm 1: movq <e1_stack=stack64#13,>mulx1=int64#8
  2421. # asm 2: movq <e1_stack=96(%rsp),>mulx1=%r10
  2422. movq 96(%rsp),%r10
  2423. # qhasm: mulrax = f0_stack
  2424. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  2425. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  2426. movq 152(%rsp),%rax
  2427. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  2428. # asm 1: mul <mulx1=int64#8
  2429. # asm 2: mul <mulx1=%r10
  2430. mul %r10
  2431. # qhasm: carry? rx1 += mulrax
  2432. # asm 1: add <mulrax=int64#7,<rx1=int64#10
  2433. # asm 2: add <mulrax=%rax,<rx1=%r12
  2434. add %rax,%r12
  2435. # qhasm: mulc = 0
  2436. # asm 1: mov $0,>mulc=int64#13
  2437. # asm 2: mov $0,>mulc=%r15
  2438. mov $0,%r15
  2439. # qhasm: mulc += mulrdx + carry
  2440. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2441. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2442. adc %rdx,%r15
  2443. # qhasm: mulrax = f1_stack
  2444. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  2445. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  2446. movq 160(%rsp),%rax
  2447. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  2448. # asm 1: mul <mulx1=int64#8
  2449. # asm 2: mul <mulx1=%r10
  2450. mul %r10
  2451. # qhasm: carry? rx2 += mulrax
  2452. # asm 1: add <mulrax=int64#7,<rx2=int64#11
  2453. # asm 2: add <mulrax=%rax,<rx2=%r13
  2454. add %rax,%r13
  2455. # qhasm: mulrdx += 0 + carry
  2456. # asm 1: adc $0,<mulrdx=int64#3
  2457. # asm 2: adc $0,<mulrdx=%rdx
  2458. adc $0,%rdx
  2459. # qhasm: carry? rx2 += mulc
  2460. # asm 1: add <mulc=int64#13,<rx2=int64#11
  2461. # asm 2: add <mulc=%r15,<rx2=%r13
  2462. add %r15,%r13
  2463. # qhasm: mulc = 0
  2464. # asm 1: mov $0,>mulc=int64#13
  2465. # asm 2: mov $0,>mulc=%r15
  2466. mov $0,%r15
  2467. # qhasm: mulc += mulrdx + carry
  2468. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2469. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2470. adc %rdx,%r15
  2471. # qhasm: mulrax = f2_stack
  2472. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  2473. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  2474. movq 168(%rsp),%rax
  2475. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  2476. # asm 1: mul <mulx1=int64#8
  2477. # asm 2: mul <mulx1=%r10
  2478. mul %r10
  2479. # qhasm: carry? rx3 += mulrax
  2480. # asm 1: add <mulrax=int64#7,<rx3=int64#12
  2481. # asm 2: add <mulrax=%rax,<rx3=%r14
  2482. add %rax,%r14
  2483. # qhasm: mulrdx += 0 + carry
  2484. # asm 1: adc $0,<mulrdx=int64#3
  2485. # asm 2: adc $0,<mulrdx=%rdx
  2486. adc $0,%rdx
  2487. # qhasm: carry? rx3 += mulc
  2488. # asm 1: add <mulc=int64#13,<rx3=int64#12
  2489. # asm 2: add <mulc=%r15,<rx3=%r14
  2490. add %r15,%r14
  2491. # qhasm: mulc = 0
  2492. # asm 1: mov $0,>mulc=int64#13
  2493. # asm 2: mov $0,>mulc=%r15
  2494. mov $0,%r15
  2495. # qhasm: mulc += mulrdx + carry
  2496. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2497. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2498. adc %rdx,%r15
  2499. # qhasm: mulrax = f3_stack
  2500. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  2501. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  2502. movq 176(%rsp),%rax
  2503. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  2504. # asm 1: mul <mulx1=int64#8
  2505. # asm 2: mul <mulx1=%r10
  2506. mul %r10
  2507. # qhasm: carry? mulr4 += mulrax
  2508. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  2509. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  2510. add %rax,%rsi
  2511. # qhasm: mulrdx += 0 + carry
  2512. # asm 1: adc $0,<mulrdx=int64#3
  2513. # asm 2: adc $0,<mulrdx=%rdx
  2514. adc $0,%rdx
  2515. # qhasm: carry? mulr4 += mulc
  2516. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  2517. # asm 2: add <mulc=%r15,<mulr4=%rsi
  2518. add %r15,%rsi
  2519. # qhasm: mulr5 += mulrdx + carry
  2520. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
  2521. # asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
  2522. adc %rdx,%rcx
  2523. # qhasm: mulx2 = e2_stack
  2524. # asm 1: movq <e2_stack=stack64#14,>mulx2=int64#8
  2525. # asm 2: movq <e2_stack=104(%rsp),>mulx2=%r10
  2526. movq 104(%rsp),%r10
  2527. # qhasm: mulrax = f0_stack
  2528. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  2529. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  2530. movq 152(%rsp),%rax
  2531. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  2532. # asm 1: mul <mulx2=int64#8
  2533. # asm 2: mul <mulx2=%r10
  2534. mul %r10
  2535. # qhasm: carry? rx2 += mulrax
  2536. # asm 1: add <mulrax=int64#7,<rx2=int64#11
  2537. # asm 2: add <mulrax=%rax,<rx2=%r13
  2538. add %rax,%r13
  2539. # qhasm: mulc = 0
  2540. # asm 1: mov $0,>mulc=int64#13
  2541. # asm 2: mov $0,>mulc=%r15
  2542. mov $0,%r15
  2543. # qhasm: mulc += mulrdx + carry
  2544. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2545. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2546. adc %rdx,%r15
  2547. # qhasm: mulrax = f1_stack
  2548. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  2549. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  2550. movq 160(%rsp),%rax
  2551. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  2552. # asm 1: mul <mulx2=int64#8
  2553. # asm 2: mul <mulx2=%r10
  2554. mul %r10
  2555. # qhasm: carry? rx3 += mulrax
  2556. # asm 1: add <mulrax=int64#7,<rx3=int64#12
  2557. # asm 2: add <mulrax=%rax,<rx3=%r14
  2558. add %rax,%r14
  2559. # qhasm: mulrdx += 0 + carry
  2560. # asm 1: adc $0,<mulrdx=int64#3
  2561. # asm 2: adc $0,<mulrdx=%rdx
  2562. adc $0,%rdx
  2563. # qhasm: carry? rx3 += mulc
  2564. # asm 1: add <mulc=int64#13,<rx3=int64#12
  2565. # asm 2: add <mulc=%r15,<rx3=%r14
  2566. add %r15,%r14
  2567. # qhasm: mulc = 0
  2568. # asm 1: mov $0,>mulc=int64#13
  2569. # asm 2: mov $0,>mulc=%r15
  2570. mov $0,%r15
  2571. # qhasm: mulc += mulrdx + carry
  2572. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2573. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2574. adc %rdx,%r15
  2575. # qhasm: mulrax = f2_stack
  2576. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  2577. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  2578. movq 168(%rsp),%rax
  2579. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  2580. # asm 1: mul <mulx2=int64#8
  2581. # asm 2: mul <mulx2=%r10
  2582. mul %r10
  2583. # qhasm: carry? mulr4 += mulrax
  2584. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  2585. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  2586. add %rax,%rsi
  2587. # qhasm: mulrdx += 0 + carry
  2588. # asm 1: adc $0,<mulrdx=int64#3
  2589. # asm 2: adc $0,<mulrdx=%rdx
  2590. adc $0,%rdx
  2591. # qhasm: carry? mulr4 += mulc
  2592. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  2593. # asm 2: add <mulc=%r15,<mulr4=%rsi
  2594. add %r15,%rsi
  2595. # qhasm: mulc = 0
  2596. # asm 1: mov $0,>mulc=int64#13
  2597. # asm 2: mov $0,>mulc=%r15
  2598. mov $0,%r15
  2599. # qhasm: mulc += mulrdx + carry
  2600. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2601. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2602. adc %rdx,%r15
  2603. # qhasm: mulrax = f3_stack
  2604. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  2605. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  2606. movq 176(%rsp),%rax
  2607. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  2608. # asm 1: mul <mulx2=int64#8
  2609. # asm 2: mul <mulx2=%r10
  2610. mul %r10
  2611. # qhasm: carry? mulr5 += mulrax
  2612. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  2613. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  2614. add %rax,%rcx
  2615. # qhasm: mulrdx += 0 + carry
  2616. # asm 1: adc $0,<mulrdx=int64#3
  2617. # asm 2: adc $0,<mulrdx=%rdx
  2618. adc $0,%rdx
  2619. # qhasm: carry? mulr5 += mulc
  2620. # asm 1: add <mulc=int64#13,<mulr5=int64#4
  2621. # asm 2: add <mulc=%r15,<mulr5=%rcx
  2622. add %r15,%rcx
  2623. # qhasm: mulr6 += mulrdx + carry
  2624. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  2625. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  2626. adc %rdx,%r8
  2627. # qhasm: mulx3 = e3_stack
  2628. # asm 1: movq <e3_stack=stack64#15,>mulx3=int64#8
  2629. # asm 2: movq <e3_stack=112(%rsp),>mulx3=%r10
  2630. movq 112(%rsp),%r10
  2631. # qhasm: mulrax = f0_stack
  2632. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  2633. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  2634. movq 152(%rsp),%rax
  2635. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  2636. # asm 1: mul <mulx3=int64#8
  2637. # asm 2: mul <mulx3=%r10
  2638. mul %r10
  2639. # qhasm: carry? rx3 += mulrax
  2640. # asm 1: add <mulrax=int64#7,<rx3=int64#12
  2641. # asm 2: add <mulrax=%rax,<rx3=%r14
  2642. add %rax,%r14
  2643. # qhasm: mulc = 0
  2644. # asm 1: mov $0,>mulc=int64#13
  2645. # asm 2: mov $0,>mulc=%r15
  2646. mov $0,%r15
  2647. # qhasm: mulc += mulrdx + carry
  2648. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2649. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2650. adc %rdx,%r15
  2651. # qhasm: mulrax = f1_stack
  2652. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  2653. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  2654. movq 160(%rsp),%rax
  2655. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  2656. # asm 1: mul <mulx3=int64#8
  2657. # asm 2: mul <mulx3=%r10
  2658. mul %r10
  2659. # qhasm: carry? mulr4 += mulrax
  2660. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  2661. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  2662. add %rax,%rsi
  2663. # qhasm: mulrdx += 0 + carry
  2664. # asm 1: adc $0,<mulrdx=int64#3
  2665. # asm 2: adc $0,<mulrdx=%rdx
  2666. adc $0,%rdx
  2667. # qhasm: carry? mulr4 += mulc
  2668. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  2669. # asm 2: add <mulc=%r15,<mulr4=%rsi
  2670. add %r15,%rsi
  2671. # qhasm: mulc = 0
  2672. # asm 1: mov $0,>mulc=int64#13
  2673. # asm 2: mov $0,>mulc=%r15
  2674. mov $0,%r15
  2675. # qhasm: mulc += mulrdx + carry
  2676. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2677. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2678. adc %rdx,%r15
  2679. # qhasm: mulrax = f2_stack
  2680. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  2681. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  2682. movq 168(%rsp),%rax
  2683. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  2684. # asm 1: mul <mulx3=int64#8
  2685. # asm 2: mul <mulx3=%r10
  2686. mul %r10
  2687. # qhasm: carry? mulr5 += mulrax
  2688. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  2689. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  2690. add %rax,%rcx
  2691. # qhasm: mulrdx += 0 + carry
  2692. # asm 1: adc $0,<mulrdx=int64#3
  2693. # asm 2: adc $0,<mulrdx=%rdx
  2694. adc $0,%rdx
  2695. # qhasm: carry? mulr5 += mulc
  2696. # asm 1: add <mulc=int64#13,<mulr5=int64#4
  2697. # asm 2: add <mulc=%r15,<mulr5=%rcx
  2698. add %r15,%rcx
  2699. # qhasm: mulc = 0
  2700. # asm 1: mov $0,>mulc=int64#13
  2701. # asm 2: mov $0,>mulc=%r15
  2702. mov $0,%r15
  2703. # qhasm: mulc += mulrdx + carry
  2704. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2705. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2706. adc %rdx,%r15
  2707. # qhasm: mulrax = f3_stack
  2708. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  2709. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  2710. movq 176(%rsp),%rax
  2711. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  2712. # asm 1: mul <mulx3=int64#8
  2713. # asm 2: mul <mulx3=%r10
  2714. mul %r10
  2715. # qhasm: carry? mulr6 += mulrax
  2716. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  2717. # asm 2: add <mulrax=%rax,<mulr6=%r8
  2718. add %rax,%r8
  2719. # qhasm: mulrdx += 0 + carry
  2720. # asm 1: adc $0,<mulrdx=int64#3
  2721. # asm 2: adc $0,<mulrdx=%rdx
  2722. adc $0,%rdx
  2723. # qhasm: carry? mulr6 += mulc
  2724. # asm 1: add <mulc=int64#13,<mulr6=int64#5
  2725. # asm 2: add <mulc=%r15,<mulr6=%r8
  2726. add %r15,%r8
  2727. # qhasm: mulr7 += mulrdx + carry
  2728. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  2729. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  2730. adc %rdx,%r9
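# --- annotation (not generated by qhasm) -------------------------------------
# At this point the eight 64-bit limbs rx0..rx3 (low half) and mulr4..mulr7
# (high half) hold the full 512-bit product of the field-element limbs taken
# from the e*_stack and f*_stack slots.  The block below reduces that product
# modulo 2^255-19 using the identity 2^256 == 38 (mod 2^255-19): each high
# limb is multiplied by the 64-bit constant 38 (CRYPTO_NAMESPACE(38)) and the
# four partial products are folded into rx0..rx3, with the carry that falls
# out of the chain collected in mulr8.
# ------------------------------------------------------------------------------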
  2731. # qhasm: mulrax = mulr4
  2732. # asm 1: mov <mulr4=int64#2,>mulrax=int64#7
  2733. # asm 2: mov <mulr4=%rsi,>mulrax=%rax
  2734. mov %rsi,%rax
  2735. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2736. mulq CRYPTO_NAMESPACE(38)(%rip)
  2737. # qhasm: mulr4 = mulrax
  2738. # asm 1: mov <mulrax=int64#7,>mulr4=int64#2
  2739. # asm 2: mov <mulrax=%rax,>mulr4=%rsi
  2740. mov %rax,%rsi
  2741. # qhasm: mulrax = mulr5
  2742. # asm 1: mov <mulr5=int64#4,>mulrax=int64#7
  2743. # asm 2: mov <mulr5=%rcx,>mulrax=%rax
  2744. mov %rcx,%rax
  2745. # qhasm: mulr5 = mulrdx
  2746. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
  2747. # asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
  2748. mov %rdx,%rcx
  2749. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2750. mulq CRYPTO_NAMESPACE(38)(%rip)
  2751. # qhasm: carry? mulr5 += mulrax
  2752. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  2753. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  2754. add %rax,%rcx
  2755. # qhasm: mulrax = mulr6
  2756. # asm 1: mov <mulr6=int64#5,>mulrax=int64#7
  2757. # asm 2: mov <mulr6=%r8,>mulrax=%rax
  2758. mov %r8,%rax
  2759. # qhasm: mulr6 = 0
  2760. # asm 1: mov $0,>mulr6=int64#5
  2761. # asm 2: mov $0,>mulr6=%r8
  2762. mov $0,%r8
  2763. # qhasm: mulr6 += mulrdx + carry
  2764. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  2765. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  2766. adc %rdx,%r8
  2767. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2768. mulq CRYPTO_NAMESPACE(38)(%rip)
  2769. # qhasm: carry? mulr6 += mulrax
  2770. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  2771. # asm 2: add <mulrax=%rax,<mulr6=%r8
  2772. add %rax,%r8
  2773. # qhasm: mulrax = mulr7
  2774. # asm 1: mov <mulr7=int64#6,>mulrax=int64#7
  2775. # asm 2: mov <mulr7=%r9,>mulrax=%rax
  2776. mov %r9,%rax
  2777. # qhasm: mulr7 = 0
  2778. # asm 1: mov $0,>mulr7=int64#6
  2779. # asm 2: mov $0,>mulr7=%r9
  2780. mov $0,%r9
  2781. # qhasm: mulr7 += mulrdx + carry
  2782. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  2783. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  2784. adc %rdx,%r9
  2785. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  2786. mulq CRYPTO_NAMESPACE(38)(%rip)
  2787. # qhasm: carry? mulr7 += mulrax
  2788. # asm 1: add <mulrax=int64#7,<mulr7=int64#6
  2789. # asm 2: add <mulrax=%rax,<mulr7=%r9
  2790. add %rax,%r9
  2791. # qhasm: mulr8 = 0
  2792. # asm 1: mov $0,>mulr8=int64#7
  2793. # asm 2: mov $0,>mulr8=%rax
  2794. mov $0,%rax
  2795. # qhasm: mulr8 += mulrdx + carry
  2796. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  2797. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  2798. adc %rdx,%rax
  2799. # qhasm: carry? rx0 += mulr4
  2800. # asm 1: add <mulr4=int64#2,<rx0=int64#9
  2801. # asm 2: add <mulr4=%rsi,<rx0=%r11
  2802. add %rsi,%r11
  2803. # qhasm: carry? rx1 += mulr5 + carry
  2804. # asm 1: adc <mulr5=int64#4,<rx1=int64#10
  2805. # asm 2: adc <mulr5=%rcx,<rx1=%r12
  2806. adc %rcx,%r12
  2807. # qhasm: carry? rx2 += mulr6 + carry
  2808. # asm 1: adc <mulr6=int64#5,<rx2=int64#11
  2809. # asm 2: adc <mulr6=%r8,<rx2=%r13
  2810. adc %r8,%r13
  2811. # qhasm: carry? rx3 += mulr7 + carry
  2812. # asm 1: adc <mulr7=int64#6,<rx3=int64#12
  2813. # asm 2: adc <mulr7=%r9,<rx3=%r14
  2814. adc %r9,%r14
  2815. # qhasm: mulzero = 0
  2816. # asm 1: mov $0,>mulzero=int64#2
  2817. # asm 2: mov $0,>mulzero=%rsi
  2818. mov $0,%rsi
  2819. # qhasm: mulr8 += mulzero + carry
  2820. # asm 1: adc <mulzero=int64#2,<mulr8=int64#7
  2821. # asm 2: adc <mulzero=%rsi,<mulr8=%rax
  2822. adc %rsi,%rax
  2823. # qhasm: mulr8 *= 38
  2824. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
  2825. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
  2826. imulq $38,%rax,%rdx
  2827. # qhasm: carry? rx0 += mulr8
  2828. # asm 1: add <mulr8=int64#3,<rx0=int64#9
  2829. # asm 2: add <mulr8=%rdx,<rx0=%r11
  2830. add %rdx,%r11
  2831. # qhasm: carry? rx1 += mulzero + carry
  2832. # asm 1: adc <mulzero=int64#2,<rx1=int64#10
  2833. # asm 2: adc <mulzero=%rsi,<rx1=%r12
  2834. adc %rsi,%r12
  2835. # qhasm: carry? rx2 += mulzero + carry
  2836. # asm 1: adc <mulzero=int64#2,<rx2=int64#11
  2837. # asm 2: adc <mulzero=%rsi,<rx2=%r13
  2838. adc %rsi,%r13
  2839. # qhasm: carry? rx3 += mulzero + carry
  2840. # asm 1: adc <mulzero=int64#2,<rx3=int64#12
  2841. # asm 2: adc <mulzero=%rsi,<rx3=%r14
  2842. adc %rsi,%r14
  2843. # qhasm: mulzero += mulzero + carry
  2844. # asm 1: adc <mulzero=int64#2,<mulzero=int64#2
  2845. # asm 2: adc <mulzero=%rsi,<mulzero=%rsi
  2846. adc %rsi,%rsi
  2847. # qhasm: mulzero *= 38
  2848. # asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
  2849. # asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
  2850. imulq $38,%rsi,%rsi
  2851. # qhasm: rx0 += mulzero
  2852. # asm 1: add <mulzero=int64#2,<rx0=int64#9
  2853. # asm 2: add <mulzero=%rsi,<rx0=%r11
  2854. add %rsi,%r11
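# --- annotation (not generated by qhasm) -------------------------------------
# Carry-fold detail: adding 38*mulr8 into rx0..rx3 can itself carry out of
# rx3.  That carry is captured in mulzero (adc %rsi,%rsi), multiplied by 38,
# and added into rx0 one last time without further propagation.  This is safe
# because the chain can only have wrapped if rx0 became very small in the
# previous addition, so the final add of at most 38 cannot overflow.  The
# stored result is therefore only weakly reduced (it fits in 256 bits but may
# still exceed 2^255-19), which the later field operations accept.
# ------------------------------------------------------------------------------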
  2855. # qhasm: *(uint64 *)(rp + 0) = rx0
  2856. # asm 1: movq <rx0=int64#9,0(<rp=int64#1)
  2857. # asm 2: movq <rx0=%r11,0(<rp=%rdi)
  2858. movq %r11,0(%rdi)
  2859. # qhasm: *(uint64 *)(rp + 8) = rx1
  2860. # asm 1: movq <rx1=int64#10,8(<rp=int64#1)
  2861. # asm 2: movq <rx1=%r12,8(<rp=%rdi)
  2862. movq %r12,8(%rdi)
  2863. # qhasm: *(uint64 *)(rp + 16) = rx2
  2864. # asm 1: movq <rx2=int64#11,16(<rp=int64#1)
  2865. # asm 2: movq <rx2=%r13,16(<rp=%rdi)
  2866. movq %r13,16(%rdi)
  2867. # qhasm: *(uint64 *)(rp + 24) = rx3
  2868. # asm 1: movq <rx3=int64#12,24(<rp=int64#1)
  2869. # asm 2: movq <rx3=%r14,24(<rp=%rdi)
  2870. movq %r14,24(%rdi)
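# --- annotation (not generated by qhasm) -------------------------------------
# rx0..rx3, the first reduced 256-bit product, have been written to
# rp+0..rp+24.  The same multiply-and-reduce pattern is repeated three more
# times below (for the h/g, g/f and e/h limb sets).  As a reading aid, a
# compact C sketch of that pattern follows.  It is an illustration written for
# this annotation only: the names fe_mul_sketch and u128 are not part of this
# file, and the C code parallels the two 38-folds above rather than matching
# the assembly instruction for instruction.
# ------------------------------------------------------------------------------
/* Illustrative sketch only -- assumes a compiler providing unsigned __int128. */
#include <stdint.h>

typedef unsigned __int128 u128;

/* r = a*b reduced modulo 2^255-19 (weakly: r < 2^256); limbs little-endian. */
static void fe_mul_sketch(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
{
    uint64_t t[8] = {0};

    /* schoolbook 4x4 multiplication: t = a*b, 512 bits in eight limbs */
    for (int i = 0; i < 4; i++) {
        uint64_t carry = 0;
        for (int j = 0; j < 4; j++) {
            u128 m = (u128)a[i] * b[j] + t[i + j] + carry;
            t[i + j] = (uint64_t)m;
            carry = (uint64_t)(m >> 64);
        }
        t[i + 4] = carry;
    }

    /* first fold: 2^256 == 38 (mod 2^255-19), so add 38 * high half to low half */
    u128 acc = 0;
    for (int j = 0; j < 4; j++) {
        acc += (u128)t[j] + (u128)t[j + 4] * 38;
        r[j] = (uint64_t)acc;
        acc >>= 64;
    }

    /* second fold: the leftover carry (at most 38) is folded in the same way */
    acc = (u128)r[0] + (u128)(uint64_t)acc * 38;
    r[0] = (uint64_t)acc;
    acc >>= 64;
    for (int j = 1; j < 4; j++) {
        acc += r[j];
        r[j] = (uint64_t)acc;
        acc >>= 64;
    }

    /* if that still carried out of r[3], r[0] is tiny now; one last +38 suffices */
    r[0] += (uint64_t)acc * 38;
}
# --- end of annotation sketch -------------------------------------------------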
  2871. # qhasm: mulr4 = 0
  2872. # asm 1: mov $0,>mulr4=int64#2
  2873. # asm 2: mov $0,>mulr4=%rsi
  2874. mov $0,%rsi
  2875. # qhasm: mulr5 = 0
  2876. # asm 1: mov $0,>mulr5=int64#4
  2877. # asm 2: mov $0,>mulr5=%rcx
  2878. mov $0,%rcx
  2879. # qhasm: mulr6 = 0
  2880. # asm 1: mov $0,>mulr6=int64#5
  2881. # asm 2: mov $0,>mulr6=%r8
  2882. mov $0,%r8
  2883. # qhasm: mulr7 = 0
  2884. # asm 1: mov $0,>mulr7=int64#6
  2885. # asm 2: mov $0,>mulr7=%r9
  2886. mov $0,%r9
  2887. # qhasm: mulx0 = h0_stack
  2888. # asm 1: movq <h0_stack=stack64#8,>mulx0=int64#8
  2889. # asm 2: movq <h0_stack=56(%rsp),>mulx0=%r10
  2890. movq 56(%rsp),%r10
  2891. # qhasm: mulrax = g0_stack
  2892. # asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
  2893. # asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
  2894. movq 120(%rsp),%rax
  2895. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2896. # asm 1: mul <mulx0=int64#8
  2897. # asm 2: mul <mulx0=%r10
  2898. mul %r10
  2899. # qhasm: ry0 = mulrax
  2900. # asm 1: mov <mulrax=int64#7,>ry0=int64#9
  2901. # asm 2: mov <mulrax=%rax,>ry0=%r11
  2902. mov %rax,%r11
  2903. # qhasm: ry1 = mulrdx
  2904. # asm 1: mov <mulrdx=int64#3,>ry1=int64#10
  2905. # asm 2: mov <mulrdx=%rdx,>ry1=%r12
  2906. mov %rdx,%r12
  2907. # qhasm: mulrax = g1_stack
  2908. # asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
  2909. # asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
  2910. movq 128(%rsp),%rax
  2911. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2912. # asm 1: mul <mulx0=int64#8
  2913. # asm 2: mul <mulx0=%r10
  2914. mul %r10
  2915. # qhasm: carry? ry1 += mulrax
  2916. # asm 1: add <mulrax=int64#7,<ry1=int64#10
  2917. # asm 2: add <mulrax=%rax,<ry1=%r12
  2918. add %rax,%r12
  2919. # qhasm: ry2 = 0
  2920. # asm 1: mov $0,>ry2=int64#11
  2921. # asm 2: mov $0,>ry2=%r13
  2922. mov $0,%r13
  2923. # qhasm: ry2 += mulrdx + carry
  2924. # asm 1: adc <mulrdx=int64#3,<ry2=int64#11
  2925. # asm 2: adc <mulrdx=%rdx,<ry2=%r13
  2926. adc %rdx,%r13
  2927. # qhasm: mulrax = g2_stack
  2928. # asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
  2929. # asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
  2930. movq 136(%rsp),%rax
  2931. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2932. # asm 1: mul <mulx0=int64#8
  2933. # asm 2: mul <mulx0=%r10
  2934. mul %r10
  2935. # qhasm: carry? ry2 += mulrax
  2936. # asm 1: add <mulrax=int64#7,<ry2=int64#11
  2937. # asm 2: add <mulrax=%rax,<ry2=%r13
  2938. add %rax,%r13
  2939. # qhasm: ry3 = 0
  2940. # asm 1: mov $0,>ry3=int64#12
  2941. # asm 2: mov $0,>ry3=%r14
  2942. mov $0,%r14
  2943. # qhasm: ry3 += mulrdx + carry
  2944. # asm 1: adc <mulrdx=int64#3,<ry3=int64#12
  2945. # asm 2: adc <mulrdx=%rdx,<ry3=%r14
  2946. adc %rdx,%r14
  2947. # qhasm: mulrax = g3_stack
  2948. # asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
  2949. # asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
  2950. movq 144(%rsp),%rax
  2951. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  2952. # asm 1: mul <mulx0=int64#8
  2953. # asm 2: mul <mulx0=%r10
  2954. mul %r10
  2955. # qhasm: carry? ry3 += mulrax
  2956. # asm 1: add <mulrax=int64#7,<ry3=int64#12
  2957. # asm 2: add <mulrax=%rax,<ry3=%r14
  2958. add %rax,%r14
  2959. # qhasm: mulr4 += mulrdx + carry
  2960. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
  2961. # asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
  2962. adc %rdx,%rsi
  2963. # qhasm: mulx1 = h1_stack
  2964. # asm 1: movq <h1_stack=stack64#9,>mulx1=int64#8
  2965. # asm 2: movq <h1_stack=64(%rsp),>mulx1=%r10
  2966. movq 64(%rsp),%r10
  2967. # qhasm: mulrax = g0_stack
  2968. # asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
  2969. # asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
  2970. movq 120(%rsp),%rax
  2971. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  2972. # asm 1: mul <mulx1=int64#8
  2973. # asm 2: mul <mulx1=%r10
  2974. mul %r10
  2975. # qhasm: carry? ry1 += mulrax
  2976. # asm 1: add <mulrax=int64#7,<ry1=int64#10
  2977. # asm 2: add <mulrax=%rax,<ry1=%r12
  2978. add %rax,%r12
  2979. # qhasm: mulc = 0
  2980. # asm 1: mov $0,>mulc=int64#13
  2981. # asm 2: mov $0,>mulc=%r15
  2982. mov $0,%r15
  2983. # qhasm: mulc += mulrdx + carry
  2984. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  2985. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  2986. adc %rdx,%r15
  2987. # qhasm: mulrax = g1_stack
  2988. # asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
  2989. # asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
  2990. movq 128(%rsp),%rax
  2991. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  2992. # asm 1: mul <mulx1=int64#8
  2993. # asm 2: mul <mulx1=%r10
  2994. mul %r10
  2995. # qhasm: carry? ry2 += mulrax
  2996. # asm 1: add <mulrax=int64#7,<ry2=int64#11
  2997. # asm 2: add <mulrax=%rax,<ry2=%r13
  2998. add %rax,%r13
  2999. # qhasm: mulrdx += 0 + carry
  3000. # asm 1: adc $0,<mulrdx=int64#3
  3001. # asm 2: adc $0,<mulrdx=%rdx
  3002. adc $0,%rdx
  3003. # qhasm: carry? ry2 += mulc
  3004. # asm 1: add <mulc=int64#13,<ry2=int64#11
  3005. # asm 2: add <mulc=%r15,<ry2=%r13
  3006. add %r15,%r13
  3007. # qhasm: mulc = 0
  3008. # asm 1: mov $0,>mulc=int64#13
  3009. # asm 2: mov $0,>mulc=%r15
  3010. mov $0,%r15
  3011. # qhasm: mulc += mulrdx + carry
  3012. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3013. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3014. adc %rdx,%r15
  3015. # qhasm: mulrax = g2_stack
  3016. # asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
  3017. # asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
  3018. movq 136(%rsp),%rax
  3019. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  3020. # asm 1: mul <mulx1=int64#8
  3021. # asm 2: mul <mulx1=%r10
  3022. mul %r10
  3023. # qhasm: carry? ry3 += mulrax
  3024. # asm 1: add <mulrax=int64#7,<ry3=int64#12
  3025. # asm 2: add <mulrax=%rax,<ry3=%r14
  3026. add %rax,%r14
  3027. # qhasm: mulrdx += 0 + carry
  3028. # asm 1: adc $0,<mulrdx=int64#3
  3029. # asm 2: adc $0,<mulrdx=%rdx
  3030. adc $0,%rdx
  3031. # qhasm: carry? ry3 += mulc
  3032. # asm 1: add <mulc=int64#13,<ry3=int64#12
  3033. # asm 2: add <mulc=%r15,<ry3=%r14
  3034. add %r15,%r14
  3035. # qhasm: mulc = 0
  3036. # asm 1: mov $0,>mulc=int64#13
  3037. # asm 2: mov $0,>mulc=%r15
  3038. mov $0,%r15
  3039. # qhasm: mulc += mulrdx + carry
  3040. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3041. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3042. adc %rdx,%r15
  3043. # qhasm: mulrax = g3_stack
  3044. # asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
  3045. # asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
  3046. movq 144(%rsp),%rax
  3047. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  3048. # asm 1: mul <mulx1=int64#8
  3049. # asm 2: mul <mulx1=%r10
  3050. mul %r10
  3051. # qhasm: carry? mulr4 += mulrax
  3052. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  3053. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  3054. add %rax,%rsi
  3055. # qhasm: mulrdx += 0 + carry
  3056. # asm 1: adc $0,<mulrdx=int64#3
  3057. # asm 2: adc $0,<mulrdx=%rdx
  3058. adc $0,%rdx
  3059. # qhasm: carry? mulr4 += mulc
  3060. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  3061. # asm 2: add <mulc=%r15,<mulr4=%rsi
  3062. add %r15,%rsi
  3063. # qhasm: mulr5 += mulrdx + carry
  3064. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
  3065. # asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
  3066. adc %rdx,%rcx
  3067. # qhasm: mulx2 = h2_stack
  3068. # asm 1: movq <h2_stack=stack64#10,>mulx2=int64#8
  3069. # asm 2: movq <h2_stack=72(%rsp),>mulx2=%r10
  3070. movq 72(%rsp),%r10
  3071. # qhasm: mulrax = g0_stack
  3072. # asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
  3073. # asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
  3074. movq 120(%rsp),%rax
  3075. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3076. # asm 1: mul <mulx2=int64#8
  3077. # asm 2: mul <mulx2=%r10
  3078. mul %r10
  3079. # qhasm: carry? ry2 += mulrax
  3080. # asm 1: add <mulrax=int64#7,<ry2=int64#11
  3081. # asm 2: add <mulrax=%rax,<ry2=%r13
  3082. add %rax,%r13
  3083. # qhasm: mulc = 0
  3084. # asm 1: mov $0,>mulc=int64#13
  3085. # asm 2: mov $0,>mulc=%r15
  3086. mov $0,%r15
  3087. # qhasm: mulc += mulrdx + carry
  3088. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3089. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3090. adc %rdx,%r15
  3091. # qhasm: mulrax = g1_stack
  3092. # asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
  3093. # asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
  3094. movq 128(%rsp),%rax
  3095. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3096. # asm 1: mul <mulx2=int64#8
  3097. # asm 2: mul <mulx2=%r10
  3098. mul %r10
  3099. # qhasm: carry? ry3 += mulrax
  3100. # asm 1: add <mulrax=int64#7,<ry3=int64#12
  3101. # asm 2: add <mulrax=%rax,<ry3=%r14
  3102. add %rax,%r14
  3103. # qhasm: mulrdx += 0 + carry
  3104. # asm 1: adc $0,<mulrdx=int64#3
  3105. # asm 2: adc $0,<mulrdx=%rdx
  3106. adc $0,%rdx
  3107. # qhasm: carry? ry3 += mulc
  3108. # asm 1: add <mulc=int64#13,<ry3=int64#12
  3109. # asm 2: add <mulc=%r15,<ry3=%r14
  3110. add %r15,%r14
  3111. # qhasm: mulc = 0
  3112. # asm 1: mov $0,>mulc=int64#13
  3113. # asm 2: mov $0,>mulc=%r15
  3114. mov $0,%r15
  3115. # qhasm: mulc += mulrdx + carry
  3116. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3117. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3118. adc %rdx,%r15
  3119. # qhasm: mulrax = g2_stack
  3120. # asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
  3121. # asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
  3122. movq 136(%rsp),%rax
  3123. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3124. # asm 1: mul <mulx2=int64#8
  3125. # asm 2: mul <mulx2=%r10
  3126. mul %r10
  3127. # qhasm: carry? mulr4 += mulrax
  3128. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  3129. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  3130. add %rax,%rsi
  3131. # qhasm: mulrdx += 0 + carry
  3132. # asm 1: adc $0,<mulrdx=int64#3
  3133. # asm 2: adc $0,<mulrdx=%rdx
  3134. adc $0,%rdx
  3135. # qhasm: carry? mulr4 += mulc
  3136. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  3137. # asm 2: add <mulc=%r15,<mulr4=%rsi
  3138. add %r15,%rsi
  3139. # qhasm: mulc = 0
  3140. # asm 1: mov $0,>mulc=int64#13
  3141. # asm 2: mov $0,>mulc=%r15
  3142. mov $0,%r15
  3143. # qhasm: mulc += mulrdx + carry
  3144. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3145. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3146. adc %rdx,%r15
  3147. # qhasm: mulrax = g3_stack
  3148. # asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
  3149. # asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
  3150. movq 144(%rsp),%rax
  3151. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3152. # asm 1: mul <mulx2=int64#8
  3153. # asm 2: mul <mulx2=%r10
  3154. mul %r10
  3155. # qhasm: carry? mulr5 += mulrax
  3156. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  3157. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  3158. add %rax,%rcx
  3159. # qhasm: mulrdx += 0 + carry
  3160. # asm 1: adc $0,<mulrdx=int64#3
  3161. # asm 2: adc $0,<mulrdx=%rdx
  3162. adc $0,%rdx
  3163. # qhasm: carry? mulr5 += mulc
  3164. # asm 1: add <mulc=int64#13,<mulr5=int64#4
  3165. # asm 2: add <mulc=%r15,<mulr5=%rcx
  3166. add %r15,%rcx
  3167. # qhasm: mulr6 += mulrdx + carry
  3168. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  3169. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  3170. adc %rdx,%r8
  3171. # qhasm: mulx3 = h3_stack
  3172. # asm 1: movq <h3_stack=stack64#11,>mulx3=int64#8
  3173. # asm 2: movq <h3_stack=80(%rsp),>mulx3=%r10
  3174. movq 80(%rsp),%r10
  3175. # qhasm: mulrax = g0_stack
  3176. # asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
  3177. # asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
  3178. movq 120(%rsp),%rax
  3179. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3180. # asm 1: mul <mulx3=int64#8
  3181. # asm 2: mul <mulx3=%r10
  3182. mul %r10
  3183. # qhasm: carry? ry3 += mulrax
  3184. # asm 1: add <mulrax=int64#7,<ry3=int64#12
  3185. # asm 2: add <mulrax=%rax,<ry3=%r14
  3186. add %rax,%r14
  3187. # qhasm: mulc = 0
  3188. # asm 1: mov $0,>mulc=int64#13
  3189. # asm 2: mov $0,>mulc=%r15
  3190. mov $0,%r15
  3191. # qhasm: mulc += mulrdx + carry
  3192. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3193. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3194. adc %rdx,%r15
  3195. # qhasm: mulrax = g1_stack
  3196. # asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
  3197. # asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
  3198. movq 128(%rsp),%rax
  3199. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3200. # asm 1: mul <mulx3=int64#8
  3201. # asm 2: mul <mulx3=%r10
  3202. mul %r10
  3203. # qhasm: carry? mulr4 += mulrax
  3204. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  3205. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  3206. add %rax,%rsi
  3207. # qhasm: mulrdx += 0 + carry
  3208. # asm 1: adc $0,<mulrdx=int64#3
  3209. # asm 2: adc $0,<mulrdx=%rdx
  3210. adc $0,%rdx
  3211. # qhasm: carry? mulr4 += mulc
  3212. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  3213. # asm 2: add <mulc=%r15,<mulr4=%rsi
  3214. add %r15,%rsi
  3215. # qhasm: mulc = 0
  3216. # asm 1: mov $0,>mulc=int64#13
  3217. # asm 2: mov $0,>mulc=%r15
  3218. mov $0,%r15
  3219. # qhasm: mulc += mulrdx + carry
  3220. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3221. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3222. adc %rdx,%r15
  3223. # qhasm: mulrax = g2_stack
  3224. # asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
  3225. # asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
  3226. movq 136(%rsp),%rax
  3227. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3228. # asm 1: mul <mulx3=int64#8
  3229. # asm 2: mul <mulx3=%r10
  3230. mul %r10
  3231. # qhasm: carry? mulr5 += mulrax
  3232. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  3233. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  3234. add %rax,%rcx
  3235. # qhasm: mulrdx += 0 + carry
  3236. # asm 1: adc $0,<mulrdx=int64#3
  3237. # asm 2: adc $0,<mulrdx=%rdx
  3238. adc $0,%rdx
  3239. # qhasm: carry? mulr5 += mulc
  3240. # asm 1: add <mulc=int64#13,<mulr5=int64#4
  3241. # asm 2: add <mulc=%r15,<mulr5=%rcx
  3242. add %r15,%rcx
  3243. # qhasm: mulc = 0
  3244. # asm 1: mov $0,>mulc=int64#13
  3245. # asm 2: mov $0,>mulc=%r15
  3246. mov $0,%r15
  3247. # qhasm: mulc += mulrdx + carry
  3248. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3249. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3250. adc %rdx,%r15
  3251. # qhasm: mulrax = g3_stack
  3252. # asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
  3253. # asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
  3254. movq 144(%rsp),%rax
  3255. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3256. # asm 1: mul <mulx3=int64#8
  3257. # asm 2: mul <mulx3=%r10
  3258. mul %r10
  3259. # qhasm: carry? mulr6 += mulrax
  3260. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  3261. # asm 2: add <mulrax=%rax,<mulr6=%r8
  3262. add %rax,%r8
  3263. # qhasm: mulrdx += 0 + carry
  3264. # asm 1: adc $0,<mulrdx=int64#3
  3265. # asm 2: adc $0,<mulrdx=%rdx
  3266. adc $0,%rdx
  3267. # qhasm: carry? mulr6 += mulc
  3268. # asm 1: add <mulc=int64#13,<mulr6=int64#5
  3269. # asm 2: add <mulc=%r15,<mulr6=%r8
  3270. add %r15,%r8
  3271. # qhasm: mulr7 += mulrdx + carry
  3272. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  3273. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  3274. adc %rdx,%r9
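# --- annotation (not generated by qhasm) -------------------------------------
# Second product complete: ry0..ry3 and mulr4..mulr7 now hold the 512-bit
# product of the limbs from h0..h3_stack and g0..g3_stack.  The reduction by
# 38 that follows is identical to the one applied to rx above.
# ------------------------------------------------------------------------------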
  3275. # qhasm: mulrax = mulr4
  3276. # asm 1: mov <mulr4=int64#2,>mulrax=int64#7
  3277. # asm 2: mov <mulr4=%rsi,>mulrax=%rax
  3278. mov %rsi,%rax
  3279. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3280. mulq CRYPTO_NAMESPACE(38)(%rip)
  3281. # qhasm: mulr4 = mulrax
  3282. # asm 1: mov <mulrax=int64#7,>mulr4=int64#2
  3283. # asm 2: mov <mulrax=%rax,>mulr4=%rsi
  3284. mov %rax,%rsi
  3285. # qhasm: mulrax = mulr5
  3286. # asm 1: mov <mulr5=int64#4,>mulrax=int64#7
  3287. # asm 2: mov <mulr5=%rcx,>mulrax=%rax
  3288. mov %rcx,%rax
  3289. # qhasm: mulr5 = mulrdx
  3290. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
  3291. # asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
  3292. mov %rdx,%rcx
  3293. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3294. mulq CRYPTO_NAMESPACE(38)(%rip)
  3295. # qhasm: carry? mulr5 += mulrax
  3296. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  3297. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  3298. add %rax,%rcx
  3299. # qhasm: mulrax = mulr6
  3300. # asm 1: mov <mulr6=int64#5,>mulrax=int64#7
  3301. # asm 2: mov <mulr6=%r8,>mulrax=%rax
  3302. mov %r8,%rax
  3303. # qhasm: mulr6 = 0
  3304. # asm 1: mov $0,>mulr6=int64#5
  3305. # asm 2: mov $0,>mulr6=%r8
  3306. mov $0,%r8
  3307. # qhasm: mulr6 += mulrdx + carry
  3308. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  3309. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  3310. adc %rdx,%r8
  3311. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3312. mulq CRYPTO_NAMESPACE(38)(%rip)
  3313. # qhasm: carry? mulr6 += mulrax
  3314. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  3315. # asm 2: add <mulrax=%rax,<mulr6=%r8
  3316. add %rax,%r8
  3317. # qhasm: mulrax = mulr7
  3318. # asm 1: mov <mulr7=int64#6,>mulrax=int64#7
  3319. # asm 2: mov <mulr7=%r9,>mulrax=%rax
  3320. mov %r9,%rax
  3321. # qhasm: mulr7 = 0
  3322. # asm 1: mov $0,>mulr7=int64#6
  3323. # asm 2: mov $0,>mulr7=%r9
  3324. mov $0,%r9
  3325. # qhasm: mulr7 += mulrdx + carry
  3326. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  3327. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  3328. adc %rdx,%r9
  3329. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3330. mulq CRYPTO_NAMESPACE(38)(%rip)
  3331. # qhasm: carry? mulr7 += mulrax
  3332. # asm 1: add <mulrax=int64#7,<mulr7=int64#6
  3333. # asm 2: add <mulrax=%rax,<mulr7=%r9
  3334. add %rax,%r9
  3335. # qhasm: mulr8 = 0
  3336. # asm 1: mov $0,>mulr8=int64#7
  3337. # asm 2: mov $0,>mulr8=%rax
  3338. mov $0,%rax
  3339. # qhasm: mulr8 += mulrdx + carry
  3340. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  3341. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  3342. adc %rdx,%rax
  3343. # qhasm: carry? ry0 += mulr4
  3344. # asm 1: add <mulr4=int64#2,<ry0=int64#9
  3345. # asm 2: add <mulr4=%rsi,<ry0=%r11
  3346. add %rsi,%r11
  3347. # qhasm: carry? ry1 += mulr5 + carry
  3348. # asm 1: adc <mulr5=int64#4,<ry1=int64#10
  3349. # asm 2: adc <mulr5=%rcx,<ry1=%r12
  3350. adc %rcx,%r12
  3351. # qhasm: carry? ry2 += mulr6 + carry
  3352. # asm 1: adc <mulr6=int64#5,<ry2=int64#11
  3353. # asm 2: adc <mulr6=%r8,<ry2=%r13
  3354. adc %r8,%r13
  3355. # qhasm: carry? ry3 += mulr7 + carry
  3356. # asm 1: adc <mulr7=int64#6,<ry3=int64#12
  3357. # asm 2: adc <mulr7=%r9,<ry3=%r14
  3358. adc %r9,%r14
  3359. # qhasm: mulzero = 0
  3360. # asm 1: mov $0,>mulzero=int64#2
  3361. # asm 2: mov $0,>mulzero=%rsi
  3362. mov $0,%rsi
  3363. # qhasm: mulr8 += mulzero + carry
  3364. # asm 1: adc <mulzero=int64#2,<mulr8=int64#7
  3365. # asm 2: adc <mulzero=%rsi,<mulr8=%rax
  3366. adc %rsi,%rax
  3367. # qhasm: mulr8 *= 38
  3368. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
  3369. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
  3370. imulq $38,%rax,%rdx
  3371. # qhasm: carry? ry0 += mulr8
  3372. # asm 1: add <mulr8=int64#3,<ry0=int64#9
  3373. # asm 2: add <mulr8=%rdx,<ry0=%r11
  3374. add %rdx,%r11
  3375. # qhasm: carry? ry1 += mulzero + carry
  3376. # asm 1: adc <mulzero=int64#2,<ry1=int64#10
  3377. # asm 2: adc <mulzero=%rsi,<ry1=%r12
  3378. adc %rsi,%r12
  3379. # qhasm: carry? ry2 += mulzero + carry
  3380. # asm 1: adc <mulzero=int64#2,<ry2=int64#11
  3381. # asm 2: adc <mulzero=%rsi,<ry2=%r13
  3382. adc %rsi,%r13
  3383. # qhasm: carry? ry3 += mulzero + carry
  3384. # asm 1: adc <mulzero=int64#2,<ry3=int64#12
  3385. # asm 2: adc <mulzero=%rsi,<ry3=%r14
  3386. adc %rsi,%r14
  3387. # qhasm: mulzero += mulzero + carry
  3388. # asm 1: adc <mulzero=int64#2,<mulzero=int64#2
  3389. # asm 2: adc <mulzero=%rsi,<mulzero=%rsi
  3390. adc %rsi,%rsi
  3391. # qhasm: mulzero *= 38
  3392. # asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
  3393. # asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
  3394. imulq $38,%rsi,%rsi
  3395. # qhasm: ry0 += mulzero
  3396. # asm 1: add <mulzero=int64#2,<ry0=int64#9
  3397. # asm 2: add <mulzero=%rsi,<ry0=%r11
  3398. add %rsi,%r11
  3399. # qhasm: *(uint64 *)(rp + 32) = ry0
  3400. # asm 1: movq <ry0=int64#9,32(<rp=int64#1)
  3401. # asm 2: movq <ry0=%r11,32(<rp=%rdi)
  3402. movq %r11,32(%rdi)
  3403. # qhasm: *(uint64 *)(rp + 40) = ry1
  3404. # asm 1: movq <ry1=int64#10,40(<rp=int64#1)
  3405. # asm 2: movq <ry1=%r12,40(<rp=%rdi)
  3406. movq %r12,40(%rdi)
  3407. # qhasm: *(uint64 *)(rp + 48) = ry2
  3408. # asm 1: movq <ry2=int64#11,48(<rp=int64#1)
  3409. # asm 2: movq <ry2=%r13,48(<rp=%rdi)
  3410. movq %r13,48(%rdi)
  3411. # qhasm: *(uint64 *)(rp + 56) = ry3
  3412. # asm 1: movq <ry3=int64#12,56(<rp=int64#1)
  3413. # asm 2: movq <ry3=%r14,56(<rp=%rdi)
  3414. movq %r14,56(%rdi)
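# --- annotation (not generated by qhasm) -------------------------------------
# ry0..ry3 written to rp+32..rp+56.  The third product, rz, multiplies the
# limbs from g0..g3_stack by those from f0..f3_stack using the same sequence.
# ------------------------------------------------------------------------------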
  3415. # qhasm: mulr4 = 0
  3416. # asm 1: mov $0,>mulr4=int64#2
  3417. # asm 2: mov $0,>mulr4=%rsi
  3418. mov $0,%rsi
  3419. # qhasm: mulr5 = 0
  3420. # asm 1: mov $0,>mulr5=int64#4
  3421. # asm 2: mov $0,>mulr5=%rcx
  3422. mov $0,%rcx
  3423. # qhasm: mulr6 = 0
  3424. # asm 1: mov $0,>mulr6=int64#5
  3425. # asm 2: mov $0,>mulr6=%r8
  3426. mov $0,%r8
  3427. # qhasm: mulr7 = 0
  3428. # asm 1: mov $0,>mulr7=int64#6
  3429. # asm 2: mov $0,>mulr7=%r9
  3430. mov $0,%r9
  3431. # qhasm: mulx0 = g0_stack
  3432. # asm 1: movq <g0_stack=stack64#16,>mulx0=int64#8
  3433. # asm 2: movq <g0_stack=120(%rsp),>mulx0=%r10
  3434. movq 120(%rsp),%r10
  3435. # qhasm: mulrax = f0_stack
  3436. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  3437. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  3438. movq 152(%rsp),%rax
  3439. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  3440. # asm 1: mul <mulx0=int64#8
  3441. # asm 2: mul <mulx0=%r10
  3442. mul %r10
  3443. # qhasm: rz0 = mulrax
  3444. # asm 1: mov <mulrax=int64#7,>rz0=int64#9
  3445. # asm 2: mov <mulrax=%rax,>rz0=%r11
  3446. mov %rax,%r11
  3447. # qhasm: rz1 = mulrdx
  3448. # asm 1: mov <mulrdx=int64#3,>rz1=int64#10
  3449. # asm 2: mov <mulrdx=%rdx,>rz1=%r12
  3450. mov %rdx,%r12
  3451. # qhasm: mulrax = f1_stack
  3452. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  3453. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  3454. movq 160(%rsp),%rax
  3455. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  3456. # asm 1: mul <mulx0=int64#8
  3457. # asm 2: mul <mulx0=%r10
  3458. mul %r10
  3459. # qhasm: carry? rz1 += mulrax
  3460. # asm 1: add <mulrax=int64#7,<rz1=int64#10
  3461. # asm 2: add <mulrax=%rax,<rz1=%r12
  3462. add %rax,%r12
  3463. # qhasm: rz2 = 0
  3464. # asm 1: mov $0,>rz2=int64#11
  3465. # asm 2: mov $0,>rz2=%r13
  3466. mov $0,%r13
  3467. # qhasm: rz2 += mulrdx + carry
  3468. # asm 1: adc <mulrdx=int64#3,<rz2=int64#11
  3469. # asm 2: adc <mulrdx=%rdx,<rz2=%r13
  3470. adc %rdx,%r13
  3471. # qhasm: mulrax = f2_stack
  3472. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  3473. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  3474. movq 168(%rsp),%rax
  3475. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  3476. # asm 1: mul <mulx0=int64#8
  3477. # asm 2: mul <mulx0=%r10
  3478. mul %r10
  3479. # qhasm: carry? rz2 += mulrax
  3480. # asm 1: add <mulrax=int64#7,<rz2=int64#11
  3481. # asm 2: add <mulrax=%rax,<rz2=%r13
  3482. add %rax,%r13
  3483. # qhasm: rz3 = 0
  3484. # asm 1: mov $0,>rz3=int64#12
  3485. # asm 2: mov $0,>rz3=%r14
  3486. mov $0,%r14
  3487. # qhasm: rz3 += mulrdx + carry
  3488. # asm 1: adc <mulrdx=int64#3,<rz3=int64#12
  3489. # asm 2: adc <mulrdx=%rdx,<rz3=%r14
  3490. adc %rdx,%r14
  3491. # qhasm: mulrax = f3_stack
  3492. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  3493. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  3494. movq 176(%rsp),%rax
  3495. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
  3496. # asm 1: mul <mulx0=int64#8
  3497. # asm 2: mul <mulx0=%r10
  3498. mul %r10
  3499. # qhasm: carry? rz3 += mulrax
  3500. # asm 1: add <mulrax=int64#7,<rz3=int64#12
  3501. # asm 2: add <mulrax=%rax,<rz3=%r14
  3502. add %rax,%r14
  3503. # qhasm: mulr4 += mulrdx + carry
  3504. # asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
  3505. # asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
  3506. adc %rdx,%rsi
  3507. # qhasm: mulx1 = g1_stack
  3508. # asm 1: movq <g1_stack=stack64#17,>mulx1=int64#8
  3509. # asm 2: movq <g1_stack=128(%rsp),>mulx1=%r10
  3510. movq 128(%rsp),%r10
  3511. # qhasm: mulrax = f0_stack
  3512. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  3513. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  3514. movq 152(%rsp),%rax
  3515. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  3516. # asm 1: mul <mulx1=int64#8
  3517. # asm 2: mul <mulx1=%r10
  3518. mul %r10
  3519. # qhasm: carry? rz1 += mulrax
  3520. # asm 1: add <mulrax=int64#7,<rz1=int64#10
  3521. # asm 2: add <mulrax=%rax,<rz1=%r12
  3522. add %rax,%r12
  3523. # qhasm: mulc = 0
  3524. # asm 1: mov $0,>mulc=int64#13
  3525. # asm 2: mov $0,>mulc=%r15
  3526. mov $0,%r15
  3527. # qhasm: mulc += mulrdx + carry
  3528. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3529. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3530. adc %rdx,%r15
  3531. # qhasm: mulrax = f1_stack
  3532. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  3533. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  3534. movq 160(%rsp),%rax
  3535. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  3536. # asm 1: mul <mulx1=int64#8
  3537. # asm 2: mul <mulx1=%r10
  3538. mul %r10
  3539. # qhasm: carry? rz2 += mulrax
  3540. # asm 1: add <mulrax=int64#7,<rz2=int64#11
  3541. # asm 2: add <mulrax=%rax,<rz2=%r13
  3542. add %rax,%r13
  3543. # qhasm: mulrdx += 0 + carry
  3544. # asm 1: adc $0,<mulrdx=int64#3
  3545. # asm 2: adc $0,<mulrdx=%rdx
  3546. adc $0,%rdx
  3547. # qhasm: carry? rz2 += mulc
  3548. # asm 1: add <mulc=int64#13,<rz2=int64#11
  3549. # asm 2: add <mulc=%r15,<rz2=%r13
  3550. add %r15,%r13
  3551. # qhasm: mulc = 0
  3552. # asm 1: mov $0,>mulc=int64#13
  3553. # asm 2: mov $0,>mulc=%r15
  3554. mov $0,%r15
  3555. # qhasm: mulc += mulrdx + carry
  3556. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3557. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3558. adc %rdx,%r15
  3559. # qhasm: mulrax = f2_stack
  3560. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  3561. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  3562. movq 168(%rsp),%rax
  3563. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  3564. # asm 1: mul <mulx1=int64#8
  3565. # asm 2: mul <mulx1=%r10
  3566. mul %r10
  3567. # qhasm: carry? rz3 += mulrax
  3568. # asm 1: add <mulrax=int64#7,<rz3=int64#12
  3569. # asm 2: add <mulrax=%rax,<rz3=%r14
  3570. add %rax,%r14
  3571. # qhasm: mulrdx += 0 + carry
  3572. # asm 1: adc $0,<mulrdx=int64#3
  3573. # asm 2: adc $0,<mulrdx=%rdx
  3574. adc $0,%rdx
  3575. # qhasm: carry? rz3 += mulc
  3576. # asm 1: add <mulc=int64#13,<rz3=int64#12
  3577. # asm 2: add <mulc=%r15,<rz3=%r14
  3578. add %r15,%r14
  3579. # qhasm: mulc = 0
  3580. # asm 1: mov $0,>mulc=int64#13
  3581. # asm 2: mov $0,>mulc=%r15
  3582. mov $0,%r15
  3583. # qhasm: mulc += mulrdx + carry
  3584. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3585. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3586. adc %rdx,%r15
  3587. # qhasm: mulrax = f3_stack
  3588. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  3589. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  3590. movq 176(%rsp),%rax
  3591. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
  3592. # asm 1: mul <mulx1=int64#8
  3593. # asm 2: mul <mulx1=%r10
  3594. mul %r10
  3595. # qhasm: carry? mulr4 += mulrax
  3596. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  3597. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  3598. add %rax,%rsi
  3599. # qhasm: mulrdx += 0 + carry
  3600. # asm 1: adc $0,<mulrdx=int64#3
  3601. # asm 2: adc $0,<mulrdx=%rdx
  3602. adc $0,%rdx
  3603. # qhasm: carry? mulr4 += mulc
  3604. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  3605. # asm 2: add <mulc=%r15,<mulr4=%rsi
  3606. add %r15,%rsi
  3607. # qhasm: mulr5 += mulrdx + carry
  3608. # asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
  3609. # asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
  3610. adc %rdx,%rcx
  3611. # qhasm: mulx2 = g2_stack
  3612. # asm 1: movq <g2_stack=stack64#18,>mulx2=int64#8
  3613. # asm 2: movq <g2_stack=136(%rsp),>mulx2=%r10
  3614. movq 136(%rsp),%r10
  3615. # qhasm: mulrax = f0_stack
  3616. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  3617. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  3618. movq 152(%rsp),%rax
  3619. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3620. # asm 1: mul <mulx2=int64#8
  3621. # asm 2: mul <mulx2=%r10
  3622. mul %r10
  3623. # qhasm: carry? rz2 += mulrax
  3624. # asm 1: add <mulrax=int64#7,<rz2=int64#11
  3625. # asm 2: add <mulrax=%rax,<rz2=%r13
  3626. add %rax,%r13
  3627. # qhasm: mulc = 0
  3628. # asm 1: mov $0,>mulc=int64#13
  3629. # asm 2: mov $0,>mulc=%r15
  3630. mov $0,%r15
  3631. # qhasm: mulc += mulrdx + carry
  3632. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3633. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3634. adc %rdx,%r15
  3635. # qhasm: mulrax = f1_stack
  3636. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  3637. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  3638. movq 160(%rsp),%rax
  3639. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3640. # asm 1: mul <mulx2=int64#8
  3641. # asm 2: mul <mulx2=%r10
  3642. mul %r10
  3643. # qhasm: carry? rz3 += mulrax
  3644. # asm 1: add <mulrax=int64#7,<rz3=int64#12
  3645. # asm 2: add <mulrax=%rax,<rz3=%r14
  3646. add %rax,%r14
  3647. # qhasm: mulrdx += 0 + carry
  3648. # asm 1: adc $0,<mulrdx=int64#3
  3649. # asm 2: adc $0,<mulrdx=%rdx
  3650. adc $0,%rdx
  3651. # qhasm: carry? rz3 += mulc
  3652. # asm 1: add <mulc=int64#13,<rz3=int64#12
  3653. # asm 2: add <mulc=%r15,<rz3=%r14
  3654. add %r15,%r14
  3655. # qhasm: mulc = 0
  3656. # asm 1: mov $0,>mulc=int64#13
  3657. # asm 2: mov $0,>mulc=%r15
  3658. mov $0,%r15
  3659. # qhasm: mulc += mulrdx + carry
  3660. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3661. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3662. adc %rdx,%r15
  3663. # qhasm: mulrax = f2_stack
  3664. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  3665. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  3666. movq 168(%rsp),%rax
  3667. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3668. # asm 1: mul <mulx2=int64#8
  3669. # asm 2: mul <mulx2=%r10
  3670. mul %r10
  3671. # qhasm: carry? mulr4 += mulrax
  3672. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  3673. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  3674. add %rax,%rsi
  3675. # qhasm: mulrdx += 0 + carry
  3676. # asm 1: adc $0,<mulrdx=int64#3
  3677. # asm 2: adc $0,<mulrdx=%rdx
  3678. adc $0,%rdx
  3679. # qhasm: carry? mulr4 += mulc
  3680. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  3681. # asm 2: add <mulc=%r15,<mulr4=%rsi
  3682. add %r15,%rsi
  3683. # qhasm: mulc = 0
  3684. # asm 1: mov $0,>mulc=int64#13
  3685. # asm 2: mov $0,>mulc=%r15
  3686. mov $0,%r15
  3687. # qhasm: mulc += mulrdx + carry
  3688. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3689. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3690. adc %rdx,%r15
  3691. # qhasm: mulrax = f3_stack
  3692. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  3693. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  3694. movq 176(%rsp),%rax
  3695. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
  3696. # asm 1: mul <mulx2=int64#8
  3697. # asm 2: mul <mulx2=%r10
  3698. mul %r10
  3699. # qhasm: carry? mulr5 += mulrax
  3700. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  3701. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  3702. add %rax,%rcx
  3703. # qhasm: mulrdx += 0 + carry
  3704. # asm 1: adc $0,<mulrdx=int64#3
  3705. # asm 2: adc $0,<mulrdx=%rdx
  3706. adc $0,%rdx
  3707. # qhasm: carry? mulr5 += mulc
  3708. # asm 1: add <mulc=int64#13,<mulr5=int64#4
  3709. # asm 2: add <mulc=%r15,<mulr5=%rcx
  3710. add %r15,%rcx
  3711. # qhasm: mulr6 += mulrdx + carry
  3712. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  3713. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  3714. adc %rdx,%r8
  3715. # qhasm: mulx3 = g3_stack
  3716. # asm 1: movq <g3_stack=stack64#19,>mulx3=int64#8
  3717. # asm 2: movq <g3_stack=144(%rsp),>mulx3=%r10
  3718. movq 144(%rsp),%r10
  3719. # qhasm: mulrax = f0_stack
  3720. # asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
  3721. # asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
  3722. movq 152(%rsp),%rax
  3723. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3724. # asm 1: mul <mulx3=int64#8
  3725. # asm 2: mul <mulx3=%r10
  3726. mul %r10
  3727. # qhasm: carry? rz3 += mulrax
  3728. # asm 1: add <mulrax=int64#7,<rz3=int64#12
  3729. # asm 2: add <mulrax=%rax,<rz3=%r14
  3730. add %rax,%r14
  3731. # qhasm: mulc = 0
  3732. # asm 1: mov $0,>mulc=int64#13
  3733. # asm 2: mov $0,>mulc=%r15
  3734. mov $0,%r15
  3735. # qhasm: mulc += mulrdx + carry
  3736. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3737. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3738. adc %rdx,%r15
  3739. # qhasm: mulrax = f1_stack
  3740. # asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
  3741. # asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
  3742. movq 160(%rsp),%rax
  3743. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3744. # asm 1: mul <mulx3=int64#8
  3745. # asm 2: mul <mulx3=%r10
  3746. mul %r10
  3747. # qhasm: carry? mulr4 += mulrax
  3748. # asm 1: add <mulrax=int64#7,<mulr4=int64#2
  3749. # asm 2: add <mulrax=%rax,<mulr4=%rsi
  3750. add %rax,%rsi
  3751. # qhasm: mulrdx += 0 + carry
  3752. # asm 1: adc $0,<mulrdx=int64#3
  3753. # asm 2: adc $0,<mulrdx=%rdx
  3754. adc $0,%rdx
  3755. # qhasm: carry? mulr4 += mulc
  3756. # asm 1: add <mulc=int64#13,<mulr4=int64#2
  3757. # asm 2: add <mulc=%r15,<mulr4=%rsi
  3758. add %r15,%rsi
  3759. # qhasm: mulc = 0
  3760. # asm 1: mov $0,>mulc=int64#13
  3761. # asm 2: mov $0,>mulc=%r15
  3762. mov $0,%r15
  3763. # qhasm: mulc += mulrdx + carry
  3764. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3765. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3766. adc %rdx,%r15
  3767. # qhasm: mulrax = f2_stack
  3768. # asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
  3769. # asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
  3770. movq 168(%rsp),%rax
  3771. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3772. # asm 1: mul <mulx3=int64#8
  3773. # asm 2: mul <mulx3=%r10
  3774. mul %r10
  3775. # qhasm: carry? mulr5 += mulrax
  3776. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  3777. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  3778. add %rax,%rcx
  3779. # qhasm: mulrdx += 0 + carry
  3780. # asm 1: adc $0,<mulrdx=int64#3
  3781. # asm 2: adc $0,<mulrdx=%rdx
  3782. adc $0,%rdx
  3783. # qhasm: carry? mulr5 += mulc
  3784. # asm 1: add <mulc=int64#13,<mulr5=int64#4
  3785. # asm 2: add <mulc=%r15,<mulr5=%rcx
  3786. add %r15,%rcx
  3787. # qhasm: mulc = 0
  3788. # asm 1: mov $0,>mulc=int64#13
  3789. # asm 2: mov $0,>mulc=%r15
  3790. mov $0,%r15
  3791. # qhasm: mulc += mulrdx + carry
  3792. # asm 1: adc <mulrdx=int64#3,<mulc=int64#13
  3793. # asm 2: adc <mulrdx=%rdx,<mulc=%r15
  3794. adc %rdx,%r15
  3795. # qhasm: mulrax = f3_stack
  3796. # asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
  3797. # asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
  3798. movq 176(%rsp),%rax
  3799. # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
  3800. # asm 1: mul <mulx3=int64#8
  3801. # asm 2: mul <mulx3=%r10
  3802. mul %r10
  3803. # qhasm: carry? mulr6 += mulrax
  3804. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  3805. # asm 2: add <mulrax=%rax,<mulr6=%r8
  3806. add %rax,%r8
  3807. # qhasm: mulrdx += 0 + carry
  3808. # asm 1: adc $0,<mulrdx=int64#3
  3809. # asm 2: adc $0,<mulrdx=%rdx
  3810. adc $0,%rdx
  3811. # qhasm: carry? mulr6 += mulc
  3812. # asm 1: add <mulc=int64#13,<mulr6=int64#5
  3813. # asm 2: add <mulc=%r15,<mulr6=%r8
  3814. add %r15,%r8
  3815. # qhasm: mulr7 += mulrdx + carry
  3816. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  3817. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  3818. adc %rdx,%r9
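# --- annotation (not generated by qhasm) -------------------------------------
# Note on register reuse: every product uses the same allocation.  The result
# limbs live in %r11..%r14, the high limbs mulr4..mulr7 in %rsi,%rcx,%r8,%r9,
# the current multiplier mulx* in %r10, the row carry mulc in %r15, and
# %rax/%rdx are consumed by each widening mul; rp stays in %rdi.  Only the
# stack operands change from one product to the next.
# ------------------------------------------------------------------------------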
  3819. # qhasm: mulrax = mulr4
  3820. # asm 1: mov <mulr4=int64#2,>mulrax=int64#7
  3821. # asm 2: mov <mulr4=%rsi,>mulrax=%rax
  3822. mov %rsi,%rax
  3823. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3824. mulq CRYPTO_NAMESPACE(38)(%rip)
  3825. # qhasm: mulr4 = mulrax
  3826. # asm 1: mov <mulrax=int64#7,>mulr4=int64#2
  3827. # asm 2: mov <mulrax=%rax,>mulr4=%rsi
  3828. mov %rax,%rsi
  3829. # qhasm: mulrax = mulr5
  3830. # asm 1: mov <mulr5=int64#4,>mulrax=int64#7
  3831. # asm 2: mov <mulr5=%rcx,>mulrax=%rax
  3832. mov %rcx,%rax
  3833. # qhasm: mulr5 = mulrdx
  3834. # asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
  3835. # asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
  3836. mov %rdx,%rcx
  3837. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3838. mulq CRYPTO_NAMESPACE(38)(%rip)
  3839. # qhasm: carry? mulr5 += mulrax
  3840. # asm 1: add <mulrax=int64#7,<mulr5=int64#4
  3841. # asm 2: add <mulrax=%rax,<mulr5=%rcx
  3842. add %rax,%rcx
  3843. # qhasm: mulrax = mulr6
  3844. # asm 1: mov <mulr6=int64#5,>mulrax=int64#7
  3845. # asm 2: mov <mulr6=%r8,>mulrax=%rax
  3846. mov %r8,%rax
  3847. # qhasm: mulr6 = 0
  3848. # asm 1: mov $0,>mulr6=int64#5
  3849. # asm 2: mov $0,>mulr6=%r8
  3850. mov $0,%r8
  3851. # qhasm: mulr6 += mulrdx + carry
  3852. # asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
  3853. # asm 2: adc <mulrdx=%rdx,<mulr6=%r8
  3854. adc %rdx,%r8
  3855. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3856. mulq CRYPTO_NAMESPACE(38)(%rip)
  3857. # qhasm: carry? mulr6 += mulrax
  3858. # asm 1: add <mulrax=int64#7,<mulr6=int64#5
  3859. # asm 2: add <mulrax=%rax,<mulr6=%r8
  3860. add %rax,%r8
  3861. # qhasm: mulrax = mulr7
  3862. # asm 1: mov <mulr7=int64#6,>mulrax=int64#7
  3863. # asm 2: mov <mulr7=%r9,>mulrax=%rax
  3864. mov %r9,%rax
  3865. # qhasm: mulr7 = 0
  3866. # asm 1: mov $0,>mulr7=int64#6
  3867. # asm 2: mov $0,>mulr7=%r9
  3868. mov $0,%r9
  3869. # qhasm: mulr7 += mulrdx + carry
  3870. # asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
  3871. # asm 2: adc <mulrdx=%rdx,<mulr7=%r9
  3872. adc %rdx,%r9
  3873. # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
  3874. mulq CRYPTO_NAMESPACE(38)(%rip)
  3875. # qhasm: carry? mulr7 += mulrax
  3876. # asm 1: add <mulrax=int64#7,<mulr7=int64#6
  3877. # asm 2: add <mulrax=%rax,<mulr7=%r9
  3878. add %rax,%r9
  3879. # qhasm: mulr8 = 0
  3880. # asm 1: mov $0,>mulr8=int64#7
  3881. # asm 2: mov $0,>mulr8=%rax
  3882. mov $0,%rax
  3883. # qhasm: mulr8 += mulrdx + carry
  3884. # asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
  3885. # asm 2: adc <mulrdx=%rdx,<mulr8=%rax
  3886. adc %rdx,%rax
  3887. # qhasm: carry? rz0 += mulr4
  3888. # asm 1: add <mulr4=int64#2,<rz0=int64#9
  3889. # asm 2: add <mulr4=%rsi,<rz0=%r11
  3890. add %rsi,%r11
  3891. # qhasm: carry? rz1 += mulr5 + carry
  3892. # asm 1: adc <mulr5=int64#4,<rz1=int64#10
  3893. # asm 2: adc <mulr5=%rcx,<rz1=%r12
  3894. adc %rcx,%r12
  3895. # qhasm: carry? rz2 += mulr6 + carry
  3896. # asm 1: adc <mulr6=int64#5,<rz2=int64#11
  3897. # asm 2: adc <mulr6=%r8,<rz2=%r13
  3898. adc %r8,%r13
  3899. # qhasm: carry? rz3 += mulr7 + carry
  3900. # asm 1: adc <mulr7=int64#6,<rz3=int64#12
  3901. # asm 2: adc <mulr7=%r9,<rz3=%r14
  3902. adc %r9,%r14
  3903. # qhasm: mulzero = 0
  3904. # asm 1: mov $0,>mulzero=int64#2
  3905. # asm 2: mov $0,>mulzero=%rsi
  3906. mov $0,%rsi
  3907. # qhasm: mulr8 += mulzero + carry
  3908. # asm 1: adc <mulzero=int64#2,<mulr8=int64#7
  3909. # asm 2: adc <mulzero=%rsi,<mulr8=%rax
  3910. adc %rsi,%rax
  3911. # qhasm: mulr8 *= 38
  3912. # asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
  3913. # asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
  3914. imulq $38,%rax,%rdx
  3915. # qhasm: carry? rz0 += mulr8
  3916. # asm 1: add <mulr8=int64#3,<rz0=int64#9
  3917. # asm 2: add <mulr8=%rdx,<rz0=%r11
  3918. add %rdx,%r11
  3919. # qhasm: carry? rz1 += mulzero + carry
  3920. # asm 1: adc <mulzero=int64#2,<rz1=int64#10
  3921. # asm 2: adc <mulzero=%rsi,<rz1=%r12
  3922. adc %rsi,%r12
  3923. # qhasm: carry? rz2 += mulzero + carry
  3924. # asm 1: adc <mulzero=int64#2,<rz2=int64#11
  3925. # asm 2: adc <mulzero=%rsi,<rz2=%r13
  3926. adc %rsi,%r13
  3927. # qhasm: carry? rz3 += mulzero + carry
  3928. # asm 1: adc <mulzero=int64#2,<rz3=int64#12
  3929. # asm 2: adc <mulzero=%rsi,<rz3=%r14
  3930. adc %rsi,%r14
  3931. # qhasm: mulzero += mulzero + carry
  3932. # asm 1: adc <mulzero=int64#2,<mulzero=int64#2
  3933. # asm 2: adc <mulzero=%rsi,<mulzero=%rsi
  3934. adc %rsi,%rsi
  3935. # qhasm: mulzero *= 38
  3936. # asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
  3937. # asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
  3938. imulq $38,%rsi,%rsi
  3939. # qhasm: rz0 += mulzero
  3940. # asm 1: add <mulzero=int64#2,<rz0=int64#9
  3941. # asm 2: add <mulzero=%rsi,<rz0=%r11
  3942. add %rsi,%r11
# qhasm: *(uint64 *)(rp + 64) = rz0
# asm 1: movq <rz0=int64#9,64(<rp=int64#1)
# asm 2: movq <rz0=%r11,64(<rp=%rdi)
movq %r11,64(%rdi)
# qhasm: *(uint64 *)(rp + 72) = rz1
# asm 1: movq <rz1=int64#10,72(<rp=int64#1)
# asm 2: movq <rz1=%r12,72(<rp=%rdi)
movq %r12,72(%rdi)
# qhasm: *(uint64 *)(rp + 80) = rz2
# asm 1: movq <rz2=int64#11,80(<rp=int64#1)
# asm 2: movq <rz2=%r13,80(<rp=%rdi)
movq %r13,80(%rdi)
# qhasm: *(uint64 *)(rp + 88) = rz3
# asm 1: movq <rz3=int64#12,88(<rp=int64#1)
# asm 2: movq <rz3=%r14,88(<rp=%rdi)
movq %r14,88(%rdi)
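# Clear the high-limb accumulators mulr4..mulr7 in preparation for the next
# 4x4-limb schoolbook multiplication.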
# qhasm: mulr4 = 0
# asm 1: mov $0,>mulr4=int64#2
# asm 2: mov $0,>mulr4=%rsi
mov $0,%rsi
# qhasm: mulr5 = 0
# asm 1: mov $0,>mulr5=int64#4
# asm 2: mov $0,>mulr5=%rcx
mov $0,%rcx
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#5
# asm 2: mov $0,>mulr6=%r8
mov $0,%r8
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#6
# asm 2: mov $0,>mulr7=%r9
mov $0,%r9
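# Row 0 of the schoolbook product, apparently computing e*h for the final
# output coordinate: multiply e0 (mulx0) by h0..h3; the partial sums land in
# rt0..rt3 and the first high limb in mulr4.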
# qhasm: mulx0 = e0_stack
# asm 1: movq <e0_stack=stack64#12,>mulx0=int64#8
# asm 2: movq <e0_stack=88(%rsp),>mulx0=%r10
movq 88(%rsp),%r10
# qhasm: mulrax = h0_stack
# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#8
# asm 2: mul <mulx0=%r10
mul %r10
# qhasm: rt0 = mulrax
# asm 1: mov <mulrax=int64#7,>rt0=int64#9
# asm 2: mov <mulrax=%rax,>rt0=%r11
mov %rax,%r11
# qhasm: rt1 = mulrdx
# asm 1: mov <mulrdx=int64#3,>rt1=int64#10
# asm 2: mov <mulrdx=%rdx,>rt1=%r12
mov %rdx,%r12
# qhasm: mulrax = h1_stack
# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#8
# asm 2: mul <mulx0=%r10
mul %r10
# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#10
# asm 2: add <mulrax=%rax,<rt1=%r12
add %rax,%r12
# qhasm: rt2 = 0
# asm 1: mov $0,>rt2=int64#11
# asm 2: mov $0,>rt2=%r13
mov $0,%r13
# qhasm: rt2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rt2=int64#11
# asm 2: adc <mulrdx=%rdx,<rt2=%r13
adc %rdx,%r13
# qhasm: mulrax = h2_stack
# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#8
# asm 2: mul <mulx0=%r10
mul %r10
# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#11
# asm 2: add <mulrax=%rax,<rt2=%r13
add %rax,%r13
# qhasm: rt3 = 0
# asm 1: mov $0,>rt3=int64#12
# asm 2: mov $0,>rt3=%r14
mov $0,%r14
# qhasm: rt3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rt3=int64#12
# asm 2: adc <mulrdx=%rdx,<rt3=%r14
adc %rdx,%r14
# qhasm: mulrax = h3_stack
# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul <mulx0=int64#8
# asm 2: mul <mulx0=%r10
mul %r10
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#12
# asm 2: add <mulrax=%rax,<rt3=%r14
add %rax,%r14
# qhasm: mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
adc %rdx,%rsi
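# Row 1: multiply e1 (mulx1) by h0..h3 and add the partial products one limb
# higher; mulc carries the ripple between adjacent limbs.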
# qhasm: mulx1 = e1_stack
# asm 1: movq <e1_stack=stack64#13,>mulx1=int64#8
# asm 2: movq <e1_stack=96(%rsp),>mulx1=%r10
movq 96(%rsp),%r10
# qhasm: mulrax = h0_stack
# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#8
# asm 2: mul <mulx1=%r10
mul %r10
# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#10
# asm 2: add <mulrax=%rax,<rt1=%r12
add %rax,%r12
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h1_stack
# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#8
# asm 2: mul <mulx1=%r10
mul %r10
# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#11
# asm 2: add <mulrax=%rax,<rt2=%r13
add %rax,%r13
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rt2 += mulc
# asm 1: add <mulc=int64#13,<rt2=int64#11
# asm 2: add <mulc=%r15,<rt2=%r13
add %r15,%r13
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h2_stack
# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#8
# asm 2: mul <mulx1=%r10
mul %r10
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#12
# asm 2: add <mulrax=%rax,<rt3=%r14
add %rax,%r14
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rt3 += mulc
# asm 1: add <mulc=int64#13,<rt3=int64#12
# asm 2: add <mulc=%r15,<rt3=%r14
add %r15,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h3_stack
# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul <mulx1=int64#8
# asm 2: mul <mulx1=%r10
mul %r10
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#2
# asm 2: add <mulrax=%rax,<mulr4=%rsi
add %rax,%rsi
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#13,<mulr4=int64#2
# asm 2: add <mulc=%r15,<mulr4=%rsi
add %r15,%rsi
# qhasm: mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
adc %rdx,%rcx
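# Row 2: multiply e2 (mulx2) by h0..h3 and accumulate two limbs higher.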
# qhasm: mulx2 = e2_stack
# asm 1: movq <e2_stack=stack64#14,>mulx2=int64#8
# asm 2: movq <e2_stack=104(%rsp),>mulx2=%r10
movq 104(%rsp),%r10
# qhasm: mulrax = h0_stack
# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#8
# asm 2: mul <mulx2=%r10
mul %r10
# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#11
# asm 2: add <mulrax=%rax,<rt2=%r13
add %rax,%r13
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h1_stack
# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#8
# asm 2: mul <mulx2=%r10
mul %r10
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#12
# asm 2: add <mulrax=%rax,<rt3=%r14
add %rax,%r14
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? rt3 += mulc
# asm 1: add <mulc=int64#13,<rt3=int64#12
# asm 2: add <mulc=%r15,<rt3=%r14
add %r15,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h2_stack
# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#8
# asm 2: mul <mulx2=%r10
mul %r10
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#2
# asm 2: add <mulrax=%rax,<mulr4=%rsi
add %rax,%rsi
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#13,<mulr4=int64#2
# asm 2: add <mulc=%r15,<mulr4=%rsi
add %r15,%rsi
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h3_stack
# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul <mulx2=int64#8
# asm 2: mul <mulx2=%r10
mul %r10
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
# asm 2: add <mulrax=%rax,<mulr5=%rcx
add %rax,%rcx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#13,<mulr5=int64#4
# asm 2: add <mulc=%r15,<mulr5=%rcx
add %r15,%rcx
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8
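# Row 3: multiply e3 (mulx3) by h0..h3 and accumulate three limbs higher; the
# full 512-bit product now spans rt0..rt3 (low) and mulr4..mulr7 (high).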
# qhasm: mulx3 = e3_stack
# asm 1: movq <e3_stack=stack64#15,>mulx3=int64#8
# asm 2: movq <e3_stack=112(%rsp),>mulx3=%r10
movq 112(%rsp),%r10
# qhasm: mulrax = h0_stack
# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#8
# asm 2: mul <mulx3=%r10
mul %r10
# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#12
# asm 2: add <mulrax=%rax,<rt3=%r14
add %rax,%r14
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h1_stack
# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#8
# asm 2: mul <mulx3=%r10
mul %r10
# qhasm: carry? mulr4 += mulrax
# asm 1: add <mulrax=int64#7,<mulr4=int64#2
# asm 2: add <mulrax=%rax,<mulr4=%rsi
add %rax,%rsi
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr4 += mulc
# asm 1: add <mulc=int64#13,<mulr4=int64#2
# asm 2: add <mulc=%r15,<mulr4=%rsi
add %r15,%rsi
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h2_stack
# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#8
# asm 2: mul <mulx3=%r10
mul %r10
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
# asm 2: add <mulrax=%rax,<mulr5=%rcx
add %rax,%rcx
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr5 += mulc
# asm 1: add <mulc=int64#13,<mulr5=int64#4
# asm 2: add <mulc=%r15,<mulr5=%rcx
add %r15,%rcx
# qhasm: mulc = 0
# asm 1: mov $0,>mulc=int64#13
# asm 2: mov $0,>mulc=%r15
mov $0,%r15
# qhasm: mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
# asm 2: adc <mulrdx=%rdx,<mulc=%r15
adc %rdx,%r15
# qhasm: mulrax = h3_stack
# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
movq 80(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul <mulx3=int64#8
# asm 2: mul <mulx3=%r10
mul %r10
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
# asm 2: add <mulrax=%rax,<mulr6=%r8
add %rax,%r8
# qhasm: mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx
# qhasm: carry? mulr6 += mulc
# asm 1: add <mulc=int64#13,<mulr6=int64#5
# asm 2: add <mulc=%r15,<mulr6=%r8
add %r15,%r8
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9
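# Reduce the 512-bit product: scale each high limb mulr4..mulr7 by the
# constant 38 (= 2^256 mod 2^255-19) so it can be folded into the low half.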
# qhasm: mulrax = mulr4
# asm 1: mov <mulr4=int64#2,>mulrax=int64#7
# asm 2: mov <mulr4=%rsi,>mulrax=%rax
mov %rsi,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: mulr4 = mulrax
# asm 1: mov <mulrax=int64#7,>mulr4=int64#2
# asm 2: mov <mulrax=%rax,>mulr4=%rsi
mov %rax,%rsi
# qhasm: mulrax = mulr5
# asm 1: mov <mulr5=int64#4,>mulrax=int64#7
# asm 2: mov <mulr5=%rcx,>mulrax=%rax
mov %rcx,%rax
# qhasm: mulr5 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr5=int64#4
# asm 2: mov <mulrdx=%rdx,>mulr5=%rcx
mov %rdx,%rcx
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr5 += mulrax
# asm 1: add <mulrax=int64#7,<mulr5=int64#4
# asm 2: add <mulrax=%rax,<mulr5=%rcx
add %rax,%rcx
# qhasm: mulrax = mulr6
# asm 1: mov <mulr6=int64#5,>mulrax=int64#7
# asm 2: mov <mulr6=%r8,>mulrax=%rax
mov %r8,%rax
# qhasm: mulr6 = 0
# asm 1: mov $0,>mulr6=int64#5
# asm 2: mov $0,>mulr6=%r8
mov $0,%r8
# qhasm: mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr6 += mulrax
# asm 1: add <mulrax=int64#7,<mulr6=int64#5
# asm 2: add <mulrax=%rax,<mulr6=%r8
add %rax,%r8
# qhasm: mulrax = mulr7
# asm 1: mov <mulr7=int64#6,>mulrax=int64#7
# asm 2: mov <mulr7=%r9,>mulrax=%rax
mov %r9,%rax
# qhasm: mulr7 = 0
# asm 1: mov $0,>mulr7=int64#6
# asm 2: mov $0,>mulr7=%r9
mov $0,%r9
# qhasm: mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38)
mulq CRYPTO_NAMESPACE(38)(%rip)
# qhasm: carry? mulr7 += mulrax
# asm 1: add <mulrax=int64#7,<mulr7=int64#6
# asm 2: add <mulrax=%rax,<mulr7=%r9
add %rax,%r9
# qhasm: mulr8 = 0
# asm 1: mov $0,>mulr8=int64#7
# asm 2: mov $0,>mulr8=%rax
mov $0,%rax
# qhasm: mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax
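# Fold the 38-scaled high limbs into rt0..rt3, then absorb the leftover top
# word (mulr8) and any final carry, each multiplied by 38, into rt0.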
# qhasm: carry? rt0 += mulr4
# asm 1: add <mulr4=int64#2,<rt0=int64#9
# asm 2: add <mulr4=%rsi,<rt0=%r11
add %rsi,%r11
# qhasm: carry? rt1 += mulr5 + carry
# asm 1: adc <mulr5=int64#4,<rt1=int64#10
# asm 2: adc <mulr5=%rcx,<rt1=%r12
adc %rcx,%r12
# qhasm: carry? rt2 += mulr6 + carry
# asm 1: adc <mulr6=int64#5,<rt2=int64#11
# asm 2: adc <mulr6=%r8,<rt2=%r13
adc %r8,%r13
# qhasm: carry? rt3 += mulr7 + carry
# asm 1: adc <mulr7=int64#6,<rt3=int64#12
# asm 2: adc <mulr7=%r9,<rt3=%r14
adc %r9,%r14
# qhasm: mulzero = 0
# asm 1: mov $0,>mulzero=int64#2
# asm 2: mov $0,>mulzero=%rsi
mov $0,%rsi
# qhasm: mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
# asm 2: adc <mulzero=%rsi,<mulr8=%rax
adc %rsi,%rax
# qhasm: mulr8 *= 38
# asm 1: imulq $38,<mulr8=int64#7,>mulr8=int64#3
# asm 2: imulq $38,<mulr8=%rax,>mulr8=%rdx
imulq $38,%rax,%rdx
# qhasm: carry? rt0 += mulr8
# asm 1: add <mulr8=int64#3,<rt0=int64#9
# asm 2: add <mulr8=%rdx,<rt0=%r11
add %rdx,%r11
# qhasm: carry? rt1 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt1=int64#10
# asm 2: adc <mulzero=%rsi,<rt1=%r12
adc %rsi,%r12
# qhasm: carry? rt2 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt2=int64#11
# asm 2: adc <mulzero=%rsi,<rt2=%r13
adc %rsi,%r13
# qhasm: carry? rt3 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt3=int64#12
# asm 2: adc <mulzero=%rsi,<rt3=%r14
adc %rsi,%r14
# qhasm: mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
adc %rsi,%rsi
# qhasm: mulzero *= 38
# asm 1: imulq $38,<mulzero=int64#2,>mulzero=int64#2
# asm 2: imulq $38,<mulzero=%rsi,>mulzero=%rsi
imulq $38,%rsi,%rsi
# qhasm: rt0 += mulzero
# asm 1: add <mulzero=int64#2,<rt0=int64#9
# asm 2: add <mulzero=%rsi,<rt0=%r11
add %rsi,%r11
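# Store rt0..rt3 to rp+96..rp+120, presumably the T component of the result
# point.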
# qhasm: *(uint64 *)(rp + 96) = rt0
# asm 1: movq <rt0=int64#9,96(<rp=int64#1)
# asm 2: movq <rt0=%r11,96(<rp=%rdi)
movq %r11,96(%rdi)
# qhasm: *(uint64 *)(rp + 104) = rt1
# asm 1: movq <rt1=int64#10,104(<rp=int64#1)
# asm 2: movq <rt1=%r12,104(<rp=%rdi)
movq %r12,104(%rdi)
# qhasm: *(uint64 *)(rp + 112) = rt2
# asm 1: movq <rt2=int64#11,112(<rp=int64#1)
# asm 2: movq <rt2=%r13,112(<rp=%rdi)
movq %r13,112(%rdi)
# qhasm: *(uint64 *)(rp + 120) = rt3
# asm 1: movq <rt3=int64#12,120(<rp=int64#1)
# asm 2: movq <rt3=%r14,120(<rp=%rdi)
movq %r14,120(%rdi)
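# Reload caller1..caller7 (%r11..%r15, %rbx, %rbp) from their stack slots;
# %r11 presumably holds the stack adjustment saved by the prologue, which the
# leave sequence below uses to restore %rsp.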
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
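# qhasm leave sequence: undo the stack adjustment via %r11, copy %rdi/%rsi
# into the %rax/%rdx return registers as the qhasm convention appears to
# expect, and return.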
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret