430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
3552235532355423555235562355723558235592356023561235622356323564235652356623567235682356923570235712357223573235742357523576235772357823579235802358123582235832358423585235862358723588235892359023591235922359323594235952359623597235982359923600236012360223603236042360523606236072360823609236102361123612236132361423615236162361723618236192362023621236222362323624236252362623627236282362923630236312363223633236342363523636236372363823639236402364123642236432364423645236462364723648236492365023651236522365323654236552365623657236582365923660236612366223663236642366523666236672366823669236702367123672236732367423675236762367723678236792368023681236822368323684236852368623687236882368923690236912369223693236942369523696236972369823699237002370123702237032370423705237062370723708237092371023711237122371323714237152371623717237182371923720237212372223723237242372523726237272372823729237302373123732237332373423735237362373723738237392374023741237422374323744237452374623747237482374923750237512375223753237542375523756237572375823759237602376123762237632376423765237662376723768237692377023771237722377323774237752377623777237782377923780237812378223783237842378523786237872378823789237902379123792237932379423795237962379723798237992380023801238022380323804238052380623807238082380923810238112381223813238142381523816238172381823819238202382123822238232382423825238262382723828238292383023831238322383323834238352383623837238382383923840238412384223843238442384523846238472384823849238502385123852238532385423855238562385723858238592386023861238622386323864238652386623867238682386923870238712387223873238742387523876238772387823879238802388123882238832388423885238862388723888238892389023891238922389323894238952389623897238982389923900239012390223903239042390523906239072390823909239102391123912239132391423915239162391723918239192392023921239222392323924239252392623927239282392923930239312393223933239342393523936239372393823939239402394123942239432394423945239462394723948239492395023951239522395323954239552395623957239582395923960239612396223963239642396523966239672396823969239702397123972239732397423975239762397723978239792398023981239822398323984239852398623987239882398923990239912399223993239942399523996239972399823999240002400124002240032400424005240062400724008240092401024011240122401324014240152401624017240182401924020240212402224023240242402524026240272402824029240302403124032240332403424035240362403724038240392404024041240422404324044240452404624047240482404924050 |
- //===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This file implements the AArch64TargetLowering class.
- //
- //===----------------------------------------------------------------------===//
- #include "AArch64ISelLowering.h"
- #include "AArch64CallingConvention.h"
- #include "AArch64ExpandImm.h"
- #include "AArch64MachineFunctionInfo.h"
- #include "AArch64PerfectShuffle.h"
- #include "AArch64RegisterInfo.h"
- #include "AArch64Subtarget.h"
- #include "MCTargetDesc/AArch64AddressingModes.h"
- #include "Utils/AArch64BaseInfo.h"
- #include "llvm/ADT/APFloat.h"
- #include "llvm/ADT/APInt.h"
- #include "llvm/ADT/ArrayRef.h"
- #include "llvm/ADT/STLExtras.h"
- #include "llvm/ADT/SmallSet.h"
- #include "llvm/ADT/SmallVector.h"
- #include "llvm/ADT/Statistic.h"
- #include "llvm/ADT/StringRef.h"
- #include "llvm/ADT/Triple.h"
- #include "llvm/ADT/Twine.h"
- #include "llvm/Analysis/LoopInfo.h"
- #include "llvm/Analysis/MemoryLocation.h"
- #include "llvm/Analysis/ObjCARCUtil.h"
- #include "llvm/Analysis/TargetTransformInfo.h"
- #include "llvm/Analysis/ValueTracking.h"
- #include "llvm/Analysis/VectorUtils.h"
- #include "llvm/CodeGen/Analysis.h"
- #include "llvm/CodeGen/CallingConvLower.h"
- #include "llvm/CodeGen/ISDOpcodes.h"
- #include "llvm/CodeGen/MachineBasicBlock.h"
- #include "llvm/CodeGen/MachineFrameInfo.h"
- #include "llvm/CodeGen/MachineFunction.h"
- #include "llvm/CodeGen/MachineInstr.h"
- #include "llvm/CodeGen/MachineInstrBuilder.h"
- #include "llvm/CodeGen/MachineMemOperand.h"
- #include "llvm/CodeGen/MachineRegisterInfo.h"
- #include "llvm/CodeGen/RuntimeLibcalls.h"
- #include "llvm/CodeGen/SelectionDAG.h"
- #include "llvm/CodeGen/SelectionDAGNodes.h"
- #include "llvm/CodeGen/TargetCallingConv.h"
- #include "llvm/CodeGen/TargetInstrInfo.h"
- #include "llvm/CodeGen/ValueTypes.h"
- #include "llvm/IR/Attributes.h"
- #include "llvm/IR/Constants.h"
- #include "llvm/IR/DataLayout.h"
- #include "llvm/IR/DebugLoc.h"
- #include "llvm/IR/DerivedTypes.h"
- #include "llvm/IR/Function.h"
- #include "llvm/IR/GetElementPtrTypeIterator.h"
- #include "llvm/IR/GlobalValue.h"
- #include "llvm/IR/IRBuilder.h"
- #include "llvm/IR/Instruction.h"
- #include "llvm/IR/Instructions.h"
- #include "llvm/IR/IntrinsicInst.h"
- #include "llvm/IR/Intrinsics.h"
- #include "llvm/IR/IntrinsicsAArch64.h"
- #include "llvm/IR/Module.h"
- #include "llvm/IR/OperandTraits.h"
- #include "llvm/IR/PatternMatch.h"
- #include "llvm/IR/Type.h"
- #include "llvm/IR/Use.h"
- #include "llvm/IR/Value.h"
- #include "llvm/MC/MCRegisterInfo.h"
- #include "llvm/Support/Casting.h"
- #include "llvm/Support/CodeGen.h"
- #include "llvm/Support/CommandLine.h"
- #include "llvm/Support/Compiler.h"
- #include "llvm/Support/Debug.h"
- #include "llvm/Support/ErrorHandling.h"
- #include "llvm/Support/InstructionCost.h"
- #include "llvm/Support/KnownBits.h"
- #include "llvm/Support/MachineValueType.h"
- #include "llvm/Support/MathExtras.h"
- #include "llvm/Support/raw_ostream.h"
- #include "llvm/Target/TargetMachine.h"
- #include "llvm/Target/TargetOptions.h"
- #include <algorithm>
- #include <bitset>
- #include <cassert>
- #include <cctype>
- #include <cstdint>
- #include <cstdlib>
- #include <iterator>
- #include <limits>
- #include <optional>
- #include <tuple>
- #include <utility>
- #include <vector>
- using namespace llvm;
- using namespace llvm::PatternMatch;
- #define DEBUG_TYPE "aarch64-lower"
- STATISTIC(NumTailCalls, "Number of tail calls");
- STATISTIC(NumShiftInserts, "Number of vector shift inserts");
- STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
- // FIXME: The necessary dtprel relocations don't seem to be supported
- // well in the GNU bfd and gold linkers at the moment. Therefore, by
- // default, for now, fall back to GeneralDynamic code generation.
- cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
- "aarch64-elf-ldtls-generation", cl::Hidden,
- cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
- cl::init(false));
- static cl::opt<bool>
- EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
- cl::desc("Enable AArch64 logical imm instruction "
- "optimization"),
- cl::init(true));
- // Temporary option added to test the functionality added to
- // DAGCombiner.cpp in D92230. It is expected that this can be removed in
- // the future once both implementations are based on MGATHER rather than
- // the GLD1 nodes added for the SVE gather load intrinsics.
- static cl::opt<bool>
- EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
- cl::desc("Combine extends of AArch64 masked "
- "gather intrinsics"),
- cl::init(true));
- // The XOR, OR and CMP nodes all use ALU ports, and the data dependency becomes
- // the bottleneck after this transform on high-end CPUs. This maximum leaf-node
- // limit guards that the cmp+ccmp transform remains profitable.
- static cl::opt<unsigned> MaxXors("aarch64-max-xors", cl::init(16), cl::Hidden,
- cl::desc("Maximum of xors"));
- /// Value type used for condition codes.
- static const MVT MVT_CC = MVT::i32;
- static inline EVT getPackedSVEVectorVT(EVT VT) {
- switch (VT.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("unexpected element type for vector");
- case MVT::i8:
- return MVT::nxv16i8;
- case MVT::i16:
- return MVT::nxv8i16;
- case MVT::i32:
- return MVT::nxv4i32;
- case MVT::i64:
- return MVT::nxv2i64;
- case MVT::f16:
- return MVT::nxv8f16;
- case MVT::f32:
- return MVT::nxv4f32;
- case MVT::f64:
- return MVT::nxv2f64;
- case MVT::bf16:
- return MVT::nxv8bf16;
- }
- }
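- // For example (illustrative, derived from the switch above):
- //   getPackedSVEVectorVT(MVT::f16) == MVT::nxv8f16
- //   getPackedSVEVectorVT(MVT::i32) == MVT::nxv4i32
- // i.e. the packed scalable vector whose known-minimum size fills one 128-bit SVE block.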
- // NOTE: Currently there's only a need to return integer vector types. If this
- // changes then just add an extra "type" parameter.
- static inline EVT getPackedSVEVectorVT(ElementCount EC) {
- switch (EC.getKnownMinValue()) {
- default:
- llvm_unreachable("unexpected element count for vector");
- case 16:
- return MVT::nxv16i8;
- case 8:
- return MVT::nxv8i16;
- case 4:
- return MVT::nxv4i32;
- case 2:
- return MVT::nxv2i64;
- }
- }
- static inline EVT getPromotedVTForPredicate(EVT VT) {
- assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
- "Expected scalable predicate vector type!");
- switch (VT.getVectorMinNumElements()) {
- default:
- llvm_unreachable("unexpected element count for vector");
- case 2:
- return MVT::nxv2i64;
- case 4:
- return MVT::nxv4i32;
- case 8:
- return MVT::nxv8i16;
- case 16:
- return MVT::nxv16i8;
- }
- }
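- // For example (illustrative, derived from the switch above): a predicate of type
- // nxv4i1 is promoted to nxv4i32, the integer vector with the same element count.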
- /// Returns true if VT's elements occupy the lowest bit positions of its
- /// associated register class without any intervening space.
- ///
- /// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
- /// same register class, but only nxv8f16 can be treated as a packed vector.
- static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {
- assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
- "Expected legal vector type!");
- return VT.isFixedLengthVector() ||
- VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock;
- }
- // Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
- // predicate and end with a passthru value matching the result type.
- static bool isMergePassthruOpcode(unsigned Opc) {
- switch (Opc) {
- default:
- return false;
- case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
- case AArch64ISD::BSWAP_MERGE_PASSTHRU:
- case AArch64ISD::REVH_MERGE_PASSTHRU:
- case AArch64ISD::REVW_MERGE_PASSTHRU:
- case AArch64ISD::REVD_MERGE_PASSTHRU:
- case AArch64ISD::CTLZ_MERGE_PASSTHRU:
- case AArch64ISD::CTPOP_MERGE_PASSTHRU:
- case AArch64ISD::DUP_MERGE_PASSTHRU:
- case AArch64ISD::ABS_MERGE_PASSTHRU:
- case AArch64ISD::NEG_MERGE_PASSTHRU:
- case AArch64ISD::FNEG_MERGE_PASSTHRU:
- case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
- case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
- case AArch64ISD::FCEIL_MERGE_PASSTHRU:
- case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
- case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
- case AArch64ISD::FRINT_MERGE_PASSTHRU:
- case AArch64ISD::FROUND_MERGE_PASSTHRU:
- case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
- case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
- case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
- case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
- case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
- case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU:
- case AArch64ISD::FCVTZU_MERGE_PASSTHRU:
- case AArch64ISD::FCVTZS_MERGE_PASSTHRU:
- case AArch64ISD::FSQRT_MERGE_PASSTHRU:
- case AArch64ISD::FRECPX_MERGE_PASSTHRU:
- case AArch64ISD::FABS_MERGE_PASSTHRU:
- return true;
- }
- }
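- // Illustrative operand layout for these nodes (an assumption based on the comment
- // above, not a definitive definition):
- //   Result = <OPC>_MERGE_PASSTHRU(Predicate, Source operands..., Passthru)
- // where Passthru has the same type as Result and supplies the inactive lanes.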
- // Returns true if inactive lanes are known to be zeroed by construction.
- static bool isZeroingInactiveLanes(SDValue Op) {
- switch (Op.getOpcode()) {
- default:
- // We guarantee i1 splat_vectors to zero the other lanes by
- // implementing it with ptrue and possibly a punpklo for nxv1i1.
- if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
- return true;
- return false;
- case AArch64ISD::PTRUE:
- case AArch64ISD::SETCC_MERGE_ZERO:
- return true;
- case ISD::INTRINSIC_WO_CHAIN:
- switch (Op.getConstantOperandVal(0)) {
- default:
- return false;
- case Intrinsic::aarch64_sve_ptrue:
- case Intrinsic::aarch64_sve_pnext:
- case Intrinsic::aarch64_sve_cmpeq:
- case Intrinsic::aarch64_sve_cmpne:
- case Intrinsic::aarch64_sve_cmpge:
- case Intrinsic::aarch64_sve_cmpgt:
- case Intrinsic::aarch64_sve_cmphs:
- case Intrinsic::aarch64_sve_cmphi:
- case Intrinsic::aarch64_sve_cmpeq_wide:
- case Intrinsic::aarch64_sve_cmpne_wide:
- case Intrinsic::aarch64_sve_cmpge_wide:
- case Intrinsic::aarch64_sve_cmpgt_wide:
- case Intrinsic::aarch64_sve_cmplt_wide:
- case Intrinsic::aarch64_sve_cmple_wide:
- case Intrinsic::aarch64_sve_cmphs_wide:
- case Intrinsic::aarch64_sve_cmphi_wide:
- case Intrinsic::aarch64_sve_cmplo_wide:
- case Intrinsic::aarch64_sve_cmpls_wide:
- case Intrinsic::aarch64_sve_fcmpeq:
- case Intrinsic::aarch64_sve_fcmpne:
- case Intrinsic::aarch64_sve_fcmpge:
- case Intrinsic::aarch64_sve_fcmpgt:
- case Intrinsic::aarch64_sve_fcmpuo:
- case Intrinsic::aarch64_sve_facgt:
- case Intrinsic::aarch64_sve_facge:
- case Intrinsic::aarch64_sve_whilege:
- case Intrinsic::aarch64_sve_whilegt:
- case Intrinsic::aarch64_sve_whilehi:
- case Intrinsic::aarch64_sve_whilehs:
- case Intrinsic::aarch64_sve_whilele:
- case Intrinsic::aarch64_sve_whilelo:
- case Intrinsic::aarch64_sve_whilels:
- case Intrinsic::aarch64_sve_whilelt:
- case Intrinsic::aarch64_sve_match:
- case Intrinsic::aarch64_sve_nmatch:
- case Intrinsic::aarch64_sve_whilege_x2:
- case Intrinsic::aarch64_sve_whilegt_x2:
- case Intrinsic::aarch64_sve_whilehi_x2:
- case Intrinsic::aarch64_sve_whilehs_x2:
- case Intrinsic::aarch64_sve_whilele_x2:
- case Intrinsic::aarch64_sve_whilelo_x2:
- case Intrinsic::aarch64_sve_whilels_x2:
- case Intrinsic::aarch64_sve_whilelt_x2:
- return true;
- }
- }
- }
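- // For example (illustrative): AArch64ISD::PTRUE and the SVE compare/while
- // intrinsics listed above produce predicates whose inactive lanes are zero by
- // construction, so a caller relying on this can avoid re-zeroing them explicitly.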
- AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
- const AArch64Subtarget &STI)
- : TargetLowering(TM), Subtarget(&STI) {
- // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
- // we have to make something up. Arbitrarily, choose ZeroOrOne.
- setBooleanContents(ZeroOrOneBooleanContent);
- // When comparing vectors, each element of the result is set to all-ones or
- // all-zeros.
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- // Set up the register classes.
- addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
- addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);
- if (Subtarget->hasLS64()) {
- addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass);
- setOperationAction(ISD::LOAD, MVT::i64x8, Custom);
- setOperationAction(ISD::STORE, MVT::i64x8, Custom);
- }
- if (Subtarget->hasFPARMv8()) {
- addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
- addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
- addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
- addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
- }
- if (Subtarget->hasNEON()) {
- addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
- addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
- // Someone set us up the NEON.
- addDRTypeForNEON(MVT::v2f32);
- addDRTypeForNEON(MVT::v8i8);
- addDRTypeForNEON(MVT::v4i16);
- addDRTypeForNEON(MVT::v2i32);
- addDRTypeForNEON(MVT::v1i64);
- addDRTypeForNEON(MVT::v1f64);
- addDRTypeForNEON(MVT::v4f16);
- if (Subtarget->hasBF16())
- addDRTypeForNEON(MVT::v4bf16);
- addQRTypeForNEON(MVT::v4f32);
- addQRTypeForNEON(MVT::v2f64);
- addQRTypeForNEON(MVT::v16i8);
- addQRTypeForNEON(MVT::v8i16);
- addQRTypeForNEON(MVT::v4i32);
- addQRTypeForNEON(MVT::v2i64);
- addQRTypeForNEON(MVT::v8f16);
- if (Subtarget->hasBF16())
- addQRTypeForNEON(MVT::v8bf16);
- }
- if (Subtarget->hasSVEorSME()) {
- // Add legal SVE predicate types
- addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
- addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
- addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
- addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
- addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);
- // Add legal SVE data types
- addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);
- if (Subtarget->hasBF16()) {
- addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass);
- addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass);
- }
- if (Subtarget->useSVEForFixedLengthVectors()) {
- for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
- if (useSVEForFixedLengthVectorVT(VT))
- addRegisterClass(VT, &AArch64::ZPRRegClass);
- for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
- if (useSVEForFixedLengthVectorVT(VT))
- addRegisterClass(VT, &AArch64::ZPRRegClass);
- }
- }
- // Compute derived properties from the register classes
- computeRegisterProperties(Subtarget->getRegisterInfo());
- // Provide all sorts of operation actions
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
- setOperationAction(ISD::SETCC, MVT::i32, Custom);
- setOperationAction(ISD::SETCC, MVT::i64, Custom);
- setOperationAction(ISD::SETCC, MVT::f16, Custom);
- setOperationAction(ISD::SETCC, MVT::f32, Custom);
- setOperationAction(ISD::SETCC, MVT::f64, Custom);
- setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
- setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
- setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
- setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
- setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
- setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
- setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
- setOperationAction(ISD::BRCOND, MVT::Other, Custom);
- setOperationAction(ISD::BR_CC, MVT::i32, Custom);
- setOperationAction(ISD::BR_CC, MVT::i64, Custom);
- setOperationAction(ISD::BR_CC, MVT::f16, Custom);
- setOperationAction(ISD::BR_CC, MVT::f32, Custom);
- setOperationAction(ISD::BR_CC, MVT::f64, Custom);
- setOperationAction(ISD::SELECT, MVT::i32, Custom);
- setOperationAction(ISD::SELECT, MVT::i64, Custom);
- setOperationAction(ISD::SELECT, MVT::f16, Custom);
- setOperationAction(ISD::SELECT, MVT::bf16, Custom);
- setOperationAction(ISD::SELECT, MVT::f32, Custom);
- setOperationAction(ISD::SELECT, MVT::f64, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::bf16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
- setOperationAction(ISD::BR_JT, MVT::Other, Custom);
- setOperationAction(ISD::JumpTable, MVT::i64, Custom);
- setOperationAction(ISD::SETCCCARRY, MVT::i64, Custom);
- setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
- setOperationAction(ISD::FREM, MVT::f32, Expand);
- setOperationAction(ISD::FREM, MVT::f64, Expand);
- setOperationAction(ISD::FREM, MVT::f80, Expand);
- setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
- // Custom lowering hooks are needed for XOR
- // to fold it into CSINC/CSINV.
- setOperationAction(ISD::XOR, MVT::i32, Custom);
- setOperationAction(ISD::XOR, MVT::i64, Custom);
- // Virtually no operation on f128 is legal, but LLVM can't expand them when
- // there's a valid register class, so we need custom operations in most cases.
- setOperationAction(ISD::FABS, MVT::f128, Expand);
- setOperationAction(ISD::FADD, MVT::f128, LibCall);
- setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
- setOperationAction(ISD::FCOS, MVT::f128, Expand);
- setOperationAction(ISD::FDIV, MVT::f128, LibCall);
- setOperationAction(ISD::FMA, MVT::f128, Expand);
- setOperationAction(ISD::FMUL, MVT::f128, LibCall);
- setOperationAction(ISD::FNEG, MVT::f128, Expand);
- setOperationAction(ISD::FPOW, MVT::f128, Expand);
- setOperationAction(ISD::FREM, MVT::f128, Expand);
- setOperationAction(ISD::FRINT, MVT::f128, Expand);
- setOperationAction(ISD::FSIN, MVT::f128, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
- setOperationAction(ISD::FSQRT, MVT::f128, Expand);
- setOperationAction(ISD::FSUB, MVT::f128, LibCall);
- setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
- setOperationAction(ISD::SETCC, MVT::f128, Custom);
- setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
- setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
- setOperationAction(ISD::BR_CC, MVT::f128, Custom);
- setOperationAction(ISD::SELECT, MVT::f128, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
- setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
- // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently
- // aren't handled.
- // Lowering for many of the conversions is actually specified by the non-f128
- // type. The LowerXXX function will be trivial when f128 isn't involved.
- setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
- setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
- setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
- setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
- setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
- setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
- setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
- setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
- setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
- setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
- setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
- setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
- setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
- setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
- // Variable arguments.
- setOperationAction(ISD::VASTART, MVT::Other, Custom);
- setOperationAction(ISD::VAARG, MVT::Other, Custom);
- setOperationAction(ISD::VACOPY, MVT::Other, Custom);
- setOperationAction(ISD::VAEND, MVT::Other, Expand);
- // Variable-sized objects.
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- if (Subtarget->isTargetWindows())
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
- else
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
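- // (Hedged note, not from the original source: the Windows case above is Custom
- // because large dynamic allocations must probe the stack guard page, typically
- // via a __chkstk-style helper.)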
- // Constant pool entries
- setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
- // BlockAddress
- setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
- // AArch64 lacks both left-rotate and popcount instructions.
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTL, MVT::i64, Expand);
- for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
- setOperationAction(ISD::ROTL, VT, Expand);
- setOperationAction(ISD::ROTR, VT, Expand);
- }
- // AArch64 doesn't have i32 MULH{S|U}.
- setOperationAction(ISD::MULHU, MVT::i32, Expand);
- setOperationAction(ISD::MULHS, MVT::i32, Expand);
- // AArch64 doesn't have {U|S}MUL_LOHI.
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- if (Subtarget->hasCSSC()) {
- setOperationAction(ISD::CTPOP, MVT::i32, Legal);
- setOperationAction(ISD::CTPOP, MVT::i64, Legal);
- setOperationAction(ISD::CTPOP, MVT::i128, Expand);
- setOperationAction(ISD::PARITY, MVT::i128, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Legal);
- setOperationAction(ISD::CTTZ, MVT::i64, Legal);
- setOperationAction(ISD::CTTZ, MVT::i128, Expand);
- setOperationAction(ISD::ABS, MVT::i32, Legal);
- setOperationAction(ISD::ABS, MVT::i64, Legal);
- setOperationAction(ISD::SMAX, MVT::i32, Legal);
- setOperationAction(ISD::SMAX, MVT::i64, Legal);
- setOperationAction(ISD::UMAX, MVT::i32, Legal);
- setOperationAction(ISD::UMAX, MVT::i64, Legal);
- setOperationAction(ISD::SMIN, MVT::i32, Legal);
- setOperationAction(ISD::SMIN, MVT::i64, Legal);
- setOperationAction(ISD::UMIN, MVT::i32, Legal);
- setOperationAction(ISD::UMIN, MVT::i64, Legal);
- } else {
- setOperationAction(ISD::CTPOP, MVT::i32, Custom);
- setOperationAction(ISD::CTPOP, MVT::i64, Custom);
- setOperationAction(ISD::CTPOP, MVT::i128, Custom);
- setOperationAction(ISD::PARITY, MVT::i64, Custom);
- setOperationAction(ISD::PARITY, MVT::i128, Custom);
- setOperationAction(ISD::ABS, MVT::i32, Custom);
- setOperationAction(ISD::ABS, MVT::i64, Custom);
- }
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
- for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
- setOperationAction(ISD::SDIVREM, VT, Expand);
- setOperationAction(ISD::UDIVREM, VT, Expand);
- }
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i64, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
- setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::UREM, MVT::i64, Expand);
- // Custom lower Add/Sub/Mul with overflow.
- setOperationAction(ISD::SADDO, MVT::i32, Custom);
- setOperationAction(ISD::SADDO, MVT::i64, Custom);
- setOperationAction(ISD::UADDO, MVT::i32, Custom);
- setOperationAction(ISD::UADDO, MVT::i64, Custom);
- setOperationAction(ISD::SSUBO, MVT::i32, Custom);
- setOperationAction(ISD::SSUBO, MVT::i64, Custom);
- setOperationAction(ISD::USUBO, MVT::i32, Custom);
- setOperationAction(ISD::USUBO, MVT::i64, Custom);
- setOperationAction(ISD::SMULO, MVT::i32, Custom);
- setOperationAction(ISD::SMULO, MVT::i64, Custom);
- setOperationAction(ISD::UMULO, MVT::i32, Custom);
- setOperationAction(ISD::UMULO, MVT::i64, Custom);
- setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
- setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
- setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
- setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
- setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
- setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
- setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
- setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);
- setOperationAction(ISD::FSIN, MVT::f32, Expand);
- setOperationAction(ISD::FSIN, MVT::f64, Expand);
- setOperationAction(ISD::FCOS, MVT::f32, Expand);
- setOperationAction(ISD::FCOS, MVT::f64, Expand);
- setOperationAction(ISD::FPOW, MVT::f32, Expand);
- setOperationAction(ISD::FPOW, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
- if (Subtarget->hasFullFP16())
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
- else
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);
- for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI,
- ISD::FCOS, ISD::FSIN, ISD::FSINCOS,
- ISD::FEXP, ISD::FEXP2, ISD::FLOG,
- ISD::FLOG2, ISD::FLOG10, ISD::STRICT_FREM,
- ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS,
- ISD::STRICT_FSIN, ISD::STRICT_FEXP, ISD::STRICT_FEXP2,
- ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10}) {
- setOperationAction(Op, MVT::f16, Promote);
- setOperationAction(Op, MVT::v4f16, Expand);
- setOperationAction(Op, MVT::v8f16, Expand);
- }
- if (!Subtarget->hasFullFP16()) {
- for (auto Op :
- {ISD::SETCC, ISD::SELECT_CC,
- ISD::BR_CC, ISD::FADD, ISD::FSUB,
- ISD::FMUL, ISD::FDIV, ISD::FMA,
- ISD::FNEG, ISD::FABS, ISD::FCEIL,
- ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT,
- ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN,
- ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM,
- ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD,
- ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
- ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
- ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT,
- ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
- ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
- ISD::STRICT_FMAXIMUM})
- setOperationAction(Op, MVT::f16, Promote);
- // Round-to-integer operations need custom lowering for fp16, as Promote doesn't
- // work because the result type is integer.
- for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
- ISD::STRICT_LLRINT})
- setOperationAction(Op, MVT::f16, Custom);
- // Promote v4f16 to v4f32 when that is known to be safe.
- setOperationPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32);
- setOperationPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32);
- setOperationPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32);
- setOperationPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32);
- setOperationAction(ISD::FABS, MVT::v4f16, Expand);
- setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
- setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
- setOperationAction(ISD::FROUNDEVEN, MVT::v4f16, Expand);
- setOperationAction(ISD::FMA, MVT::v4f16, Expand);
- setOperationAction(ISD::SETCC, MVT::v4f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::v4f16, Expand);
- setOperationAction(ISD::SELECT, MVT::v4f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand);
- setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand);
- setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand);
- setOperationAction(ISD::FCEIL, MVT::v4f16, Expand);
- setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
- setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
- setOperationAction(ISD::FSQRT, MVT::v4f16, Expand);
- setOperationAction(ISD::FABS, MVT::v8f16, Expand);
- setOperationAction(ISD::FADD, MVT::v8f16, Expand);
- setOperationAction(ISD::FCEIL, MVT::v8f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand);
- setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
- setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand);
- setOperationAction(ISD::FMA, MVT::v8f16, Expand);
- setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
- setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
- setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
- setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
- setOperationAction(ISD::FROUNDEVEN, MVT::v8f16, Expand);
- setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
- setOperationAction(ISD::FSQRT, MVT::v8f16, Expand);
- setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
- setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand);
- setOperationAction(ISD::SETCC, MVT::v8f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::v8f16, Expand);
- setOperationAction(ISD::SELECT, MVT::v8f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand);
- setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand);
- }
- // AArch64 has implementations of a lot of rounding-like FP operations.
- for (auto Op :
- {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL,
- ISD::FRINT, ISD::FTRUNC, ISD::FROUND,
- ISD::FROUNDEVEN, ISD::FMINNUM, ISD::FMAXNUM,
- ISD::FMINIMUM, ISD::FMAXIMUM, ISD::LROUND,
- ISD::LLROUND, ISD::LRINT, ISD::LLRINT,
- ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, ISD::STRICT_FNEARBYINT,
- ISD::STRICT_FRINT, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
- ISD::STRICT_FROUND, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM,
- ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND,
- ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT}) {
- for (MVT Ty : {MVT::f32, MVT::f64})
- setOperationAction(Op, Ty, Legal);
- if (Subtarget->hasFullFP16())
- setOperationAction(Op, MVT::f16, Legal);
- }
- // Basic strict FP operations are legal
- for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
- ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) {
- for (MVT Ty : {MVT::f32, MVT::f64})
- setOperationAction(Op, Ty, Legal);
- if (Subtarget->hasFullFP16())
- setOperationAction(Op, MVT::f16, Legal);
- }
- // Strict conversion to a larger type is legal
- for (auto VT : {MVT::f32, MVT::f64})
- setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
- setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
- setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);
- setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
- // Generate outline atomics library calls only if LSE was not specified for the
- // subtarget.
- if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) {
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall);
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall);
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall);
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
- setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall);
- #define LCALLNAMES(A, B, N) \
- setLibcallName(A##N##_RELAX, #B #N "_relax"); \
- setLibcallName(A##N##_ACQ, #B #N "_acq"); \
- setLibcallName(A##N##_REL, #B #N "_rel"); \
- setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
- #define LCALLNAME4(A, B) \
- LCALLNAMES(A, B, 1) \
- LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)
- #define LCALLNAME5(A, B) \
- LCALLNAMES(A, B, 1) \
- LCALLNAMES(A, B, 2) \
- LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)
- LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
- LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp)
- LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
- LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset)
- LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr)
- LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor)
- #undef LCALLNAMES
- #undef LCALLNAME4
- #undef LCALLNAME5
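- // Illustrative expansion of the macros above (one hypothetical instance):
- //   LCALLNAMES(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd, 4)
- // produces, among others,
- //   setLibcallName(RTLIB::OUTLINE_ATOMIC_LDADD4_ACQ, "__aarch64_ldadd4_acq");
- // mapping each memory-order variant to the corresponding outline-atomics helper.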
- }
- // 128-bit loads and stores can be done without expanding
- setOperationAction(ISD::LOAD, MVT::i128, Custom);
- setOperationAction(ISD::STORE, MVT::i128, Custom);
- // Aligned 128-bit loads and stores are single-copy atomic according to the
- // v8.4a spec.
- if (Subtarget->hasLSE2()) {
- setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
- setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
- }
- // 256-bit non-temporal stores can be lowered to STNP. Do this as part of the
- // custom lowering, as there are no un-paired non-temporal stores and
- // legalization will break up 256-bit inputs.
- setOperationAction(ISD::STORE, MVT::v32i8, Custom);
- setOperationAction(ISD::STORE, MVT::v16i16, Custom);
- setOperationAction(ISD::STORE, MVT::v16f16, Custom);
- setOperationAction(ISD::STORE, MVT::v8i32, Custom);
- setOperationAction(ISD::STORE, MVT::v8f32, Custom);
- setOperationAction(ISD::STORE, MVT::v4f64, Custom);
- setOperationAction(ISD::STORE, MVT::v4i64, Custom);
- // 256-bit non-temporal loads can be lowered to LDNP. This is done using custom
- // lowering, as there are no un-paired non-temporal loads and legalization will
- // break up 256-bit inputs.
- setOperationAction(ISD::LOAD, MVT::v32i8, Custom);
- setOperationAction(ISD::LOAD, MVT::v16i16, Custom);
- setOperationAction(ISD::LOAD, MVT::v16f16, Custom);
- setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
- setOperationAction(ISD::LOAD, MVT::v8f32, Custom);
- setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
- setOperationAction(ISD::LOAD, MVT::v4i64, Custom);
- // Lower READCYCLECOUNTER using an mrs from CNTVCT_EL0.
- setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
- if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
- getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
- // Issue __sincos_stret if available.
- setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
- setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
- } else {
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
- }
- if (Subtarget->getTargetTriple().isOSMSVCRT()) {
- // MSVCRT doesn't have powi; fall back to pow
- setLibcallName(RTLIB::POWI_F32, nullptr);
- setLibcallName(RTLIB::POWI_F64, nullptr);
- }
- // Make floating-point constants legal for the large code model, so they don't
- // become loads from the constant pool.
- if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- }
- // AArch64 does not have floating-point extending loads, i1 sign-extending
- // loads, floating-point truncating stores, or v2i32->v2i16 truncating stores.
- for (MVT VT : MVT::fp_valuetypes()) {
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
- }
- for (MVT VT : MVT::integer_valuetypes())
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f128, MVT::f80, Expand);
- setTruncStoreAction(MVT::f128, MVT::f64, Expand);
- setTruncStoreAction(MVT::f128, MVT::f32, Expand);
- setTruncStoreAction(MVT::f128, MVT::f16, Expand);
- setOperationAction(ISD::BITCAST, MVT::i16, Custom);
- setOperationAction(ISD::BITCAST, MVT::f16, Custom);
- setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
- // Indexed loads and stores are supported.
- for (unsigned im = (unsigned)ISD::PRE_INC;
- im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
- setIndexedLoadAction(im, MVT::i8, Legal);
- setIndexedLoadAction(im, MVT::i16, Legal);
- setIndexedLoadAction(im, MVT::i32, Legal);
- setIndexedLoadAction(im, MVT::i64, Legal);
- setIndexedLoadAction(im, MVT::f64, Legal);
- setIndexedLoadAction(im, MVT::f32, Legal);
- setIndexedLoadAction(im, MVT::f16, Legal);
- setIndexedLoadAction(im, MVT::bf16, Legal);
- setIndexedStoreAction(im, MVT::i8, Legal);
- setIndexedStoreAction(im, MVT::i16, Legal);
- setIndexedStoreAction(im, MVT::i32, Legal);
- setIndexedStoreAction(im, MVT::i64, Legal);
- setIndexedStoreAction(im, MVT::f64, Legal);
- setIndexedStoreAction(im, MVT::f32, Legal);
- setIndexedStoreAction(im, MVT::f16, Legal);
- setIndexedStoreAction(im, MVT::bf16, Legal);
- }
- // Trap.
- setOperationAction(ISD::TRAP, MVT::Other, Legal);
- setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
- setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);
- // We combine OR nodes for bitfield operations.
- setTargetDAGCombine(ISD::OR);
- // Try to create BICs for vector ANDs.
- setTargetDAGCombine(ISD::AND);
- // Vector add and sub nodes may conceal a high-half opportunity.
- // Also, try to fold ADD into CSINC/CSINV.
- setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP,
- ISD::UINT_TO_FP});
- setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
- ISD::FP_TO_UINT_SAT, ISD::FDIV});
- // Try to combine setcc with csel.
- setTargetDAGCombine(ISD::SETCC);
- setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
- setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND,
- ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG,
- ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR,
- ISD::INSERT_SUBVECTOR, ISD::STORE, ISD::BUILD_VECTOR});
- setTargetDAGCombine(ISD::TRUNCATE);
- setTargetDAGCombine(ISD::LOAD);
- setTargetDAGCombine(ISD::MSTORE);
- setTargetDAGCombine(ISD::MUL);
- setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});
- setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
- ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
- ISD::VECREDUCE_ADD, ISD::STEP_VECTOR});
- setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER});
- setTargetDAGCombine(ISD::FP_EXTEND);
- setTargetDAGCombine(ISD::GlobalAddress);
- setTargetDAGCombine(ISD::CTLZ);
- // In case of strict alignment, avoid an excessive number of byte wide stores.
- MaxStoresPerMemsetOptSize = 8;
- MaxStoresPerMemset =
- Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32;
- MaxGluedStoresPerMemcpy = 4;
- MaxStoresPerMemcpyOptSize = 4;
- MaxStoresPerMemcpy =
- Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16;
- MaxStoresPerMemmoveOptSize = 4;
- MaxStoresPerMemmove = 4;
- MaxLoadsPerMemcmpOptSize = 4;
- MaxLoadsPerMemcmp =
- Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8;
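- // For example (illustrative, assuming the values above): with strict alignment
- // required, an inline memset expansion is capped at 8 stores and an inline memcpy
- // at 4 stores; beyond those limits codegen typically falls back to the
- // corresponding library call.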
- setStackPointerRegisterToSaveRestore(AArch64::SP);
- setSchedulingPreference(Sched::Hybrid);
- EnableExtLdPromotion = true;
- // Set required alignment.
- setMinFunctionAlignment(Align(4));
- // Set preferred alignments.
- setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
- setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
- setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));
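- // For example (illustrative): a preferred loop log-alignment of 4 from the
- // subtarget yields Align(1ULL << 4) == Align(16), i.e. loops are preferentially
- // aligned to 16-byte boundaries.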
- // Only change the limit for entries in a jump table if it is specified by the
- // subtarget, but not at the command line.
- unsigned MaxJT = STI.getMaximumJumpTableSize();
- if (MaxJT && getMaximumJumpTableSize() == UINT_MAX)
- setMaximumJumpTableSize(MaxJT);
- setHasExtractBitsInsn(true);
- setMaxDivRemBitWidthSupported(128);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- if (Subtarget->hasNEON()) {
- // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
- // silliness like this:
- for (auto Op :
- {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC,
- ISD::BR_CC, ISD::FADD, ISD::FSUB,
- ISD::FMUL, ISD::FDIV, ISD::FMA,
- ISD::FNEG, ISD::FABS, ISD::FCEIL,
- ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT,
- ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN,
- ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM,
- ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD,
- ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
- ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
- ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT,
- ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN,
- ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
- ISD::STRICT_FMAXIMUM})
- setOperationAction(Op, MVT::v1f64, Expand);
- for (auto Op :
- {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP,
- ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL,
- ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT,
- ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND})
- setOperationAction(Op, MVT::v1i64, Expand);
- // AArch64 doesn't have direct vector->f32 conversion instructions for elements
- // smaller than i32, so promote the input to i32 first.
- setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
- setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);
- // Similarly, there is no direct i32 -> f64 vector conversion instruction, nor a
- // direct i32 -> f16 vector conversion. Set these to Custom so the conversion
- // happens in two steps: v4i32 -> v4f32 -> v4f16.
- for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
- ISD::STRICT_UINT_TO_FP})
- for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32})
- setOperationAction(Op, VT, Custom);
- if (Subtarget->hasFullFP16()) {
- setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
- setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
- } else {
- // When AArch64 doesn't have full fp16 support, promote the input to i32 first.
- setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
- setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
- setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32);
- setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32);
- setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
- setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
- setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
- setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
- }
- setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
- setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom);
- setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom);
- setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
- setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom);
- for (auto VT : {MVT::v1i64, MVT::v2i64}) {
- setOperationAction(ISD::UMAX, VT, Custom);
- setOperationAction(ISD::SMAX, VT, Custom);
- setOperationAction(ISD::UMIN, VT, Custom);
- setOperationAction(ISD::SMIN, VT, Custom);
- }
- // AArch64 doesn't have MUL.2d:
- setOperationAction(ISD::MUL, MVT::v2i64, Expand);
- // Custom handling for some quad-vector types to detect MULL.
- setOperationAction(ISD::MUL, MVT::v8i16, Custom);
- setOperationAction(ISD::MUL, MVT::v4i32, Custom);
- setOperationAction(ISD::MUL, MVT::v2i64, Custom);
- // Saturates
- for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
- MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
- setOperationAction(ISD::SADDSAT, VT, Legal);
- setOperationAction(ISD::UADDSAT, VT, Legal);
- setOperationAction(ISD::SSUBSAT, VT, Legal);
- setOperationAction(ISD::USUBSAT, VT, Legal);
- }
- for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
- MVT::v4i32}) {
- setOperationAction(ISD::AVGFLOORS, VT, Legal);
- setOperationAction(ISD::AVGFLOORU, VT, Legal);
- setOperationAction(ISD::AVGCEILS, VT, Legal);
- setOperationAction(ISD::AVGCEILU, VT, Legal);
- setOperationAction(ISD::ABDS, VT, Legal);
- setOperationAction(ISD::ABDU, VT, Legal);
- }
- // Vector reductions
- for (MVT VT : { MVT::v4f16, MVT::v2f32,
- MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
- if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) {
- setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FADD, VT, Legal);
- }
- }
- for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
- MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
- }
- setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);
- setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
- setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
- // Likewise, narrowing and extending vector loads/stores aren't handled
- // directly.
- for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
- setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
- if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
- setOperationAction(ISD::MULHS, VT, Legal);
- setOperationAction(ISD::MULHU, VT, Legal);
- } else {
- setOperationAction(ISD::MULHS, VT, Expand);
- setOperationAction(ISD::MULHU, VT, Expand);
- }
- setOperationAction(ISD::SMUL_LOHI, VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, VT, Expand);
- setOperationAction(ISD::BSWAP, VT, Expand);
- setOperationAction(ISD::CTTZ, VT, Expand);
- for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
- setTruncStoreAction(VT, InnerVT, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
- }
- }
- // AArch64 has implementations of a lot of rounding-like FP operations.
- for (auto Op :
- {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
- ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR,
- ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT,
- ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) {
- for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64})
- setOperationAction(Op, Ty, Legal);
- if (Subtarget->hasFullFP16())
- for (MVT Ty : {MVT::v4f16, MVT::v8f16})
- setOperationAction(Op, Ty, Legal);
- }
- setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
- // ADDP custom lowering
- for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
- setOperationAction(ISD::ADD, VT, Custom);
- // FADDP custom lowering
- for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 })
- setOperationAction(ISD::FADD, VT, Custom);
- }
- if (Subtarget->hasSME()) {
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
- }
- // FIXME: Move lowering for more nodes here if those are common between
- // SVE and SME.
- if (Subtarget->hasSVEorSME()) {
- for (auto VT :
- {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
- setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- }
- }
- if (Subtarget->hasSVE()) {
- for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
- setOperationAction(ISD::BITREVERSE, VT, Custom);
- setOperationAction(ISD::BSWAP, VT, Custom);
- setOperationAction(ISD::CTLZ, VT, Custom);
- setOperationAction(ISD::CTPOP, VT, Custom);
- setOperationAction(ISD::CTTZ, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::UINT_TO_FP, VT, Custom);
- setOperationAction(ISD::SINT_TO_FP, VT, Custom);
- setOperationAction(ISD::FP_TO_UINT, VT, Custom);
- setOperationAction(ISD::FP_TO_SINT, VT, Custom);
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Custom);
- setOperationAction(ISD::MUL, VT, Custom);
- setOperationAction(ISD::MULHS, VT, Custom);
- setOperationAction(ISD::MULHU, VT, Custom);
- setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
- setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
- setOperationAction(ISD::SELECT, VT, Custom);
- setOperationAction(ISD::SETCC, VT, Custom);
- setOperationAction(ISD::SDIV, VT, Custom);
- setOperationAction(ISD::UDIV, VT, Custom);
- setOperationAction(ISD::SMIN, VT, Custom);
- setOperationAction(ISD::UMIN, VT, Custom);
- setOperationAction(ISD::SMAX, VT, Custom);
- setOperationAction(ISD::UMAX, VT, Custom);
- setOperationAction(ISD::SHL, VT, Custom);
- setOperationAction(ISD::SRL, VT, Custom);
- setOperationAction(ISD::SRA, VT, Custom);
- setOperationAction(ISD::ABS, VT, Custom);
- setOperationAction(ISD::ABDS, VT, Custom);
- setOperationAction(ISD::ABDU, VT, Custom);
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
- setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
- setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
- setOperationAction(ISD::UMUL_LOHI, VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, VT, Expand);
- setOperationAction(ISD::SELECT_CC, VT, Expand);
- setOperationAction(ISD::ROTL, VT, Expand);
- setOperationAction(ISD::ROTR, VT, Expand);
- setOperationAction(ISD::SADDSAT, VT, Legal);
- setOperationAction(ISD::UADDSAT, VT, Legal);
- setOperationAction(ISD::SSUBSAT, VT, Legal);
- setOperationAction(ISD::USUBSAT, VT, Legal);
- setOperationAction(ISD::UREM, VT, Expand);
- setOperationAction(ISD::SREM, VT, Expand);
- setOperationAction(ISD::SDIVREM, VT, Expand);
- setOperationAction(ISD::UDIVREM, VT, Expand);
- if (Subtarget->hasSVE2()) {
- setOperationAction(ISD::AVGFLOORS, VT, Custom);
- setOperationAction(ISD::AVGFLOORU, VT, Custom);
- setOperationAction(ISD::AVGCEILS, VT, Custom);
- setOperationAction(ISD::AVGCEILU, VT, Custom);
- }
- }
- // Illegal unpacked integer vector types.
- for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) {
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- }
- // Legalize unpacked bitcasts to REINTERPRET_CAST.
- for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16,
- MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
- setOperationAction(ISD::BITCAST, VT, Custom);
- for (auto VT :
- { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
- MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
- setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);
- for (auto VT :
- {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
- setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
- setOperationAction(ISD::SELECT, VT, Custom);
- setOperationAction(ISD::SETCC, VT, Custom);
- setOperationAction(ISD::TRUNCATE, VT, Custom);
- setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
- setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
- setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
- setOperationAction(ISD::SELECT_CC, VT, Expand);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- // There are no legal MVT::nxv16f## based types.
- if (VT != MVT::nxv16i1) {
- setOperationAction(ISD::SINT_TO_FP, VT, Custom);
- setOperationAction(ISD::UINT_TO_FP, VT, Custom);
- }
- }
- // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does
- for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64,
- MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
- MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
- setOperationAction(ISD::MLOAD, VT, Custom);
- setOperationAction(ISD::MSTORE, VT, Custom);
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- }
- // First, disable all scalable vector extending loads and truncating stores,
- // covering both integer and floating-point scalable vectors.
- for (MVT VT : MVT::scalable_vector_valuetypes()) {
- for (MVT InnerVT : MVT::scalable_vector_valuetypes()) {
- setTruncStoreAction(VT, InnerVT, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
- }
- }
- // Then, selectively enable those which we directly support.
- setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal);
- setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal);
- setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal);
- setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal);
- setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal);
- setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal);
- for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
- setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
- setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
- setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal);
- setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal);
- setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal);
- setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal);
- }
- // SVE supports truncating stores of 64- and 128-bit vectors.
- setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
- setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
- setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom);
- setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
- setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
- for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
- MVT::nxv4f32, MVT::nxv2f64}) {
- setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Custom);
- setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
- setOperationAction(ISD::SELECT, VT, Custom);
- setOperationAction(ISD::FADD, VT, Custom);
- setOperationAction(ISD::FCOPYSIGN, VT, Custom);
- setOperationAction(ISD::FDIV, VT, Custom);
- setOperationAction(ISD::FMA, VT, Custom);
- setOperationAction(ISD::FMAXIMUM, VT, Custom);
- setOperationAction(ISD::FMAXNUM, VT, Custom);
- setOperationAction(ISD::FMINIMUM, VT, Custom);
- setOperationAction(ISD::FMINNUM, VT, Custom);
- setOperationAction(ISD::FMUL, VT, Custom);
- setOperationAction(ISD::FNEG, VT, Custom);
- setOperationAction(ISD::FSUB, VT, Custom);
- setOperationAction(ISD::FCEIL, VT, Custom);
- setOperationAction(ISD::FFLOOR, VT, Custom);
- setOperationAction(ISD::FNEARBYINT, VT, Custom);
- setOperationAction(ISD::FRINT, VT, Custom);
- setOperationAction(ISD::FROUND, VT, Custom);
- setOperationAction(ISD::FROUNDEVEN, VT, Custom);
- setOperationAction(ISD::FTRUNC, VT, Custom);
- setOperationAction(ISD::FSQRT, VT, Custom);
- setOperationAction(ISD::FABS, VT, Custom);
- setOperationAction(ISD::FP_EXTEND, VT, Custom);
- setOperationAction(ISD::FP_ROUND, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
- setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
- setOperationAction(ISD::SELECT_CC, VT, Expand);
- setOperationAction(ISD::FREM, VT, Expand);
- setOperationAction(ISD::FPOW, VT, Expand);
- setOperationAction(ISD::FPOWI, VT, Expand);
- setOperationAction(ISD::FCOS, VT, Expand);
- setOperationAction(ISD::FSIN, VT, Expand);
- setOperationAction(ISD::FSINCOS, VT, Expand);
- setOperationAction(ISD::FEXP, VT, Expand);
- setOperationAction(ISD::FEXP2, VT, Expand);
- setOperationAction(ISD::FLOG, VT, Expand);
- setOperationAction(ISD::FLOG2, VT, Expand);
- setOperationAction(ISD::FLOG10, VT, Expand);
- setCondCodeAction(ISD::SETO, VT, Expand);
- setCondCodeAction(ISD::SETOLT, VT, Expand);
- setCondCodeAction(ISD::SETLT, VT, Expand);
- setCondCodeAction(ISD::SETOLE, VT, Expand);
- setCondCodeAction(ISD::SETLE, VT, Expand);
- setCondCodeAction(ISD::SETULT, VT, Expand);
- setCondCodeAction(ISD::SETULE, VT, Expand);
- setCondCodeAction(ISD::SETUGE, VT, Expand);
- setCondCodeAction(ISD::SETUGT, VT, Expand);
- setCondCodeAction(ISD::SETUEQ, VT, Expand);
- setCondCodeAction(ISD::SETONE, VT, Expand);
- }
- for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
- setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
- }
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
- // NEON doesn't support integer divides, but SVE does
- for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
- MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
- setOperationAction(ISD::SDIV, VT, Custom);
- setOperationAction(ISD::UDIV, VT, Custom);
- }
- // NEON doesn't support 64-bit vector integer muls, but SVE does.
- setOperationAction(ISD::MUL, MVT::v1i64, Custom);
- setOperationAction(ISD::MUL, MVT::v2i64, Custom);
- // NEON doesn't support across-vector reductions, but SVE does.
- for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
- setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
- if (Subtarget->forceStreamingCompatibleSVE()) {
- setTruncStoreAction(MVT::v2f32, MVT::v2f16, Custom);
- setTruncStoreAction(MVT::v4f32, MVT::v4f16, Custom);
- setTruncStoreAction(MVT::v8f32, MVT::v8f16, Custom);
- setTruncStoreAction(MVT::v1f64, MVT::v1f16, Custom);
- setTruncStoreAction(MVT::v2f64, MVT::v2f16, Custom);
- setTruncStoreAction(MVT::v4f64, MVT::v4f16, Custom);
- setTruncStoreAction(MVT::v1f64, MVT::v1f32, Custom);
- setTruncStoreAction(MVT::v2f64, MVT::v2f32, Custom);
- setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
- for (MVT VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
- MVT::v4i32, MVT::v1i64, MVT::v2i64})
- addTypeForStreamingSVE(VT);
- for (MVT VT :
- {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
- addTypeForStreamingSVE(VT);
- }
- // NOTE: Currently this has to happen after computeRegisterProperties rather
- // than the preferred option of combining it with the addRegisterClass call.
- if (Subtarget->useSVEForFixedLengthVectors()) {
- for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
- if (useSVEForFixedLengthVectorVT(VT))
- addTypeForFixedLengthSVE(VT);
- for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
- if (useSVEForFixedLengthVectorVT(VT))
- addTypeForFixedLengthSVE(VT);
- // 64-bit results can mean a bigger-than-NEON input.
- for (auto VT : {MVT::v8i8, MVT::v4i16})
- setOperationAction(ISD::TRUNCATE, VT, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
- // 128-bit results imply an input wider than NEON supports.
- for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
- setOperationAction(ISD::TRUNCATE, VT, Custom);
- for (auto VT : {MVT::v8f16, MVT::v4f32})
- setOperationAction(ISD::FP_ROUND, VT, Custom);
- // These operations are not supported on NEON but SVE can do them.
- setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v1i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
- setOperationAction(ISD::MULHS, MVT::v1i64, Custom);
- setOperationAction(ISD::MULHS, MVT::v2i64, Custom);
- setOperationAction(ISD::MULHU, MVT::v1i64, Custom);
- setOperationAction(ISD::MULHU, MVT::v2i64, Custom);
- setOperationAction(ISD::SMAX, MVT::v1i64, Custom);
- setOperationAction(ISD::SMAX, MVT::v2i64, Custom);
- setOperationAction(ISD::SMIN, MVT::v1i64, Custom);
- setOperationAction(ISD::SMIN, MVT::v2i64, Custom);
- setOperationAction(ISD::UMAX, MVT::v1i64, Custom);
- setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
- setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
- setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
- setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
- setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
- setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
- setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
- // Int operations with no NEON support.
- for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
- MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
- setOperationAction(ISD::BITREVERSE, VT, Custom);
- setOperationAction(ISD::CTTZ, VT, Custom);
- setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
- setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
- setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
- }
- // Use SVE for vectors with more than 2 elements.
- for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
- setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
- }
- setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64);
- setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
- setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
- setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
- setOperationAction(ISD::VSCALE, MVT::i32, Custom);
- }
- if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
- // Only required for llvm.aarch64.mops.memset.tag
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
- }
- setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
- PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
- IsStrictFPEnabled = true;
- }
- void AArch64TargetLowering::addTypeForNEON(MVT VT) {
- assert(VT.isVector() && "VT should be a vector type");
- if (VT.isFloatingPoint()) {
- MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
- setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
- setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
- }
- // Mark vector float intrinsics as expand.
- if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
- setOperationAction(ISD::FSIN, VT, Expand);
- setOperationAction(ISD::FCOS, VT, Expand);
- setOperationAction(ISD::FPOW, VT, Expand);
- setOperationAction(ISD::FLOG, VT, Expand);
- setOperationAction(ISD::FLOG2, VT, Expand);
- setOperationAction(ISD::FLOG10, VT, Expand);
- setOperationAction(ISD::FEXP, VT, Expand);
- setOperationAction(ISD::FEXP2, VT, Expand);
- }
- // But we do support custom-lowering for FCOPYSIGN.
- if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
- ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16()))
- setOperationAction(ISD::FCOPYSIGN, VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::SRA, VT, Custom);
- setOperationAction(ISD::SRL, VT, Custom);
- setOperationAction(ISD::SHL, VT, Custom);
- setOperationAction(ISD::OR, VT, Custom);
- setOperationAction(ISD::SETCC, VT, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
- setOperationAction(ISD::SELECT, VT, Expand);
- setOperationAction(ISD::SELECT_CC, VT, Expand);
- setOperationAction(ISD::VSELECT, VT, Expand);
- for (MVT InnerVT : MVT::all_valuetypes())
- setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
- // CNT supports only B element sizes; for wider types we use UADDLP to widen.
- if (VT != MVT::v8i8 && VT != MVT::v16i8)
- setOperationAction(ISD::CTPOP, VT, Custom);
- setOperationAction(ISD::UDIV, VT, Expand);
- setOperationAction(ISD::SDIV, VT, Expand);
- setOperationAction(ISD::UREM, VT, Expand);
- setOperationAction(ISD::SREM, VT, Expand);
- setOperationAction(ISD::FREM, VT, Expand);
- for (unsigned Opcode :
- {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
- ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
- setOperationAction(Opcode, VT, Custom);
- if (!VT.isFloatingPoint())
- setOperationAction(ISD::ABS, VT, Legal);
- // [SU][MIN|MAX] are available for all NEON types apart from i64.
- if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
- for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
- setOperationAction(Opcode, VT, Legal);
- // F[MIN|MAX][NUM|NAN] and simple strict operations are available for all FP
- // NEON types.
- if (VT.isFloatingPoint() &&
- VT.getVectorElementType() != MVT::bf16 &&
- (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
- for (unsigned Opcode :
- {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM,
- ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM,
- ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB,
- ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA,
- ISD::STRICT_FSQRT})
- setOperationAction(Opcode, VT, Legal);
- // Strict fp extend and trunc are legal
- if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16)
- setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
- if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 64)
- setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
- // FIXME: We could potentially make use of the vector comparison instructions
- // for STRICT_FSETCC and STRICT_FSETCCS, but there are a number of
- // complications:
- // * FCMPEQ/NE are quiet comparisons, the rest are signalling comparisons,
- // so we would need to expand when the condition code doesn't match the
- // kind of comparison.
- // * Some kinds of comparison require more than one FCMXY instruction so
- // would need to be expanded instead.
- // * The lowering of the non-strict versions involves target-specific ISD
- // nodes so we would likely need to add strict versions of all of them and
- // handle them appropriately.
- setOperationAction(ISD::STRICT_FSETCC, VT, Expand);
- setOperationAction(ISD::STRICT_FSETCCS, VT, Expand);
- if (Subtarget->isLittleEndian()) {
- for (unsigned im = (unsigned)ISD::PRE_INC;
- im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
- setIndexedLoadAction(im, VT, Legal);
- setIndexedStoreAction(im, VT, Legal);
- }
- }
- if (Subtarget->hasD128()) {
- setOperationAction(ISD::READ_REGISTER, MVT::i128, Custom);
- setOperationAction(ISD::WRITE_REGISTER, MVT::i128, Custom);
- }
- }
- bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
- EVT OpVT) const {
- // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo).
- if (!Subtarget->hasSVE())
- return true;
- // We can only support legal predicate result types. We can use the SVE
- // whilelo instruction for generating fixed-width predicates too.
- if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 &&
- ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 &&
- ResVT != MVT::v8i1 && ResVT != MVT::v16i1)
- return true;
- // The whilelo instruction only works with i32 or i64 scalar inputs.
- if (OpVT != MVT::i32 && OpVT != MVT::i64)
- return true;
- return false;
- }
- void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) {
- // By default set all operations to Expand,
- // then change to Legal/Custom if needed.
- for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
- setOperationAction(Op, VT, Expand);
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- if (VT.isFloatingPoint()) {
- setCondCodeAction(ISD::SETO, VT, Expand);
- setCondCodeAction(ISD::SETOLT, VT, Expand);
- setCondCodeAction(ISD::SETOLE, VT, Expand);
- setCondCodeAction(ISD::SETULT, VT, Expand);
- setCondCodeAction(ISD::SETULE, VT, Expand);
- setCondCodeAction(ISD::SETUGE, VT, Expand);
- setCondCodeAction(ISD::SETUGT, VT, Expand);
- setCondCodeAction(ISD::SETUEQ, VT, Expand);
- setCondCodeAction(ISD::SETONE, VT, Expand);
- }
- // STORE, LOAD, SCALAR_TO_VECTOR and BITCAST are natively supported,
- // so no need to Custom/Expand them.
- setOperationAction(ISD::STORE, VT, Legal);
- setOperationAction(ISD::LOAD, VT, Legal);
- setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
- setOperationAction(ISD::BITCAST, VT, Legal);
- // Mark integer truncating stores/extending loads as having custom lowering
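- // (For example, for VT == v4i32 the loop below marks v4i8 and v4i16
- // truncating stores and extending loads as Custom.)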
- if (VT.isInteger()) {
- MVT InnerVT = VT.changeVectorElementType(MVT::i8);
- while (InnerVT != VT) {
- setTruncStoreAction(VT, InnerVT, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
- setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
- InnerVT = InnerVT.changeVectorElementType(
- MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
- }
- }
- // Mark floating-point truncating stores/extending loads as having custom
- // lowering
- if (VT.isFloatingPoint()) {
- MVT InnerVT = VT.changeVectorElementType(MVT::f16);
- while (InnerVT != VT) {
- setTruncStoreAction(VT, InnerVT, Custom);
- setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
- InnerVT = InnerVT.changeVectorElementType(
- MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
- }
- }
- setOperationAction(ISD::ABS, VT, Custom);
- setOperationAction(ISD::ADD, VT, Custom);
- setOperationAction(ISD::AND, VT, Custom);
- setOperationAction(ISD::ANY_EXTEND, VT, Custom);
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
- setOperationAction(ISD::CTLZ, VT, Custom);
- setOperationAction(ISD::CTPOP, VT, Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::FABS, VT, Custom);
- setOperationAction(ISD::FADD, VT, Custom);
- setOperationAction(ISD::FCEIL, VT, Custom);
- setOperationAction(ISD::FCOPYSIGN, VT, Custom);
- setOperationAction(ISD::FDIV, VT, Custom);
- setOperationAction(ISD::FFLOOR, VT, Custom);
- setOperationAction(ISD::FMA, VT, Custom);
- setOperationAction(ISD::FMAXIMUM, VT, Custom);
- setOperationAction(ISD::FMAXNUM, VT, Custom);
- setOperationAction(ISD::FMINIMUM, VT, Custom);
- setOperationAction(ISD::FMINNUM, VT, Custom);
- setOperationAction(ISD::FMUL, VT, Custom);
- setOperationAction(ISD::FNEARBYINT, VT, Custom);
- setOperationAction(ISD::FNEG, VT, Custom);
- setOperationAction(ISD::FP_ROUND, VT, Custom);
- setOperationAction(ISD::FP_TO_SINT, VT, Custom);
- setOperationAction(ISD::FP_TO_UINT, VT, Custom);
- setOperationAction(ISD::FRINT, VT, Custom);
- setOperationAction(ISD::FROUND, VT, Custom);
- setOperationAction(ISD::FROUNDEVEN, VT, Custom);
- setOperationAction(ISD::FSQRT, VT, Custom);
- setOperationAction(ISD::FSUB, VT, Custom);
- setOperationAction(ISD::FTRUNC, VT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Custom);
- setOperationAction(ISD::MSTORE, VT, Custom);
- setOperationAction(ISD::MUL, VT, Custom);
- setOperationAction(ISD::MULHS, VT, Custom);
- setOperationAction(ISD::MULHU, VT, Custom);
- setOperationAction(ISD::OR, VT, Custom);
- setOperationAction(ISD::SDIV, VT, Custom);
- setOperationAction(ISD::SETCC, VT, Custom);
- setOperationAction(ISD::SHL, VT, Custom);
- setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
- setOperationAction(ISD::SINT_TO_FP, VT, Custom);
- setOperationAction(ISD::SMAX, VT, Custom);
- setOperationAction(ISD::SMIN, VT, Custom);
- setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
- setOperationAction(ISD::SRA, VT, Custom);
- setOperationAction(ISD::SRL, VT, Custom);
- setOperationAction(ISD::SUB, VT, Custom);
- setOperationAction(ISD::TRUNCATE, VT, Custom);
- setOperationAction(ISD::UDIV, VT, Custom);
- setOperationAction(ISD::UINT_TO_FP, VT, Custom);
- setOperationAction(ISD::UMAX, VT, Custom);
- setOperationAction(ISD::UMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
- setOperationAction(ISD::XOR, VT, Custom);
- setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
- }
- void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- // By default everything must be expanded.
- for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
- setOperationAction(Op, VT, Expand);
- if (VT.isFloatingPoint()) {
- setCondCodeAction(ISD::SETO, VT, Expand);
- setCondCodeAction(ISD::SETOLT, VT, Expand);
- setCondCodeAction(ISD::SETOLE, VT, Expand);
- setCondCodeAction(ISD::SETULT, VT, Expand);
- setCondCodeAction(ISD::SETULE, VT, Expand);
- setCondCodeAction(ISD::SETUGE, VT, Expand);
- setCondCodeAction(ISD::SETUGT, VT, Expand);
- setCondCodeAction(ISD::SETUEQ, VT, Expand);
- setCondCodeAction(ISD::SETONE, VT, Expand);
- }
- // Mark integer truncating stores/extending loads as having custom lowering
- if (VT.isInteger()) {
- MVT InnerVT = VT.changeVectorElementType(MVT::i8);
- while (InnerVT != VT) {
- setTruncStoreAction(VT, InnerVT, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
- setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
- InnerVT = InnerVT.changeVectorElementType(
- MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
- }
- }
- // Mark floating-point truncating stores/extending loads as having custom
- // lowering
- if (VT.isFloatingPoint()) {
- MVT InnerVT = VT.changeVectorElementType(MVT::f16);
- while (InnerVT != VT) {
- setTruncStoreAction(VT, InnerVT, Custom);
- setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
- InnerVT = InnerVT.changeVectorElementType(
- MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
- }
- }
- // Lower fixed length vector operations to scalable equivalents.
- setOperationAction(ISD::ABS, VT, Custom);
- setOperationAction(ISD::ADD, VT, Custom);
- setOperationAction(ISD::AND, VT, Custom);
- setOperationAction(ISD::ANY_EXTEND, VT, Custom);
- setOperationAction(ISD::BITCAST, VT, Custom);
- setOperationAction(ISD::BITREVERSE, VT, Custom);
- setOperationAction(ISD::BSWAP, VT, Custom);
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
- setOperationAction(ISD::CTLZ, VT, Custom);
- setOperationAction(ISD::CTPOP, VT, Custom);
- setOperationAction(ISD::CTTZ, VT, Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::FABS, VT, Custom);
- setOperationAction(ISD::FADD, VT, Custom);
- setOperationAction(ISD::FCEIL, VT, Custom);
- setOperationAction(ISD::FCOPYSIGN, VT, Custom);
- setOperationAction(ISD::FDIV, VT, Custom);
- setOperationAction(ISD::FFLOOR, VT, Custom);
- setOperationAction(ISD::FMA, VT, Custom);
- setOperationAction(ISD::FMAXIMUM, VT, Custom);
- setOperationAction(ISD::FMAXNUM, VT, Custom);
- setOperationAction(ISD::FMINIMUM, VT, Custom);
- setOperationAction(ISD::FMINNUM, VT, Custom);
- setOperationAction(ISD::FMUL, VT, Custom);
- setOperationAction(ISD::FNEARBYINT, VT, Custom);
- setOperationAction(ISD::FNEG, VT, Custom);
- setOperationAction(ISD::FP_EXTEND, VT, Custom);
- setOperationAction(ISD::FP_ROUND, VT, Custom);
- setOperationAction(ISD::FP_TO_SINT, VT, Custom);
- setOperationAction(ISD::FP_TO_UINT, VT, Custom);
- setOperationAction(ISD::FRINT, VT, Custom);
- setOperationAction(ISD::FROUND, VT, Custom);
- setOperationAction(ISD::FROUNDEVEN, VT, Custom);
- setOperationAction(ISD::FSQRT, VT, Custom);
- setOperationAction(ISD::FSUB, VT, Custom);
- setOperationAction(ISD::FTRUNC, VT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::LOAD, VT, Custom);
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- setOperationAction(ISD::MSTORE, VT, Custom);
- setOperationAction(ISD::MUL, VT, Custom);
- setOperationAction(ISD::MULHS, VT, Custom);
- setOperationAction(ISD::MULHU, VT, Custom);
- setOperationAction(ISD::OR, VT, Custom);
- setOperationAction(ISD::SDIV, VT, Custom);
- setOperationAction(ISD::SELECT, VT, Custom);
- setOperationAction(ISD::SETCC, VT, Custom);
- setOperationAction(ISD::SHL, VT, Custom);
- setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
- setOperationAction(ISD::SINT_TO_FP, VT, Custom);
- setOperationAction(ISD::SMAX, VT, Custom);
- setOperationAction(ISD::SMIN, VT, Custom);
- setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
- setOperationAction(ISD::SRA, VT, Custom);
- setOperationAction(ISD::SRL, VT, Custom);
- setOperationAction(ISD::STORE, VT, Custom);
- setOperationAction(ISD::SUB, VT, Custom);
- setOperationAction(ISD::TRUNCATE, VT, Custom);
- setOperationAction(ISD::UDIV, VT, Custom);
- setOperationAction(ISD::UINT_TO_FP, VT, Custom);
- setOperationAction(ISD::UMAX, VT, Custom);
- setOperationAction(ISD::UMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
- setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
- setOperationAction(ISD::VSELECT, VT, Custom);
- setOperationAction(ISD::XOR, VT, Custom);
- setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
- }
- void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
- addRegisterClass(VT, &AArch64::FPR64RegClass);
- addTypeForNEON(VT);
- }
- void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
- addRegisterClass(VT, &AArch64::FPR128RegClass);
- addTypeForNEON(VT);
- }
- EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
- LLVMContext &C, EVT VT) const {
- if (!VT.isVector())
- return MVT::i32;
- if (VT.isScalableVector())
- return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
- return VT.changeVectorElementTypeToInteger();
- }
- // isIntImmediate - This method tests to see if the node is a constant
- // operand. If so, Imm will receive the value.
- static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
- if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
- Imm = C->getZExtValue();
- return true;
- }
- return false;
- }
- // isOpcWithIntImmediate - This method tests to see if the node is a specific
- // opcode and that it has an immediate integer right operand.
- // If so, Imm will receive the value.
- static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
- uint64_t &Imm) {
- return N->getOpcode() == Opc &&
- isIntImmediate(N->getOperand(1).getNode(), Imm);
- }
- static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
- const APInt &Demanded,
- TargetLowering::TargetLoweringOpt &TLO,
- unsigned NewOpc) {
- uint64_t OldImm = Imm, NewImm, Enc;
- uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;
- // Return if the immediate is already all zeros, all ones, a bimm32 or a
- // bimm64.
- if (Imm == 0 || Imm == Mask ||
- AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
- return false;
- unsigned EltSize = Size;
- uint64_t DemandedBits = Demanded.getZExtValue();
- // Clear bits that are not demanded.
- Imm &= DemandedBits;
- while (true) {
- // The goal here is to set the non-demanded bits in a way that minimizes
- // the number of transitions between 0 and 1. In order to achieve this goal,
- // we set the non-demanded bits to the value of the preceding demanded bits.
- // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
- // non-demanded bit), we copy bit0 (1) to the least significant 'x',
- // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
- // The final result is 0b11000011.
- uint64_t NonDemandedBits = ~DemandedBits;
- uint64_t InvertedImm = ~Imm & DemandedBits;
- uint64_t RotatedImm =
- ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
- NonDemandedBits;
- uint64_t Sum = RotatedImm + NonDemandedBits;
- bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
- uint64_t Ones = (Sum + Carry) & NonDemandedBits;
- NewImm = (Imm | Ones) & Mask;
- // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
- // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
- // we halve the element size and continue the search.
- if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
- break;
- // We cannot shrink the element size any further if it is 2-bits.
- if (EltSize == 2)
- return false;
- EltSize /= 2;
- Mask >>= EltSize;
- uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
- // Return if there is a mismatch in any of the demanded bits of Imm and Hi.
- if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
- return false;
- // Merge the upper and lower halves of Imm and DemandedBits.
- Imm |= Hi;
- DemandedBits |= DemandedBitsHi;
- }
- ++NumOptimizedImms;
- // Replicate the element across the register width.
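- // (E.g. with Size == 64 and EltSize == 16, a 16-bit pattern 0x00F0 becomes
- // 0x00F000F000F000F0.)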
- while (EltSize < Size) {
- NewImm |= NewImm << EltSize;
- EltSize *= 2;
- }
- (void)OldImm;
- assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
- "demanded bits should never be altered");
- assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");
- // Create the new constant immediate node.
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- SDValue New;
- // If the new constant immediate is all-zeros or all-ones, let the target
- // independent DAG combine optimize this node.
- if (NewImm == 0 || NewImm == OrigMask) {
- New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
- TLO.DAG.getConstant(NewImm, DL, VT));
- // Otherwise, create a machine node so that target independent DAG combine
- // doesn't undo this optimization.
- } else {
- Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
- SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
- New = SDValue(
- TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
- }
- return TLO.CombineTo(Op, New);
- }
- bool AArch64TargetLowering::targetShrinkDemandedConstant(
- SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
- TargetLoweringOpt &TLO) const {
- // Delay this optimization to as late as possible.
- if (!TLO.LegalOps)
- return false;
- if (!EnableOptimizeLogicalImm)
- return false;
- EVT VT = Op.getValueType();
- if (VT.isVector())
- return false;
- unsigned Size = VT.getSizeInBits();
- assert((Size == 32 || Size == 64) &&
- "i32 or i64 is expected after legalization.");
- // Exit early if we demand all bits.
- if (DemandedBits.countPopulation() == Size)
- return false;
- unsigned NewOpc;
- switch (Op.getOpcode()) {
- default:
- return false;
- case ISD::AND:
- NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
- break;
- case ISD::OR:
- NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
- break;
- case ISD::XOR:
- NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
- break;
- }
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
- if (!C)
- return false;
- uint64_t Imm = C->getZExtValue();
- return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc);
- }
- /// computeKnownBitsForTargetNode - Determine which of the bits specified in
- /// Mask are known to be either zero or one and return them Known.
- void AArch64TargetLowering::computeKnownBitsForTargetNode(
- const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
- const SelectionDAG &DAG, unsigned Depth) const {
- switch (Op.getOpcode()) {
- default:
- break;
- case AArch64ISD::DUP: {
- SDValue SrcOp = Op.getOperand(0);
- Known = DAG.computeKnownBits(SrcOp, Depth + 1);
- if (SrcOp.getValueSizeInBits() != Op.getScalarValueSizeInBits()) {
- assert(SrcOp.getValueSizeInBits() > Op.getScalarValueSizeInBits() &&
- "Expected DUP implicit truncation");
- Known = Known.trunc(Op.getScalarValueSizeInBits());
- }
- break;
- }
- case AArch64ISD::CSEL: {
- KnownBits Known2;
- Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
- Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
- Known = KnownBits::commonBits(Known, Known2);
- break;
- }
- case AArch64ISD::BICi: {
- // Compute the bit cleared value.
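- // (For example, an immediate of 0xFF with a shift of 8 gives a mask of
- // ~0xFF00, so bits 15:8 of the result are known to be zero.)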
- uint64_t Mask =
- ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
- Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
- Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
- break;
- }
- case AArch64ISD::VLSHR: {
- KnownBits Known2;
- Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
- Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
- Known = KnownBits::lshr(Known, Known2);
- break;
- }
- case AArch64ISD::VASHR: {
- KnownBits Known2;
- Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
- Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
- Known = KnownBits::ashr(Known, Known2);
- break;
- }
- case AArch64ISD::MOVI: {
- ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(0));
- Known =
- KnownBits::makeConstant(APInt(Known.getBitWidth(), CN->getZExtValue()));
- break;
- }
- case AArch64ISD::LOADgot:
- case AArch64ISD::ADDlow: {
- if (!Subtarget->isTargetILP32())
- break;
- // In ILP32 mode all valid pointers are in the low 4GB of the address-space.
- Known.Zero = APInt::getHighBitsSet(64, 32);
- break;
- }
- case AArch64ISD::ASSERT_ZEXT_BOOL: {
- Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
- Known.Zero |= APInt(Known.getBitWidth(), 0xFE);
- break;
- }
- case ISD::INTRINSIC_W_CHAIN: {
- ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
- Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
- switch (IntID) {
- default: return;
- case Intrinsic::aarch64_ldaxr:
- case Intrinsic::aarch64_ldxr: {
- unsigned BitWidth = Known.getBitWidth();
- EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
- unsigned MemBits = VT.getScalarSizeInBits();
- Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
- return;
- }
- }
- break;
- }
- case ISD::INTRINSIC_WO_CHAIN:
- case ISD::INTRINSIC_VOID: {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- switch (IntNo) {
- default:
- break;
- case Intrinsic::aarch64_neon_umaxv:
- case Intrinsic::aarch64_neon_uminv: {
- // Figure out the datatype of the vector operand. The UMINV instruction
- // will zero extend the result, so we can mark as known zero all the
- // bits larger than the element datatype. 32-bit or larger doesn't need
- // this, as those are legal types and will be handled by isel directly.
- MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
- unsigned BitWidth = Known.getBitWidth();
- if (VT == MVT::v8i8 || VT == MVT::v16i8) {
- assert(BitWidth >= 8 && "Unexpected width!");
- APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
- Known.Zero |= Mask;
- } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
- assert(BitWidth >= 16 && "Unexpected width!");
- APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
- Known.Zero |= Mask;
- }
- break;
- }
- }
- }
- }
- }
- MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
- EVT) const {
- return MVT::i64;
- }
- bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
- unsigned *Fast) const {
- if (Subtarget->requiresStrictAlign())
- return false;
- if (Fast) {
- // Some CPUs are fine with unaligned stores except for 128-bit ones.
- *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
- // See comments in performSTORECombine() for more details about
- // these conditions.
- // Code that uses clang vector extensions can mark that it
- // wants unaligned accesses to be treated as fast by
- // underspecifying alignment to be 1 or 2.
- Alignment <= 2 ||
- // Disregard v2i64. Memcpy lowering produces those and splitting
- // them regresses performance on micro-benchmarks and olden/bh.
- VT == MVT::v2i64;
- }
- return true;
- }
- // Same as above but handling LLTs instead.
- bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
- LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
- unsigned *Fast) const {
- if (Subtarget->requiresStrictAlign())
- return false;
- if (Fast) {
- // Some CPUs are fine with unaligned stores except for 128-bit ones.
- *Fast = !Subtarget->isMisaligned128StoreSlow() ||
- Ty.getSizeInBytes() != 16 ||
- // See comments in performSTORECombine() for more details about
- // these conditions.
- // Code that uses clang vector extensions can mark that it
- // wants unaligned accesses to be treated as fast by
- // underspecifying alignment to be 1 or 2.
- Alignment <= 2 ||
- // Disregard v2i64. Memcpy lowering produces those and splitting
- // them regresses performance on micro-benchmarks and olden/bh.
- Ty == LLT::fixed_vector(2, 64);
- }
- return true;
- }
- FastISel *
- AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo) const {
- return AArch64::createFastISel(funcInfo, libInfo);
- }
- const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
- #define MAKE_CASE(V) \
- case V: \
- return #V;
- switch ((AArch64ISD::NodeType)Opcode) {
- case AArch64ISD::FIRST_NUMBER:
- break;
- MAKE_CASE(AArch64ISD::OBSCURE_COPY)
- MAKE_CASE(AArch64ISD::SMSTART)
- MAKE_CASE(AArch64ISD::SMSTOP)
- MAKE_CASE(AArch64ISD::RESTORE_ZA)
- MAKE_CASE(AArch64ISD::CALL)
- MAKE_CASE(AArch64ISD::ADRP)
- MAKE_CASE(AArch64ISD::ADR)
- MAKE_CASE(AArch64ISD::ADDlow)
- MAKE_CASE(AArch64ISD::LOADgot)
- MAKE_CASE(AArch64ISD::RET_FLAG)
- MAKE_CASE(AArch64ISD::BRCOND)
- MAKE_CASE(AArch64ISD::CSEL)
- MAKE_CASE(AArch64ISD::CSINV)
- MAKE_CASE(AArch64ISD::CSNEG)
- MAKE_CASE(AArch64ISD::CSINC)
- MAKE_CASE(AArch64ISD::THREAD_POINTER)
- MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
- MAKE_CASE(AArch64ISD::ABDS_PRED)
- MAKE_CASE(AArch64ISD::ABDU_PRED)
- MAKE_CASE(AArch64ISD::HADDS_PRED)
- MAKE_CASE(AArch64ISD::HADDU_PRED)
- MAKE_CASE(AArch64ISD::MUL_PRED)
- MAKE_CASE(AArch64ISD::MULHS_PRED)
- MAKE_CASE(AArch64ISD::MULHU_PRED)
- MAKE_CASE(AArch64ISD::RHADDS_PRED)
- MAKE_CASE(AArch64ISD::RHADDU_PRED)
- MAKE_CASE(AArch64ISD::SDIV_PRED)
- MAKE_CASE(AArch64ISD::SHL_PRED)
- MAKE_CASE(AArch64ISD::SMAX_PRED)
- MAKE_CASE(AArch64ISD::SMIN_PRED)
- MAKE_CASE(AArch64ISD::SRA_PRED)
- MAKE_CASE(AArch64ISD::SRL_PRED)
- MAKE_CASE(AArch64ISD::UDIV_PRED)
- MAKE_CASE(AArch64ISD::UMAX_PRED)
- MAKE_CASE(AArch64ISD::UMIN_PRED)
- MAKE_CASE(AArch64ISD::SRAD_MERGE_OP1)
- MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::ADC)
- MAKE_CASE(AArch64ISD::SBC)
- MAKE_CASE(AArch64ISD::ADDS)
- MAKE_CASE(AArch64ISD::SUBS)
- MAKE_CASE(AArch64ISD::ADCS)
- MAKE_CASE(AArch64ISD::SBCS)
- MAKE_CASE(AArch64ISD::ANDS)
- MAKE_CASE(AArch64ISD::CCMP)
- MAKE_CASE(AArch64ISD::CCMN)
- MAKE_CASE(AArch64ISD::FCCMP)
- MAKE_CASE(AArch64ISD::FCMP)
- MAKE_CASE(AArch64ISD::STRICT_FCMP)
- MAKE_CASE(AArch64ISD::STRICT_FCMPE)
- MAKE_CASE(AArch64ISD::DUP)
- MAKE_CASE(AArch64ISD::DUPLANE8)
- MAKE_CASE(AArch64ISD::DUPLANE16)
- MAKE_CASE(AArch64ISD::DUPLANE32)
- MAKE_CASE(AArch64ISD::DUPLANE64)
- MAKE_CASE(AArch64ISD::DUPLANE128)
- MAKE_CASE(AArch64ISD::MOVI)
- MAKE_CASE(AArch64ISD::MOVIshift)
- MAKE_CASE(AArch64ISD::MOVIedit)
- MAKE_CASE(AArch64ISD::MOVImsl)
- MAKE_CASE(AArch64ISD::FMOV)
- MAKE_CASE(AArch64ISD::MVNIshift)
- MAKE_CASE(AArch64ISD::MVNImsl)
- MAKE_CASE(AArch64ISD::BICi)
- MAKE_CASE(AArch64ISD::ORRi)
- MAKE_CASE(AArch64ISD::BSP)
- MAKE_CASE(AArch64ISD::EXTR)
- MAKE_CASE(AArch64ISD::ZIP1)
- MAKE_CASE(AArch64ISD::ZIP2)
- MAKE_CASE(AArch64ISD::UZP1)
- MAKE_CASE(AArch64ISD::UZP2)
- MAKE_CASE(AArch64ISD::TRN1)
- MAKE_CASE(AArch64ISD::TRN2)
- MAKE_CASE(AArch64ISD::REV16)
- MAKE_CASE(AArch64ISD::REV32)
- MAKE_CASE(AArch64ISD::REV64)
- MAKE_CASE(AArch64ISD::EXT)
- MAKE_CASE(AArch64ISD::SPLICE)
- MAKE_CASE(AArch64ISD::VSHL)
- MAKE_CASE(AArch64ISD::VLSHR)
- MAKE_CASE(AArch64ISD::VASHR)
- MAKE_CASE(AArch64ISD::VSLI)
- MAKE_CASE(AArch64ISD::VSRI)
- MAKE_CASE(AArch64ISD::CMEQ)
- MAKE_CASE(AArch64ISD::CMGE)
- MAKE_CASE(AArch64ISD::CMGT)
- MAKE_CASE(AArch64ISD::CMHI)
- MAKE_CASE(AArch64ISD::CMHS)
- MAKE_CASE(AArch64ISD::FCMEQ)
- MAKE_CASE(AArch64ISD::FCMGE)
- MAKE_CASE(AArch64ISD::FCMGT)
- MAKE_CASE(AArch64ISD::CMEQz)
- MAKE_CASE(AArch64ISD::CMGEz)
- MAKE_CASE(AArch64ISD::CMGTz)
- MAKE_CASE(AArch64ISD::CMLEz)
- MAKE_CASE(AArch64ISD::CMLTz)
- MAKE_CASE(AArch64ISD::FCMEQz)
- MAKE_CASE(AArch64ISD::FCMGEz)
- MAKE_CASE(AArch64ISD::FCMGTz)
- MAKE_CASE(AArch64ISD::FCMLEz)
- MAKE_CASE(AArch64ISD::FCMLTz)
- MAKE_CASE(AArch64ISD::SADDV)
- MAKE_CASE(AArch64ISD::UADDV)
- MAKE_CASE(AArch64ISD::SDOT)
- MAKE_CASE(AArch64ISD::UDOT)
- MAKE_CASE(AArch64ISD::SMINV)
- MAKE_CASE(AArch64ISD::UMINV)
- MAKE_CASE(AArch64ISD::SMAXV)
- MAKE_CASE(AArch64ISD::UMAXV)
- MAKE_CASE(AArch64ISD::SADDV_PRED)
- MAKE_CASE(AArch64ISD::UADDV_PRED)
- MAKE_CASE(AArch64ISD::SMAXV_PRED)
- MAKE_CASE(AArch64ISD::UMAXV_PRED)
- MAKE_CASE(AArch64ISD::SMINV_PRED)
- MAKE_CASE(AArch64ISD::UMINV_PRED)
- MAKE_CASE(AArch64ISD::ORV_PRED)
- MAKE_CASE(AArch64ISD::EORV_PRED)
- MAKE_CASE(AArch64ISD::ANDV_PRED)
- MAKE_CASE(AArch64ISD::CLASTA_N)
- MAKE_CASE(AArch64ISD::CLASTB_N)
- MAKE_CASE(AArch64ISD::LASTA)
- MAKE_CASE(AArch64ISD::LASTB)
- MAKE_CASE(AArch64ISD::REINTERPRET_CAST)
- MAKE_CASE(AArch64ISD::LS64_BUILD)
- MAKE_CASE(AArch64ISD::LS64_EXTRACT)
- MAKE_CASE(AArch64ISD::TBL)
- MAKE_CASE(AArch64ISD::FADD_PRED)
- MAKE_CASE(AArch64ISD::FADDA_PRED)
- MAKE_CASE(AArch64ISD::FADDV_PRED)
- MAKE_CASE(AArch64ISD::FDIV_PRED)
- MAKE_CASE(AArch64ISD::FMA_PRED)
- MAKE_CASE(AArch64ISD::FMAX_PRED)
- MAKE_CASE(AArch64ISD::FMAXV_PRED)
- MAKE_CASE(AArch64ISD::FMAXNM_PRED)
- MAKE_CASE(AArch64ISD::FMAXNMV_PRED)
- MAKE_CASE(AArch64ISD::FMIN_PRED)
- MAKE_CASE(AArch64ISD::FMINV_PRED)
- MAKE_CASE(AArch64ISD::FMINNM_PRED)
- MAKE_CASE(AArch64ISD::FMINNMV_PRED)
- MAKE_CASE(AArch64ISD::FMUL_PRED)
- MAKE_CASE(AArch64ISD::FSUB_PRED)
- MAKE_CASE(AArch64ISD::RDSVL)
- MAKE_CASE(AArch64ISD::BIC)
- MAKE_CASE(AArch64ISD::BIT)
- MAKE_CASE(AArch64ISD::CBZ)
- MAKE_CASE(AArch64ISD::CBNZ)
- MAKE_CASE(AArch64ISD::TBZ)
- MAKE_CASE(AArch64ISD::TBNZ)
- MAKE_CASE(AArch64ISD::TC_RETURN)
- MAKE_CASE(AArch64ISD::PREFETCH)
- MAKE_CASE(AArch64ISD::SITOF)
- MAKE_CASE(AArch64ISD::UITOF)
- MAKE_CASE(AArch64ISD::NVCAST)
- MAKE_CASE(AArch64ISD::MRS)
- MAKE_CASE(AArch64ISD::SQSHL_I)
- MAKE_CASE(AArch64ISD::UQSHL_I)
- MAKE_CASE(AArch64ISD::SRSHR_I)
- MAKE_CASE(AArch64ISD::URSHR_I)
- MAKE_CASE(AArch64ISD::SQSHLU_I)
- MAKE_CASE(AArch64ISD::WrapperLarge)
- MAKE_CASE(AArch64ISD::LD2post)
- MAKE_CASE(AArch64ISD::LD3post)
- MAKE_CASE(AArch64ISD::LD4post)
- MAKE_CASE(AArch64ISD::ST2post)
- MAKE_CASE(AArch64ISD::ST3post)
- MAKE_CASE(AArch64ISD::ST4post)
- MAKE_CASE(AArch64ISD::LD1x2post)
- MAKE_CASE(AArch64ISD::LD1x3post)
- MAKE_CASE(AArch64ISD::LD1x4post)
- MAKE_CASE(AArch64ISD::ST1x2post)
- MAKE_CASE(AArch64ISD::ST1x3post)
- MAKE_CASE(AArch64ISD::ST1x4post)
- MAKE_CASE(AArch64ISD::LD1DUPpost)
- MAKE_CASE(AArch64ISD::LD2DUPpost)
- MAKE_CASE(AArch64ISD::LD3DUPpost)
- MAKE_CASE(AArch64ISD::LD4DUPpost)
- MAKE_CASE(AArch64ISD::LD1LANEpost)
- MAKE_CASE(AArch64ISD::LD2LANEpost)
- MAKE_CASE(AArch64ISD::LD3LANEpost)
- MAKE_CASE(AArch64ISD::LD4LANEpost)
- MAKE_CASE(AArch64ISD::ST2LANEpost)
- MAKE_CASE(AArch64ISD::ST3LANEpost)
- MAKE_CASE(AArch64ISD::ST4LANEpost)
- MAKE_CASE(AArch64ISD::SMULL)
- MAKE_CASE(AArch64ISD::UMULL)
- MAKE_CASE(AArch64ISD::PMULL)
- MAKE_CASE(AArch64ISD::FRECPE)
- MAKE_CASE(AArch64ISD::FRECPS)
- MAKE_CASE(AArch64ISD::FRSQRTE)
- MAKE_CASE(AArch64ISD::FRSQRTS)
- MAKE_CASE(AArch64ISD::STG)
- MAKE_CASE(AArch64ISD::STZG)
- MAKE_CASE(AArch64ISD::ST2G)
- MAKE_CASE(AArch64ISD::STZ2G)
- MAKE_CASE(AArch64ISD::SUNPKHI)
- MAKE_CASE(AArch64ISD::SUNPKLO)
- MAKE_CASE(AArch64ISD::UUNPKHI)
- MAKE_CASE(AArch64ISD::UUNPKLO)
- MAKE_CASE(AArch64ISD::INSR)
- MAKE_CASE(AArch64ISD::PTEST)
- MAKE_CASE(AArch64ISD::PTEST_ANY)
- MAKE_CASE(AArch64ISD::PTRUE)
- MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO)
- MAKE_CASE(AArch64ISD::ST1_PRED)
- MAKE_CASE(AArch64ISD::SST1_PRED)
- MAKE_CASE(AArch64ISD::SST1_SCALED_PRED)
- MAKE_CASE(AArch64ISD::SST1_SXTW_PRED)
- MAKE_CASE(AArch64ISD::SST1_UXTW_PRED)
- MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED)
- MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED)
- MAKE_CASE(AArch64ISD::SST1_IMM_PRED)
- MAKE_CASE(AArch64ISD::SSTNT1_PRED)
- MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED)
- MAKE_CASE(AArch64ISD::LDP)
- MAKE_CASE(AArch64ISD::LDNP)
- MAKE_CASE(AArch64ISD::STP)
- MAKE_CASE(AArch64ISD::STNP)
- MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::REVH_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::REVW_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::REVD_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU)
- MAKE_CASE(AArch64ISD::INDEX_VECTOR)
- MAKE_CASE(AArch64ISD::ADDP)
- MAKE_CASE(AArch64ISD::SADDLP)
- MAKE_CASE(AArch64ISD::UADDLP)
- MAKE_CASE(AArch64ISD::CALL_RVMARKER)
- MAKE_CASE(AArch64ISD::ASSERT_ZEXT_BOOL)
- MAKE_CASE(AArch64ISD::MOPS_MEMSET)
- MAKE_CASE(AArch64ISD::MOPS_MEMSET_TAGGING)
- MAKE_CASE(AArch64ISD::MOPS_MEMCOPY)
- MAKE_CASE(AArch64ISD::MOPS_MEMMOVE)
- MAKE_CASE(AArch64ISD::CALL_BTI)
- MAKE_CASE(AArch64ISD::MRRS)
- MAKE_CASE(AArch64ISD::MSRR)
- }
- #undef MAKE_CASE
- return nullptr;
- }
- MachineBasicBlock *
- AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
- MachineBasicBlock *MBB) const {
- // We materialise the F128CSEL pseudo-instruction as some control flow and a
- // phi node:
- // OrigBB:
- // [... previous instrs leading to comparison ...]
- // b.ne TrueBB
- // b EndBB
- // TrueBB:
- // ; Fallthrough
- // EndBB:
- // Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
- MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- DebugLoc DL = MI.getDebugLoc();
- MachineFunction::iterator It = ++MBB->getIterator();
- Register DestReg = MI.getOperand(0).getReg();
- Register IfTrueReg = MI.getOperand(1).getReg();
- Register IfFalseReg = MI.getOperand(2).getReg();
- unsigned CondCode = MI.getOperand(3).getImm();
- bool NZCVKilled = MI.getOperand(4).isKill();
- MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, TrueBB);
- MF->insert(It, EndBB);
- // Transfer rest of current basic-block to EndBB
- EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
- EndBB->transferSuccessorsAndUpdatePHIs(MBB);
- BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
- BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
- MBB->addSuccessor(TrueBB);
- MBB->addSuccessor(EndBB);
- // TrueBB falls through to the end.
- TrueBB->addSuccessor(EndBB);
- if (!NZCVKilled) {
- TrueBB->addLiveIn(AArch64::NZCV);
- EndBB->addLiveIn(AArch64::NZCV);
- }
- BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
- .addReg(IfTrueReg)
- .addMBB(TrueBB)
- .addReg(IfFalseReg)
- .addMBB(MBB);
- MI.eraseFromParent();
- return EndBB;
- }
- MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
- MachineInstr &MI, MachineBasicBlock *BB) const {
- assert(!isAsynchronousEHPersonality(classifyEHPersonality(
- BB->getParent()->getFunction().getPersonalityFn())) &&
- "SEH does not use catchret!");
- return BB;
- }
- MachineBasicBlock *
- AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
- MachineInstr &MI,
- MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
- MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
- MIB.add(MI.getOperand(1)); // slice index register
- MIB.add(MI.getOperand(2)); // slice index offset
- MIB.add(MI.getOperand(3)); // pg
- MIB.add(MI.getOperand(4)); // base
- MIB.add(MI.getOperand(5)); // offset
- MI.eraseFromParent(); // The pseudo is gone now.
- return BB;
- }
- MachineBasicBlock *
- AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- MachineInstrBuilder MIB =
- BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::LDR_ZA));
- MIB.addReg(AArch64::ZA, RegState::Define);
- MIB.add(MI.getOperand(0)); // Vector select register
- MIB.add(MI.getOperand(1)); // Vector select offset
- MIB.add(MI.getOperand(2)); // Base
- MIB.add(MI.getOperand(1)); // Offset, same as vector select offset
- MI.eraseFromParent(); // The pseudo is gone now.
- return BB;
- }
- MachineBasicBlock *
- AArch64TargetLowering::EmitZAInstr(unsigned Opc, unsigned BaseReg,
- MachineInstr &MI,
- MachineBasicBlock *BB, bool HasTile) const {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
- unsigned StartIdx = 0;
- if (HasTile) {
- MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
- MIB.addReg(BaseReg + MI.getOperand(0).getImm());
- StartIdx = 1;
- } else
- MIB.addReg(BaseReg, RegState::Define).addReg(BaseReg);
- for (unsigned I = StartIdx; I < MI.getNumOperands(); ++I)
- MIB.add(MI.getOperand(I));
- MI.eraseFromParent(); // The pseudo is gone now.
- return BB;
- }
- MachineBasicBlock *
- AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- MachineInstrBuilder MIB =
- BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::ZERO_M));
- MIB.add(MI.getOperand(0)); // Mask
- unsigned Mask = MI.getOperand(0).getImm();
- for (unsigned I = 0; I < 8; I++) {
- if (Mask & (1 << I))
- MIB.addDef(AArch64::ZAD0 + I, RegState::ImplicitDefine);
- }
- MI.eraseFromParent(); // The pseudo is gone now.
- return BB;
- }
- MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
- MachineInstr &MI, MachineBasicBlock *BB) const {
- int SMEOrigInstr = AArch64::getSMEPseudoMap(MI.getOpcode());
- if (SMEOrigInstr != -1) {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- uint64_t SMEMatrixType =
- TII->get(MI.getOpcode()).TSFlags & AArch64::SMEMatrixTypeMask;
- switch (SMEMatrixType) {
- case (AArch64::SMEMatrixArray):
- return EmitZAInstr(SMEOrigInstr, AArch64::ZA, MI, BB, /*HasTile*/ false);
- case (AArch64::SMEMatrixTileB):
- return EmitZAInstr(SMEOrigInstr, AArch64::ZAB0, MI, BB, /*HasTile*/ true);
- case (AArch64::SMEMatrixTileH):
- return EmitZAInstr(SMEOrigInstr, AArch64::ZAH0, MI, BB, /*HasTile*/ true);
- case (AArch64::SMEMatrixTileS):
- return EmitZAInstr(SMEOrigInstr, AArch64::ZAS0, MI, BB, /*HasTile*/ true);
- case (AArch64::SMEMatrixTileD):
- return EmitZAInstr(SMEOrigInstr, AArch64::ZAD0, MI, BB, /*HasTile*/ true);
- case (AArch64::SMEMatrixTileQ):
- return EmitZAInstr(SMEOrigInstr, AArch64::ZAQ0, MI, BB, /*HasTile*/ true);
- }
- }
- switch (MI.getOpcode()) {
- default:
- #ifndef NDEBUG
- MI.dump();
- #endif
- llvm_unreachable("Unexpected instruction for custom inserter!");
- case AArch64::F128CSEL:
- return EmitF128CSEL(MI, BB);
- case TargetOpcode::STATEPOINT:
- // STATEPOINT is a pseudo instruction which has no implicit defs/uses,
- // while the BL call instruction (to which the statepoint is eventually
- // lowered) has an implicit def. This def is early-clobber as it is set at
- // the moment of the call, before any use is read.
- // Add this implicit dead def here as a workaround.
- MI.addOperand(*MI.getMF(),
- MachineOperand::CreateReg(
- AArch64::LR, /*isDef*/ true,
- /*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
- /*isUndef*/ false, /*isEarlyClobber*/ true));
- [[fallthrough]];
- case TargetOpcode::STACKMAP:
- case TargetOpcode::PATCHPOINT:
- return emitPatchPoint(MI, BB);
- case AArch64::CATCHRET:
- return EmitLoweredCatchRet(MI, BB);
- case AArch64::LD1_MXIPXX_H_PSEUDO_B:
- return EmitTileLoad(AArch64::LD1_MXIPXX_H_B, AArch64::ZAB0, MI, BB);
- case AArch64::LD1_MXIPXX_H_PSEUDO_H:
- return EmitTileLoad(AArch64::LD1_MXIPXX_H_H, AArch64::ZAH0, MI, BB);
- case AArch64::LD1_MXIPXX_H_PSEUDO_S:
- return EmitTileLoad(AArch64::LD1_MXIPXX_H_S, AArch64::ZAS0, MI, BB);
- case AArch64::LD1_MXIPXX_H_PSEUDO_D:
- return EmitTileLoad(AArch64::LD1_MXIPXX_H_D, AArch64::ZAD0, MI, BB);
- case AArch64::LD1_MXIPXX_H_PSEUDO_Q:
- return EmitTileLoad(AArch64::LD1_MXIPXX_H_Q, AArch64::ZAQ0, MI, BB);
- case AArch64::LD1_MXIPXX_V_PSEUDO_B:
- return EmitTileLoad(AArch64::LD1_MXIPXX_V_B, AArch64::ZAB0, MI, BB);
- case AArch64::LD1_MXIPXX_V_PSEUDO_H:
- return EmitTileLoad(AArch64::LD1_MXIPXX_V_H, AArch64::ZAH0, MI, BB);
- case AArch64::LD1_MXIPXX_V_PSEUDO_S:
- return EmitTileLoad(AArch64::LD1_MXIPXX_V_S, AArch64::ZAS0, MI, BB);
- case AArch64::LD1_MXIPXX_V_PSEUDO_D:
- return EmitTileLoad(AArch64::LD1_MXIPXX_V_D, AArch64::ZAD0, MI, BB);
- case AArch64::LD1_MXIPXX_V_PSEUDO_Q:
- return EmitTileLoad(AArch64::LD1_MXIPXX_V_Q, AArch64::ZAQ0, MI, BB);
- case AArch64::LDR_ZA_PSEUDO:
- return EmitFill(MI, BB);
- case AArch64::ZERO_M_PSEUDO:
- return EmitZero(MI, BB);
- }
- }
- //===----------------------------------------------------------------------===//
- // AArch64 Lowering private implementation.
- //===----------------------------------------------------------------------===//
- //===----------------------------------------------------------------------===//
- // Lowering Code
- //===----------------------------------------------------------------------===//
- // Forward declarations of SVE fixed length lowering helpers
- static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
- static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
- static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
- static SDValue convertFixedMaskToScalableVector(SDValue Mask,
- SelectionDAG &DAG);
- static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
- EVT VT);
- /// isZerosVector - Check whether SDNode N is a zero-filled vector.
- static bool isZerosVector(const SDNode *N) {
- // Look through a bit convert.
- while (N->getOpcode() == ISD::BITCAST)
- N = N->getOperand(0).getNode();
- if (ISD::isConstantSplatVectorAllZeros(N))
- return true;
- if (N->getOpcode() != AArch64ISD::DUP)
- return false;
- auto Opnd0 = N->getOperand(0);
- return isNullConstant(Opnd0) || isNullFPConstant(Opnd0);
- }
- /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
- /// CC
- static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
- switch (CC) {
- default:
- llvm_unreachable("Unknown condition code!");
- case ISD::SETNE:
- return AArch64CC::NE;
- case ISD::SETEQ:
- return AArch64CC::EQ;
- case ISD::SETGT:
- return AArch64CC::GT;
- case ISD::SETGE:
- return AArch64CC::GE;
- case ISD::SETLT:
- return AArch64CC::LT;
- case ISD::SETLE:
- return AArch64CC::LE;
- case ISD::SETUGT:
- return AArch64CC::HI;
- case ISD::SETUGE:
- return AArch64CC::HS;
- case ISD::SETULT:
- return AArch64CC::LO;
- case ISD::SETULE:
- return AArch64CC::LS;
- }
- }
- /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
- static void changeFPCCToAArch64CC(ISD::CondCode CC,
- AArch64CC::CondCode &CondCode,
- AArch64CC::CondCode &CondCode2) {
- CondCode2 = AArch64CC::AL;
- switch (CC) {
- default:
- llvm_unreachable("Unknown FP condition!");
- case ISD::SETEQ:
- case ISD::SETOEQ:
- CondCode = AArch64CC::EQ;
- break;
- case ISD::SETGT:
- case ISD::SETOGT:
- CondCode = AArch64CC::GT;
- break;
- case ISD::SETGE:
- case ISD::SETOGE:
- CondCode = AArch64CC::GE;
- break;
- case ISD::SETOLT:
- CondCode = AArch64CC::MI;
- break;
- case ISD::SETOLE:
- CondCode = AArch64CC::LS;
- break;
- case ISD::SETONE:
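- // one == olt || ogt, so two condition codes are needed: MI covers the
- // olt half and GT covers the ogt half.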
- CondCode = AArch64CC::MI;
- CondCode2 = AArch64CC::GT;
- break;
- case ISD::SETO:
- CondCode = AArch64CC::VC;
- break;
- case ISD::SETUO:
- CondCode = AArch64CC::VS;
- break;
- case ISD::SETUEQ:
- CondCode = AArch64CC::EQ;
- CondCode2 = AArch64CC::VS;
- break;
- case ISD::SETUGT:
- CondCode = AArch64CC::HI;
- break;
- case ISD::SETUGE:
- CondCode = AArch64CC::PL;
- break;
- case ISD::SETLT:
- case ISD::SETULT:
- CondCode = AArch64CC::LT;
- break;
- case ISD::SETLE:
- case ISD::SETULE:
- CondCode = AArch64CC::LE;
- break;
- case ISD::SETNE:
- case ISD::SETUNE:
- CondCode = AArch64CC::NE;
- break;
- }
- }
- /// Convert a DAG fp condition code to an AArch64 CC.
- /// This differs from changeFPCCToAArch64CC in that it returns cond codes that
- /// should be AND'ed instead of OR'ed.
- static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
- AArch64CC::CondCode &CondCode,
- AArch64CC::CondCode &CondCode2) {
- CondCode2 = AArch64CC::AL;
- switch (CC) {
- default:
- changeFPCCToAArch64CC(CC, CondCode, CondCode2);
- assert(CondCode2 == AArch64CC::AL);
- break;
- case ISD::SETONE:
- // (a one b)
- // == ((a olt b) || (a ogt b))
- // == ((a ord b) && (a une b))
- CondCode = AArch64CC::VC;
- CondCode2 = AArch64CC::NE;
- break;
- case ISD::SETUEQ:
- // (a ueq b)
- // == ((a uno b) || (a oeq b))
- // == ((a ule b) && (a uge b))
- CondCode = AArch64CC::PL;
- CondCode2 = AArch64CC::LE;
- break;
- }
- }
- /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
- /// CC usable with the vector instructions. Fewer operations are available
- /// without a real NZCV register, so we have to use less efficient combinations
- /// to get the same effect.
- static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
- AArch64CC::CondCode &CondCode,
- AArch64CC::CondCode &CondCode2,
- bool &Invert) {
- Invert = false;
- switch (CC) {
- default:
- // Mostly the scalar mappings work fine.
- changeFPCCToAArch64CC(CC, CondCode, CondCode2);
- break;
- case ISD::SETUO:
- Invert = true;
- [[fallthrough]];
- case ISD::SETO:
- CondCode = AArch64CC::MI;
- CondCode2 = AArch64CC::GE;
- break;
- case ISD::SETUEQ:
- case ISD::SETULT:
- case ISD::SETULE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- // All of the compare-mask comparisons are ordered, but we can switch
- // between the two by a double inversion. E.g. ULE == !OGT.
- Invert = true;
- changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32),
- CondCode, CondCode2);
- break;
- }
- }
- static bool isLegalArithImmed(uint64_t C) {
- // Matches AArch64DAGToDAGISel::SelectArithImmed().
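- // The immediate must fit in 12 bits, optionally shifted left by 12:
- // e.g. 0xABC and 0xABC000 are legal, but 0x1ABC is not.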
- bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
- LLVM_DEBUG(dbgs() << "Is imm " << C
- << " legal: " << (IsLegal ? "yes\n" : "no\n"));
- return IsLegal;
- }
- // Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
- // the grounds that "op1 - (-op2) == op1 + op2" ? Not always, the C and V flags
- // can be set differently by this operation. It comes down to whether
- // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then
- // everything is fine. If not then the optimization is wrong. Thus general
- // comparisons are only valid if op2 != 0.
- //
- // So, finally, the only LLVM-native comparisons that don't mention C and V
- // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
- // the absence of information about op2.
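- // (For instance, with op2 == 0, "cmp x0, #0" sets the carry flag while
- // "cmn x0, #0" clears it, so an unsigned comparison would see different
- // flags.)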
- static bool isCMN(SDValue Op, ISD::CondCode CC) {
- return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE);
- }
- static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
- SelectionDAG &DAG, SDValue Chain,
- bool IsSignaling) {
- EVT VT = LHS.getValueType();
- assert(VT != MVT::f128);
- const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
- if (VT == MVT::f16 && !FullFP16) {
- LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
- {Chain, LHS});
- RHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
- {LHS.getValue(1), RHS});
- Chain = RHS.getValue(1);
- VT = MVT::f32;
- }
- unsigned Opcode =
- IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP;
- return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS});
- }
- static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- const SDLoc &dl, SelectionDAG &DAG) {
- EVT VT = LHS.getValueType();
- const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
- if (VT.isFloatingPoint()) {
- assert(VT != MVT::f128);
- if (VT == MVT::f16 && !FullFP16) {
- LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
- RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
- VT = MVT::f32;
- }
- return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
- }
- // The CMP instruction is just an alias for SUBS, and representing it as
- // SUBS means that it's possible to get CSE with subtract operations.
- // A later phase can perform the optimization of setting the destination
- // register to WZR/XZR if it ends up being unused.
- unsigned Opcode = AArch64ISD::SUBS;
- if (isCMN(RHS, CC)) {
- // Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction?
- Opcode = AArch64ISD::ADDS;
- RHS = RHS.getOperand(1);
- } else if (isCMN(LHS, CC)) {
- // As we are looking for EQ/NE compares, the operands can be commuted; can
- // we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
- Opcode = AArch64ISD::ADDS;
- LHS = LHS.getOperand(1);
- } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
- if (LHS.getOpcode() == ISD::AND) {
- // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
- // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
- // of the signed comparisons.
- const SDValue ANDSNode = DAG.getNode(AArch64ISD::ANDS, dl,
- DAG.getVTList(VT, MVT_CC),
- LHS.getOperand(0),
- LHS.getOperand(1));
- // Replace all users of (and X, Y) with newly generated (ands X, Y)
- DAG.ReplaceAllUsesWith(LHS, ANDSNode);
- return ANDSNode.getValue(1);
- } else if (LHS.getOpcode() == AArch64ISD::ANDS) {
- // Use result of ANDS
- return LHS.getValue(1);
- }
- }
- return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
- .getValue(1);
- }
- /// \defgroup AArch64CCMP CMP;CCMP matching
- ///
- /// These functions deal with the formation of CMP;CCMP;... sequences.
- /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
- /// a comparison. They set the NZCV flags to a predefined value if their
- /// predicate is false. This allows us to express arbitrary conjunctions; for
- /// example, "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))" can be
- /// expressed as:
- /// cmp A
- /// ccmp B, inv(CB), CA
- /// check for CB flags
- ///
- /// This naturally lets us implement chains of AND operations with SETCC
- /// operands. And we can even implement some other situations by transforming
- /// them:
- /// - We can implement (NEG SETCC) i.e. negating a single comparison by
- /// negating the flags used in a CCMP/FCCMP operation.
- /// - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
- /// by negating the flags we test for afterwards. i.e.
- /// NEG (CMP CCMP CCCMP ...) can be implemented.
- /// - Note that we can only ever negate all previously processed results.
- /// What we can not implement by flipping the flags to test is a negation
- /// of two sub-trees (because the negation affects all sub-trees emitted so
- /// far, so the 2nd sub-tree we emit would also affect the first).
- /// With those tools we can implement some OR operations:
- /// - (OR (SETCC A) (SETCC B)) can be implemented via:
- /// NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
- /// - After transforming OR to NEG/AND combinations we may be able to use NEG
- /// elimination rules from earlier to implement the whole thing as a
- /// CCMP/FCCMP chain.
- ///
- /// As complete example:
- /// or (or (setCA (cmp A)) (setCB (cmp B)))
- /// (and (setCC (cmp C)) (setCD (cmp D)))
- /// can be reassociated to:
- /// or (and (setCC (cmp C)) (setCD (cmp D)))
- /// (or (setCA (cmp A)) (setCB (cmp B)))
- /// can be transformed to:
- /// not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
- /// (and (not (setCA (cmp A))) (not (setCB (cmp B)))))
- /// which can be implemented as:
- /// cmp C
- /// ccmp D, inv(CD), CC
- /// ccmp A, CA, inv(CD)
- /// ccmp B, CB, inv(CA)
- /// check for CB flags
- ///
- /// A counterexample is "or (and A B) (and C D)" which translates to
- /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))); we
- /// can only implement one of the inner (not) operations, not both!
- /// @{
- /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
- static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, SDValue CCOp,
- AArch64CC::CondCode Predicate,
- AArch64CC::CondCode OutCC,
- const SDLoc &DL, SelectionDAG &DAG) {
- unsigned Opcode = 0;
- const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
- if (LHS.getValueType().isFloatingPoint()) {
- assert(LHS.getValueType() != MVT::f128);
- if (LHS.getValueType() == MVT::f16 && !FullFP16) {
- LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
- RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
- }
- Opcode = AArch64ISD::FCCMP;
- } else if (RHS.getOpcode() == ISD::SUB) {
- SDValue SubOp0 = RHS.getOperand(0);
- if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- // See emitComparison() on why we can only do this for SETEQ and SETNE.
- Opcode = AArch64ISD::CCMN;
- RHS = RHS.getOperand(1);
- }
- }
- if (Opcode == 0)
- Opcode = AArch64ISD::CCMP;
- SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
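- // The immediate NZCV value is what the CCMP writes when Predicate is false;
- // it is chosen to satisfy the inverse of OutCC, so the overall condition then
- // reads as false.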
- AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
- unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
- SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
- return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
- }
- /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
- /// expressed as a conjunction. See \ref AArch64CCMP.
- /// \param CanNegate Set to true if we can negate the whole sub-tree just by
- /// changing the conditions on the SETCC tests.
- /// (this means we can call emitConjunctionRec() with
- /// Negate==true on this sub-tree)
- /// \param MustBeFirst Set to true if this subtree needs to be negated and we
- /// cannot do the negation naturally. We are required to
- /// emit the subtree first in this case.
- /// \param WillNegate Is true if we are called when the result of this
- /// subexpression must be negated. This happens when the
- /// outer expression is an OR. We can use this fact to know
- /// that we have a double negation (or (or ...) ...) that
- /// can be implemented for free.
- static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
- bool &MustBeFirst, bool WillNegate,
- unsigned Depth = 0) {
- if (!Val.hasOneUse())
- return false;
- unsigned Opcode = Val->getOpcode();
- if (Opcode == ISD::SETCC) {
- if (Val->getOperand(0).getValueType() == MVT::f128)
- return false;
- CanNegate = true;
- MustBeFirst = false;
- return true;
- }
- // Protect against exponential runtime and stack overflow.
- if (Depth > 6)
- return false;
- if (Opcode == ISD::AND || Opcode == ISD::OR) {
- bool IsOR = Opcode == ISD::OR;
- SDValue O0 = Val->getOperand(0);
- SDValue O1 = Val->getOperand(1);
- bool CanNegateL;
- bool MustBeFirstL;
- if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
- return false;
- bool CanNegateR;
- bool MustBeFirstR;
- if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
- return false;
- if (MustBeFirstL && MustBeFirstR)
- return false;
- if (IsOR) {
- // For an OR expression we need to be able to naturally negate at least
- // one side or we cannot do the transformation at all.
- if (!CanNegateL && !CanNegateR)
- return false;
- // If the result of the OR will be negated and we can naturally negate
- // the leaves, then this sub-tree as a whole negates naturally.
- CanNegate = WillNegate && CanNegateL && CanNegateR;
- // If we cannot naturally negate the whole sub-tree, then this must be
- // emitted first.
- MustBeFirst = !CanNegate;
- } else {
- assert(Opcode == ISD::AND && "Must be OR or AND");
- // We cannot naturally negate an AND operation.
- CanNegate = false;
- MustBeFirst = MustBeFirstL || MustBeFirstR;
- }
- return true;
- }
- return false;
- }
- /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
- /// of CCMP/FCCMP ops. See @ref AArch64CCMP.
- /// Tries to transform the given i1-producing node @p Val into a series of
- /// compare and conditional compare operations. @returns an NZCV-flags-producing
- /// node and sets @p OutCC to the flags that should be tested, or returns
- /// SDValue() if the transformation was not possible.
- /// \p Negate is true if we want this sub-tree to be negated just by changing
- /// SETCC conditions.
- static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
- AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
- AArch64CC::CondCode Predicate) {
- // We're at a tree leaf, produce a conditional comparison operation.
- unsigned Opcode = Val->getOpcode();
- if (Opcode == ISD::SETCC) {
- SDValue LHS = Val->getOperand(0);
- SDValue RHS = Val->getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
- bool isInteger = LHS.getValueType().isInteger();
- if (Negate)
- CC = getSetCCInverse(CC, LHS.getValueType());
- SDLoc DL(Val);
- // Determine OutCC and handle FP special case.
- if (isInteger) {
- OutCC = changeIntCCToAArch64CC(CC);
- } else {
- assert(LHS.getValueType().isFloatingPoint());
- AArch64CC::CondCode ExtraCC;
- changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
- // Some floating point conditions can't be tested with a single condition
- // code. Construct an additional comparison in this case.
- if (ExtraCC != AArch64CC::AL) {
- SDValue ExtraCmp;
- if (!CCOp.getNode())
- ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
- else
- ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
- ExtraCC, DL, DAG);
- CCOp = ExtraCmp;
- Predicate = ExtraCC;
- }
- }
- // Produce a normal comparison if we are first in the chain
- if (!CCOp)
- return emitComparison(LHS, RHS, CC, DL, DAG);
- // Otherwise produce a ccmp.
- return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
- DAG);
- }
- assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
- bool IsOR = Opcode == ISD::OR;
- SDValue LHS = Val->getOperand(0);
- bool CanNegateL;
- bool MustBeFirstL;
- bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
- assert(ValidL && "Valid conjunction/disjunction tree");
- (void)ValidL;
- SDValue RHS = Val->getOperand(1);
- bool CanNegateR;
- bool MustBeFirstR;
- bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
- assert(ValidR && "Valid conjunction/disjunction tree");
- (void)ValidR;
- // Swap sub-tree that must come first to the right side.
- if (MustBeFirstL) {
- assert(!MustBeFirstR && "Valid conjunction/disjunction tree");
- std::swap(LHS, RHS);
- std::swap(CanNegateL, CanNegateR);
- std::swap(MustBeFirstL, MustBeFirstR);
- }
- bool NegateR;
- bool NegateAfterR;
- bool NegateL;
- bool NegateAfterAll;
- if (Opcode == ISD::OR) {
- // Swap the sub-tree that we can negate naturally to the left.
- if (!CanNegateL) {
- assert(CanNegateR && "at least one side must be negatable");
- assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
- assert(!Negate);
- std::swap(LHS, RHS);
- NegateR = false;
- NegateAfterR = true;
- } else {
- // Negate the left sub-tree if possible, otherwise negate the result.
- NegateR = CanNegateR;
- NegateAfterR = !CanNegateR;
- }
- NegateL = true;
- NegateAfterAll = !Negate;
- } else {
- assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
- assert(!Negate && "Valid conjunction/disjunction tree");
- NegateL = false;
- NegateR = false;
- NegateAfterR = false;
- NegateAfterAll = false;
- }
- // Emit sub-trees.
- AArch64CC::CondCode RHSCC;
- SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate);
- if (NegateAfterR)
- RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
- SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC);
- if (NegateAfterAll)
- OutCC = AArch64CC::getInvertedCondCode(OutCC);
- return CmpL;
- }
- /// Emit expression as a conjunction (a series of CCMP/FCCMP ops).
- /// In some cases this is even possible with OR operations in the expression.
- /// See \ref AArch64CCMP.
- /// \see emitConjunctionRec().
- static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
- AArch64CC::CondCode &OutCC) {
- bool DummyCanNegate;
- bool DummyMustBeFirst;
- if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false))
- return SDValue();
- return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL);
- }
- /// @}
- /// Returns how profitable it is to fold a comparison's operand's shift and/or
- /// extension operations.
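- /// The weight is roughly: 2 for an extend combined with a small (<= 4) shift,
- /// 1 for a lone extend or an in-range shift, and 0 otherwise.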
- static unsigned getCmpOperandFoldingProfit(SDValue Op) {
- auto isSupportedExtend = [&](SDValue V) {
- if (V.getOpcode() == ISD::SIGN_EXTEND_INREG)
- return true;
- if (V.getOpcode() == ISD::AND)
- if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
- uint64_t Mask = MaskCst->getZExtValue();
- return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
- }
- return false;
- };
- if (!Op.hasOneUse())
- return 0;
- if (isSupportedExtend(Op))
- return 1;
- unsigned Opc = Op.getOpcode();
- if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA)
- if (ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- uint64_t Shift = ShiftCst->getZExtValue();
- if (isSupportedExtend(Op.getOperand(0)))
- return (Shift <= 4) ? 2 : 1;
- EVT VT = Op.getValueType();
- if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63))
- return 1;
- }
- return 0;
- }
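- // Build the flag-setting comparison for LHS <CC> RHS: massage immediates that
- // cannot be encoded, prefer the operand order that folds shifts/extends, and
- // try CMN or a CCMP conjunction before falling back to a plain SUBS/FCMP.
- // AArch64cc receives the condition code to test.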
- static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &AArch64cc, SelectionDAG &DAG,
- const SDLoc &dl) {
- if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
- EVT VT = RHS.getValueType();
- uint64_t C = RHSC->getZExtValue();
- if (!isLegalArithImmed(C)) {
- // Constant does not fit, try adjusting it by one?
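- // For example, "x s< 0x1001" cannot encode 0x1001, but the equivalent
- // "x s<= 0x1000" uses an immediate that can be encoded.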
- switch (CC) {
- default:
- break;
- case ISD::SETLT:
- case ISD::SETGE:
- if ((VT == MVT::i32 && C != 0x80000000 &&
- isLegalArithImmed((uint32_t)(C - 1))) ||
- (VT == MVT::i64 && C != 0x80000000ULL &&
- isLegalArithImmed(C - 1ULL))) {
- CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
- C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
- RHS = DAG.getConstant(C, dl, VT);
- }
- break;
- case ISD::SETULT:
- case ISD::SETUGE:
- if ((VT == MVT::i32 && C != 0 &&
- isLegalArithImmed((uint32_t)(C - 1))) ||
- (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
- CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
- C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
- RHS = DAG.getConstant(C, dl, VT);
- }
- break;
- case ISD::SETLE:
- case ISD::SETGT:
- if ((VT == MVT::i32 && C != INT32_MAX &&
- isLegalArithImmed((uint32_t)(C + 1))) ||
- (VT == MVT::i64 && C != INT64_MAX &&
- isLegalArithImmed(C + 1ULL))) {
- CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
- C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
- RHS = DAG.getConstant(C, dl, VT);
- }
- break;
- case ISD::SETULE:
- case ISD::SETUGT:
- if ((VT == MVT::i32 && C != UINT32_MAX &&
- isLegalArithImmed((uint32_t)(C + 1))) ||
- (VT == MVT::i64 && C != UINT64_MAX &&
- isLegalArithImmed(C + 1ULL))) {
- CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
- C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
- RHS = DAG.getConstant(C, dl, VT);
- }
- break;
- }
- }
- }
- // Comparisons are canonicalized so that the RHS operand is simpler than the
- // LHS one, the extreme case being when RHS is an immediate. However, AArch64
- // can fold some shift+extend operations on the RHS operand, so swap the
- // operands if that can be done.
- //
- // For example:
- // lsl w13, w11, #1
- // cmp w13, w12
- // can be turned into:
- // cmp w12, w11, lsl #1
- if (!isa<ConstantSDNode>(RHS) ||
- !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
- SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
- if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
- std::swap(LHS, RHS);
- CC = ISD::getSetCCSwappedOperands(CC);
- }
- }
- SDValue Cmp;
- AArch64CC::CondCode AArch64CC;
- if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
- const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
- // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
- // For the i8 operand, the largest immediate is 255, so this can be easily
- // encoded in the compare instruction. For the i16 operand, however, the
- // largest immediate cannot be encoded in the compare.
- // Therefore, use a sign extending load and cmn to avoid materializing the
- // -1 constant. For example,
- // movz w1, #65535
- // ldrh w0, [x0, #0]
- // cmp w0, w1
- // >
- // ldrsh w0, [x0, #0]
- // cmn w0, #1
- // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS)
- // if and only if (sext LHS) == (sext RHS). The checks are in place to
- // ensure both the LHS and RHS are truly zero extended and to make sure the
- // transformation is profitable.
- if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
- cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
- cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
- LHS.getNode()->hasNUsesOfValue(1, 0)) {
- int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
- if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
- SDValue SExt =
- DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
- DAG.getValueType(MVT::i16));
- Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
- RHS.getValueType()),
- CC, dl, DAG);
- AArch64CC = changeIntCCToAArch64CC(CC);
- }
- }
- if (!Cmp && (RHSC->isZero() || RHSC->isOne())) {
- if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
- if ((CC == ISD::SETNE) ^ RHSC->isZero())
- AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
- }
- }
- }
- if (!Cmp) {
- Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
- AArch64CC = changeIntCCToAArch64CC(CC);
- }
- AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
- return Cmp;
- }
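- // Lower an overflow-checking node ([SU]ADDO, [SU]SUBO, [SU]MULO) to the
- // corresponding AArch64 flag-setting operation. Returns the result value and
- // the NZCV-producing node, and sets CC to the condition signalling overflow.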
- static std::pair<SDValue, SDValue>
- getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
- assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
- "Unsupported value type");
- SDValue Value, Overflow;
- SDLoc DL(Op);
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- unsigned Opc = 0;
- switch (Op.getOpcode()) {
- default:
- llvm_unreachable("Unknown overflow instruction!");
- case ISD::SADDO:
- Opc = AArch64ISD::ADDS;
- CC = AArch64CC::VS;
- break;
- case ISD::UADDO:
- Opc = AArch64ISD::ADDS;
- CC = AArch64CC::HS;
- break;
- case ISD::SSUBO:
- Opc = AArch64ISD::SUBS;
- CC = AArch64CC::VS;
- break;
- case ISD::USUBO:
- Opc = AArch64ISD::SUBS;
- CC = AArch64CC::LO;
- break;
- // Multiply needs a little bit of extra work.
- case ISD::SMULO:
- case ISD::UMULO: {
- CC = AArch64CC::NE;
- bool IsSigned = Op.getOpcode() == ISD::SMULO;
- if (Op.getValueType() == MVT::i32) {
- // Extend to 64-bits, then perform a 64-bit multiply.
- unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
- RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
- SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
- Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
- // Check that the result fits into a 32-bit integer.
- SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC);
- if (IsSigned) {
- // cmp xreg, wreg, sxtw
- SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value);
- Overflow =
- DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1);
- } else {
- // tst xreg, #0xffffffff00000000
- SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64);
- Overflow =
- DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1);
- }
- break;
- }
- assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type");
- // For the 64 bit multiply
- Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
- if (IsSigned) {
- SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
- SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
- DAG.getConstant(63, DL, MVT::i64));
- // It is important that LowerBits is last, otherwise the arithmetic
- // shift will not be folded into the compare (SUBS).
- SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
- Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
- .getValue(1);
- } else {
- SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
- SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
- Overflow =
- DAG.getNode(AArch64ISD::SUBS, DL, VTs,
- DAG.getConstant(0, DL, MVT::i64),
- UpperBits).getValue(1);
- }
- break;
- }
- } // switch (...)
- if (Opc) {
- SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
- // Emit the AArch64 operation with overflow check.
- Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);
- Overflow = Value.getValue(1);
- }
- return std::make_pair(Value, Overflow);
- }
- SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
- if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerToScalableOp(Op, DAG);
- SDValue Sel = Op.getOperand(0);
- SDValue Other = Op.getOperand(1);
- SDLoc dl(Sel);
- // If the operand is an overflow checking operation, invert the condition
- // code and kill the Not operation. I.e., transform:
- // (xor (overflow_op_bool, 1))
- // -->
- // (csel 1, 0, invert(cc), overflow_op_bool)
- // ... which later gets transformed to just a cset instruction with an
- // inverted condition code, rather than a cset + eor sequence.
- if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) {
- // Only lower legal XALUO ops.
- if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0)))
- return SDValue();
- SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
- SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
- AArch64CC::CondCode CC;
- SDValue Value, Overflow;
- std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG);
- SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
- return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal,
- CCVal, Overflow);
- }
- // If neither operand is a SELECT_CC, give up.
- if (Sel.getOpcode() != ISD::SELECT_CC)
- std::swap(Sel, Other);
- if (Sel.getOpcode() != ISD::SELECT_CC)
- return Op;
- // The folding we want to perform is:
- // (xor x, (select_cc a, b, cc, 0, -1) )
- // -->
- // (csel x, (xor x, -1), cc ...)
- //
- // The latter will get matched to a CSINV instruction.
- ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get();
- SDValue LHS = Sel.getOperand(0);
- SDValue RHS = Sel.getOperand(1);
- SDValue TVal = Sel.getOperand(2);
- SDValue FVal = Sel.getOperand(3);
- // FIXME: This could be generalized to non-integer comparisons.
- if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
- return Op;
- ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
- ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
- // The values aren't constants, this isn't the pattern we're looking for.
- if (!CFVal || !CTVal)
- return Op;
- // We can commute the SELECT_CC by inverting the condition. This
- // might be needed to make this fit into a CSINV pattern.
- if (CTVal->isAllOnes() && CFVal->isZero()) {
- std::swap(TVal, FVal);
- std::swap(CTVal, CFVal);
- CC = ISD::getSetCCInverse(CC, LHS.getValueType());
- }
- // If the constants line up, perform the transform!
- if (CTVal->isZero() && CFVal->isAllOnes()) {
- SDValue CCVal;
- SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
- FVal = Other;
- TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
- DAG.getConstant(-1ULL, dl, Other.getValueType()));
- return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
- CCVal, Cmp);
- }
- return Op;
- }
- // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
- // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
- // sets 'C' bit to 0.
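- // For example, with Invert == false this emits SUBS(Value, 1), whose carry is
- // set exactly when Value is non-zero (i.e. unsigned Value >= 1).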
- static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) {
- SDLoc DL(Value);
- EVT VT = Value.getValueType();
- SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value;
- SDValue Op1 = Invert ? Value : DAG.getConstant(1, DL, VT);
- SDValue Cmp =
- DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::Glue), Op0, Op1);
- return Cmp.getValue(1);
- }
- // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
- // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
- static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG,
- bool Invert) {
- assert(Flag.getResNo() == 1);
- SDLoc DL(Flag);
- SDValue Zero = DAG.getConstant(0, DL, VT);
- SDValue One = DAG.getConstant(1, DL, VT);
- unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS;
- SDValue CC = DAG.getConstant(Cond, DL, MVT::i32);
- return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
- }
- // Value is 1 if 'V' bit of NZCV is 1, else 0
- static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
- assert(Flag.getResNo() == 1);
- SDLoc DL(Flag);
- SDValue Zero = DAG.getConstant(0, DL, VT);
- SDValue One = DAG.getConstant(1, DL, VT);
- SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32);
- return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
- }
- // This lowering is inefficient, but it will get cleaned up by
- // `foldOverflowCheck`
- static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode,
- bool IsSigned) {
- EVT VT0 = Op.getValue(0).getValueType();
- EVT VT1 = Op.getValue(1).getValueType();
- if (VT0 != MVT::i32 && VT0 != MVT::i64)
- return SDValue();
- bool InvertCarry = Opcode == AArch64ISD::SBCS;
- SDValue OpLHS = Op.getOperand(0);
- SDValue OpRHS = Op.getOperand(1);
- SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG, InvertCarry);
- SDLoc DL(Op);
- SDVTList VTs = DAG.getVTList(VT0, VT1);
- SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
- OpRHS, OpCarryIn);
- SDValue OutFlag =
- IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
- : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry);
- return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
- }
- static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
- // Let legalize expand this if it isn't a legal type yet.
- if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
- return SDValue();
- SDLoc dl(Op);
- AArch64CC::CondCode CC;
- // The actual operation that sets the overflow or carry flag.
- SDValue Value, Overflow;
- std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
- // We use 0 and 1 as false and true values.
- SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
- SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
- // We use an inverted condition, because the conditional select is inverted
- // too. This will allow it to be selected to a single instruction:
- // CSINC Wd, WZR, WZR, invert(cond).
- SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
- Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
- CCVal, Overflow);
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
- }
- // Prefetch operands are:
- // 1: Address to prefetch
- // 2: bool isWrite
- // 3: int locality (0 = no locality ... 3 = extreme locality)
- // 4: bool isDataCache
- static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
- SDLoc DL(Op);
- unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
- unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
- unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
- bool IsStream = !Locality;
- // When the locality number is set
- if (Locality) {
- // The front-end should have filtered out the out-of-range values
- assert(Locality <= 3 && "Prefetch locality out-of-range");
- // The locality degree is the opposite of the cache speed.
- // Put the number the other way around.
- // The encoding starts at 0 for level 1
- Locality = 3 - Locality;
- }
- // Build the mask value encoding the expected behavior.
- unsigned PrfOp = (IsWrite << 4) | // Load/Store bit
- (!IsData << 3) | // IsDataCache bit
- (Locality << 1) | // Cache level bits
- (unsigned)IsStream; // Stream bit
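- // For example, a read of the data cache with locality 3 encodes as PrfOp == 0
- // (PLDL1KEEP), while locality 0 sets the stream bit instead (PLDL1STRM).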
- return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
- DAG.getTargetConstant(PrfOp, DL, MVT::i32),
- Op.getOperand(1));
- }
- SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- if (VT.isScalableVector())
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU);
- if (useSVEForFixedLengthVectorVT(VT))
- return LowerFixedLengthFPExtendToSVE(Op, DAG);
- assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
- SelectionDAG &DAG) const {
- if (Op.getValueType().isScalableVector())
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
- bool IsStrict = Op->isStrictFPOpcode();
- SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
- EVT SrcVT = SrcVal.getValueType();
- if (useSVEForFixedLengthVectorVT(SrcVT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthFPRoundToSVE(Op, DAG);
- if (SrcVT != MVT::f128) {
- // Expand cases where the input is a vector bigger than NEON.
- if (useSVEForFixedLengthVectorVT(SrcVT))
- return SDValue();
- // It's legal except when f128 is involved
- return Op;
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
- SelectionDAG &DAG) const {
- // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
- // Any additional optimization in this function should be recorded
- // in the cost tables.
- bool IsStrict = Op->isStrictFPOpcode();
- EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType();
- EVT VT = Op.getValueType();
- if (VT.isScalableVector()) {
- unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT
- ? AArch64ISD::FCVTZU_MERGE_PASSTHRU
- : AArch64ISD::FCVTZS_MERGE_PASSTHRU;
- return LowerToPredicatedOp(Op, DAG, Opcode);
- }
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()) ||
- useSVEForFixedLengthVectorVT(InVT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthFPToIntToSVE(Op, DAG);
- unsigned NumElts = InVT.getVectorNumElements();
- // f16 conversions are promoted to f32 when full fp16 is not supported.
- if (InVT.getVectorElementType() == MVT::f16 &&
- !Subtarget->hasFullFP16()) {
- MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts);
- SDLoc dl(Op);
- if (IsStrict) {
- SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NewVT, MVT::Other},
- {Op.getOperand(0), Op.getOperand(1)});
- return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
- {Ext.getValue(1), Ext.getValue(0)});
- }
- return DAG.getNode(
- Op.getOpcode(), dl, Op.getValueType(),
- DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0)));
- }
- uint64_t VTSize = VT.getFixedSizeInBits();
- uint64_t InVTSize = InVT.getFixedSizeInBits();
- if (VTSize < InVTSize) {
- SDLoc dl(Op);
- if (IsStrict) {
- InVT = InVT.changeVectorElementTypeToInteger();
- SDValue Cv = DAG.getNode(Op.getOpcode(), dl, {InVT, MVT::Other},
- {Op.getOperand(0), Op.getOperand(1)});
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
- return DAG.getMergeValues({Trunc, Cv.getValue(1)}, dl);
- }
- SDValue Cv =
- DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
- Op.getOperand(0));
- return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
- }
- if (VTSize > InVTSize) {
- SDLoc dl(Op);
- MVT ExtVT =
- MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
- VT.getVectorNumElements());
- if (IsStrict) {
- SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {ExtVT, MVT::Other},
- {Op.getOperand(0), Op.getOperand(1)});
- return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
- {Ext.getValue(1), Ext.getValue(0)});
- }
- SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0));
- return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
- }
- // Use a scalar operation for conversions between single-element vectors of
- // the same size.
- if (NumElts == 1) {
- SDLoc dl(Op);
- SDValue Extract = DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
- Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64));
- EVT ScalarVT = VT.getScalarType();
- if (IsStrict)
- return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
- {Op.getOperand(0), Extract});
- return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
- }
- // Type changing conversions are illegal.
- return Op;
- }
- SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
- SelectionDAG &DAG) const {
- bool IsStrict = Op->isStrictFPOpcode();
- SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
- if (SrcVal.getValueType().isVector())
- return LowerVectorFP_TO_INT(Op, DAG);
- // f16 conversions are promoted to f32 when full fp16 is not supported.
- if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
- SDLoc dl(Op);
- if (IsStrict) {
- SDValue Ext =
- DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
- {Op.getOperand(0), SrcVal});
- return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other},
- {Ext.getValue(1), Ext.getValue(0)});
- }
- return DAG.getNode(
- Op.getOpcode(), dl, Op.getValueType(),
- DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal));
- }
- if (SrcVal.getValueType() != MVT::f128) {
- // It's legal except when f128 is involved
- return Op;
- }
- return SDValue();
- }
- SDValue
- AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
- SelectionDAG &DAG) const {
- // AArch64 FP-to-int conversions saturate to the destination element size, so
- // we can lower common saturating conversions to simple instructions.
- SDValue SrcVal = Op.getOperand(0);
- EVT SrcVT = SrcVal.getValueType();
- EVT DstVT = Op.getValueType();
- EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- uint64_t SrcElementWidth = SrcVT.getScalarSizeInBits();
- uint64_t DstElementWidth = DstVT.getScalarSizeInBits();
- uint64_t SatWidth = SatVT.getScalarSizeInBits();
- assert(SatWidth <= DstElementWidth &&
- "Saturation width cannot exceed result width");
- // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT.
- // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable
- // types, so this is hard to reach.
- if (DstVT.isScalableVector())
- return SDValue();
- EVT SrcElementVT = SrcVT.getVectorElementType();
- // In the absence of FP16 support, promote f16 to f32 and saturate the result.
- if (SrcElementVT == MVT::f16 &&
- (!Subtarget->hasFullFP16() || DstElementWidth > 16)) {
- MVT F32VT = MVT::getVectorVT(MVT::f32, SrcVT.getVectorNumElements());
- SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), F32VT, SrcVal);
- SrcVT = F32VT;
- SrcElementVT = MVT::f32;
- SrcElementWidth = 32;
- } else if (SrcElementVT != MVT::f64 && SrcElementVT != MVT::f32 &&
- SrcElementVT != MVT::f16)
- return SDValue();
- SDLoc DL(Op);
- // Cases that we can emit directly.
- if (SrcElementWidth == DstElementWidth && SrcElementWidth == SatWidth)
- return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
- DAG.getValueType(DstVT.getScalarType()));
- // Otherwise we emit a cvt that saturates to a higher BW, and saturate the
- // result. This is only valid if the legal cvt is larger than the saturate
- // width. For double, as we don't have MIN/MAX, it can be simpler to scalarize
- // (at least until sqxtn is selected).
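- // For example, a v4f32 -> v4i16 fptosi.sat is emitted as a saturating convert
- // to v4i32, an smin/smax clamp to the i16 range, and a truncate to v4i16.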
- if (SrcElementWidth < SatWidth || SrcElementVT == MVT::f64)
- return SDValue();
- EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
- SDValue NativeCvt = DAG.getNode(Op.getOpcode(), DL, IntVT, SrcVal,
- DAG.getValueType(IntVT.getScalarType()));
- SDValue Sat;
- if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
- SDValue MinC = DAG.getConstant(
- APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
- SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
- SDValue MaxC = DAG.getConstant(
- APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
- Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
- } else {
- SDValue MinC = DAG.getConstant(
- APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
- Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
- }
- return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
- }
- SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
- SelectionDAG &DAG) const {
- // AArch64 FP-to-int conversions saturate to the destination register size, so
- // we can lower common saturating conversions to simple instructions.
- SDValue SrcVal = Op.getOperand(0);
- EVT SrcVT = SrcVal.getValueType();
- if (SrcVT.isVector())
- return LowerVectorFP_TO_INT_SAT(Op, DAG);
- EVT DstVT = Op.getValueType();
- EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- uint64_t SatWidth = SatVT.getScalarSizeInBits();
- uint64_t DstWidth = DstVT.getScalarSizeInBits();
- assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width");
- // In the absence of FP16 support, promote f16 to f32 and saturate the result.
- if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) {
- SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal);
- SrcVT = MVT::f32;
- } else if (SrcVT != MVT::f64 && SrcVT != MVT::f32 && SrcVT != MVT::f16)
- return SDValue();
- SDLoc DL(Op);
- // Cases that we can emit directly.
- if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 ||
- (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) &&
- DstVT == SatVT && (DstVT == MVT::i64 || DstVT == MVT::i32))
- return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
- DAG.getValueType(DstVT));
- // Otherwise we emit a cvt that saturates to a higher BW, and saturate the
- // result. This is only valid if the legal cvt is larger than the saturate
- // width.
- if (DstWidth < SatWidth)
- return SDValue();
- SDValue NativeCvt =
- DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, DAG.getValueType(DstVT));
- SDValue Sat;
- if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
- SDValue MinC = DAG.getConstant(
- APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
- SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
- SDValue MaxC = DAG.getConstant(
- APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
- Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
- } else {
- SDValue MinC = DAG.getConstant(
- APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
- Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
- }
- return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
- }
- SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
- SelectionDAG &DAG) const {
- // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
- // Any additional optimization in this function should be recorded
- // in the cost tables.
- bool IsStrict = Op->isStrictFPOpcode();
- EVT VT = Op.getValueType();
- SDLoc dl(Op);
- SDValue In = Op.getOperand(IsStrict ? 1 : 0);
- EVT InVT = In.getValueType();
- unsigned Opc = Op.getOpcode();
- bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
- if (VT.isScalableVector()) {
- if (InVT.getVectorElementType() == MVT::i1) {
- // We can't directly extend an SVE predicate; extend it first.
- unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- EVT CastVT = getPromotedVTForPredicate(InVT);
- In = DAG.getNode(CastOpc, dl, CastVT, In);
- return DAG.getNode(Opc, dl, VT, In);
- }
- unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
- : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
- return LowerToPredicatedOp(Op, DAG, Opcode);
- }
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()) ||
- useSVEForFixedLengthVectorVT(InVT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthIntToFPToSVE(Op, DAG);
- uint64_t VTSize = VT.getFixedSizeInBits();
- uint64_t InVTSize = InVT.getFixedSizeInBits();
- if (VTSize < InVTSize) {
- MVT CastVT =
- MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
- InVT.getVectorNumElements());
- if (IsStrict) {
- In = DAG.getNode(Opc, dl, {CastVT, MVT::Other},
- {Op.getOperand(0), In});
- return DAG.getNode(
- ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
- {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)});
- }
- In = DAG.getNode(Opc, dl, CastVT, In);
- return DAG.getNode(ISD::FP_ROUND, dl, VT, In,
- DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
- }
- if (VTSize > InVTSize) {
- unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- EVT CastVT = VT.changeVectorElementTypeToInteger();
- In = DAG.getNode(CastOpc, dl, CastVT, In);
- if (IsStrict)
- return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In});
- return DAG.getNode(Opc, dl, VT, In);
- }
- // Use a scalar operation for conversions between single-element vectors of
- // the same size.
- if (VT.getVectorNumElements() == 1) {
- SDValue Extract = DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
- In, DAG.getConstant(0, dl, MVT::i64));
- EVT ScalarVT = VT.getScalarType();
- if (IsStrict)
- return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
- {Op.getOperand(0), Extract});
- return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
- }
- return Op;
- }
- SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
- SelectionDAG &DAG) const {
- if (Op.getValueType().isVector())
- return LowerVectorINT_TO_FP(Op, DAG);
- bool IsStrict = Op->isStrictFPOpcode();
- SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
- // f16 conversions are promoted to f32 when full fp16 is not supported.
- if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
- SDLoc dl(Op);
- if (IsStrict) {
- SDValue Val = DAG.getNode(Op.getOpcode(), dl, {MVT::f32, MVT::Other},
- {Op.getOperand(0), SrcVal});
- return DAG.getNode(
- ISD::STRICT_FP_ROUND, dl, {MVT::f16, MVT::Other},
- {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)});
- }
- return DAG.getNode(
- ISD::FP_ROUND, dl, MVT::f16,
- DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal),
- DAG.getIntPtrConstant(0, dl));
- }
- // i128 conversions are libcalls.
- if (SrcVal.getValueType() == MVT::i128)
- return SDValue();
- // Other conversions are legal, unless it's to the completely software-based
- // fp128.
- if (Op.getValueType() != MVT::f128)
- return Op;
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
- SelectionDAG &DAG) const {
- // For iOS, we want to call an alternative entry point: __sincos_stret,
- // which returns the values in two S / D registers.
- SDLoc dl(Op);
- SDValue Arg = Op.getOperand(0);
- EVT ArgVT = Arg.getValueType();
- Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
- ArgListTy Args;
- ArgListEntry Entry;
- Entry.Node = Arg;
- Entry.Ty = ArgTy;
- Entry.IsSExt = false;
- Entry.IsZExt = false;
- Args.push_back(Entry);
- RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64
- : RTLIB::SINCOS_STRET_F32;
- const char *LibcallName = getLibcallName(LC);
- SDValue Callee =
- DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
- StructType *RetTy = StructType::get(ArgTy, ArgTy);
- TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl)
- .setChain(DAG.getEntryNode())
- .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
- return CallResult.first;
- }
- static MVT getSVEContainerType(EVT ContentTy);
- SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
- SelectionDAG &DAG) const {
- EVT OpVT = Op.getValueType();
- EVT ArgVT = Op.getOperand(0).getValueType();
- if (useSVEForFixedLengthVectorVT(OpVT))
- return LowerFixedLengthBitcastToSVE(Op, DAG);
- if (OpVT.isScalableVector()) {
- // Bitcasting between unpacked vector types of different element counts is
- // not a NOP because the live elements are laid out differently.
- // 01234567
- // e.g. nxv2i32 = XX??XX??
- // nxv4f16 = X?X?X?X?
- if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
- return SDValue();
- if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
- assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
- "Expected int->fp bitcast!");
- SDValue ExtResult =
- DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
- Op.getOperand(0));
- return getSVESafeBitCast(OpVT, ExtResult, DAG);
- }
- return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
- }
- if (OpVT != MVT::f16 && OpVT != MVT::bf16)
- return SDValue();
- // Bitcasts between f16 and bf16 are legal.
- if (ArgVT == MVT::f16 || ArgVT == MVT::bf16)
- return Op;
- assert(ArgVT == MVT::i16);
- SDLoc DL(Op);
- Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
- Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
- return SDValue(
- DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op,
- DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
- 0);
- }
- static EVT getExtensionTo64Bits(const EVT &OrigVT) {
- if (OrigVT.getSizeInBits() >= 64)
- return OrigVT;
- assert(OrigVT.isSimple() && "Expecting a simple value type");
- MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
- switch (OrigSimpleTy) {
- default: llvm_unreachable("Unexpected Vector Type");
- case MVT::v2i8:
- case MVT::v2i16:
- return MVT::v2i32;
- case MVT::v4i8:
- return MVT::v4i16;
- }
- }
- static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
- const EVT &OrigTy,
- const EVT &ExtTy,
- unsigned ExtOpcode) {
- // The vector originally had a size of OrigTy. It was then extended to ExtTy.
- // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
- // 64-bits we need to insert a new extension so that it will be 64-bits.
- assert(ExtTy.is128BitVector() && "Unexpected extension size");
- if (OrigTy.getSizeInBits() >= 64)
- return N;
- // Must extend size to at least 64 bits to be used as an operand for VMULL.
- EVT NewVT = getExtensionTo64Bits(OrigTy);
- return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
- }
- // Returns lane if Op extracts from a two-element vector and lane is constant
- // (i.e., extractelt(<2 x Ty> %v, ConstantLane)), and std::nullopt otherwise.
- static std::optional<uint64_t>
- getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
- SDNode *OpNode = Op.getNode();
- if (OpNode->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return std::nullopt;
- EVT VT = OpNode->getOperand(0).getValueType();
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpNode->getOperand(1));
- if (!VT.isFixedLengthVector() || VT.getVectorNumElements() != 2 || !C)
- return std::nullopt;
- return C->getZExtValue();
- }
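- // Returns true if N is a BUILD_VECTOR whose elements are all constants small
- // enough to fit in half of the vector's element width (signed or unsigned, as
- // requested).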
- static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
- bool isSigned) {
- EVT VT = N->getValueType(0);
- if (N->getOpcode() != ISD::BUILD_VECTOR)
- return false;
- for (const SDValue &Elt : N->op_values()) {
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
- unsigned EltSize = VT.getScalarSizeInBits();
- unsigned HalfSize = EltSize / 2;
- if (isSigned) {
- if (!isIntN(HalfSize, C->getSExtValue()))
- return false;
- } else {
- if (!isUIntN(HalfSize, C->getZExtValue()))
- return false;
- }
- continue;
- }
- return false;
- }
- return true;
- }
- static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
- if (N->getOpcode() == ISD::SIGN_EXTEND ||
- N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
- return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
- N->getOperand(0)->getValueType(0),
- N->getValueType(0),
- N->getOpcode());
- assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
- EVT VT = N->getValueType(0);
- SDLoc dl(N);
- unsigned EltSize = VT.getScalarSizeInBits() / 2;
- unsigned NumElts = VT.getVectorNumElements();
- MVT TruncVT = MVT::getIntegerVT(EltSize);
- SmallVector<SDValue, 8> Ops;
- for (unsigned i = 0; i != NumElts; ++i) {
- ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
- const APInt &CInt = C->getAPIntValue();
- // Element types smaller than 32 bits are not legal, so use i32 elements.
- // The values are implicitly truncated so sext vs. zext doesn't matter.
- Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
- }
- return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
- }
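- // A node counts as sign-extended (resp. zero-extended) if it is a SIGN_EXTEND
- // (resp. ZERO_EXTEND), an ANY_EXTEND, or a BUILD_VECTOR of constants narrow
- // enough to be treated as such.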
- static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
- return N->getOpcode() == ISD::SIGN_EXTEND ||
- N->getOpcode() == ISD::ANY_EXTEND ||
- isExtendedBUILD_VECTOR(N, DAG, true);
- }
- static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
- return N->getOpcode() == ISD::ZERO_EXTEND ||
- N->getOpcode() == ISD::ANY_EXTEND ||
- isExtendedBUILD_VECTOR(N, DAG, false);
- }
- static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
- unsigned Opcode = N->getOpcode();
- if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
- SDNode *N0 = N->getOperand(0).getNode();
- SDNode *N1 = N->getOperand(1).getNode();
- return N0->hasOneUse() && N1->hasOneUse() &&
- isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
- }
- return false;
- }
- static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
- unsigned Opcode = N->getOpcode();
- if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
- SDNode *N0 = N->getOperand(0).getNode();
- SDNode *N1 = N->getOperand(1).getNode();
- return N0->hasOneUse() && N1->hasOneUse() &&
- isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
- }
- return false;
- }
- SDValue AArch64TargetLowering::LowerGET_ROUNDING(SDValue Op,
- SelectionDAG &DAG) const {
- // The rounding mode is in bits 23:22 of the FPCR.
- // The FPCR rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
- // The formula we use to implement this is (((FPCR + (1 << 22)) >> 22) & 3),
- // so that the shift and the AND get folded into a bitfield extract.
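- // For example, FPCR[23:22] == 0b00 (round to nearest) yields ((0 + 1) & 3) == 1,
- // which is the FLT_ROUNDS encoding for "to nearest".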
- SDLoc dl(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue FPCR_64 = DAG.getNode(
- ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
- {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
- Chain = FPCR_64.getValue(1);
- SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
- SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
- DAG.getConstant(1U << 22, dl, MVT::i32));
- SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
- DAG.getConstant(22, dl, MVT::i32));
- SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
- DAG.getConstant(3, dl, MVT::i32));
- return DAG.getMergeValues({AND, Chain}, dl);
- }
- SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- SDValue Chain = Op->getOperand(0);
- SDValue RMValue = Op->getOperand(1);
- // The rounding mode is in bits 23:22 of the FPCR.
- // The llvm.set.rounding argument value to the rounding mode in FPCR mapping
- // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
- // (((arg - 1) & 3) << 22).
- //
- // The argument of llvm.set.rounding must be within the segment [0, 3], so
- // NearestTiesToAway (4) is not handled here. It is the responsibility of the
- // code that generated llvm.set.rounding to ensure this condition.
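- // For example, llvm.set.rounding(1) ("to nearest") computes ((1 - 1) & 3) == 0,
- // which is written into FPCR[23:22] as round-to-nearest.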
- // Calculate new value of FPCR[23:22].
- RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
- DAG.getConstant(1, DL, MVT::i32));
- RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
- DAG.getConstant(0x3, DL, MVT::i32));
- RMValue =
- DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
- DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
- RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);
- // Get current value of FPCR.
- SDValue Ops[] = {
- Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
- SDValue FPCR =
- DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
- Chain = FPCR.getValue(1);
- FPCR = FPCR.getValue(0);
- // Put the new rounding mode into FPCR[23:22].
- const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
- FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
- DAG.getConstant(RMMask, DL, MVT::i64));
- FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
- SDValue Ops2[] = {
- Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
- FPCR};
- return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
- }
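- // Pick AArch64ISD::SMULL or AArch64ISD::UMULL for a widening vector multiply
- // of N0 and N1, rewriting an operand when an extend can be substituted, or
- // return 0 if no widening multiply applies. IsMLA is set when N0 is an
- // add/sub of extends, so the multiply should be distributed over it.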
- static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
- SDLoc DL, bool &IsMLA) {
- bool IsN0SExt = isSignExtended(N0, DAG);
- bool IsN1SExt = isSignExtended(N1, DAG);
- if (IsN0SExt && IsN1SExt)
- return AArch64ISD::SMULL;
- bool IsN0ZExt = isZeroExtended(N0, DAG);
- bool IsN1ZExt = isZeroExtended(N1, DAG);
- if (IsN0ZExt && IsN1ZExt)
- return AArch64ISD::UMULL;
- // Select SMULL if we can replace zext with sext.
- if (((IsN0SExt && IsN1ZExt) || (IsN0ZExt && IsN1SExt)) &&
- !isExtendedBUILD_VECTOR(N0, DAG, false) &&
- !isExtendedBUILD_VECTOR(N1, DAG, false)) {
- SDValue ZextOperand;
- if (IsN0ZExt)
- ZextOperand = N0->getOperand(0);
- else
- ZextOperand = N1->getOperand(0);
- if (DAG.SignBitIsZero(ZextOperand)) {
- SDNode *NewSext =
- DAG.getSExtOrTrunc(ZextOperand, DL, N0->getValueType(0)).getNode();
- if (IsN0ZExt)
- N0 = NewSext;
- else
- N1 = NewSext;
- return AArch64ISD::SMULL;
- }
- }
- // Select UMULL if we can replace the other operand with an extend.
- if (IsN0ZExt || IsN1ZExt) {
- EVT VT = N0->getValueType(0);
- APInt Mask = APInt::getHighBitsSet(VT.getScalarSizeInBits(),
- VT.getScalarSizeInBits() / 2);
- if (DAG.MaskedValueIsZero(SDValue(IsN0ZExt ? N1 : N0, 0), Mask)) {
- EVT HalfVT;
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::v2i64:
- HalfVT = MVT::v2i32;
- break;
- case MVT::v4i32:
- HalfVT = MVT::v4i16;
- break;
- case MVT::v8i16:
- HalfVT = MVT::v8i8;
- break;
- default:
- return 0;
- }
- // Truncate and then extend the result.
- SDValue NewExt = DAG.getNode(ISD::TRUNCATE, DL, HalfVT,
- SDValue(IsN0ZExt ? N1 : N0, 0));
- NewExt = DAG.getZExtOrTrunc(NewExt, DL, VT);
- if (IsN0ZExt)
- N1 = NewExt.getNode();
- else
- N0 = NewExt.getNode();
- return AArch64ISD::UMULL;
- }
- }
- if (!IsN1SExt && !IsN1ZExt)
- return 0;
- // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
- // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
- if (IsN1SExt && isAddSubSExt(N0, DAG)) {
- IsMLA = true;
- return AArch64ISD::SMULL;
- }
- if (IsN1ZExt && isAddSubZExt(N0, DAG)) {
- IsMLA = true;
- return AArch64ISD::UMULL;
- }
- if (IsN0ZExt && isAddSubZExt(N1, DAG)) {
- std::swap(N0, N1);
- IsMLA = true;
- return AArch64ISD::UMULL;
- }
- return 0;
- }
- SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- // If SVE is available then i64 vector multiplications can also be made legal.
- bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64 ||
- Subtarget->forceStreamingCompatibleSVE();
- if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);
- // Multiplications are only custom-lowered for 128-bit vectors so that
- // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
- assert(VT.is128BitVector() && VT.isInteger() &&
- "unexpected type for custom-lowering ISD::MUL");
- SDNode *N0 = Op.getOperand(0).getNode();
- SDNode *N1 = Op.getOperand(1).getNode();
- bool isMLA = false;
- SDLoc DL(Op);
- unsigned NewOpc = selectUmullSmull(N0, N1, DAG, DL, isMLA);
- if (!NewOpc) {
- if (VT == MVT::v2i64)
- // Fall through to expand this. It is not legal.
- return SDValue();
- else
- // Other vector multiplications are legal.
- return Op;
- }
- // Legalize to a S/UMULL instruction
- SDValue Op0;
- SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
- if (!isMLA) {
- Op0 = skipExtensionForVectorMULL(N0, DAG);
- assert(Op0.getValueType().is64BitVector() &&
- Op1.getValueType().is64BitVector() &&
- "unexpected types for extended operands to VMULL");
- return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
- }
- // Optimize (zext A + zext B) * C to (S/UMULL A, C) + (S/UMULL B, C) during
- // isel lowering to take advantage of no-stall back-to-back s/umul + s/umla.
- // This holds for CPUs with accumulate forwarding such as Cortex-A53/A57.
- SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
- SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
- EVT Op1VT = Op1.getValueType();
- return DAG.getNode(N0->getOpcode(), DL, VT,
- DAG.getNode(NewOpc, DL, VT,
- DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
- DAG.getNode(NewOpc, DL, VT,
- DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
- }
- static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
- int Pattern) {
- if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
- return DAG.getConstant(1, DL, MVT::nxv1i1);
- return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
- DAG.getTargetConstant(Pattern, DL, MVT::i32));
- }
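- // Attempt to fold an SVE WHILE intrinsic with constant operands into a PTRUE
- // with a fixed predicate pattern. When both bounds are constants the number
- // of active lanes is known at compile time; if that count matches a predicate
- // pattern and is guaranteed to fit within the minimum SVE vector length, the
- // WHILE can be replaced by an equivalent PTRUE.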
- static SDValue optimizeWhile(SDValue Op, SelectionDAG &DAG, bool IsSigned,
- bool IsLess, bool IsEqual) {
- if (!isa<ConstantSDNode>(Op.getOperand(1)) ||
- !isa<ConstantSDNode>(Op.getOperand(2)))
- return SDValue();
- SDLoc dl(Op);
- APInt X = Op.getConstantOperandAPInt(1);
- APInt Y = Op.getConstantOperandAPInt(2);
- APInt NumActiveElems;
- bool Overflow;
- if (IsLess)
- NumActiveElems = IsSigned ? Y.ssub_ov(X, Overflow) : Y.usub_ov(X, Overflow);
- else
- NumActiveElems = IsSigned ? X.ssub_ov(Y, Overflow) : X.usub_ov(Y, Overflow);
- if (Overflow)
- return SDValue();
- if (IsEqual) {
- APInt One(NumActiveElems.getBitWidth(), 1, IsSigned);
- NumActiveElems = IsSigned ? NumActiveElems.sadd_ov(One, Overflow)
- : NumActiveElems.uadd_ov(One, Overflow);
- if (Overflow)
- return SDValue();
- }
- std::optional<unsigned> PredPattern =
- getSVEPredPatternFromNumElements(NumActiveElems.getZExtValue());
- unsigned MinSVEVectorSize = std::max(
- DAG.getSubtarget<AArch64Subtarget>().getMinSVEVectorSizeInBits(), 128u);
- unsigned ElementSize = 128 / Op.getValueType().getVectorMinNumElements();
- if (PredPattern != std::nullopt &&
- NumActiveElems.getZExtValue() <= (MinSVEVectorSize / ElementSize))
- return getPTrue(DAG, dl, Op.getValueType(), *PredPattern);
- return SDValue();
- }
- // Returns a safe bitcast between two scalable vector predicates, where
- // any newly created lanes from a widening bitcast are defined as zero.
- static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {
- SDLoc DL(Op);
- EVT InVT = Op.getValueType();
- assert(InVT.getVectorElementType() == MVT::i1 &&
- VT.getVectorElementType() == MVT::i1 &&
- "Expected a predicate-to-predicate bitcast");
- assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
- InVT.isScalableVector() &&
- DAG.getTargetLoweringInfo().isTypeLegal(InVT) &&
- "Only expect to cast between legal scalable predicate types!");
- // Return the operand if the cast isn't changing type,
- // e.g. <n x 16 x i1> -> <n x 16 x i1>
- if (InVT == VT)
- return Op;
- SDValue Reinterpret = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
- // We only have to zero the lanes if new lanes are being defined, e.g. when
- // casting from <vscale x 2 x i1> to <vscale x 16 x i1>. If this is not the
- // case (e.g. when casting from <vscale x 16 x i1> -> <vscale x 2 x i1>) then
- // we can return here.
- if (InVT.bitsGT(VT))
- return Reinterpret;
- // Check if the other lanes are already known to be zeroed by
- // construction.
- if (isZeroingInactiveLanes(Op))
- return Reinterpret;
- // Zero the newly introduced lanes.
- SDValue Mask = DAG.getConstant(1, DL, InVT);
- Mask = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Mask);
- return DAG.getNode(ISD::AND, DL, VT, Reinterpret, Mask);
- }
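- // Materialise the current value of PSTATE.SM. Functions with a streaming
- // interface or body are known to run with PSTATE.SM set, and functions with a
- // non-streaming interface and body are known to run with it clear, so a
- // constant suffices. Streaming-compatible functions must query
- // __arm_sme_state at run time and mask out bit 0 (PSTATE.SM) of the result.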
- SDValue AArch64TargetLowering::getPStateSM(SelectionDAG &DAG, SDValue Chain,
- SMEAttrs Attrs, SDLoc DL,
- EVT VT) const {
- if (Attrs.hasStreamingInterfaceOrBody())
- return DAG.getConstant(1, DL, VT);
- if (Attrs.hasNonStreamingInterfaceAndBody())
- return DAG.getConstant(0, DL, VT);
- assert(Attrs.hasStreamingCompatibleInterface() && "Unexpected interface");
- SDValue Callee = DAG.getExternalSymbol("__arm_sme_state",
- getPointerTy(DAG.getDataLayout()));
- Type *Int64Ty = Type::getInt64Ty(*DAG.getContext());
- Type *RetTy = StructType::get(Int64Ty, Int64Ty);
- TargetLowering::CallLoweringInfo CLI(DAG);
- ArgListTy Args;
- CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
- CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2,
- RetTy, Callee, std::move(Args));
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
- SDValue Mask = DAG.getConstant(/*PSTATE.SM*/ 1, DL, MVT::i64);
- return DAG.getNode(ISD::AND, DL, MVT::i64, CallResult.first.getOperand(0),
- Mask);
- }
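- // Infer SME attributes for the SME ABI support routines, which are referenced
- // as bare external symbols and so carry no declaration to inspect.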
- static std::optional<SMEAttrs> getCalleeAttrsFromExternalFunction(SDValue V) {
- if (auto *ES = dyn_cast<ExternalSymbolSDNode>(V)) {
- StringRef S(ES->getSymbol());
- if (S == "__arm_sme_state" || S == "__arm_tpidr2_save")
- return SMEAttrs(SMEAttrs::SM_Compatible | SMEAttrs::ZA_Preserved);
- if (S == "__arm_tpidr2_restore")
- return SMEAttrs(SMEAttrs::SM_Compatible | SMEAttrs::ZA_Shared);
- }
- return std::nullopt;
- }
- SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op,
- SelectionDAG &DAG) const {
- unsigned IntNo = Op.getConstantOperandVal(1);
- SDLoc DL(Op);
- switch (IntNo) {
- default:
- return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::aarch64_prefetch: {
- SDValue Chain = Op.getOperand(0);
- SDValue Addr = Op.getOperand(2);
- unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
- unsigned Locality = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
- unsigned IsStream = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
- unsigned IsData = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
- unsigned PrfOp = (IsWrite << 4) | // Load/Store bit
- (!IsData << 3) | // IsDataCache bit
- (Locality << 1) | // Cache level bits
- (unsigned)IsStream; // Stream bit
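- // For example, IsWrite=1, IsData=1, Locality=0, IsStream=0 encodes to
- // 0b10000, which corresponds to a PSTL1KEEP prefetch.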
- return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Chain,
- DAG.getTargetConstant(PrfOp, DL, MVT::i32), Addr);
- }
- case Intrinsic::aarch64_sme_za_enable:
- return DAG.getNode(
- AArch64ISD::SMSTART, DL, MVT::Other,
- Op->getOperand(0), // Chain
- DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64));
- case Intrinsic::aarch64_sme_za_disable:
- return DAG.getNode(
- AArch64ISD::SMSTOP, DL, MVT::Other,
- Op->getOperand(0), // Chain
- DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64));
- }
- }
- SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
- SelectionDAG &DAG) const {
- unsigned IntNo = Op.getConstantOperandVal(1);
- SDLoc DL(Op);
- switch (IntNo) {
- default:
- return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::aarch64_mops_memset_tag: {
- auto Node = cast<MemIntrinsicSDNode>(Op.getNode());
- SDValue Chain = Node->getChain();
- SDValue Dst = Op.getOperand(2);
- SDValue Val = Op.getOperand(3);
- Val = DAG.getAnyExtOrTrunc(Val, DL, MVT::i64);
- SDValue Size = Op.getOperand(4);
- auto Alignment = Node->getMemOperand()->getAlign();
- bool IsVol = Node->isVolatile();
- auto DstPtrInfo = Node->getPointerInfo();
- const auto &SDI =
- static_cast<const AArch64SelectionDAGInfo &>(DAG.getSelectionDAGInfo());
- SDValue MS =
- SDI.EmitMOPS(AArch64ISD::MOPS_MEMSET_TAGGING, DAG, DL, Chain, Dst, Val,
- Size, Alignment, IsVol, DstPtrInfo, MachinePointerInfo{});
- // MOPS_MEMSET_TAGGING has 3 results (DstWb, SizeWb, Chain) whereas the
- // intrinsic has 2. So hide SizeWb using MERGE_VALUES. Otherwise
- // LowerOperationWrapper will complain that the number of results has
- // changed.
- return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL);
- }
- }
- }
- SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
- SelectionDAG &DAG) const {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- SDLoc dl(Op);
- switch (IntNo) {
- default: return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::thread_pointer: {
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
- }
- case Intrinsic::aarch64_neon_abs: {
- EVT Ty = Op.getValueType();
- if (Ty == MVT::i64) {
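- // For a scalar i64 operand, route the abs through v1i64 so it is computed
- // with the NEON ABS instruction on a D register, then bitcast back to i64.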
- SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
- Op.getOperand(1));
- Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
- return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
- } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
- return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
- } else {
- report_fatal_error("Unexpected type for AArch64 NEON intrinic");
- }
- }
- case Intrinsic::aarch64_neon_pmull64: {
- SDValue LHS = Op.getOperand(1);
- SDValue RHS = Op.getOperand(2);
- std::optional<uint64_t> LHSLane =
- getConstantLaneNumOfExtractHalfOperand(LHS);
- std::optional<uint64_t> RHSLane =
- getConstantLaneNumOfExtractHalfOperand(RHS);
- assert((!LHSLane || *LHSLane < 2) && "Expect lane to be None or 0 or 1");
- assert((!RHSLane || *RHSLane < 2) && "Expect lane to be None or 0 or 1");
- // 'aarch64_neon_pmull64' takes i64 parameters, while the pmull/pmull2
- // instructions execute on SIMD registers, so canonicalize i64 to v1i64,
- // which ISel recognizes better. For example, this generates a ldr into d*
- // registers rather than a GPR load followed by a fmov.
- auto TryVectorizeOperand = [](SDValue N, std::optional<uint64_t> NLane,
- std::optional<uint64_t> OtherLane,
- const SDLoc &dl,
- SelectionDAG &DAG) -> SDValue {
- // If the operand is a higher half itself, rewrite it to
- // extract_high_v2i64; this way aarch64_neon_pmull64 can
- // reuse the dag-combiner function with aarch64_neon_{pmull,smull,umull}.
- if (NLane && *NLane == 1)
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i64,
- N.getOperand(0), DAG.getConstant(1, dl, MVT::i64));
- // Operand N is not a higher half but the other operand is.
- if (OtherLane && *OtherLane == 1) {
- // If this operand is a lower half, rewrite it to
- // extract_high_v2i64(duplane(<2 x Ty>, 0)). This saves a round trip to
- // align the lanes of the two operands. The round-trip sequence (moving
- // from lane 1 to lane 0) looks like this:
- // mov x8, v0.d[1]
- // fmov d0, x8
- if (NLane && *NLane == 0)
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i64,
- DAG.getNode(AArch64ISD::DUPLANE64, dl, MVT::v2i64,
- N.getOperand(0),
- DAG.getConstant(0, dl, MVT::i64)),
- DAG.getConstant(1, dl, MVT::i64));
- // Otherwise just duplicate the scalar operand to all lanes.
- return DAG.getNode(AArch64ISD::DUP, dl, MVT::v1i64, N);
- }
- // Neither operand is an extract of the higher half, so codegen may just
- // use the non-high form of the PMULL instruction. Use v1i64 to represent
- // i64.
- assert(N.getValueType() == MVT::i64 &&
- "Intrinsic aarch64_neon_pmull64 requires i64 parameters");
- return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, N);
- };
- LHS = TryVectorizeOperand(LHS, LHSLane, RHSLane, dl, DAG);
- RHS = TryVectorizeOperand(RHS, RHSLane, LHSLane, dl, DAG);
- return DAG.getNode(AArch64ISD::PMULL, dl, Op.getValueType(), LHS, RHS);
- }
- case Intrinsic::aarch64_neon_smax:
- return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_neon_umax:
- return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_neon_smin:
- return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_neon_umin:
- return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_neon_scalar_sqxtn:
- case Intrinsic::aarch64_neon_scalar_sqxtun:
- case Intrinsic::aarch64_neon_scalar_uqxtn: {
- assert(Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::f32);
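- // For an i32 result, rewrite the operation with f32/f64 bitcasts so the
- // saturating narrow is selected on FP/SIMD registers; the f32-typed form
- // needs no custom lowering here.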
- if (Op.getValueType() == MVT::i32)
- return DAG.getNode(ISD::BITCAST, dl, MVT::i32,
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::f32,
- Op.getOperand(0),
- DAG.getNode(ISD::BITCAST, dl, MVT::f64,
- Op.getOperand(1))));
- return SDValue();
- }
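- // The SVE WHILE intrinsics all funnel through optimizeWhile, differing only
- // in signedness, comparison direction, and whether the bound is inclusive.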
- case Intrinsic::aarch64_sve_whilelo:
- return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/true,
- /*IsEqual=*/false);
- case Intrinsic::aarch64_sve_whilelt:
- return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/true,
- /*IsEqual=*/false);
- case Intrinsic::aarch64_sve_whilels:
- return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/true,
- /*IsEqual=*/true);
- case Intrinsic::aarch64_sve_whilele:
- return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/true,
- /*IsEqual=*/true);
- case Intrinsic::aarch64_sve_whilege:
- return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/false,
- /*IsEqual=*/true);
- case Intrinsic::aarch64_sve_whilegt:
- return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/false,
- /*IsEqual=*/false);
- case Intrinsic::aarch64_sve_whilehs:
- return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/false,
- /*IsEqual=*/true);
- case Intrinsic::aarch64_sve_whilehi:
- return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/false,
- /*IsEqual=*/false);
- case Intrinsic::aarch64_sve_sunpkhi:
- return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_sunpklo:
- return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_uunpkhi:
- return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_uunpklo:
- return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_clasta_n:
- return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
- case Intrinsic::aarch64_sve_clastb_n:
- return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
- case Intrinsic::aarch64_sve_lasta:
- return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_lastb:
- return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_rev:
- return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_tbl:
- return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_trn1:
- return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_trn2:
- return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_uzp1:
- return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_uzp2:
- return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_zip1:
- return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_zip2:
- return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_splice:
- return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
- case Intrinsic::aarch64_sve_ptrue:
- return getPTrue(DAG, dl, Op.getValueType(),
- cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- case Intrinsic::aarch64_sve_clz:
- return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
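- // aarch64_sme_cntsb/h/w/d return the streaming vector length in bytes,
- // halfwords, words and doublewords respectively. RDSVL #1 yields the byte
- // count; the narrower counts are derived by shifting it right by 1, 2 or 3.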
- case Intrinsic::aarch64_sme_cntsb:
- return DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
- DAG.getConstant(1, dl, MVT::i32));
- case Intrinsic::aarch64_sme_cntsh: {
- SDValue One = DAG.getConstant(1, dl, MVT::i32);
- SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(), One);
- return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes, One);
- }
- case Intrinsic::aarch64_sme_cntsw: {
- SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
- DAG.getConstant(1, dl, MVT::i32));
- return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
- DAG.getConstant(2, dl, MVT::i32));
- }
- case Intrinsic::aarch64_sme_cntsd: {
- SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
- DAG.getConstant(1, dl, MVT::i32));
- return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
- DAG.getConstant(3, dl, MVT::i32));
- }
- case Intrinsic::aarch64_sve_cnt: {
- SDValue Data = Op.getOperand(3);
- // CTPOP only supports integer operands.
- if (Data.getValueType().isFloatingPoint())
- Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
- return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Data, Op.getOperand(1));
- }
- case Intrinsic::aarch64_sve_dupq_lane:
- return LowerDUPQLane(Op, DAG);
- case Intrinsic::aarch64_sve_convert_from_svbool:
- return getSVEPredicateBitCast(Op.getValueType(), Op.getOperand(1), DAG);
- case Intrinsic::aarch64_sve_convert_to_svbool:
- return getSVEPredicateBitCast(MVT::nxv16i1, Op.getOperand(1), DAG);
- case Intrinsic::aarch64_sve_fneg:
- return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frintp:
- return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frintm:
- return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frinti:
- return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frintx:
- return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frinta:
- return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frintn:
- return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frintz:
- return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_ucvtf:
- return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
- Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_scvtf:
- return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
- Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_fcvtzu:
- return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
- Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_fcvtzs:
- return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
- Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_fsqrt:
- return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frecpx:
- return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_frecpe_x:
- return DAG.getNode(AArch64ISD::FRECPE, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_frecps_x:
- return DAG.getNode(AArch64ISD::FRECPS, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_frsqrte_x:
- return DAG.getNode(AArch64ISD::FRSQRTE, dl, Op.getValueType(),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_frsqrts_x:
- return DAG.getNode(AArch64ISD::FRSQRTS, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
- case Intrinsic::aarch64_sve_fabs:
- return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_abs:
- return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_neg:
- return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_insr: {
- SDValue Scalar = Op.getOperand(2);
- EVT ScalarTy = Scalar.getValueType();
- if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
- Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
- return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
- Op.getOperand(1), Scalar);
- }
- case Intrinsic::aarch64_sve_rbit:
- return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
- Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_revb:
- return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_revh:
- return DAG.getNode(AArch64ISD::REVH_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_revw:
- return DAG.getNode(AArch64ISD::REVW_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_revd:
- return DAG.getNode(AArch64ISD::REVD_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
- case Intrinsic::aarch64_sve_sxtb:
- return DAG.getNode(
- AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_sxth:
- return DAG.getNode(
- AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_sxtw:
- return DAG.getNode(
- AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_uxtb:
- return DAG.getNode(
- AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_uxth:
- return DAG.getNode(
- AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
- Op.getOperand(1));
- case Intrinsic::aarch64_sve_uxtw:
- return DAG.getNode(
- AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
- Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
- Op.getOperand(1));
- case Intrinsic::localaddress: {
- const auto &MF = DAG.getMachineFunction();
- const auto *RegInfo = Subtarget->getRegisterInfo();
- unsigned Reg = RegInfo->getLocalAddressRegister(MF);
- return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
- Op.getSimpleValueType());
- }
- case Intrinsic::eh_recoverfp: {
- // FIXME: This needs to be implemented to correctly handle highly aligned
- // stack objects. For now we simply return the incoming FP. Refer to D53541
- // for more details.
- SDValue FnOp = Op.getOperand(1);
- SDValue IncomingFPOp = Op.getOperand(2);
- GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
- auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
- if (!Fn)
- report_fatal_error(
- "llvm.eh.recoverfp must take a function as the first argument");
- return IncomingFPOp;
- }
- case Intrinsic::aarch64_neon_vsri:
- case Intrinsic::aarch64_neon_vsli: {
- EVT Ty = Op.getValueType();
- if (!Ty.isVector())
- report_fatal_error("Unexpected type for aarch64_neon_vsli");
- assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());
- bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
- unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
- return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
- Op.getOperand(3));
- }
- case Intrinsic::aarch64_neon_srhadd:
- case Intrinsic::aarch64_neon_urhadd:
- case Intrinsic::aarch64_neon_shadd:
- case Intrinsic::aarch64_neon_uhadd: {
- bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
- IntNo == Intrinsic::aarch64_neon_shadd);
- bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
- IntNo == Intrinsic::aarch64_neon_urhadd);
- unsigned Opcode = IsSignedAdd
- ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
- : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
- return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
- Op.getOperand(2));
- }
- case Intrinsic::aarch64_neon_sabd:
- case Intrinsic::aarch64_neon_uabd: {
- unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ? ISD::ABDU
- : ISD::ABDS;
- return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
- Op.getOperand(2));
- }
- case Intrinsic::aarch64_neon_saddlp:
- case Intrinsic::aarch64_neon_uaddlp: {
- unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uaddlp
- ? AArch64ISD::UADDLP
- : AArch64ISD::SADDLP;
- return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1));
- }
- case Intrinsic::aarch64_neon_sdot:
- case Intrinsic::aarch64_neon_udot:
- case Intrinsic::aarch64_sve_sdot:
- case Intrinsic::aarch64_sve_udot: {
- unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot ||
- IntNo == Intrinsic::aarch64_sve_udot)
- ? AArch64ISD::UDOT
- : AArch64ISD::SDOT;
- return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
- Op.getOperand(2), Op.getOperand(3));
- }
- case Intrinsic::get_active_lane_mask: {
- SDValue ID =
- DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID,
- Op.getOperand(1), Op.getOperand(2));
- }
- }
- }
- bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
- if (VT.getVectorElementType() == MVT::i8 ||
- VT.getVectorElementType() == MVT::i16) {
- EltTy = MVT::i32;
- return true;
- }
- return false;
- }
- bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
- EVT DataVT) const {
- // SVE only supports implicit extension of 32-bit indices.
- if (!Subtarget->hasSVE() || IndexVT.getVectorElementType() != MVT::i32)
- return false;
- // Indices cannot be smaller than the main data type.
- if (IndexVT.getScalarSizeInBits() < DataVT.getScalarSizeInBits())
- return false;
- // Scalable vectors with "vscale * 2" or fewer elements sit within a 64-bit
- // element container type, which would violate the previous clause.
- return DataVT.isFixedLengthVector() || DataVT.getVectorMinNumElements() > 2;
- }
- bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
- return ExtVal.getValueType().isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- ExtVal.getValueType(),
- /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors());
- }
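- // Select the gather opcode for the given addressing-mode combination of
- // scaled/unscaled, signed/unsigned and extending/non-extending indices.
- // Note that signedness is irrelevant when the index needs no extension, so
- // both signed and unsigned entries map to the plain (or scaled) GLD1.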
- unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
- std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
- {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
- AArch64ISD::GLD1_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true),
- AArch64ISD::GLD1_UXTW_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false),
- AArch64ISD::GLD1_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true),
- AArch64ISD::GLD1_SXTW_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false),
- AArch64ISD::GLD1_SCALED_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true),
- AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false),
- AArch64ISD::GLD1_SCALED_MERGE_ZERO},
- {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true),
- AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO},
- };
- auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend);
- return AddrModes.find(Key)->second;
- }
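- // Map a gather opcode to its sign-extending (GLD1S) equivalent.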
- unsigned getSignExtendedGatherOpcode(unsigned Opcode) {
- switch (Opcode) {
- default:
- llvm_unreachable("unimplemented opcode");
- return Opcode;
- case AArch64ISD::GLD1_MERGE_ZERO:
- return AArch64ISD::GLD1S_MERGE_ZERO;
- case AArch64ISD::GLD1_IMM_MERGE_ZERO:
- return AArch64ISD::GLD1S_IMM_MERGE_ZERO;
- case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
- return AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
- case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
- return AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
- case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
- return AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
- case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
- return AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
- case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
- return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
- }
- }
- SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
- SelectionDAG &DAG) const {
- MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
- SDLoc DL(Op);
- SDValue Chain = MGT->getChain();
- SDValue PassThru = MGT->getPassThru();
- SDValue Mask = MGT->getMask();
- SDValue BasePtr = MGT->getBasePtr();
- SDValue Index = MGT->getIndex();
- SDValue Scale = MGT->getScale();
- EVT VT = Op.getValueType();
- EVT MemVT = MGT->getMemoryVT();
- ISD::LoadExtType ExtType = MGT->getExtensionType();
- ISD::MemIndexType IndexType = MGT->getIndexType();
- // SVE supports only zero (and thus undef) passthrough values; everything
- // else must be handled manually by an explicit select on the load's output.
- if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
- SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
- SDValue Load =
- DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
- MGT->getMemOperand(), IndexType, ExtType);
- SDValue Select = DAG.getSelect(DL, VT, Mask, Load, PassThru);
- return DAG.getMergeValues({Select, Load.getValue(1)}, DL);
- }
- bool IsScaled = MGT->isIndexScaled();
- bool IsSigned = MGT->isIndexSigned();
- // SVE only supports an index scaled by sizeof(MemVT.elt); any other scale
- // must be applied to the index beforehand.
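- // For example, a gather of i16 elements (store size 2) presented with
- // Scale == 4 is rewritten as Index << 2 with Scale == 1.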
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
- if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
- assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
- EVT IndexVT = Index.getValueType();
- Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
- DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
- Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
- SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
- MGT->getMemOperand(), IndexType, ExtType);
- }
- // Lower fixed length gather to a scalable equivalent.
- if (VT.isFixedLengthVector()) {
- assert(Subtarget->useSVEForFixedLengthVectors() &&
- "Cannot lower when not using SVE for fixed vectors!");
- // NOTE: Handle floating-point as if integer then bitcast the result.
- EVT DataVT = VT.changeVectorElementTypeToInteger();
- MemVT = MemVT.changeVectorElementTypeToInteger();
- // Find the smallest integer fixed length vector we can use for the gather.
- EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
- if (DataVT.getVectorElementType() == MVT::i64 ||
- Index.getValueType().getVectorElementType() == MVT::i64 ||
- Mask.getValueType().getVectorElementType() == MVT::i64)
- PromotedVT = VT.changeVectorElementType(MVT::i64);
- // Promote vector operands except for passthrough, which we know is either
- // undef or zero, and thus best constructed directly.
- unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
- Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
- // A promoted result type forces the need for an extending load.
- if (PromotedVT != DataVT && ExtType == ISD::NON_EXTLOAD)
- ExtType = ISD::EXTLOAD;
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
- // Convert fixed length vector operands to scalable.
- MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
- Index = convertToScalableVector(DAG, ContainerVT, Index);
- Mask = convertFixedMaskToScalableVector(Mask, DAG);
- PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
- : DAG.getConstant(0, DL, ContainerVT);
- // Emit equivalent scalable vector gather.
- SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
- SDValue Load =
- DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,
- Ops, MGT->getMemOperand(), IndexType, ExtType);
- // Extract fixed length data then convert to the required result type.
- SDValue Result = convertFromScalableVector(DAG, PromotedVT, Load);
- Result = DAG.getNode(ISD::TRUNCATE, DL, DataVT, Result);
- if (VT.isFloatingPoint())
- Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
- return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
- }
- // Everything else is legal.
- return Op;
- }
- SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
- SelectionDAG &DAG) const {
- MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
- SDLoc DL(Op);
- SDValue Chain = MSC->getChain();
- SDValue StoreVal = MSC->getValue();
- SDValue Mask = MSC->getMask();
- SDValue BasePtr = MSC->getBasePtr();
- SDValue Index = MSC->getIndex();
- SDValue Scale = MSC->getScale();
- EVT VT = StoreVal.getValueType();
- EVT MemVT = MSC->getMemoryVT();
- ISD::MemIndexType IndexType = MSC->getIndexType();
- bool Truncating = MSC->isTruncatingStore();
- bool IsScaled = MSC->isIndexScaled();
- bool IsSigned = MSC->isIndexSigned();
- // SVE only supports an index scaled by sizeof(MemVT.elt); any other scale
- // must be applied to the index beforehand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
- if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
- assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
- EVT IndexVT = Index.getValueType();
- Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
- DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
- Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
- SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
- MSC->getMemOperand(), IndexType, Truncating);
- }
- // Lower fixed length scatter to a scalable equivalent.
- if (VT.isFixedLengthVector()) {
- assert(Subtarget->useSVEForFixedLengthVectors() &&
- "Cannot lower when not using SVE for fixed vectors!");
- // Once bitcast we treat floating-point scatters as if integer.
- if (VT.isFloatingPoint()) {
- VT = VT.changeVectorElementTypeToInteger();
- MemVT = MemVT.changeVectorElementTypeToInteger();
- StoreVal = DAG.getNode(ISD::BITCAST, DL, VT, StoreVal);
- }
- // Find the smallest integer fixed length vector we can use for the scatter.
- EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
- if (VT.getVectorElementType() == MVT::i64 ||
- Index.getValueType().getVectorElementType() == MVT::i64 ||
- Mask.getValueType().getVectorElementType() == MVT::i64)
- PromotedVT = VT.changeVectorElementType(MVT::i64);
- // Promote vector operands.
- unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
- Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
- StoreVal = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, StoreVal);
- // A promoted value type forces the need for a truncating store.
- if (PromotedVT != VT)
- Truncating = true;
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
- // Convert fixed length vector operands to scalable.
- MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
- Index = convertToScalableVector(DAG, ContainerVT, Index);
- Mask = convertFixedMaskToScalableVector(Mask, DAG);
- StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal);
- // Emit equivalent scalable vector scatter.
- SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
- MSC->getMemOperand(), IndexType, Truncating);
- }
- // Everything else is legal.
- return Op;
- }
- SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- MaskedLoadSDNode *LoadNode = cast<MaskedLoadSDNode>(Op);
- assert(LoadNode && "Expected custom lowering of a masked load node");
- EVT VT = Op->getValueType(0);
- if (useSVEForFixedLengthVectorVT(
- VT,
- /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
- return LowerFixedLengthVectorMLoadToSVE(Op, DAG);
- SDValue PassThru = LoadNode->getPassThru();
- SDValue Mask = LoadNode->getMask();
- if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
- return Op;
- SDValue Load = DAG.getMaskedLoad(
- VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(),
- LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(),
- LoadNode->getMemOperand(), LoadNode->getAddressingMode(),
- LoadNode->getExtensionType());
- SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru);
- return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
- }
- // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
- static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
- EVT VT, EVT MemVT,
- SelectionDAG &DAG) {
- assert(VT.isVector() && "VT should be a vector type");
- assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);
- SDValue Value = ST->getValue();
- // First extend the promoted v4i16 to v8i16, truncate to v8i8, and extract
- // the word lane that represents the v4i8 subvector. This optimizes the
- // store to:
- //
- // xtn v0.8b, v0.8h
- // str s0, [x0]
- SDValue Undef = DAG.getUNDEF(MVT::i16);
- SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
- {Undef, Undef, Undef, Undef});
- SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
- Value, UndefVec);
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);
- Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
- SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
- Trunc, DAG.getConstant(0, DL, MVT::i64));
- return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
- ST->getBasePtr(), ST->getMemOperand());
- }
- // Custom lowering for any store, vector or scalar, normal or truncating.
- // This currently custom-lowers truncating stores from v4i16 to v4i8,
- // volatile stores of i128, and the other special cases handled below.
- SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc Dl(Op);
- StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
- assert(StoreNode && "Can only custom lower store nodes");
- SDValue Value = StoreNode->getValue();
- EVT VT = Value.getValueType();
- EVT MemVT = StoreNode->getMemoryVT();
- if (VT.isVector()) {
- if (useSVEForFixedLengthVectorVT(
- VT,
- /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
- return LowerFixedLengthVectorStoreToSVE(Op, DAG);
- unsigned AS = StoreNode->getAddressSpace();
- Align Alignment = StoreNode->getAlign();
- if (Alignment < MemVT.getStoreSize() &&
- !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
- StoreNode->getMemOperand()->getFlags(),
- nullptr)) {
- return scalarizeVectorStore(StoreNode, DAG);
- }
- if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 &&
- MemVT == MVT::v4i8) {
- return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
- }
- // 256-bit non-temporal stores can be lowered to STNP. Do this as part of
- // the custom lowering, as there are no unpaired non-temporal stores and
- // legalization will break up 256-bit inputs.
- ElementCount EC = MemVT.getVectorElementCount();
- if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u &&
- EC.isKnownEven() &&
- ((MemVT.getScalarSizeInBits() == 8u ||
- MemVT.getScalarSizeInBits() == 16u ||
- MemVT.getScalarSizeInBits() == 32u ||
- MemVT.getScalarSizeInBits() == 64u))) {
- SDValue Lo =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
- MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
- StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64));
- SDValue Hi =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
- MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
- StoreNode->getValue(),
- DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64));
- SDValue Result = DAG.getMemIntrinsicNode(
- AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other),
- {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
- StoreNode->getMemoryVT(), StoreNode->getMemOperand());
- return Result;
- }
- } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) {
- return LowerStore128(Op, DAG);
- } else if (MemVT == MVT::i64x8) {
- SDValue Value = StoreNode->getValue();
- assert(Value->getValueType(0) == MVT::i64x8);
- SDValue Chain = StoreNode->getChain();
- SDValue Base = StoreNode->getBasePtr();
- EVT PtrVT = Base.getValueType();
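- // Split the 512-bit LS64 value into eight i64 pieces and store them at
- // consecutive 8-byte offsets from the base pointer.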
- for (unsigned i = 0; i < 8; i++) {
- SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64,
- Value, DAG.getConstant(i, Dl, MVT::i32));
- SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base,
- DAG.getConstant(i * 8, Dl, PtrVT));
- Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(),
- StoreNode->getOriginalAlign());
- }
- return Chain;
- }
- return SDValue();
- }
- /// Lower atomic or volatile 128-bit stores to a single STP instruction.
- SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
- SelectionDAG &DAG) const {
- MemSDNode *StoreNode = cast<MemSDNode>(Op);
- assert(StoreNode->getMemoryVT() == MVT::i128);
- assert(StoreNode->isVolatile() || StoreNode->isAtomic());
- assert(!StoreNode->isAtomic() ||
- StoreNode->getMergedOrdering() == AtomicOrdering::Unordered ||
- StoreNode->getMergedOrdering() == AtomicOrdering::Monotonic);
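- // Split the 128-bit value into its two 64-bit halves and emit a single STP.
- // With LSE2, a 16-byte-aligned STP is single-copy atomic, which suffices for
- // the unordered/monotonic atomic stores permitted here.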
- SDValue Value = StoreNode->getOpcode() == ISD::STORE
- ? StoreNode->getOperand(1)
- : StoreNode->getOperand(2);
- SDLoc DL(Op);
- SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
- DAG.getConstant(0, DL, MVT::i64));
- SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
- DAG.getConstant(1, DL, MVT::i64));
- SDValue Result = DAG.getMemIntrinsicNode(
- AArch64ISD::STP, DL, DAG.getVTList(MVT::Other),
- {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
- StoreNode->getMemoryVT(), StoreNode->getMemOperand());
- return Result;
- }
- SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
- assert(LoadNode && "Expected custom lowering of a load node");
- if (LoadNode->getMemoryVT() == MVT::i64x8) {
- SmallVector<SDValue, 8> Ops;
- SDValue Base = LoadNode->getBasePtr();
- SDValue Chain = LoadNode->getChain();
- EVT PtrVT = Base.getValueType();
- for (unsigned i = 0; i < 8; i++) {
- SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
- DAG.getConstant(i * 8, DL, PtrVT));
- SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr,
- LoadNode->getPointerInfo(),
- LoadNode->getOriginalAlign());
- Ops.push_back(Part);
- Chain = SDValue(Part.getNode(), 1);
- }
- SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops);
- return DAG.getMergeValues({Loaded, Chain}, DL);
- }
- // Custom lowering for extending v4i8 vector loads.
- EVT VT = Op->getValueType(0);
- assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32");
- if (LoadNode->getMemoryVT() != MVT::v4i8)
- return SDValue();
- unsigned ExtType;
- if (LoadNode->getExtensionType() == ISD::SEXTLOAD)
- ExtType = ISD::SIGN_EXTEND;
- else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD ||
- LoadNode->getExtensionType() == ISD::EXTLOAD)
- ExtType = ISD::ZERO_EXTEND;
- else
- return SDValue();
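- // Load the v4i8 with a single 32-bit (f32) load, insert it into a vector,
- // bitcast to v8i8 and extend, then keep only the low four lanes (extending
- // once more if a v4i32 result is required).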
- SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(),
- LoadNode->getBasePtr(), MachinePointerInfo());
- SDValue Chain = Load.getValue(1);
- SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);
- SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec);
- SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC);
- Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext,
- DAG.getConstant(0, DL, MVT::i64));
- if (VT == MVT::v4i32)
- Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext);
- return DAG.getMergeValues({Ext, Chain}, DL);
- }
- // Generate SUBS and CSEL for integer abs.
- SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
- MVT VT = Op.getSimpleValueType();
- if (VT.isVector())
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU);
- SDLoc DL(Op);
- SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
- Op.getOperand(0));
- // Generate SUBS & CSEL.
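- // The CSEL keeps the original value when the SUBS result is non-negative
- // (PL) and otherwise selects the negation, i.e. abs(x) = x >= 0 ? x : -x.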
- SDValue Cmp =
- DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
- Op.getOperand(0), DAG.getConstant(0, DL, VT));
- return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg,
- DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
- Cmp.getValue(1));
- }
- static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
- SDValue Chain = Op.getOperand(0);
- SDValue Cond = Op.getOperand(1);
- SDValue Dest = Op.getOperand(2);
- AArch64CC::CondCode CC;
- if (SDValue Cmp = emitConjunction(DAG, Cond, CC)) {
- SDLoc dl(Op);
- SDValue CCVal = DAG.getConstant(CC, dl, MVT::i32);
- return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
- Cmp);
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
- SelectionDAG &DAG) const {
- LLVM_DEBUG(dbgs() << "Custom lowering: ");
- LLVM_DEBUG(Op.dump());
- switch (Op.getOpcode()) {
- default:
- llvm_unreachable("unimplemented operand");
- return SDValue();
- case ISD::BITCAST:
- return LowerBITCAST(Op, DAG);
- case ISD::GlobalAddress:
- return LowerGlobalAddress(Op, DAG);
- case ISD::GlobalTLSAddress:
- return LowerGlobalTLSAddress(Op, DAG);
- case ISD::SETCC:
- case ISD::STRICT_FSETCC:
- case ISD::STRICT_FSETCCS:
- return LowerSETCC(Op, DAG);
- case ISD::SETCCCARRY:
- return LowerSETCCCARRY(Op, DAG);
- case ISD::BRCOND:
- return LowerBRCOND(Op, DAG);
- case ISD::BR_CC:
- return LowerBR_CC(Op, DAG);
- case ISD::SELECT:
- return LowerSELECT(Op, DAG);
- case ISD::SELECT_CC:
- return LowerSELECT_CC(Op, DAG);
- case ISD::JumpTable:
- return LowerJumpTable(Op, DAG);
- case ISD::BR_JT:
- return LowerBR_JT(Op, DAG);
- case ISD::ConstantPool:
- return LowerConstantPool(Op, DAG);
- case ISD::BlockAddress:
- return LowerBlockAddress(Op, DAG);
- case ISD::VASTART:
- return LowerVASTART(Op, DAG);
- case ISD::VACOPY:
- return LowerVACOPY(Op, DAG);
- case ISD::VAARG:
- return LowerVAARG(Op, DAG);
- case ISD::ADDCARRY:
- return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/);
- case ISD::SUBCARRY:
- return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/);
- case ISD::SADDO_CARRY:
- return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/);
- case ISD::SSUBO_CARRY:
- return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/);
- case ISD::SADDO:
- case ISD::UADDO:
- case ISD::SSUBO:
- case ISD::USUBO:
- case ISD::SMULO:
- case ISD::UMULO:
- return LowerXALUO(Op, DAG);
- case ISD::FADD:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED);
- case ISD::FSUB:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED);
- case ISD::FMUL:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
- case ISD::FMA:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
- case ISD::FDIV:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED);
- case ISD::FNEG:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
- case ISD::FCEIL:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU);
- case ISD::FFLOOR:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU);
- case ISD::FNEARBYINT:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU);
- case ISD::FRINT:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU);
- case ISD::FROUND:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU);
- case ISD::FROUNDEVEN:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU);
- case ISD::FTRUNC:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU);
- case ISD::FSQRT:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU);
- case ISD::FABS:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU);
- case ISD::FP_ROUND:
- case ISD::STRICT_FP_ROUND:
- return LowerFP_ROUND(Op, DAG);
- case ISD::FP_EXTEND:
- return LowerFP_EXTEND(Op, DAG);
- case ISD::FRAMEADDR:
- return LowerFRAMEADDR(Op, DAG);
- case ISD::SPONENTRY:
- return LowerSPONENTRY(Op, DAG);
- case ISD::RETURNADDR:
- return LowerRETURNADDR(Op, DAG);
- case ISD::ADDROFRETURNADDR:
- return LowerADDROFRETURNADDR(Op, DAG);
- case ISD::CONCAT_VECTORS:
- return LowerCONCAT_VECTORS(Op, DAG);
- case ISD::INSERT_VECTOR_ELT:
- return LowerINSERT_VECTOR_ELT(Op, DAG);
- case ISD::EXTRACT_VECTOR_ELT:
- return LowerEXTRACT_VECTOR_ELT(Op, DAG);
- case ISD::BUILD_VECTOR:
- return LowerBUILD_VECTOR(Op, DAG);
- case ISD::ZERO_EXTEND_VECTOR_INREG:
- return LowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
- case ISD::VECTOR_SHUFFLE:
- return LowerVECTOR_SHUFFLE(Op, DAG);
- case ISD::SPLAT_VECTOR:
- return LowerSPLAT_VECTOR(Op, DAG);
- case ISD::EXTRACT_SUBVECTOR:
- return LowerEXTRACT_SUBVECTOR(Op, DAG);
- case ISD::INSERT_SUBVECTOR:
- return LowerINSERT_SUBVECTOR(Op, DAG);
- case ISD::SDIV:
- case ISD::UDIV:
- return LowerDIV(Op, DAG);
- case ISD::SMIN:
- case ISD::UMIN:
- case ISD::SMAX:
- case ISD::UMAX:
- return LowerMinMax(Op, DAG);
- case ISD::SRA:
- case ISD::SRL:
- case ISD::SHL:
- return LowerVectorSRA_SRL_SHL(Op, DAG);
- case ISD::SHL_PARTS:
- case ISD::SRL_PARTS:
- case ISD::SRA_PARTS:
- return LowerShiftParts(Op, DAG);
- case ISD::CTPOP:
- case ISD::PARITY:
- return LowerCTPOP_PARITY(Op, DAG);
- case ISD::FCOPYSIGN:
- return LowerFCOPYSIGN(Op, DAG);
- case ISD::OR:
- return LowerVectorOR(Op, DAG);
- case ISD::XOR:
- return LowerXOR(Op, DAG);
- case ISD::PREFETCH:
- return LowerPREFETCH(Op, DAG);
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- case ISD::STRICT_SINT_TO_FP:
- case ISD::STRICT_UINT_TO_FP:
- return LowerINT_TO_FP(Op, DAG);
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
- case ISD::STRICT_FP_TO_SINT:
- case ISD::STRICT_FP_TO_UINT:
- return LowerFP_TO_INT(Op, DAG);
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- return LowerFP_TO_INT_SAT(Op, DAG);
- case ISD::FSINCOS:
- return LowerFSINCOS(Op, DAG);
- case ISD::GET_ROUNDING:
- return LowerGET_ROUNDING(Op, DAG);
- case ISD::SET_ROUNDING:
- return LowerSET_ROUNDING(Op, DAG);
- case ISD::MUL:
- return LowerMUL(Op, DAG);
- case ISD::MULHS:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED);
- case ISD::MULHU:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED);
- case ISD::INTRINSIC_W_CHAIN:
- return LowerINTRINSIC_W_CHAIN(Op, DAG);
- case ISD::INTRINSIC_WO_CHAIN:
- return LowerINTRINSIC_WO_CHAIN(Op, DAG);
- case ISD::INTRINSIC_VOID:
- return LowerINTRINSIC_VOID(Op, DAG);
- case ISD::ATOMIC_STORE:
- if (cast<MemSDNode>(Op)->getMemoryVT() == MVT::i128) {
- assert(Subtarget->hasLSE2());
- return LowerStore128(Op, DAG);
- }
- return SDValue();
- case ISD::STORE:
- return LowerSTORE(Op, DAG);
- case ISD::MSTORE:
- return LowerFixedLengthVectorMStoreToSVE(Op, DAG);
- case ISD::MGATHER:
- return LowerMGATHER(Op, DAG);
- case ISD::MSCATTER:
- return LowerMSCATTER(Op, DAG);
- case ISD::VECREDUCE_SEQ_FADD:
- return LowerVECREDUCE_SEQ_FADD(Op, DAG);
- case ISD::VECREDUCE_ADD:
- case ISD::VECREDUCE_AND:
- case ISD::VECREDUCE_OR:
- case ISD::VECREDUCE_XOR:
- case ISD::VECREDUCE_SMAX:
- case ISD::VECREDUCE_SMIN:
- case ISD::VECREDUCE_UMAX:
- case ISD::VECREDUCE_UMIN:
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_FMAX:
- case ISD::VECREDUCE_FMIN:
- return LowerVECREDUCE(Op, DAG);
- case ISD::ATOMIC_LOAD_SUB:
- return LowerATOMIC_LOAD_SUB(Op, DAG);
- case ISD::ATOMIC_LOAD_AND:
- return LowerATOMIC_LOAD_AND(Op, DAG);
- case ISD::DYNAMIC_STACKALLOC:
- return LowerDYNAMIC_STACKALLOC(Op, DAG);
- case ISD::VSCALE:
- return LowerVSCALE(Op, DAG);
- case ISD::ANY_EXTEND:
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
- case ISD::SIGN_EXTEND_INREG: {
- // Only custom lower when ExtraVT has a legal byte-based element type.
- EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- EVT ExtraEltVT = ExtraVT.getVectorElementType();
- if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) &&
- (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64))
- return SDValue();
- return LowerToPredicatedOp(Op, DAG,
- AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU);
- }
- case ISD::TRUNCATE:
- return LowerTRUNCATE(Op, DAG);
- case ISD::MLOAD:
- return LowerMLOAD(Op, DAG);
- case ISD::LOAD:
- if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthVectorLoadToSVE(Op, DAG);
- return LowerLOAD(Op, DAG);
- case ISD::ADD:
- case ISD::AND:
- case ISD::SUB:
- return LowerToScalableOp(Op, DAG);
- case ISD::FMAXIMUM:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED);
- case ISD::FMAXNUM:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED);
- case ISD::FMINIMUM:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED);
- case ISD::FMINNUM:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED);
- case ISD::VSELECT:
- return LowerFixedLengthVectorSelectToSVE(Op, DAG);
- case ISD::ABS:
- return LowerABS(Op, DAG);
- case ISD::ABDS:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
- case ISD::ABDU:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
- case ISD::AVGFLOORS:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDS_PRED);
- case ISD::AVGFLOORU:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDU_PRED);
- case ISD::AVGCEILS:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDS_PRED);
- case ISD::AVGCEILU:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDU_PRED);
- case ISD::BITREVERSE:
- return LowerBitreverse(Op, DAG);
- case ISD::BSWAP:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
- case ISD::CTLZ:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU);
- case ISD::CTTZ:
- return LowerCTTZ(Op, DAG);
- case ISD::VECTOR_SPLICE:
- return LowerVECTOR_SPLICE(Op, DAG);
- case ISD::STRICT_LROUND:
- case ISD::STRICT_LLROUND:
- case ISD::STRICT_LRINT:
- case ISD::STRICT_LLRINT: {
- assert(Op.getOperand(1).getValueType() == MVT::f16 &&
- "Expected custom lowering of rounding operations only for f16");
- SDLoc DL(Op);
- SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
- {Op.getOperand(0), Op.getOperand(1)});
- return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other},
- {Ext.getValue(1), Ext.getValue(0)});
- }
- case ISD::WRITE_REGISTER: {
- assert(Op.getOperand(2).getValueType() == MVT::i128 &&
- "WRITE_REGISTER custom lowering is only for 128-bit sysregs");
- SDLoc DL(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue SysRegName = Op.getOperand(1);
- SDValue Pair = Op.getOperand(2);
- SDValue PairLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Pair,
- DAG.getConstant(0, DL, MVT::i32));
- SDValue PairHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Pair,
- DAG.getConstant(1, DL, MVT::i32));
- // chain = MSRR(chain, sysregname, lo, hi)
- SDValue Result = DAG.getNode(AArch64ISD::MSRR, DL, MVT::Other, Chain,
- SysRegName, PairLo, PairHi);
- return Result;
- }
- }
- }
- bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
- return !Subtarget->useSVEForFixedLengthVectors();
- }
- bool AArch64TargetLowering::isVScaleKnownToBeAPowerOfTwo() const {
- return true;
- }
- bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
- EVT VT, bool OverrideNEON) const {
- if (!VT.isFixedLengthVector() || !VT.isSimple())
- return false;
- // Don't use SVE for vectors we cannot scalarize if required.
- switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
- // Fixed length predicates should be promoted to i8.
- // NOTE: This is consistent with how NEON (and thus 64/128-bit vectors) works.
- case MVT::i1:
- default:
- return false;
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::i64:
- case MVT::f16:
- case MVT::f32:
- case MVT::f64:
- break;
- }
- // All SVE implementations support NEON sized vectors.
- if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
- return Subtarget->hasSVE();
- // Ensure NEON MVTs only belong to a single register class.
- if (VT.getFixedSizeInBits() <= 128)
- return false;
- // Ensure wider than NEON code generation is enabled.
- if (!Subtarget->useSVEForFixedLengthVectors())
- return false;
- // Don't use SVE for types that don't fit.
- if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
- return false;
- // TODO: Perhaps an artificial restriction, but worth having whilst getting
- // the base fixed length SVE support in place.
- if (!VT.isPow2VectorType())
- return false;
- return true;
- }
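Reviewer note: the width checks above reduce to a few integer comparisons. A minimal standalone sketch (plain C++, hypothetical helper name; the element-type filtering and OverrideNEON handling are omitted) of the same decision:

    #include <cstdint>

    // Hedged sketch of the width checks only: prefer SVE for a fixed-length
    // vector iff it is wider than NEON (128 bits), wider-than-NEON codegen is
    // enabled, it fits in the guaranteed SVE register width, and the lane
    // count is a power of two.
    bool useSVEForFixedWidth(uint64_t FixedBits, unsigned NumLanes,
                             bool WideVectorsEnabled, uint64_t MinSVEBits) {
      if (FixedBits <= 128)        // NEON-sized: keep NEON register classes
        return false;
      if (!WideVectorsEnabled)     // wider-than-NEON code generation disabled
        return false;
      if (FixedBits > MinSVEBits)  // does not fit in the guaranteed SVE width
        return false;
      return NumLanes != 0 && (NumLanes & (NumLanes - 1)) == 0;
    }

    // Example: a 256-bit vector of 8 lanes with a 256-bit minimum SVE size
    // qualifies: useSVEForFixedWidth(256, 8, true, 256) == true.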
- //===----------------------------------------------------------------------===//
- // Calling Convention Implementation
- //===----------------------------------------------------------------------===//
- static unsigned getIntrinsicID(const SDNode *N) {
- unsigned Opcode = N->getOpcode();
- switch (Opcode) {
- default:
- return Intrinsic::not_intrinsic;
- case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
- if (IID < Intrinsic::num_intrinsics)
- return IID;
- return Intrinsic::not_intrinsic;
- }
- }
- }
- bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
- SDValue N1) const {
- if (!N0.hasOneUse())
- return false;
- unsigned IID = getIntrinsicID(N1.getNode());
- // Avoid reassociating expressions that can be lowered to smlal/umlal.
- if (IID == Intrinsic::aarch64_neon_umull ||
- N1.getOpcode() == AArch64ISD::UMULL ||
- IID == Intrinsic::aarch64_neon_smull ||
- N1.getOpcode() == AArch64ISD::SMULL)
- return N0.getOpcode() != ISD::ADD;
- return true;
- }
- /// Selects the correct CCAssignFn for a given CallingConvention value.
- CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
- bool IsVarArg) const {
- switch (CC) {
- default:
- report_fatal_error("Unsupported calling convention.");
- case CallingConv::WebKit_JS:
- return CC_AArch64_WebKit_JS;
- case CallingConv::GHC:
- return CC_AArch64_GHC;
- case CallingConv::C:
- case CallingConv::Fast:
- case CallingConv::PreserveMost:
- case CallingConv::CXX_FAST_TLS:
- case CallingConv::Swift:
- case CallingConv::SwiftTail:
- case CallingConv::Tail:
- if (Subtarget->isTargetWindows() && IsVarArg) {
- if (Subtarget->isWindowsArm64EC())
- return CC_AArch64_Arm64EC_VarArg;
- return CC_AArch64_Win64_VarArg;
- }
- if (!Subtarget->isTargetDarwin())
- return CC_AArch64_AAPCS;
- if (!IsVarArg)
- return CC_AArch64_DarwinPCS;
- return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg
- : CC_AArch64_DarwinPCS_VarArg;
- case CallingConv::Win64:
- if (IsVarArg) {
- if (Subtarget->isWindowsArm64EC())
- return CC_AArch64_Arm64EC_VarArg;
- return CC_AArch64_Win64_VarArg;
- }
- return CC_AArch64_AAPCS;
- case CallingConv::CFGuard_Check:
- return CC_AArch64_Win64_CFGuard_Check;
- case CallingConv::AArch64_VectorCall:
- case CallingConv::AArch64_SVE_VectorCall:
- case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
- case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
- return CC_AArch64_AAPCS;
- }
- }
- CCAssignFn *
- AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
- return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
- : RetCC_AArch64_AAPCS;
- }
- unsigned
- AArch64TargetLowering::allocateLazySaveBuffer(SDValue &Chain, const SDLoc &DL,
- SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- // Allocate a lazy-save buffer object of size SVL.B * SVL.B (worst-case)
- SDValue N = DAG.getNode(AArch64ISD::RDSVL, DL, MVT::i64,
- DAG.getConstant(1, DL, MVT::i32));
- SDValue NN = DAG.getNode(ISD::MUL, DL, MVT::i64, N, N);
- SDValue Ops[] = {Chain, NN, DAG.getConstant(1, DL, MVT::i64)};
- SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
- SDValue Buffer = DAG.getNode(ISD::DYNAMIC_STACKALLOC, DL, VTs, Ops);
- Chain = Buffer.getValue(1);
- MFI.CreateVariableSizedObject(Align(1), nullptr);
- // Allocate an additional TPIDR2 object on the stack (16 bytes)
- unsigned TPIDR2Obj = MFI.CreateStackObject(16, Align(16), false);
- // Store the buffer pointer to the TPIDR2 stack object.
- MachinePointerInfo MPI = MachinePointerInfo::getStack(MF, TPIDR2Obj);
- SDValue Ptr = DAG.getFrameIndex(
- TPIDR2Obj,
- DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
- Chain = DAG.getStore(Chain, DL, Buffer, Ptr, MPI);
- return TPIDR2Obj;
- }
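The buffer sized here is the worst case for a full ZA save: SVL.B rows of SVL.B bytes each, plus the separate 16-byte TPIDR2 stack object that the code above stores the buffer pointer into. A small arithmetic sketch, assuming a hypothetical 512-bit streaming vector length (at run time RDSVL supplies the real value):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t SVLBits = 512;             // assumed streaming VL in bits
      const uint64_t SVLB = SVLBits / 8;        // SVL.B: bytes per ZA row
      const uint64_t ZASaveBytes = SVLB * SVLB; // worst-case lazy-save buffer
      const uint64_t TPIDR2BlockBytes = 16;     // pointer (8B) + metadata
      std::printf("ZA save buffer: %llu bytes, TPIDR2 block: %llu bytes\n",
                  (unsigned long long)ZASaveBytes,
                  (unsigned long long)TPIDR2BlockBytes);
      return 0;
    }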
- SDValue AArch64TargetLowering::LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- const Function &F = MF.getFunction();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- bool IsWin64 = Subtarget->isCallingConvWin64(F.getCallingConv());
- AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(CallConv, F.getReturnType(), F.getAttributes(), Outs,
- DAG.getTargetLoweringInfo(), MF.getDataLayout());
- if (any_of(Outs, [](ISD::OutputArg &Out){ return Out.VT.isScalableVector(); }))
- FuncInfo->setIsSVECC(true);
- // Assign locations to all of the incoming arguments.
- SmallVector<CCValAssign, 16> ArgLocs;
- DenseMap<unsigned, SDValue> CopiedRegs;
- CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
- // At this point, Ins[].VT may already be promoted to i32. To correctly
- // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
- // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
- // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
- // we use a special version of AnalyzeFormalArguments to pass in ValVT and
- // LocVT.
- unsigned NumArgs = Ins.size();
- Function::const_arg_iterator CurOrigArg = F.arg_begin();
- unsigned CurArgIdx = 0;
- for (unsigned i = 0; i != NumArgs; ++i) {
- MVT ValVT = Ins[i].VT;
- if (Ins[i].isOrigArg()) {
- std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
- CurArgIdx = Ins[i].getOrigArgIndex();
- // Get type of the original argument.
- EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(),
- /*AllowUnknown*/ true);
- MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
- // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
- if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
- ValVT = MVT::i8;
- else if (ActualMVT == MVT::i16)
- ValVT = MVT::i16;
- }
- bool UseVarArgCC = false;
- if (IsWin64)
- UseVarArgCC = isVarArg;
- CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
- bool Res =
- AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
- assert(!Res && "Call operand has unhandled type");
- (void)Res;
- }
- SMEAttrs Attrs(MF.getFunction());
- bool IsLocallyStreaming =
- !Attrs.hasStreamingInterface() && Attrs.hasStreamingBody();
- assert(Chain.getOpcode() == ISD::EntryToken && "Unexpected Chain value");
- SDValue Glue = Chain.getValue(1);
- SmallVector<SDValue, 16> ArgValues;
- unsigned ExtraArgLocs = 0;
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
- if (Ins[i].Flags.isByVal()) {
- // Byval is used for HFAs in the PCS, but the system should work in a
- // non-compliant manner for larger structs.
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- int Size = Ins[i].Flags.getByValSize();
- unsigned NumRegs = (Size + 7) / 8;
- // FIXME: This works on big-endian for composite byvals, which are the common
- // case. It should also work for fundamental types.
- unsigned FrameIdx =
- MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
- SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT);
- InVals.push_back(FrameIdxN);
- continue;
- }
- if (Ins[i].Flags.isSwiftAsync())
- MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
- SDValue ArgValue;
- if (VA.isRegLoc()) {
- // Arguments stored in registers.
- EVT RegVT = VA.getLocVT();
- const TargetRegisterClass *RC;
- if (RegVT == MVT::i32)
- RC = &AArch64::GPR32RegClass;
- else if (RegVT == MVT::i64)
- RC = &AArch64::GPR64RegClass;
- else if (RegVT == MVT::f16 || RegVT == MVT::bf16)
- RC = &AArch64::FPR16RegClass;
- else if (RegVT == MVT::f32)
- RC = &AArch64::FPR32RegClass;
- else if (RegVT == MVT::f64 || RegVT.is64BitVector())
- RC = &AArch64::FPR64RegClass;
- else if (RegVT == MVT::f128 || RegVT.is128BitVector())
- RC = &AArch64::FPR128RegClass;
- else if (RegVT.isScalableVector() &&
- RegVT.getVectorElementType() == MVT::i1) {
- FuncInfo->setIsSVECC(true);
- RC = &AArch64::PPRRegClass;
- } else if (RegVT.isScalableVector()) {
- FuncInfo->setIsSVECC(true);
- RC = &AArch64::ZPRRegClass;
- } else
- llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
- // Transform the arguments in physical registers into virtual ones.
- Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
- if (IsLocallyStreaming) {
- // LocallyStreamingFunctions must insert the SMSTART in the correct
- // position, so we use Glue to ensure no instructions can be scheduled
- // between the chain of:
- // t0: ch,glue = EntryNode
- // t1: res,ch,glue = CopyFromReg
- // ...
- // tn: res,ch,glue = CopyFromReg t(n-1), ..
- // t(n+1): ch, glue = SMSTART t0:0, ...., tn:2
- // ^^^^^^
- // This will be the new Chain/Root node.
- ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT, Glue);
- Glue = ArgValue.getValue(2);
- } else
- ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
- // If this is an 8, 16 or 32-bit value, it is really passed promoted
- // to 64 bits. Insert an assert[sz]ext to capture this, then
- // truncate to the right size.
- switch (VA.getLocInfo()) {
- default:
- llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full:
- break;
- case CCValAssign::Indirect:
- assert((VA.getValVT().isScalableVector() ||
- Subtarget->isWindowsArm64EC()) &&
- "Indirect arguments should be scalable on most subtargets");
- break;
- case CCValAssign::BCvt:
- ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
- break;
- case CCValAssign::AExt:
- case CCValAssign::SExt:
- case CCValAssign::ZExt:
- break;
- case CCValAssign::AExtUpper:
- ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue,
- DAG.getConstant(32, DL, RegVT));
- ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT());
- break;
- }
- } else { // VA.isRegLoc()
- assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
- unsigned ArgOffset = VA.getLocMemOffset();
- unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect
- ? VA.getLocVT().getSizeInBits()
- : VA.getValVT().getSizeInBits()) / 8;
- uint32_t BEAlign = 0;
- if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
- !Ins[i].Flags.isInConsecutiveRegs())
- BEAlign = 8 - ArgSize;
- SDValue FIN;
- MachinePointerInfo PtrInfo;
- if (isVarArg && Subtarget->isWindowsArm64EC()) {
- // In the ARM64EC varargs convention, fixed arguments on the stack are
- // accessed relative to x4, not sp.
- unsigned ObjOffset = ArgOffset + BEAlign;
- Register VReg = MF.addLiveIn(AArch64::X4, &AArch64::GPR64RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
- FIN = DAG.getNode(ISD::ADD, DL, MVT::i64, Val,
- DAG.getConstant(ObjOffset, DL, MVT::i64));
- PtrInfo = MachinePointerInfo::getUnknownStack(MF);
- } else {
- int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
- // Create load nodes to retrieve arguments from the stack.
- FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
- PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
- }
- // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
- ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
- MVT MemVT = VA.getValVT();
- switch (VA.getLocInfo()) {
- default:
- break;
- case CCValAssign::Trunc:
- case CCValAssign::BCvt:
- MemVT = VA.getLocVT();
- break;
- case CCValAssign::Indirect:
- assert((VA.getValVT().isScalableVector() ||
- Subtarget->isWindowsArm64EC()) &&
- "Indirect arguments should be scalable on most subtargets");
- MemVT = VA.getLocVT();
- break;
- case CCValAssign::SExt:
- ExtType = ISD::SEXTLOAD;
- break;
- case CCValAssign::ZExt:
- ExtType = ISD::ZEXTLOAD;
- break;
- case CCValAssign::AExt:
- ExtType = ISD::EXTLOAD;
- break;
- }
- ArgValue = DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN, PtrInfo,
- MemVT);
- }
- if (VA.getLocInfo() == CCValAssign::Indirect) {
- assert(
- (VA.getValVT().isScalableVector() || Subtarget->isWindowsArm64EC()) &&
- "Indirect arguments should be scalable on most subtargets");
- uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinValue();
- unsigned NumParts = 1;
- if (Ins[i].Flags.isInConsecutiveRegs()) {
- assert(!Ins[i].Flags.isInConsecutiveRegsLast());
- while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
- ++NumParts;
- }
- MVT PartLoad = VA.getValVT();
- SDValue Ptr = ArgValue;
- // Ensure we generate all loads for each tuple part, whilst updating the
- // pointer after each load correctly using vscale.
- while (NumParts > 0) {
- ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo());
- InVals.push_back(ArgValue);
- NumParts--;
- if (NumParts > 0) {
- SDValue BytesIncrement;
- if (PartLoad.isScalableVector()) {
- BytesIncrement = DAG.getVScale(
- DL, Ptr.getValueType(),
- APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize));
- } else {
- BytesIncrement = DAG.getConstant(
- APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize), DL,
- Ptr.getValueType());
- }
- SDNodeFlags Flags;
- Flags.setNoUnsignedWrap(true);
- Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
- BytesIncrement, Flags);
- ExtraArgLocs++;
- i++;
- }
- }
- } else {
- if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer())
- ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(),
- ArgValue, DAG.getValueType(MVT::i32));
- // i1 arguments are zero-extended to i8 by the caller. Emit a
- // hint to reflect this.
- if (Ins[i].isOrigArg()) {
- Argument *OrigArg = F.getArg(Ins[i].getOrigArgIndex());
- if (OrigArg->getType()->isIntegerTy(1)) {
- if (!Ins[i].Flags.isZExt()) {
- ArgValue = DAG.getNode(AArch64ISD::ASSERT_ZEXT_BOOL, DL,
- ArgValue.getValueType(), ArgValue);
- }
- }
- }
- InVals.push_back(ArgValue);
- }
- }
- assert((ArgLocs.size() + ExtraArgLocs) == Ins.size());
- // Insert the SMSTART if this is a locally streaming function and
- // make sure it is Glued to the last CopyFromReg value.
- if (IsLocallyStreaming) {
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- Chain = DAG.getNode(
- AArch64ISD::SMSTART, DL, DAG.getVTList(MVT::Other, MVT::Glue),
- {DAG.getRoot(),
- DAG.getTargetConstant((int32_t)AArch64SVCR::SVCRSM, DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64),
- DAG.getRegisterMask(TRI->getSMStartStopCallPreservedMask()), Glue});
- // Ensure that the SMSTART is ordered after the CopyFromReg nodes above and
- // that its chain result is used, by routing each InVal through a fresh vreg.
- for (unsigned I=0; I<InVals.size(); ++I) {
- Register Reg = MF.getRegInfo().createVirtualRegister(
- getRegClassFor(InVals[I].getValueType().getSimpleVT()));
- Chain = DAG.getCopyToReg(Chain, DL, Reg, InVals[I]);
- InVals[I] = DAG.getCopyFromReg(Chain, DL, Reg,
- InVals[I].getValueType());
- }
- }
- // varargs
- if (isVarArg) {
- if (!Subtarget->isTargetDarwin() || IsWin64) {
- // The AAPCS variadic function ABI is identical to the non-variadic
- // one. As a result there may be more arguments in registers and we should
- // save them for future reference.
- // Win64 variadic functions also pass arguments in registers, but all float
- // arguments are passed in integer registers.
- saveVarArgRegisters(CCInfo, DAG, DL, Chain);
- }
- // This will point to the next argument passed via stack.
- unsigned StackOffset = CCInfo.getNextStackOffset();
- // We currently pass all varargs at 8-byte alignment, or 4 for ILP32
- StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
- FuncInfo->setVarArgsStackOffset(StackOffset);
- FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
- if (MFI.hasMustTailInVarArgFunc()) {
- SmallVector<MVT, 2> RegParmTypes;
- RegParmTypes.push_back(MVT::i64);
- RegParmTypes.push_back(MVT::f128);
- // Compute the set of forwarded registers. The rest are scratch.
- SmallVectorImpl<ForwardedRegister> &Forwards =
- FuncInfo->getForwardedMustTailRegParms();
- CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,
- CC_AArch64_AAPCS);
- // Conservatively forward X8, since it might be used for aggregate return.
- if (!CCInfo.isAllocated(AArch64::X8)) {
- Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
- Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
- }
- }
- }
- // On Windows, InReg pointers must be returned, so record the pointer in a
- // virtual register at the start of the function so it can be returned in the
- // epilogue.
- if (IsWin64) {
- for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
- if (Ins[I].Flags.isInReg() && Ins[I].Flags.isSRet()) {
- assert(!FuncInfo->getSRetReturnReg());
- MVT PtrTy = getPointerTy(DAG.getDataLayout());
- Register Reg =
- MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
- FuncInfo->setSRetReturnReg(Reg);
- SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]);
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
- break;
- }
- }
- }
- unsigned StackArgSize = CCInfo.getNextStackOffset();
- bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
- if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
- // This is a non-standard ABI so by fiat I say we're allowed to make full
- // use of the stack area to be popped, which must be aligned to 16 bytes in
- // any case:
- StackArgSize = alignTo(StackArgSize, 16);
- // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
- // a multiple of 16.
- FuncInfo->setArgumentStackToRestore(StackArgSize);
- // This realignment carries over to the available bytes below. Our own
- // callers will guarantee the space is free by giving an aligned value to
- // CALLSEQ_START.
- }
- // Even if we're not expected to free up the space, it's useful to know how
- // much is there while considering tail calls (because we can reuse it).
- FuncInfo->setBytesInStackArgArea(StackArgSize);
- if (Subtarget->hasCustomCallingConv())
- Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
- // Conservatively assume the function requires the lazy-save mechanism.
- if (SMEAttrs(MF.getFunction()).hasZAState()) {
- unsigned TPIDR2Obj = allocateLazySaveBuffer(Chain, DL, DAG);
- FuncInfo->setLazySaveTPIDR2Obj(TPIDR2Obj);
- }
- return Chain;
- }
- void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
- SelectionDAG &DAG,
- const SDLoc &DL,
- SDValue &Chain) const {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
- SmallVector<SDValue, 8> MemOps;
- static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
- AArch64::X3, AArch64::X4, AArch64::X5,
- AArch64::X6, AArch64::X7 };
- unsigned NumGPRArgRegs = std::size(GPRArgRegs);
- if (Subtarget->isWindowsArm64EC()) {
- // In the ARM64EC ABI, only x0-x3 are used to pass arguments to varargs
- // functions.
- NumGPRArgRegs = 4;
- }
- unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
- unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
- int GPRIdx = 0;
- if (GPRSaveSize != 0) {
- if (IsWin64) {
- GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
- if (GPRSaveSize & 15)
- // The extra size here, if triggered, will always be 8.
- MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false);
- } else
- GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
- SDValue FIN;
- if (Subtarget->isWindowsArm64EC()) {
- // With the Arm64EC ABI, we reserve the save area as usual, but we
- // compute its address relative to x4. For a normal AArch64->AArch64
- // call, x4 == sp on entry, but calls from an entry thunk can pass in a
- // different address.
- Register VReg = MF.addLiveIn(AArch64::X4, &AArch64::GPR64RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
- FIN = DAG.getNode(ISD::SUB, DL, MVT::i64, Val,
- DAG.getConstant(GPRSaveSize, DL, MVT::i64));
- } else {
- FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
- }
- for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
- Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
- SDValue Store =
- DAG.getStore(Val.getValue(1), DL, Val, FIN,
- IsWin64 ? MachinePointerInfo::getFixedStack(
- MF, GPRIdx, (i - FirstVariadicGPR) * 8)
- : MachinePointerInfo::getStack(MF, i * 8));
- MemOps.push_back(Store);
- FIN =
- DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
- }
- }
- FuncInfo->setVarArgsGPRIndex(GPRIdx);
- FuncInfo->setVarArgsGPRSize(GPRSaveSize);
- if (Subtarget->hasFPARMv8() && !IsWin64) {
- static const MCPhysReg FPRArgRegs[] = {
- AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
- AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
- static const unsigned NumFPRArgRegs = std::size(FPRArgRegs);
- unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
- unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
- int FPRIdx = 0;
- if (FPRSaveSize != 0) {
- FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
- SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
- for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
- Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
- SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(MF, i * 16));
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
- DAG.getConstant(16, DL, PtrVT));
- }
- }
- FuncInfo->setVarArgsFPRIndex(FPRIdx);
- FuncInfo->setVarArgsFPRSize(FPRSaveSize);
- }
- if (!MemOps.empty()) {
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
- }
- }
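The sizes of the two register save areas follow directly from how many argument registers remain unallocated once the fixed arguments have been assigned. A standalone arithmetic sketch with assumed example values (AAPCS register counts; as above, ARM64EC caps the GPR count at 4 and Win64 skips the FPR area entirely):

    #include <cstdio>

    int main() {
      // Assumed example: x0-x2 and q0 carry fixed arguments, so the remaining
      // argument registers must be spilled for later va_arg access.
      const unsigned NumGPRArgRegs = 8, FirstVariadicGPR = 3;  // x0-x7
      const unsigned NumFPRArgRegs = 8, FirstVariadicFPR = 1;  // q0-q7
      const unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
      const unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
      std::printf("GPR save area: %u bytes, FPR save area: %u bytes\n",
                  GPRSaveSize, FPRSaveSize);  // 40 and 112 bytes here
      return 0;
    }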
- /// LowerCallResult - Lower the result values of a call into the
- /// appropriate copies out of appropriate physical registers.
- SDValue AArch64TargetLowering::LowerCallResult(
- SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
- SDValue ThisVal) const {
- DenseMap<unsigned, SDValue> CopiedRegs;
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign VA = RVLocs[i];
- // Pass 'this' value directly from the argument to return value, to avoid
- // reg unit interference
- if (i == 0 && isThisReturn) {
- assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
- "unexpected return calling convention register assignment");
- InVals.push_back(ThisVal);
- continue;
- }
- // Avoid copying a physreg twice since RegAllocFast is incompetent and only
- // allows one use of a physreg per block.
- SDValue Val = CopiedRegs.lookup(VA.getLocReg());
- if (!Val) {
- Val =
- DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
- Chain = Val.getValue(1);
- InFlag = Val.getValue(2);
- CopiedRegs[VA.getLocReg()] = Val;
- }
- switch (VA.getLocInfo()) {
- default:
- llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full:
- break;
- case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
- break;
- case CCValAssign::AExtUpper:
- Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
- DAG.getConstant(32, DL, VA.getLocVT()));
- [[fallthrough]];
- case CCValAssign::AExt:
- [[fallthrough]];
- case CCValAssign::ZExt:
- Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
- break;
- }
- InVals.push_back(Val);
- }
- return Chain;
- }
- /// Return true if the calling convention is one that we can guarantee TCO for.
- static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
- return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
- CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
- }
- /// Return true if we might ever do TCO for calls with this calling convention.
- static bool mayTailCallThisCC(CallingConv::ID CC) {
- switch (CC) {
- case CallingConv::C:
- case CallingConv::AArch64_SVE_VectorCall:
- case CallingConv::PreserveMost:
- case CallingConv::Swift:
- case CallingConv::SwiftTail:
- case CallingConv::Tail:
- case CallingConv::Fast:
- return true;
- default:
- return false;
- }
- }
- static void analyzeCallOperands(const AArch64TargetLowering &TLI,
- const AArch64Subtarget *Subtarget,
- const TargetLowering::CallLoweringInfo &CLI,
- CCState &CCInfo) {
- const SelectionDAG &DAG = CLI.DAG;
- CallingConv::ID CalleeCC = CLI.CallConv;
- bool IsVarArg = CLI.IsVarArg;
- const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
- unsigned NumArgs = Outs.size();
- for (unsigned i = 0; i != NumArgs; ++i) {
- MVT ArgVT = Outs[i].VT;
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- bool UseVarArgCC = false;
- if (IsVarArg) {
- // On Windows, the fixed arguments in a vararg call are passed in GPRs
- // too, so use the vararg CC to force them to integer registers.
- if (IsCalleeWin64) {
- UseVarArgCC = true;
- } else {
- UseVarArgCC = !Outs[i].IsFixed;
- }
- }
- if (!UseVarArgCC) {
- // Get type of the original argument.
- EVT ActualVT =
- TLI.getValueType(DAG.getDataLayout(), CLI.Args[Outs[i].OrigArgIndex].Ty,
- /*AllowUnknown*/ true);
- MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ArgVT;
- // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
- if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
- ArgVT = MVT::i8;
- else if (ActualMVT == MVT::i16)
- ArgVT = MVT::i16;
- }
- CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC);
- bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
- assert(!Res && "Call operand has unhandled type");
- (void)Res;
- }
- }
- bool AArch64TargetLowering::isEligibleForTailCallOptimization(
- const CallLoweringInfo &CLI) const {
- CallingConv::ID CalleeCC = CLI.CallConv;
- if (!mayTailCallThisCC(CalleeCC))
- return false;
- SDValue Callee = CLI.Callee;
- bool IsVarArg = CLI.IsVarArg;
- const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- const SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- const SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
- const SelectionDAG &DAG = CLI.DAG;
- MachineFunction &MF = DAG.getMachineFunction();
- const Function &CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF.getCallingConv();
- // SME Streaming functions are not eligible for TCO as they may require
- // the streaming mode or ZA to be restored after returning from the call.
- SMEAttrs CallerAttrs(MF.getFunction());
- auto CalleeAttrs = CLI.CB ? SMEAttrs(*CLI.CB) : SMEAttrs(SMEAttrs::Normal);
- if (CallerAttrs.requiresSMChange(CalleeAttrs) ||
- CallerAttrs.requiresLazySave(CalleeAttrs))
- return false;
- // Functions using the C or Fast calling convention that have an SVE signature
- // preserve more registers and should assume the SVE_VectorCall CC.
- // The check for matching callee-saved regs will determine whether it is
- // eligible for TCO.
- if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) &&
- MF.getInfo<AArch64FunctionInfo>()->isSVECC())
- CallerCC = CallingConv::AArch64_SVE_VectorCall;
- bool CCMatch = CallerCC == CalleeCC;
- // When using the Windows calling convention on a non-Windows OS, we want
- // to back up and restore X18 in such functions; we can't do a tail call
- // from those functions.
- if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() &&
- CalleeCC != CallingConv::Win64)
- return false;
- // Byval parameters hand the function a pointer directly into the stack area
- // we want to reuse during a tail call. Working around this *is* possible (see
- // X86) but less efficient and uglier in LowerCall.
- for (Function::const_arg_iterator i = CallerF.arg_begin(),
- e = CallerF.arg_end();
- i != e; ++i) {
- if (i->hasByValAttr())
- return false;
- // On Windows, "inreg" attributes signify non-aggregate indirect returns.
- // In this case, it is necessary to save/restore X0 in the callee. Tail
- // call opt interferes with this. So we disable tail call opt when the
- // caller has an argument with "inreg" attribute.
- // FIXME: Check whether the callee also has an "inreg" argument.
- if (i->hasInRegAttr())
- return false;
- }
- if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt))
- return CCMatch;
- // Externally-defined functions with weak linkage should not be
- // tail-called on AArch64 when the OS does not support dynamic
- // pre-emption of symbols, as the AAELF spec requires normal calls
- // to undefined weak functions to be replaced with a NOP or jump to the
- // next instruction. The behaviour of branch instructions in this
- // situation (as used for tail calls) is implementation-defined, so we
- // cannot rely on the linker replacing the tail call with a return.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- const Triple &TT = getTargetMachine().getTargetTriple();
- if (GV->hasExternalWeakLinkage() &&
- (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
- return false;
- }
- // Now we search for cases where we can use a tail call without changing the
- // ABI. Sibcall is used in some places (particularly gcc) to refer to this
- // concept.
- // I want anyone implementing a new calling convention to think long and hard
- // about this assert.
- assert((!IsVarArg || CalleeCC == CallingConv::C) &&
- "Unexpected variadic calling convention");
- LLVMContext &C = *DAG.getContext();
- // Check that the call results are passed in the same way.
- if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
- CCAssignFnForCall(CalleeCC, IsVarArg),
- CCAssignFnForCall(CallerCC, IsVarArg)))
- return false;
- // The callee has to preserve all registers the caller needs to preserve.
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
- if (!CCMatch) {
- const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
- if (Subtarget->hasCustomCallingConv()) {
- TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
- TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
- }
- if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
- return false;
- }
- // Nothing more to check if the callee is taking no arguments
- if (Outs.empty())
- return true;
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
- analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
- if (IsVarArg && !(CLI.CB && CLI.CB->isMustTailCall())) {
- // For musttail calls, additional checks have already been done, so this
- // check can safely be skipped.
- // At least two cases here: if caller is fastcc then we can't have any
- // memory arguments (we'd be expected to clean up the stack afterwards). If
- // caller is C then we could potentially use its argument area.
- // FIXME: for now we take the most conservative of these in both cases:
- // disallow all variadic memory operands.
- for (const CCValAssign &ArgLoc : ArgLocs)
- if (!ArgLoc.isRegLoc())
- return false;
- }
- const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- // If any of the arguments is passed indirectly, it must be SVE, so the
- // 'getBytesInStackArgArea' is not sufficient to determine whether we need to
- // allocate space on the stack. That is why we explicitly decide here that
- // such a call cannot be a tail call.
- if (llvm::any_of(ArgLocs, [&](CCValAssign &A) {
- assert((A.getLocInfo() != CCValAssign::Indirect ||
- A.getValVT().isScalableVector() ||
- Subtarget->isWindowsArm64EC()) &&
- "Expected value to be scalable");
- return A.getLocInfo() == CCValAssign::Indirect;
- }))
- return false;
- // If the stack arguments for this call do not fit into our own save area then
- // the call cannot be made tail.
- if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
- return false;
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
- return false;
- return true;
- }
- SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
- SelectionDAG &DAG,
- MachineFrameInfo &MFI,
- int ClobberedFI) const {
- SmallVector<SDValue, 8> ArgChains;
- int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
- int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
- // Include the original chain at the beginning of the list. When this is
- // used by target LowerCall hooks, this helps legalize find the
- // CALLSEQ_BEGIN node.
- ArgChains.push_back(Chain);
- // Add a chain value for each stack-argument load that overlaps the
- // clobbered frame object.
- for (SDNode *U : DAG.getEntryNode().getNode()->uses())
- if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
- if (FI->getIndex() < 0) {
- int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
- int64_t InLastByte = InFirstByte;
- InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
- if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
- (FirstByte <= InFirstByte && InFirstByte <= LastByte))
- ArgChains.push_back(SDValue(L, 1));
- }
- // Build a tokenfactor for all the chains.
- return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
- }
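The inner condition keeps any stack-argument load ordered before the store that will clobber its slot; it is a plain closed-interval overlap test. A tiny self-contained sketch of that test with hypothetical offsets:

    #include <cassert>
    #include <cstdint>

    // Closed byte ranges [AFirst, ALast] and [BFirst, BLast] overlap iff
    // either range's start lies inside the other, mirroring the check above.
    bool rangesOverlap(int64_t AFirst, int64_t ALast,
                       int64_t BFirst, int64_t BLast) {
      return (BFirst <= AFirst && AFirst <= BLast) ||
             (AFirst <= BFirst && BFirst <= ALast);
    }

    int main() {
      assert(rangesOverlap(16, 23, 20, 27));   // partial overlap: must chain
      assert(!rangesOverlap(16, 23, 32, 39));  // disjoint slots: independent
      return 0;
    }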
- bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
- bool TailCallOpt) const {
- return (CallCC == CallingConv::Fast && TailCallOpt) ||
- CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail;
- }
- // Check if the value is zero-extended from i1 to i8
- static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
- unsigned SizeInBits = Arg.getValueType().getSizeInBits();
- if (SizeInBits < 8)
- return false;
- APInt RequiredZero(SizeInBits, 0xFE);
- KnownBits Bits = DAG.computeKnownBits(Arg, 4);
- bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
- return ZExtBool;
- }
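The 0xFE mask selects bits [7:1]; the argument already behaves as a zero-extended i1 exactly when all of those bits are known to be zero. A plain-integer sketch of the predicate, assuming the known-zero mask has been computed elsewhere:

    #include <cassert>
    #include <cstdint>

    // KnownZero is a mask of bits proven to be zero in the argument value. An
    // explicit zero-extension is only needed when some bit in [7:1] is not
    // already known to be zero.
    bool isKnownZeroExtendedBool(uint64_t KnownZero) {
      const uint64_t RequiredZero = 0xFE;
      return (KnownZero & RequiredZero) == RequiredZero;
    }

    int main() {
      assert(isKnownZeroExtendedBool(~UINT64_C(1)));  // only bit 0 unknown
      assert(!isKnownZeroExtendedBool(0));            // nothing known: extend
      return 0;
    }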
- SDValue AArch64TargetLowering::changeStreamingMode(
- SelectionDAG &DAG, SDLoc DL, bool Enable,
- SDValue Chain, SDValue InFlag, SDValue PStateSM, bool Entry) const {
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- SDValue RegMask = DAG.getRegisterMask(TRI->getSMStartStopCallPreservedMask());
- SDValue MSROp =
- DAG.getTargetConstant((int32_t)AArch64SVCR::SVCRSM, DL, MVT::i32);
- SDValue ExpectedSMVal =
- DAG.getTargetConstant(Entry ? Enable : !Enable, DL, MVT::i64);
- SmallVector<SDValue> Ops = {Chain, MSROp, PStateSM, ExpectedSMVal, RegMask};
- if (InFlag)
- Ops.push_back(InFlag);
- unsigned Opcode = Enable ? AArch64ISD::SMSTART : AArch64ISD::SMSTOP;
- return DAG.getNode(Opcode, DL, DAG.getVTList(MVT::Other, MVT::Glue), Ops);
- }
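The ExpectedSMVal operand is just `Entry ? Enable : !Enable`: the value the conditional mode change is keyed on differs between the before-call and after-call transitions. A throwaway sketch that prints the small truth table (it mirrors only that expression, nothing else from the lowering):

    #include <cstdio>

    int main() {
      // Mirrors ExpectedSMVal = Entry ? Enable : !Enable from the lowering
      // above, for all four Entry/Enable combinations.
      for (int Entry = 0; Entry <= 1; ++Entry)
        for (int Enable = 0; Enable <= 1; ++Enable)
          std::printf("Entry=%d Enable=%d -> ExpectedSMVal=%d\n", Entry,
                      Enable, Entry ? Enable : !Enable);
      return 0;
    }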
- /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
- /// and add input and output parameter nodes.
- SDValue
- AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- SDLoc &DL = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &IsTailCall = CLI.IsTailCall;
- CallingConv::ID &CallConv = CLI.CallConv;
- bool IsVarArg = CLI.IsVarArg;
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFunction::CallSiteInfo CSInfo;
- bool IsThisReturn = false;
- AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
- bool IsCFICall = CLI.CB && CLI.CB->isIndirectCall() && CLI.CFIType;
- bool IsSibCall = false;
- bool GuardWithBTI = false;
- if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
- !Subtarget->noBTIAtReturnTwice()) {
- GuardWithBTI = FuncInfo->branchTargetEnforcement();
- }
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
- if (IsVarArg) {
- unsigned NumArgs = Outs.size();
- for (unsigned i = 0; i != NumArgs; ++i) {
- if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
- report_fatal_error("Passing SVE types to variadic functions is "
- "currently not supported");
- }
- }
- analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
- CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
- *DAG.getContext());
- RetCCInfo.AnalyzeCallResult(Ins, RetCC);
- // Check callee args/returns for SVE registers and set calling convention
- // accordingly.
- if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
- auto HasSVERegLoc = [](CCValAssign &Loc) {
- if (!Loc.isRegLoc())
- return false;
- return AArch64::ZPRRegClass.contains(Loc.getLocReg()) ||
- AArch64::PPRRegClass.contains(Loc.getLocReg());
- };
- if (any_of(RVLocs, HasSVERegLoc) || any_of(ArgLocs, HasSVERegLoc))
- CallConv = CallingConv::AArch64_SVE_VectorCall;
- }
- if (IsTailCall) {
- // Check if it's really possible to do a tail call.
- IsTailCall = isEligibleForTailCallOptimization(CLI);
- // A sibling call is one where we're under the usual C ABI and not planning
- // to change that but can still do a tail call:
- if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail &&
- CallConv != CallingConv::SwiftTail)
- IsSibCall = true;
- if (IsTailCall)
- ++NumTailCalls;
- }
- if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
- report_fatal_error("failed to perform tail call elimination on a call "
- "site marked musttail");
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
- if (IsSibCall) {
- // Since we're not changing the ABI to make this a tail call, the memory
- // operands are already available in the caller's incoming argument space.
- NumBytes = 0;
- }
- // FPDiff is the byte offset of the call's argument area from the callee's.
- // Stores to callee stack arguments will be placed in FixedStackSlots offset
- // by this amount for a tail call. In a sibling call it must be 0 because the
- // caller will deallocate the entire stack and the callee still expects its
- // arguments to begin at SP+0. Completely unused for non-tail calls.
- int FPDiff = 0;
- if (IsTailCall && !IsSibCall) {
- unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
- // Since callee will pop argument stack as a tail call, we must keep the
- // popped size 16-byte aligned.
- NumBytes = alignTo(NumBytes, 16);
- // FPDiff will be negative if this tail call requires more space than we
- // would automatically have in our incoming argument space. Positive if we
- // can actually shrink the stack.
- FPDiff = NumReusableBytes - NumBytes;
- // Update the required reserved area if this is the tail call requiring the
- // most argument stack space.
- if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
- FuncInfo->setTailCallReservedStack(-FPDiff);
- // The stack pointer must be 16-byte aligned at all times it's used for a
- // memory operation, which in practice means at *all* times and in
- // particular across call boundaries. Therefore our own arguments started at
- // a 16-byte aligned SP and the delta applied for the tail call should
- // satisfy the same constraint.
- assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
- }
- // Determine whether we need any streaming mode changes.
- SMEAttrs CalleeAttrs, CallerAttrs(MF.getFunction());
- if (CLI.CB)
- CalleeAttrs = SMEAttrs(*CLI.CB);
- else if (std::optional<SMEAttrs> Attrs =
- getCalleeAttrsFromExternalFunction(CLI.Callee))
- CalleeAttrs = *Attrs;
- bool RequiresLazySave = CallerAttrs.requiresLazySave(CalleeAttrs);
- MachineFrameInfo &MFI = MF.getFrameInfo();
- if (RequiresLazySave) {
- // Set up a lazy save mechanism by storing the runtime live slices
- // (worst-case N*N) to the TPIDR2 stack object.
- SDValue N = DAG.getNode(AArch64ISD::RDSVL, DL, MVT::i64,
- DAG.getConstant(1, DL, MVT::i32));
- SDValue NN = DAG.getNode(ISD::MUL, DL, MVT::i64, N, N);
- unsigned TPIDR2Obj = FuncInfo->getLazySaveTPIDR2Obj();
- MachinePointerInfo MPI = MachinePointerInfo::getStack(MF, TPIDR2Obj);
- SDValue TPIDR2ObjAddr = DAG.getFrameIndex(TPIDR2Obj,
- DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
- SDValue BufferPtrAddr =
- DAG.getNode(ISD::ADD, DL, TPIDR2ObjAddr.getValueType(), TPIDR2ObjAddr,
- DAG.getConstant(8, DL, TPIDR2ObjAddr.getValueType()));
- Chain = DAG.getTruncStore(Chain, DL, NN, BufferPtrAddr, MPI, MVT::i16);
- Chain = DAG.getNode(
- ISD::INTRINSIC_VOID, DL, MVT::Other, Chain,
- DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
- TPIDR2ObjAddr);
- }
- SDValue PStateSM;
- std::optional<bool> RequiresSMChange =
- CallerAttrs.requiresSMChange(CalleeAttrs);
- if (RequiresSMChange)
- PStateSM = getPStateSM(DAG, Chain, CallerAttrs, DL, MVT::i64);
- // Adjust the stack pointer for the new arguments...
- // These operations are automatically eliminated by the prolog/epilog pass
- if (!IsSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL);
- SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
- getPointerTy(DAG.getDataLayout()));
- SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
- SmallSet<unsigned, 8> RegsUsed;
- SmallVector<SDValue, 8> MemOpChains;
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
- const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
- for (const auto &F : Forwards) {
- SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
- RegsToPass.emplace_back(F.PReg, Val);
- }
- }
- // Walk the register/memloc assignments, inserting copies/loads.
- unsigned ExtraArgLocs = 0;
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
- SDValue Arg = OutVals[i];
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- default:
- llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full:
- break;
- case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
- break;
- case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExt:
- if (Outs[i].ArgVT == MVT::i1) {
- // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
- //
- // Check if we actually have to do this, because the value may
- // already be zero-extended.
- //
- // We cannot just emit a (zext i8 (trunc (assert-zext i8)))
- // and rely on DAGCombiner to fold this, because the following
- // (anyext i32) is combined with (zext i8) in DAG.getNode:
- //
- // (ext (zext x)) -> (zext x)
- //
- // This will give us (zext i32), which we cannot remove, so
- // try to check this beforehand.
- if (!checkZExtBool(Arg, DAG)) {
- Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
- Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
- }
- }
- Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExtUpper:
- assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
- Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
- Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
- DAG.getConstant(32, DL, VA.getLocVT()));
- break;
- case CCValAssign::BCvt:
- Arg = DAG.getBitcast(VA.getLocVT(), Arg);
- break;
- case CCValAssign::Trunc:
- Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
- break;
- case CCValAssign::FPExt:
- Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
- break;
- case CCValAssign::Indirect:
- bool isScalable = VA.getValVT().isScalableVector();
- assert((isScalable || Subtarget->isWindowsArm64EC()) &&
- "Indirect arguments should be scalable on most subtargets");
- uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinValue();
- uint64_t PartSize = StoreSize;
- unsigned NumParts = 1;
- if (Outs[i].Flags.isInConsecutiveRegs()) {
- assert(!Outs[i].Flags.isInConsecutiveRegsLast());
- while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
- ++NumParts;
- StoreSize *= NumParts;
- }
- Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
- Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
- int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
- if (isScalable)
- MFI.setStackID(FI, TargetStackID::ScalableVector);
- MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
- SDValue Ptr = DAG.getFrameIndex(
- FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
- SDValue SpillSlot = Ptr;
- // Ensure we generate all stores for each tuple part, whilst updating the
- // pointer after each store correctly using vscale.
- while (NumParts) {
- Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
- NumParts--;
- if (NumParts > 0) {
- SDValue BytesIncrement;
- if (isScalable) {
- BytesIncrement = DAG.getVScale(
- DL, Ptr.getValueType(),
- APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize));
- } else {
- BytesIncrement = DAG.getConstant(
- APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize), DL,
- Ptr.getValueType());
- }
- SDNodeFlags Flags;
- Flags.setNoUnsignedWrap(true);
- MPI = MachinePointerInfo(MPI.getAddrSpace());
- Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
- BytesIncrement, Flags);
- ExtraArgLocs++;
- i++;
- }
- }
- Arg = SpillSlot;
- break;
- }
- if (VA.isRegLoc()) {
- if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
- Outs[0].VT == MVT::i64) {
- assert(VA.getLocVT() == MVT::i64 &&
- "unexpected calling convention register assignment");
- assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
- "unexpected use of 'returned'");
- IsThisReturn = true;
- }
- if (RegsUsed.count(VA.getLocReg())) {
- // If this register has already been used then we're trying to pack
- // parts of an [N x i32] into an X-register. The extension type will
- // take care of putting the two halves in the right place but we have to
- // combine them.
- SDValue &Bits =
- llvm::find_if(RegsToPass,
- [=](const std::pair<unsigned, SDValue> &Elt) {
- return Elt.first == VA.getLocReg();
- })
- ->second;
- Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
- // Call site info is used for the function's parameter entry value
- // tracking. For now we track only simple cases where the parameter
- // is transferred through a whole register.
- llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) {
- return ArgReg.Reg == VA.getLocReg();
- });
- } else {
- // Add an extra level of indirection for streaming mode changes by
- // using a pseudo copy node that cannot be rematerialised between a
- // smstart/smstop and the call by the simple register coalescer.
- if (RequiresSMChange && isa<FrameIndexSDNode>(Arg))
- Arg = DAG.getNode(AArch64ISD::OBSCURE_COPY, DL, MVT::i64, Arg);
- RegsToPass.emplace_back(VA.getLocReg(), Arg);
- RegsUsed.insert(VA.getLocReg());
- const TargetOptions &Options = DAG.getTarget().Options;
- if (Options.EmitCallSiteInfo)
- CSInfo.emplace_back(VA.getLocReg(), i);
- }
- } else {
- assert(VA.isMemLoc());
- SDValue DstAddr;
- MachinePointerInfo DstInfo;
- // FIXME: This works on big-endian for composite byvals, which are the
- // common case. It should also work for fundamental types.
- uint32_t BEAlign = 0;
- unsigned OpSize;
- if (VA.getLocInfo() == CCValAssign::Indirect ||
- VA.getLocInfo() == CCValAssign::Trunc)
- OpSize = VA.getLocVT().getFixedSizeInBits();
- else
- OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
- : VA.getValVT().getSizeInBits();
- OpSize = (OpSize + 7) / 8;
- if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
- !Flags.isInConsecutiveRegs()) {
- if (OpSize < 8)
- BEAlign = 8 - OpSize;
- }
- unsigned LocMemOffset = VA.getLocMemOffset();
- int32_t Offset = LocMemOffset + BEAlign;
- SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
- PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
- if (IsTailCall) {
- Offset = Offset + FPDiff;
- int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
- DstAddr = DAG.getFrameIndex(FI, PtrVT);
- DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
- // Make sure any stack arguments overlapping with where we're storing
- // are loaded before this eventual operation. Otherwise they'll be
- // clobbered.
- Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
- } else {
- SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
- DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
- DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
- }
- if (Outs[i].Flags.isByVal()) {
- SDValue SizeNode =
- DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
- SDValue Cpy = DAG.getMemcpy(
- Chain, DL, DstAddr, Arg, SizeNode,
- Outs[i].Flags.getNonZeroByValAlign(),
- /*isVol = */ false, /*AlwaysInline = */ false,
- /*isTailCall = */ false, DstInfo, MachinePointerInfo());
- MemOpChains.push_back(Cpy);
- } else {
- // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
- // promoted to a legal register type i32, we should truncate Arg back to
- // i1/i8/i16.
- if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
- VA.getValVT() == MVT::i16)
- Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
- SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
- MemOpChains.push_back(Store);
- }
- }
- }
- if (IsVarArg && Subtarget->isWindowsArm64EC()) {
- // For vararg calls, the Arm64EC ABI requires values in x4 and x5
- // describing the argument list. x4 contains the address of the
- // first stack parameter. x5 contains the size in bytes of all parameters
- // passed on the stack.
- RegsToPass.emplace_back(AArch64::X4, StackPtr);
- RegsToPass.emplace_back(AArch64::X5,
- DAG.getConstant(NumBytes, DL, MVT::i64));
- }
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
- SDValue InFlag;
- if (RequiresSMChange) {
- SDValue NewChain = changeStreamingMode(DAG, DL, *RequiresSMChange, Chain,
- InFlag, PStateSM, true);
- Chain = NewChain.getValue(0);
- InFlag = NewChain.getValue(1);
- }
- // Build a sequence of copy-to-reg nodes chained together with token chain
- // and flag operands which copy the outgoing args into the appropriate regs.
- for (auto &RegToPass : RegsToPass) {
- Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
- RegToPass.second, InFlag);
- InFlag = Chain.getValue(1);
- }
- // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
- // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
- // node so that legalize doesn't hack it.
- if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- auto GV = G->getGlobal();
- unsigned OpFlags =
- Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine());
- if (OpFlags & AArch64II::MO_GOT) {
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
- Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
- } else {
- const GlobalValue *GV = G->getGlobal();
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
- }
- } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- Subtarget->isTargetMachO()) {
- const char *Sym = S->getSymbol();
- Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
- Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
- } else {
- const char *Sym = S->getSymbol();
- Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
- }
- }
- // We don't usually want to end the call-sequence here because we would tidy
- // the frame up *after* the call, however in the ABI-changing tail-call case
- // we've carefully laid out the parameters so that when sp is reset they'll be
- // in the correct location.
- if (IsTailCall && !IsSibCall) {
- Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InFlag, DL);
- InFlag = Chain.getValue(1);
- }
- std::vector<SDValue> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
- if (IsTailCall) {
- // Each tail call may have to adjust the stack by a different amount, so
- // this information must travel along with the operation for eventual
- // consumption by emitEpilogue.
- Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
- }
- // Add argument registers to the end of the list so that they are known live
- // into the call.
- for (auto &RegToPass : RegsToPass)
- Ops.push_back(DAG.getRegister(RegToPass.first,
- RegToPass.second.getValueType()));
- // Add a register mask operand representing the call-preserved registers.
- const uint32_t *Mask;
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- if (IsThisReturn) {
- // For 'this' returns, use the X0-preserving mask if applicable
- Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
- if (!Mask) {
- IsThisReturn = false;
- Mask = TRI->getCallPreservedMask(MF, CallConv);
- }
- } else
- Mask = TRI->getCallPreservedMask(MF, CallConv);
- if (Subtarget->hasCustomCallingConv())
- TRI->UpdateCustomCallPreservedMask(MF, &Mask);
- if (TRI->isAnyArgRegReserved(MF))
- TRI->emitReservedArgRegCallError(MF);
- assert(Mask && "Missing call preserved mask for calling convention");
- Ops.push_back(DAG.getRegisterMask(Mask));
- if (InFlag.getNode())
- Ops.push_back(InFlag);
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- // If we're doing a tail call, use a TC_RETURN here rather than an
- // actual call instruction.
- if (IsTailCall) {
- MF.getFrameInfo().setHasTailCall();
- SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
- if (IsCFICall)
- Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
- DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
- return Ret;
- }
- unsigned CallOpc = AArch64ISD::CALL;
- // Calls with operand bundle "clang.arc.attachedcall" are special. They should
- // be expanded to the call, directly followed by a special marker sequence and
- // a call to an ObjC library function. Use CALL_RVMARKER to do that.
- if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
- assert(!IsTailCall &&
- "tail calls cannot be marked with clang.arc.attachedcall");
- CallOpc = AArch64ISD::CALL_RVMARKER;
- // Add a target global address for the retainRV/claimRV runtime function
- // just before the call target.
- Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
- auto GA = DAG.getTargetGlobalAddress(ARCFn, DL, PtrVT);
- Ops.insert(Ops.begin() + 1, GA);
- } else if (GuardWithBTI)
- CallOpc = AArch64ISD::CALL_BTI;
- // Returns a chain and a flag for retval copy to use.
- Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
- if (IsCFICall)
- Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
- DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
- InFlag = Chain.getValue(1);
- DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
- uint64_t CalleePopBytes =
- DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
- Chain = DAG.getCALLSEQ_END(Chain, NumBytes, CalleePopBytes, InFlag, DL);
- InFlag = Chain.getValue(1);
- // Handle result values, copying them out of physregs into vregs that we
- // return.
- SDValue Result = LowerCallResult(Chain, InFlag, CallConv, IsVarArg, RVLocs,
- DL, DAG, InVals, IsThisReturn,
- IsThisReturn ? OutVals[0] : SDValue());
- if (!Ins.empty())
- InFlag = Result.getValue(Result->getNumValues() - 1);
- if (RequiresSMChange) {
- assert(PStateSM && "Expected a PStateSM to be set");
- Result = changeStreamingMode(DAG, DL, !*RequiresSMChange, Result, InFlag,
- PStateSM, false);
- }
- if (RequiresLazySave) {
- // Unconditionally resume ZA.
- Result = DAG.getNode(
- AArch64ISD::SMSTART, DL, MVT::Other, Result,
- DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i64), DAG.getConstant(1, DL, MVT::i64));
- // Conditionally restore the lazy save using a pseudo node.
- unsigned FI = FuncInfo->getLazySaveTPIDR2Obj();
- SDValue RegMask = DAG.getRegisterMask(
- TRI->SMEABISupportRoutinesCallPreservedMaskFromX0());
- SDValue RestoreRoutine = DAG.getTargetExternalSymbol(
- "__arm_tpidr2_restore", getPointerTy(DAG.getDataLayout()));
- SDValue TPIDR2_EL0 = DAG.getNode(
- ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Result,
- DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32));
- // Copy the address of the TPIDR2 block into X0 before 'calling' the
- // RESTORE_ZA pseudo.
- SDValue Glue;
- SDValue TPIDR2Block = DAG.getFrameIndex(
- FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
- Result = DAG.getCopyToReg(Result, DL, AArch64::X0, TPIDR2Block, Glue);
- Result = DAG.getNode(AArch64ISD::RESTORE_ZA, DL, MVT::Other,
- {Result, TPIDR2_EL0,
- DAG.getRegister(AArch64::X0, MVT::i64),
- RestoreRoutine,
- RegMask,
- Result.getValue(1)});
- // Finally reset the TPIDR2_EL0 register to 0.
- Result = DAG.getNode(
- ISD::INTRINSIC_VOID, DL, MVT::Other, Result,
- DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i64));
- }
- if (RequiresSMChange || RequiresLazySave) {
- for (unsigned I = 0; I < InVals.size(); ++I) {
- // The smstart/smstop is chained as part of the call, but when the
- // resulting chain is discarded (which happens when the call is not part
- // of a chain, e.g. a call to @llvm.cos()), we need to ensure the
- // smstart/smstop is chained to the result value. We can do that by doing
- // a vreg -> vreg copy.
- Register Reg = MF.getRegInfo().createVirtualRegister(
- getRegClassFor(InVals[I].getValueType().getSimpleVT()));
- SDValue X = DAG.getCopyToReg(Result, DL, Reg, InVals[I]);
- InVals[I] = DAG.getCopyFromReg(X, DL, Reg,
- InVals[I].getValueType());
- }
- }
- return Result;
- }
- bool AArch64TargetLowering::CanLowerReturn(
- CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
- CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
- return CCInfo.CheckReturn(Outs, RetCC);
- }
- SDValue
- AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SDLoc &DL, SelectionDAG &DAG) const {
- auto &MF = DAG.getMachineFunction();
- auto *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
- CCInfo.AnalyzeReturn(Outs, RetCC);
- // Copy the result values into the output registers.
- SDValue Flag;
- SmallVector<std::pair<unsigned, SDValue>, 4> RetVals;
- SmallSet<unsigned, 4> RegsUsed;
- for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
- ++i, ++realRVLocIdx) {
- CCValAssign &VA = RVLocs[i];
- assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue Arg = OutVals[realRVLocIdx];
- switch (VA.getLocInfo()) {
- default:
- llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full:
- if (Outs[i].ArgVT == MVT::i1) {
- // AAPCS requires i1 to be zero-extended to i8 by the producer of the
- // value. This is strictly redundant on Darwin (which uses "zeroext
- // i1"), but will be optimised out before ISel.
- Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
- Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
- }
- break;
- case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExt:
- case CCValAssign::ZExt:
- Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
- break;
- case CCValAssign::AExtUpper:
- assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
- Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
- Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
- DAG.getConstant(32, DL, VA.getLocVT()));
- break;
- }
- if (RegsUsed.count(VA.getLocReg())) {
- SDValue &Bits =
- llvm::find_if(RetVals, [=](const std::pair<unsigned, SDValue> &Elt) {
- return Elt.first == VA.getLocReg();
- })->second;
- Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
- } else {
- RetVals.emplace_back(VA.getLocReg(), Arg);
- RegsUsed.insert(VA.getLocReg());
- }
- }
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- // Emit SMSTOP before returning from a locally streaming function
- SMEAttrs FuncAttrs(MF.getFunction());
- if (FuncAttrs.hasStreamingBody() && !FuncAttrs.hasStreamingInterface()) {
- Chain = DAG.getNode(
- AArch64ISD::SMSTOP, DL, DAG.getVTList(MVT::Other, MVT::Glue), Chain,
- DAG.getTargetConstant((int32_t)AArch64SVCR::SVCRSM, DL, MVT::i32),
- DAG.getConstant(1, DL, MVT::i64), DAG.getConstant(0, DL, MVT::i64),
- DAG.getRegisterMask(TRI->getSMStartStopCallPreservedMask()));
- Flag = Chain.getValue(1);
- }
- SmallVector<SDValue, 4> RetOps(1, Chain);
- for (auto &RetVal : RetVals) {
- Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag);
- Flag = Chain.getValue(1);
- RetOps.push_back(
- DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
- }
- // Windows AArch64 ABIs require that for returning structs by value we copy
- // the sret argument into X0 for the return.
- // We saved the argument into a virtual register in the entry block,
- // so now we copy the value out and into X0.
- if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
- SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
- getPointerTy(MF.getDataLayout()));
- unsigned RetValReg = AArch64::X0;
- Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
- Flag = Chain.getValue(1);
- RetOps.push_back(
- DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
- }
- const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&MF);
- if (I) {
- for (; *I; ++I) {
- if (AArch64::GPR64RegClass.contains(*I))
- RetOps.push_back(DAG.getRegister(*I, MVT::i64));
- else if (AArch64::FPR64RegClass.contains(*I))
- RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
- else
- llvm_unreachable("Unexpected register class in CSRsViaCopy!");
- }
- }
- RetOps[0] = Chain; // Update chain.
- // Add the flag if we have it.
- if (Flag.getNode())
- RetOps.push_back(Flag);
- return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
- }
- //===----------------------------------------------------------------------===//
- // Other Lowering Code
- //===----------------------------------------------------------------------===//
- SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
- SelectionDAG &DAG,
- unsigned Flag) const {
- return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
- N->getOffset(), Flag);
- }
- SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
- SelectionDAG &DAG,
- unsigned Flag) const {
- return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
- }
- SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
- SelectionDAG &DAG,
- unsigned Flag) const {
- return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
- N->getOffset(), Flag);
- }
- SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
- SelectionDAG &DAG,
- unsigned Flag) const {
- return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
- }
- // (loadGOT sym)
- template <class NodeTy>
- SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
- unsigned Flags) const {
- LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
- SDLoc DL(N);
- EVT Ty = getPointerTy(DAG.getDataLayout());
- SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
- // FIXME: Once remat is capable of dealing with instructions with register
- // operands, expand this into two nodes instead of using a wrapper node.
- return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
- }
- // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
- template <class NodeTy>
- SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
- unsigned Flags) const {
- LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
- SDLoc DL(N);
- EVT Ty = getPointerTy(DAG.getDataLayout());
- const unsigned char MO_NC = AArch64II::MO_NC;
- return DAG.getNode(
- AArch64ISD::WrapperLarge, DL, Ty,
- getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
- getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
- getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
- getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
- }
- // (addlow (adrp %hi(sym)) %lo(sym))
- template <class NodeTy>
- SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
- unsigned Flags) const {
- LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
- SDLoc DL(N);
- EVT Ty = getPointerTy(DAG.getDataLayout());
- SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
- SDValue Lo = getTargetNode(N, Ty, DAG,
- AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
- return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
- }
- // (adr sym)
- template <class NodeTy>
- SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
- unsigned Flags) const {
- LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
- SDLoc DL(N);
- EVT Ty = getPointerTy(DAG.getDataLayout());
- SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
- return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
- }
- SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
- SelectionDAG &DAG) const {
- GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
- const GlobalValue *GV = GN->getGlobal();
- unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
- if (OpFlags != AArch64II::MO_NO_FLAG)
- assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
- "unexpected offset in global node");
- // This also catches the large code model case for Darwin, and tiny code
- // model with got relocations.
- if ((OpFlags & AArch64II::MO_GOT) != 0) {
- return getGOT(GN, DAG, OpFlags);
- }
- SDValue Result;
- if (getTargetMachine().getCodeModel() == CodeModel::Large) {
- Result = getAddrLarge(GN, DAG, OpFlags);
- } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
- Result = getAddrTiny(GN, DAG, OpFlags);
- } else {
- Result = getAddr(GN, DAG, OpFlags);
- }
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(GN);
- if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_DLLIMPORTAUX |
- AArch64II::MO_COFFSTUB))
- Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()));
- return Result;
- }
- /// Convert a TLS address reference into the correct sequence of loads
- /// and calls to compute the variable's address (for Darwin, currently) and
- /// return an SDValue containing the final node.
- /// Darwin only has one TLS scheme which must be capable of dealing with the
- /// fully general situation, in the worst case. This means:
- /// + "extern __thread" declaration.
- /// + Defined in a possibly unknown dynamic library.
- ///
- /// The general system is that each __thread variable has a [3 x i64] descriptor
- /// which contains information used by the runtime to calculate the address. The
- /// only part of this the compiler needs to know about is the first xword, which
- /// contains a function pointer that must be called with the address of the
- /// entire descriptor in "x0".
- ///
- /// Since this descriptor may be in a different unit, in general even the
- /// descriptor must be accessed via an indirect load. The "ideal" code sequence
- /// is:
- /// adrp x0, _var@TLVPPAGE
- /// ldr x0, [x0, _var@TLVPPAGEOFF] ; x0 now contains address of descriptor
- /// ldr x1, [x0] ; x1 contains 1st entry of descriptor,
- /// ; the function pointer
- /// blr x1 ; Uses descriptor address in x0
- /// ; Address of _var is now in x0.
- ///
- /// If the address of _var's descriptor *is* known to the linker, then it can
- /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
- /// a slight efficiency gain.
- SDValue
- AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Subtarget->isTargetDarwin() &&
- "This function expects a Darwin target");
- SDLoc DL(Op);
- MVT PtrVT = getPointerTy(DAG.getDataLayout());
- MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout());
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- SDValue TLVPAddr =
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
- SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);
- // The first entry in the descriptor is a function pointer that we must call
- // to obtain the address of the variable.
- SDValue Chain = DAG.getEntryNode();
- SDValue FuncTLVGet = DAG.getLoad(
- PtrMemVT, DL, Chain, DescAddr,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- Align(PtrMemVT.getSizeInBits() / 8),
- MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
- Chain = FuncTLVGet.getValue(1);
- // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer.
- FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT);
- MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
- MFI.setAdjustsStack(true);
- // TLS calls preserve all registers except those that absolutely must be
- // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
- // silly).
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- const uint32_t *Mask = TRI->getTLSCallPreservedMask();
- if (Subtarget->hasCustomCallingConv())
- TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
- // Finally, we can make the call. This is just a degenerate version of a
- // normal AArch64 call node: x0 takes the address of the descriptor, and
- // returns the address of the variable in this thread.
- Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
- Chain =
- DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
- Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
- DAG.getRegisterMask(Mask), Chain.getValue(1));
- return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
- }
- /// Convert a thread-local variable reference into a sequence of instructions to
- /// compute the variable's address for the local exec TLS model of ELF targets.
- /// The sequence depends on the maximum TLS area size.
- SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
- SDValue ThreadBase,
- const SDLoc &DL,
- SelectionDAG &DAG) const {
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDValue TPOff, Addr;
- switch (DAG.getTarget().Options.TLSSize) {
- default:
- llvm_unreachable("Unexpected TLS size");
- case 12: {
- // mrs x0, TPIDR_EL0
- // add x0, x0, :tprel_lo12:a
- SDValue Var = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
- return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
- Var,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- }
- case 24: {
- // mrs x0, TPIDR_EL0
- // add x0, x0, :tprel_hi12:a
- // add x0, x0, :tprel_lo12_nc:a
- SDValue HiVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
- SDValue LoVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0,
- AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
- HiVar,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr,
- LoVar,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- }
- case 32: {
- // mrs x1, TPIDR_EL0
- // movz x0, #:tprel_g1:a
- // movk x0, #:tprel_g0_nc:a
- // add x0, x1, x0
- SDValue HiVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
- SDValue LoVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0,
- AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
- DAG.getTargetConstant(16, DL, MVT::i32)),
- 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
- }
- case 48: {
- // mrs x1, TPIDR_EL0
- // movz x0, #:tprel_g2:a
- // movk x0, #:tprel_g1_nc:a
- // movk x0, #:tprel_g0_nc:a
- // add x0, x1, x0
- SDValue HiVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2);
- SDValue MiVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0,
- AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC);
- SDValue LoVar = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0,
- AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
- DAG.getTargetConstant(32, DL, MVT::i32)),
- 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar,
- DAG.getTargetConstant(16, DL, MVT::i32)),
- 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
- }
- }
- }
- /// When accessing thread-local variables under either the general-dynamic or
- /// local-dynamic system, we make a "TLS-descriptor" call. The variable will
- /// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
- /// is a function pointer to carry out the resolution.
- ///
- /// The sequence is:
- /// adrp x0, :tlsdesc:var
- /// ldr x1, [x0, #:tlsdesc_lo12:var]
- /// add x0, x0, #:tlsdesc_lo12:var
- /// .tlsdesccall var
- /// blr x1
- /// (TPIDR_EL0 offset now in x0)
- ///
- /// The above sequence must be produced unscheduled so that the linker can
- /// optimize/relax it. A pseudo-instruction (TLSDESC_CALLSEQ) therefore
- /// represents the whole sequence and is expanded very late in the compilation
- /// flow, ensuring the sequence is emitted exactly as above.
- SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
- const SDLoc &DL,
- SelectionDAG &DAG) const {
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDValue Chain = DAG.getEntryNode();
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain =
- DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
- SDValue Glue = Chain.getValue(1);
- return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
- }
- SDValue
- AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Subtarget->isTargetELF() && "This function expects an ELF target");
- const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
- if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
- if (Model == TLSModel::LocalDynamic)
- Model = TLSModel::GeneralDynamic;
- }
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- Model != TLSModel::LocalExec)
- report_fatal_error("ELF TLS only supported in small memory model or "
- "in local exec TLS model");
- // Different choices can be made for the maximum size of the TLS area for a
- // module. For the small address model, the default TLS size is 16MiB and the
- // maximum TLS size is 4GiB.
- // FIXME: add tiny and large code model support for TLS access models other
- // than local exec. We currently generate the same code as small for tiny,
- // which may be larger than needed.
- SDValue TPOff;
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
- const GlobalValue *GV = GA->getGlobal();
- SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
- if (Model == TLSModel::LocalExec) {
- return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG);
- } else if (Model == TLSModel::InitialExec) {
- TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
- TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
- } else if (Model == TLSModel::LocalDynamic) {
- // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
- // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
- // the beginning of the module's TLS region, followed by a DTPREL offset
- // calculation.
- // These accesses will need deduplicating if there's more than one.
- AArch64FunctionInfo *MFI =
- DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
- MFI->incNumLocalDynamicTLSAccesses();
- // The call needs a relocation too for linker relaxation. It doesn't make
- // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
- // the address.
- SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
- AArch64II::MO_TLS);
- // Now we can calculate the offset from TPIDR_EL0 to this module's
- // thread-local area.
- TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
- // Now use :dtprel_whatever: operations to calculate this variable's offset
- // in its thread-storage area.
- SDValue HiVar = DAG.getTargetGlobalAddress(
- GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
- SDValue LoVar = DAG.getTargetGlobalAddress(
- GV, DL, MVT::i64, 0,
- AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- } else if (Model == TLSModel::GeneralDynamic) {
- // The call needs a relocation too for linker relaxation. It doesn't make
- // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
- // the address.
- SDValue SymAddr =
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
- // Finally we can make a call to calculate the offset from tpidr_el0.
- TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
- } else
- llvm_unreachable("Unsupported ELF TLS access model");
- return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
- }
- SDValue
- AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
- SDValue Chain = DAG.getEntryNode();
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
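- // Roughly, the address computed below is:
- //   TLSArray = *(TEB + 0x58)           ; TEB is in x18
- //   TLSBase  = *(TLSArray + _tls_index * 8)
- //   Addr     = TLSBase + hi12/lo12 offset of GV within the .tls section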
- SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);
- // Load the ThreadLocalStoragePointer from the TEB
- // A pointer to the TLS array is located at offset 0x58 from the TEB.
- SDValue TLSArray =
- DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
- TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
- Chain = TLSArray.getValue(1);
- // Load the TLS index from the C runtime;
- // This does the same as getAddr(), but without having a GlobalAddressSDNode.
- // This also does the same as LOADgot, but using a generic i32 load,
- // while LOADgot only loads i64.
- SDValue TLSIndexHi =
- DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
- SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
- "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
- SDValue TLSIndex =
- DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
- TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
- Chain = TLSIndex.getValue(1);
- // The pointer to the thread's TLS data area is stored at an offset of
- // (TLS index * 8) into the TLS array.
- TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
- SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
- DAG.getConstant(3, DL, PtrVT));
- SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
- DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
- MachinePointerInfo());
- Chain = TLS.getValue(1);
- const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- const GlobalValue *GV = GA->getGlobal();
- SDValue TGAHi = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
- SDValue TGALo = DAG.getTargetGlobalAddress(
- GV, DL, PtrVT, 0,
- AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- // Add the offset from the start of the .tls section (section base).
- SDValue Addr =
- SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
- DAG.getTargetConstant(0, DL, MVT::i32)),
- 0);
- Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
- return Addr;
- }
- SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) const {
- const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- if (DAG.getTarget().useEmulatedTLS())
- return LowerToTLSEmulatedModel(GA, DAG);
- if (Subtarget->isTargetDarwin())
- return LowerDarwinGlobalTLSAddress(Op, DAG);
- if (Subtarget->isTargetELF())
- return LowerELFGlobalTLSAddress(Op, DAG);
- if (Subtarget->isTargetWindows())
- return LowerWindowsGlobalTLSAddress(Op, DAG);
- llvm_unreachable("Unexpected platform trying to use TLS");
- }
- // Looks through \param Val to determine the bit that can be used to
- // check the sign of the value. It returns the unextended value and
- // the sign bit position.
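- // For example, (sign_extend_inreg x, i8) yields {x, 7}, (sign_extend (i32 x))
- // yields {x, 31}, and an unextended i64 value yields {Val, 63}.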
- std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {
- if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG)
- return {Val.getOperand(0),
- cast<VTSDNode>(Val.getOperand(1))->getVT().getFixedSizeInBits() -
- 1};
- if (Val.getOpcode() == ISD::SIGN_EXTEND)
- return {Val.getOperand(0),
- Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1};
- return {Val, Val.getValueSizeInBits() - 1};
- }
- SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- SDValue Chain = Op.getOperand(0);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
- SDValue LHS = Op.getOperand(2);
- SDValue RHS = Op.getOperand(3);
- SDValue Dest = Op.getOperand(4);
- SDLoc dl(Op);
- MachineFunction &MF = DAG.getMachineFunction();
- // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
- // will not be produced, as they are conditional branch instructions that do
- // not set flags.
- bool ProduceNonFlagSettingCondBr =
- !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
- // Handle f128 first, since lowering it will result in comparing the return
- // value of a libcall against zero, which is just what the rest of LowerBR_CC
- // is expecting to deal with.
- if (LHS.getValueType() == MVT::f128) {
- softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
- // If softenSetCCOperands returned a scalar, we need to compare the result
- // against zero to select between true and false values.
- if (!RHS.getNode()) {
- RHS = DAG.getConstant(0, dl, LHS.getValueType());
- CC = ISD::SETNE;
- }
- }
- // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
- // instruction.
- if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- // Only lower legal XALUO ops.
- if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
- return SDValue();
- // The actual operation with overflow check.
- AArch64CC::CondCode OFCC;
- SDValue Value, Overflow;
- std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);
- if (CC == ISD::SETNE)
- OFCC = getInvertedCondCode(OFCC);
- SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);
- return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
- Overflow);
- }
- if (LHS.getValueType().isInteger()) {
- assert((LHS.getValueType() == RHS.getValueType()) &&
- (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
- // If the RHS of the comparison is zero, we can potentially fold this
- // to a specialized branch.
- const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
- if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
- if (CC == ISD::SETEQ) {
- // See if we can use a TBZ to fold in an AND as well.
- // TBZ has a smaller branch displacement than CBZ. If the offset is
- // out of bounds, a late MI-layer pass rewrites branches.
- // 403.gcc is an example that hits this case.
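- // For example, "br ((and x, 0x10) == 0), L" becomes "tbz x, #4, L".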
- if (LHS.getOpcode() == ISD::AND &&
- isa<ConstantSDNode>(LHS.getOperand(1)) &&
- isPowerOf2_64(LHS.getConstantOperandVal(1))) {
- SDValue Test = LHS.getOperand(0);
- uint64_t Mask = LHS.getConstantOperandVal(1);
- return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
- DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
- Dest);
- }
- return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
- } else if (CC == ISD::SETNE) {
- // See if we can use a TBZ to fold in an AND as well.
- // TBZ has a smaller branch displacement than CBZ. If the offset is
- // out of bounds, a late MI-layer pass rewrites branches.
- // 403.gcc is an example that hits this case.
- if (LHS.getOpcode() == ISD::AND &&
- isa<ConstantSDNode>(LHS.getOperand(1)) &&
- isPowerOf2_64(LHS.getConstantOperandVal(1))) {
- SDValue Test = LHS.getOperand(0);
- uint64_t Mask = LHS.getConstantOperandVal(1);
- return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
- DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
- Dest);
- }
- return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
- } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
- // Don't combine AND since emitComparison converts the AND to an ANDS
- // (a.k.a. TST) and the test in the test bit and branch instruction
- // becomes redundant. This would also increase register pressure.
- uint64_t SignBitPos;
- std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
- return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
- DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
- }
- }
- if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
- LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
- // Don't combine AND since emitComparison converts the AND to an ANDS
- // (a.k.a. TST) and the test in the test bit and branch instruction
- // becomes redundant. This would also increase register pressure.
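- // For example, "br ((i64 x) > -1), L" becomes "tbz x, #63, L".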
- uint64_t SignBitPos;
- std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
- return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
- DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
- }
- SDValue CCVal;
- SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
- return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
- Cmp);
- }
- assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 ||
- LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
- // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
- // clean. Some of them require two branches to implement.
- SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
- AArch64CC::CondCode CC1, CC2;
- changeFPCCToAArch64CC(CC, CC1, CC2);
- SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
- SDValue BR1 =
- DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
- if (CC2 != AArch64CC::AL) {
- SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
- return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
- Cmp);
- }
- return BR1;
- }
- SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
- SelectionDAG &DAG) const {
- if (!Subtarget->hasNEON())
- return SDValue();
- EVT VT = Op.getValueType();
- EVT IntVT = VT.changeTypeToInteger();
- SDLoc DL(Op);
- SDValue In1 = Op.getOperand(0);
- SDValue In2 = Op.getOperand(1);
- EVT SrcVT = In2.getValueType();
- if (!SrcVT.bitsEq(VT))
- In2 = DAG.getFPExtendOrRound(In2, DL, VT);
- if (VT.isScalableVector())
- IntVT =
- getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
- if (VT.isFixedLengthVector() &&
- useSVEForFixedLengthVectorVT(VT, Subtarget->forceStreamingCompatibleSVE())) {
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- In1 = convertToScalableVector(DAG, ContainerVT, In1);
- In2 = convertToScalableVector(DAG, ContainerVT, In2);
- SDValue Res = DAG.getNode(ISD::FCOPYSIGN, DL, ContainerVT, In1, In2);
- return convertFromScalableVector(DAG, VT, Res);
- }
- auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
- if (VT.isScalableVector())
- return getSVESafeBitCast(VT, Op, DAG);
- return DAG.getBitcast(VT, Op);
- };
- SDValue VecVal1, VecVal2;
- EVT VecVT;
- auto SetVecVal = [&](int Idx = -1) {
- if (!VT.isVector()) {
- VecVal1 =
- DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1);
- VecVal2 =
- DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2);
- } else {
- VecVal1 = BitCast(VecVT, In1, DAG);
- VecVal2 = BitCast(VecVT, In2, DAG);
- }
- };
- if (VT.isVector()) {
- VecVT = IntVT;
- SetVecVal();
- } else if (VT == MVT::f64) {
- VecVT = MVT::v2i64;
- SetVecVal(AArch64::dsub);
- } else if (VT == MVT::f32) {
- VecVT = MVT::v4i32;
- SetVecVal(AArch64::ssub);
- } else if (VT == MVT::f16) {
- VecVT = MVT::v8i16;
- SetVecVal(AArch64::hsub);
- } else {
- llvm_unreachable("Invalid type for copysign!");
- }
- unsigned BitWidth = In1.getScalarValueSizeInBits();
- SDValue SignMaskV = DAG.getConstant(~APInt::getSignMask(BitWidth), DL, VecVT);
- // We want to materialize a mask with every bit but the high bit set, but the
- // AdvSIMD immediate moves cannot materialize that in a single instruction for
- // 64-bit elements. Instead, materialize all bits set and then negate that.
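- // For f64 this yields the 0x7fff...ffff mask: all-ones reinterpreted as a
- // double has its sign bit set, and FNEG clears it, leaving every other bit
- // set.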
- if (VT == MVT::f64 || VT == MVT::v2f64) {
- SignMaskV = DAG.getConstant(APInt::getAllOnes(BitWidth), DL, VecVT);
- SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, SignMaskV);
- SignMaskV = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, SignMaskV);
- SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, SignMaskV);
- }
- SDValue BSP =
- DAG.getNode(AArch64ISD::BSP, DL, VecVT, SignMaskV, VecVal1, VecVal2);
- if (VT == MVT::f16)
- return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, BSP);
- if (VT == MVT::f32)
- return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, BSP);
- if (VT == MVT::f64)
- return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, BSP);
- return BitCast(VT, BSP, DAG);
- }
- SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
- SelectionDAG &DAG) const {
- if (DAG.getMachineFunction().getFunction().hasFnAttribute(
- Attribute::NoImplicitFloat))
- return SDValue();
- if (!Subtarget->hasNEON())
- return SDValue();
- bool IsParity = Op.getOpcode() == ISD::PARITY;
- SDValue Val = Op.getOperand(0);
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- // For i32, the general parity lowering using EORs is more efficient than
- // going through floating point.
- if (VT == MVT::i32 && IsParity)
- return SDValue();
- // If there is no CNT instruction available, GPR popcount can
- // be more efficiently lowered to the following sequence that uses
- // AdvSIMD registers/instructions as long as the copies to/from
- // the AdvSIMD registers are cheap.
- // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd
- // CNT V0.8B, V0.8B // 8xbyte pop-counts
- // ADDV B0, V0.8B // sum 8xbyte pop-counts
- // UMOV X0, V0.B[0] // copy byte result back to integer reg
- if (VT == MVT::i32 || VT == MVT::i64) {
- if (VT == MVT::i32)
- Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
- SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
- SDValue UaddLV = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
- DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
- if (IsParity)
- UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
- DAG.getConstant(1, DL, MVT::i32));
- if (VT == MVT::i64)
- UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
- return UaddLV;
- } else if (VT == MVT::i128) {
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val);
- SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val);
- SDValue UaddLV = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
- DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
- if (IsParity)
- UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
- DAG.getConstant(1, DL, MVT::i32));
- return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV);
- }
- assert(!IsParity && "ISD::PARITY of vector types not supported");
- if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);
- assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
- VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
- "Unexpected type for custom ctpop lowering");
- EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
- Val = DAG.getBitcast(VT8Bit, Val);
- Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);
- // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
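- // For example, for v4i32 this goes v16i8 -> uaddlp to v8i16 -> uaddlp to
- // v4i32, summing adjacent per-byte counts at each step.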
- unsigned EltSize = 8;
- unsigned NumElts = VT.is64BitVector() ? 8 : 16;
- while (EltSize != VT.getScalarSizeInBits()) {
- EltSize *= 2;
- NumElts /= 2;
- MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
- Val = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
- DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
- }
- return Val;
- }
- SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()));
- SDLoc DL(Op);
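- // cttz(x) == ctlz(bitreverse(x)): reversing the bits turns trailing zeros
- // into leading zeros, which the predicated CTLZ lowering handles directly.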
- SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0));
- return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
- }
- SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- unsigned Opcode = Op.getOpcode();
- ISD::CondCode CC;
- switch (Opcode) {
- default:
- llvm_unreachable("Wrong instruction");
- case ISD::SMAX:
- CC = ISD::SETGT;
- break;
- case ISD::SMIN:
- CC = ISD::SETLT;
- break;
- case ISD::UMAX:
- CC = ISD::SETUGT;
- break;
- case ISD::UMIN:
- CC = ISD::SETULT;
- break;
- }
- if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
- switch (Opcode) {
- default:
- llvm_unreachable("Wrong instruction");
- case ISD::SMAX:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
- case ISD::SMIN:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
- case ISD::UMAX:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
- case ISD::UMIN:
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
- }
- }
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
- return DAG.getSelect(DL, VT, Cond, Op0, Op1);
- }
- SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU);
- SDLoc DL(Op);
- SDValue REVB;
- MVT VST;
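- // The idea, as implemented below: view the vector as bytes, swap the bytes
- // within each element with REV32/REV64, bit-reverse each byte with a
- // v8i8/v16i8 BITREVERSE (RBIT), then NVCAST the result back to VT.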
- switch (VT.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("Invalid type for bitreverse!");
- case MVT::v2i32: {
- VST = MVT::v8i8;
- REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
- break;
- }
- case MVT::v4i32: {
- VST = MVT::v16i8;
- REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
- break;
- }
- case MVT::v1i64: {
- VST = MVT::v8i8;
- REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
- break;
- }
- case MVT::v2i64: {
- VST = MVT::v16i8;
- REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
- break;
- }
- }
- return DAG.getNode(AArch64ISD::NVCAST, DL, VT,
- DAG.getNode(ISD::BITREVERSE, DL, VST, REVB));
- }
- // Check whether N forms a continuous comparison sequence, i.e. a chain of
- // ORs whose leaves are XORs.
- static bool
- isOrXorChain(SDValue N, unsigned &Num,
- SmallVector<std::pair<SDValue, SDValue>, 16> &WorkList) {
- if (Num == MaxXors)
- return false;
- // Skip the one-use zext
- if (N->getOpcode() == ISD::ZERO_EXTEND && N->hasOneUse())
- N = N->getOperand(0);
- // The leaf node must be XOR
- if (N->getOpcode() == ISD::XOR) {
- WorkList.push_back(std::make_pair(N->getOperand(0), N->getOperand(1)));
- Num++;
- return true;
- }
- // All the non-leaf nodes must be OR.
- if (N->getOpcode() != ISD::OR || !N->hasOneUse())
- return false;
- if (isOrXorChain(N->getOperand(0), Num, WorkList) &&
- isOrXorChain(N->getOperand(1), Num, WorkList))
- return true;
- return false;
- }
- // Transform chains of ORs and XORs, which are usually produced by the
- // expansion of memcmp/bcmp.
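- // For example, a 16-byte bcmp-style equality test may be expanded to
- //   setcc (or (xor (load a), (load b)), (xor (load a+8), (load b+8))), 0, eq
- // which the combine below turns into a compare followed by a ccmp chain.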
- static SDValue performOrXorChainCombine(SDNode *N, SelectionDAG &DAG) {
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- SmallVector<std::pair<SDValue, SDValue>, 16> WorkList;
- // Only handle integer compares.
- if (N->getOpcode() != ISD::SETCC)
- return SDValue();
- ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
- // Try to express conjunction "cmp 0 (or (xor A0 A1) (xor B0 B1))" as:
- // sub A0, A1; ccmp B0, B1, 0, eq; cmp inv(Cond) flag
- unsigned NumXors = 0;
- if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) &&
- LHS->getOpcode() == ISD::OR && LHS->hasOneUse() &&
- isOrXorChain(LHS, NumXors, WorkList)) {
- SDValue XOR0, XOR1;
- std::tie(XOR0, XOR1) = WorkList[0];
- unsigned LogicOp = (Cond == ISD::SETEQ) ? ISD::AND : ISD::OR;
- SDValue Cmp = DAG.getSetCC(DL, VT, XOR0, XOR1, Cond);
- for (unsigned I = 1; I < WorkList.size(); I++) {
- std::tie(XOR0, XOR1) = WorkList[I];
- SDValue CmpChain = DAG.getSetCC(DL, VT, XOR0, XOR1, Cond);
- Cmp = DAG.getNode(LogicOp, DL, VT, Cmp, CmpChain);
- }
- // Exit early by inverting the condition, which helps reduce indentation.
- return Cmp;
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- if (Op.getValueType().isVector())
- return LowerVSETCC(Op, DAG);
- bool IsStrict = Op->isStrictFPOpcode();
- bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
- unsigned OpNo = IsStrict ? 1 : 0;
- SDValue Chain;
- if (IsStrict)
- Chain = Op.getOperand(0);
- SDValue LHS = Op.getOperand(OpNo + 0);
- SDValue RHS = Op.getOperand(OpNo + 1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(OpNo + 2))->get();
- SDLoc dl(Op);
- // We chose ZeroOrOneBooleanContents, so use zero and one.
- EVT VT = Op.getValueType();
- SDValue TVal = DAG.getConstant(1, dl, VT);
- SDValue FVal = DAG.getConstant(0, dl, VT);
- // Handle f128 first, since one possible outcome is a normal integer
- // comparison which gets picked up by the next if statement.
- if (LHS.getValueType() == MVT::f128) {
- softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain,
- IsSignaling);
- // If softenSetCCOperands returned a scalar, use it.
- if (!RHS.getNode()) {
- assert(LHS.getValueType() == Op.getValueType() &&
- "Unexpected setcc expansion!");
- return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS;
- }
- }
- if (LHS.getValueType().isInteger()) {
- SDValue CCVal;
- SDValue Cmp = getAArch64Cmp(
- LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);
- // Note that we inverted the condition above, so we reverse the order of
- // the true and false operands here. This will allow the setcc to be
- // matched to a single CSINC instruction.
- SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
- return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
- }
- // Now we know we're dealing with FP values.
- assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
- LHS.getValueType() == MVT::f64);
- // We will need an FCMP + CSEL sequence; go ahead and emit the comparison.
- SDValue Cmp;
- if (IsStrict)
- Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling);
- else
- Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
- AArch64CC::CondCode CC1, CC2;
- changeFPCCToAArch64CC(CC, CC1, CC2);
- SDValue Res;
- if (CC2 == AArch64CC::AL) {
- changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1,
- CC2);
- SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
- // Note that we inverted the condition above, so we reverse the order of
- // the true and false operands here. This will allow the setcc to be
- // matched to a single CSINC instruction.
- Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
- } else {
- // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
- // totally clean. Some of them require two CSELs to implement. As is in
- // this case, we emit the first CSEL and then emit a second using the output
- // of the first as the RHS. We're effectively OR'ing the two CC's together.
- // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
- SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
- SDValue CS1 =
- DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
- SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
- Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
- }
- return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
- }
- SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- EVT VT = LHS.getValueType();
- if (VT != MVT::i32 && VT != MVT::i64)
- return SDValue();
- SDLoc DL(Op);
- SDValue Carry = Op.getOperand(2);
- // SBCS uses a carry not a borrow so the carry flag should be inverted first.
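- // (AArch64 SBC computes LHS - RHS - (1 - C), i.e. it consumes an inverted
- // borrow, which is why the incoming carry value is inverted here.)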
- SDValue InvCarry = valueToCarryFlag(Carry, DAG, true);
- SDValue Cmp = DAG.getNode(AArch64ISD::SBCS, DL, DAG.getVTList(VT, MVT::Glue),
- LHS, RHS, InvCarry);
- EVT OpVT = Op.getValueType();
- SDValue TVal = DAG.getConstant(1, DL, OpVT);
- SDValue FVal = DAG.getConstant(0, DL, OpVT);
- ISD::CondCode Cond = cast<CondCodeSDNode>(Op.getOperand(3))->get();
- ISD::CondCode CondInv = ISD::getSetCCInverse(Cond, VT);
- SDValue CCVal =
- DAG.getConstant(changeIntCCToAArch64CC(CondInv), DL, MVT::i32);
- // Inputs are swapped because the condition is inverted. This will allow
- // matching with a single CSINC instruction.
- return DAG.getNode(AArch64ISD::CSEL, DL, OpVT, FVal, TVal, CCVal,
- Cmp.getValue(1));
- }
- SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
- SDValue RHS, SDValue TVal,
- SDValue FVal, const SDLoc &dl,
- SelectionDAG &DAG) const {
- // Handle f128 first, because it will result in a comparison of some RTLIB
- // call result against zero.
- if (LHS.getValueType() == MVT::f128) {
- softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
- // If softenSetCCOperands returned a scalar, we need to compare the result
- // against zero to select between true and false values.
- if (!RHS.getNode()) {
- RHS = DAG.getConstant(0, dl, LHS.getValueType());
- CC = ISD::SETNE;
- }
- }
- // Also handle f16, for which we need to do an f32 comparison.
- if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
- LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
- RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
- }
- // Next, handle integers.
- if (LHS.getValueType().isInteger()) {
- assert((LHS.getValueType() == RHS.getValueType()) &&
- (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
- ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
- ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
- ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
- // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and transform
- // it into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
- // supported types.
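- // For example, for i32: (x > -1) ? 1 : -1 becomes (OR (ASR x, 31), 1).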
- if (CC == ISD::SETGT && RHSC && RHSC->isAllOnes() && CTVal && CFVal &&
- CTVal->isOne() && CFVal->isAllOnes() &&
- LHS.getValueType() == TVal.getValueType()) {
- EVT VT = LHS.getValueType();
- SDValue Shift =
- DAG.getNode(ISD::SRA, dl, VT, LHS,
- DAG.getConstant(VT.getSizeInBits() - 1, dl, VT));
- return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT));
- }
- unsigned Opcode = AArch64ISD::CSEL;
- // If both the TVal and the FVal are constants, see if we can swap them in
- // order to form a CSINV or CSINC out of them.
- if (CTVal && CFVal && CTVal->isAllOnes() && CFVal->isZero()) {
- std::swap(TVal, FVal);
- std::swap(CTVal, CFVal);
- CC = ISD::getSetCCInverse(CC, LHS.getValueType());
- } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isZero()) {
- std::swap(TVal, FVal);
- std::swap(CTVal, CFVal);
- CC = ISD::getSetCCInverse(CC, LHS.getValueType());
- } else if (TVal.getOpcode() == ISD::XOR) {
- // If TVal is a NOT we want to swap TVal and FVal so that we can match
- // with a CSINV rather than a CSEL.
- if (isAllOnesConstant(TVal.getOperand(1))) {
- std::swap(TVal, FVal);
- std::swap(CTVal, CFVal);
- CC = ISD::getSetCCInverse(CC, LHS.getValueType());
- }
- } else if (TVal.getOpcode() == ISD::SUB) {
- // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
- // that we can match with a CSNEG rather than a CSEL.
- if (isNullConstant(TVal.getOperand(0))) {
- std::swap(TVal, FVal);
- std::swap(CTVal, CFVal);
- CC = ISD::getSetCCInverse(CC, LHS.getValueType());
- }
- } else if (CTVal && CFVal) {
- const int64_t TrueVal = CTVal->getSExtValue();
- const int64_t FalseVal = CFVal->getSExtValue();
- bool Swap = false;
- // If both TVal and FVal are constants, see if FVal is the
- // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
- // instead of a CSEL in that case.
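- // For example, (cond ? 3 : -4) maps to CSINV (since -4 == ~3),
- // (cond ? 5 : -5) maps to CSNEG, and (cond ? 7 : 6) maps to CSINC.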
- if (TrueVal == ~FalseVal) {
- Opcode = AArch64ISD::CSINV;
- } else if (FalseVal > std::numeric_limits<int64_t>::min() &&
- TrueVal == -FalseVal) {
- Opcode = AArch64ISD::CSNEG;
- } else if (TVal.getValueType() == MVT::i32) {
- // If our operands are only 32-bit wide, make sure we use 32-bit
- // arithmetic for the check whether we can use CSINC. This ensures that
- // the addition in the check will wrap around properly in case there is
- // an overflow (which would not be the case if we do the check with
- // 64-bit arithmetic).
- const uint32_t TrueVal32 = CTVal->getZExtValue();
- const uint32_t FalseVal32 = CFVal->getZExtValue();
- if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
- Opcode = AArch64ISD::CSINC;
- if (TrueVal32 > FalseVal32) {
- Swap = true;
- }
- }
- } else {
- // 64-bit check whether we can use CSINC.
- const uint64_t TrueVal64 = TrueVal;
- const uint64_t FalseVal64 = FalseVal;
- if ((TrueVal64 == FalseVal64 + 1) || (TrueVal64 + 1 == FalseVal64)) {
- Opcode = AArch64ISD::CSINC;
- if (TrueVal > FalseVal) {
- Swap = true;
- }
- }
- }
- // Swap TVal and FVal if necessary.
- if (Swap) {
- std::swap(TVal, FVal);
- std::swap(CTVal, CFVal);
- CC = ISD::getSetCCInverse(CC, LHS.getValueType());
- }
- if (Opcode != AArch64ISD::CSEL) {
- // Drop FVal since we can get its value by simply inverting/negating
- // TVal.
- FVal = TVal;
- }
- }
- // Avoid materializing a constant when possible by reusing a known value in
- // a register. However, don't perform this optimization if the known value
- // is one, zero or negative one in the case of a CSEL. We can always
- // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
- // FVal, respectively.
- ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
- if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
- !RHSVal->isZero() && !RHSVal->isAllOnes()) {
- AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
- // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
- // "a != C ? x : a" to avoid materializing C.
- if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
- TVal = LHS;
- else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
- FVal = LHS;
- } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
- assert (CTVal && CFVal && "Expected constant operands for CSNEG.");
- // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
- // avoid materializing C.
- AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
- if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
- Opcode = AArch64ISD::CSINV;
- TVal = LHS;
- FVal = DAG.getConstant(0, dl, FVal.getValueType());
- }
- }
- SDValue CCVal;
- SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
- EVT VT = TVal.getValueType();
- return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
- }
- // Now we know we're dealing with FP values.
- assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
- LHS.getValueType() == MVT::f64);
- assert(LHS.getValueType() == RHS.getValueType());
- EVT VT = TVal.getValueType();
- SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
- // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
- // clean. Some of them require two CSELs to implement.
- AArch64CC::CondCode CC1, CC2;
- changeFPCCToAArch64CC(CC, CC1, CC2);
- if (DAG.getTarget().Options.UnsafeFPMath) {
- // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
- // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
- ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
- if (RHSVal && RHSVal->isZero()) {
- ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
- ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
- if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
- CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
- TVal = LHS;
- else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
- CFVal && CFVal->isZero() &&
- FVal.getValueType() == LHS.getValueType())
- FVal = LHS;
- }
- }
- // Emit first, and possibly only, CSEL.
- SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
- SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
- // If we need a second CSEL, emit it, using the output of the first as the
- // RHS. We're effectively OR'ing the two CC's together.
- if (CC2 != AArch64CC::AL) {
- SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
- return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
- }
- // Otherwise, return the output of the first CSEL.
- return CS1;
- }
- SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT Ty = Op.getValueType();
- auto Idx = Op.getConstantOperandAPInt(2);
- int64_t IdxVal = Idx.getSExtValue();
- assert(Ty.isScalableVector() &&
- "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");
- // We can use the splice instruction for certain index values where we are
- // able to efficiently generate the correct predicate. The index will be
- // inverted and used directly as the input to the ptrue instruction, i.e.
- // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
- // splice predicate. However, we can only do this if we can guarantee that
- // there are enough elements in the vector, hence we check the index <= min
- // number of elements.
- std::optional<unsigned> PredPattern;
- if (Ty.isScalableVector() && IdxVal < 0 &&
- (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
- std::nullopt) {
- SDLoc DL(Op);
- // Create a predicate where all but the last -IdxVal elements are false.
- EVT PredVT = Ty.changeVectorElementType(MVT::i1);
- SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
- Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
- // Now splice the two inputs together using the predicate.
- return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
- Op.getOperand(1));
- }
- // This will select to an EXT instruction, which has a maximum immediate
- // value of 255, hence 2048-bits is the maximum value we can lower.
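- // For example, with i8 elements indices 0..255 are representable, while with
- // i32 elements the limit is 0..63.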
- if (IdxVal >= 0 &&
- IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
- return Op;
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
- SelectionDAG &DAG) const {
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue TVal = Op.getOperand(2);
- SDValue FVal = Op.getOperand(3);
- SDLoc DL(Op);
- return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
- }
- SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue CCVal = Op->getOperand(0);
- SDValue TVal = Op->getOperand(1);
- SDValue FVal = Op->getOperand(2);
- SDLoc DL(Op);
- EVT Ty = Op.getValueType();
- if (Ty.isScalableVector()) {
- SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
- MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
- SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
- return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
- }
- if (useSVEForFixedLengthVectorVT(Ty)) {
- // FIXME: Ideally this would be the same as above using i1 types, however
- // for the moment we can't deal with fixed i1 vector types properly, so
- // instead extend the predicate to a result type sized integer vector.
- MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
- MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
- SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
- SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
- return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
- }
- // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
- // instruction.
- if (ISD::isOverflowIntrOpRes(CCVal)) {
- // Only lower legal XALUO ops.
- if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
- return SDValue();
- AArch64CC::CondCode OFCC;
- SDValue Value, Overflow;
- std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
- SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);
- return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
- CCVal, Overflow);
- }
- // Lower it the same way as we would lower a SELECT_CC node.
- ISD::CondCode CC;
- SDValue LHS, RHS;
- if (CCVal.getOpcode() == ISD::SETCC) {
- LHS = CCVal.getOperand(0);
- RHS = CCVal.getOperand(1);
- CC = cast<CondCodeSDNode>(CCVal.getOperand(2))->get();
- } else {
- LHS = CCVal;
- RHS = DAG.getConstant(0, DL, CCVal.getValueType());
- CC = ISD::SETNE;
- }
- // If we are lowering an f16 and we do not have full fp16, convert to an f32
- // in order to use FCSELSrrr.
- if ((Ty == MVT::f16 || Ty == MVT::bf16) && !Subtarget->hasFullFP16()) {
- TVal = SDValue(
- DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
- DAG.getUNDEF(MVT::f32), TVal,
- DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
- 0);
- FVal = SDValue(
- DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
- DAG.getUNDEF(MVT::f32), FVal,
- DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
- 0);
- }
- SDValue Res = LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
- if ((Ty == MVT::f16 || Ty == MVT::bf16) && !Subtarget->hasFullFP16()) {
- Res = SDValue(
- DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, Ty, Res,
- DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
- 0);
- }
- return Res;
- }
- SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
- SelectionDAG &DAG) const {
- // Jump table entries are emitted as PC-relative offsets. No additional
- // tweaking is necessary here; just get the address of the jump table.
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- !Subtarget->isTargetMachO()) {
- return getAddrLarge(JT, DAG);
- } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
- return getAddrTiny(JT, DAG);
- }
- return getAddr(JT, DAG);
- }
- SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
- SelectionDAG &DAG) const {
- // Jump table entries are emitted as PC-relative offsets. No additional
- // tweaking is necessary here; just get the address of the jump table.
- SDLoc DL(Op);
- SDValue JT = Op.getOperand(1);
- SDValue Entry = Op.getOperand(2);
- int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();
- auto *AFI = DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
- AFI->setJumpTableEntryInfo(JTI, 4, nullptr);
- SDNode *Dest =
- DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
- Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
- return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
- SDValue(Dest, 0));
- }
- SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
- SelectionDAG &DAG) const {
- ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- if (getTargetMachine().getCodeModel() == CodeModel::Large) {
- // Use the GOT for the large code model on iOS.
- if (Subtarget->isTargetMachO()) {
- return getGOT(CP, DAG);
- }
- return getAddrLarge(CP, DAG);
- } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
- return getAddrTiny(CP, DAG);
- } else {
- return getAddr(CP, DAG);
- }
- }
- SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
- SelectionDAG &DAG) const {
- BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- !Subtarget->isTargetMachO()) {
- return getAddrLarge(BA, DAG);
- } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
- return getAddrTiny(BA, DAG);
- }
- return getAddr(BA, DAG);
- }
- SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
- SelectionDAG &DAG) const {
- AArch64FunctionInfo *FuncInfo =
- DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
- SDLoc DL(Op);
- SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
- getPointerTy(DAG.getDataLayout()));
- FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
- MachinePointerInfo(SV));
- }
- SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
- SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- SDLoc DL(Op);
- SDValue FR;
- if (Subtarget->isWindowsArm64EC()) {
- // With the Arm64EC ABI, we compute the address of the varargs save area
- // relative to x4. For a normal AArch64->AArch64 call, x4 == sp on entry,
- // but calls from an entry thunk can pass in a different address.
- Register VReg = MF.addLiveIn(AArch64::X4, &AArch64::GPR64RegClass);
- SDValue Val = DAG.getCopyFromReg(DAG.getEntryNode(), DL, VReg, MVT::i64);
- uint64_t StackOffset;
- if (FuncInfo->getVarArgsGPRSize() > 0)
- StackOffset = -(uint64_t)FuncInfo->getVarArgsGPRSize();
- else
- StackOffset = FuncInfo->getVarArgsStackOffset();
- FR = DAG.getNode(ISD::ADD, DL, MVT::i64, Val,
- DAG.getConstant(StackOffset, DL, MVT::i64));
- } else {
- FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
- ? FuncInfo->getVarArgsGPRIndex()
- : FuncInfo->getVarArgsStackIndex(),
- getPointerTy(DAG.getDataLayout()));
- }
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
- MachinePointerInfo(SV));
- }
- SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
- SelectionDAG &DAG) const {
- // The layout of the va_list struct is specified in the AArch64 Procedure Call
- // Standard, section B.3.
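- // For reference, the stores below fill in a layout equivalent to
- // (illustrative sketch of AAPCS64 B.3):
- //   struct va_list {
- //     void *__stack;    // offset 0
- //     void *__gr_top;   // offset 8  (4 on ILP32)
- //     void *__vr_top;   // offset 16 (8 on ILP32)
- //     int   __gr_offs;  // offset 24 (12 on ILP32)
- //     int   __vr_offs;  // offset 28 (16 on ILP32)
- //   };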
- MachineFunction &MF = DAG.getMachineFunction();
- AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
- auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue VAList = Op.getOperand(1);
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- SmallVector<SDValue, 4> MemOps;
- // void *__stack at offset 0
- unsigned Offset = 0;
- SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
- Stack = DAG.getZExtOrTrunc(Stack, DL, PtrMemVT);
- MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
- MachinePointerInfo(SV), Align(PtrSize)));
- // void *__gr_top at offset 8 (4 on ILP32)
- Offset += PtrSize;
- int GPRSize = FuncInfo->getVarArgsGPRSize();
- if (GPRSize > 0) {
- SDValue GRTop, GRTopAddr;
- GRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getConstant(Offset, DL, PtrVT));
- GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT);
- GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop,
- DAG.getConstant(GPRSize, DL, PtrVT));
- GRTop = DAG.getZExtOrTrunc(GRTop, DL, PtrMemVT);
- MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
- MachinePointerInfo(SV, Offset),
- Align(PtrSize)));
- }
- // void *__vr_top at offset 16 (8 on ILP32)
- Offset += PtrSize;
- int FPRSize = FuncInfo->getVarArgsFPRSize();
- if (FPRSize > 0) {
- SDValue VRTop, VRTopAddr;
- VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getConstant(Offset, DL, PtrVT));
- VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT);
- VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
- DAG.getConstant(FPRSize, DL, PtrVT));
- VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT);
- MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
- MachinePointerInfo(SV, Offset),
- Align(PtrSize)));
- }
- // int __gr_offs at offset 24 (12 on ILP32)
- Offset += PtrSize;
- SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getConstant(Offset, DL, PtrVT));
- MemOps.push_back(
- DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32),
- GROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
- // int __vr_offs at offset 28 (16 on ILP32)
- Offset += 4;
- SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getConstant(Offset, DL, PtrVT));
- MemOps.push_back(
- DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32),
- VROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
- }
- SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
- SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
- return LowerWin64_VASTART(Op, DAG);
- else if (Subtarget->isTargetDarwin())
- return LowerDarwin_VASTART(Op, DAG);
- else
- return LowerAAPCS_VASTART(Op, DAG);
- }
- SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
- SelectionDAG &DAG) const {
- // AAPCS has three pointers and two ints (= 32 bytes), Darwin has a single
- // pointer.
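- // (On ILP32 the three pointers shrink to 4 bytes each, giving
- // 3*4 + 2*4 = 20 bytes, matching VaListSize below.)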
- SDLoc DL(Op);
- unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
- unsigned VaListSize =
- (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
- ? PtrSize
- : Subtarget->isTargetILP32() ? 20 : 32;
- const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
- const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2),
- DAG.getConstant(VaListSize, DL, MVT::i32),
- Align(PtrSize), false, false, false,
- MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
- }
- SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
- assert(Subtarget->isTargetDarwin() &&
- "automatic va_arg instruction only works on Darwin");
- const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue Addr = Op.getOperand(1);
- MaybeAlign Align(Op.getConstantOperandVal(3));
- unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
- SDValue VAList =
- DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V));
- Chain = VAList.getValue(1);
- VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
- if (VT.isScalableVector())
- report_fatal_error("Passing SVE types to variadic functions is "
- "currently not supported");
- if (Align && *Align > MinSlotSize) {
- VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getConstant(Align->value() - 1, DL, PtrVT));
- VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
- DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
- }
- Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
- unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
- // Scalar integer and FP values smaller than 64 bits are implicitly extended
- // up to 64 bits. At the very least, we have to increase the striding of the
- // vaargs list to match this, and for FP values we need to introduce
- // FP_ROUND nodes as well.
- if (VT.isInteger() && !VT.isVector())
- ArgSize = std::max(ArgSize, MinSlotSize);
- bool NeedFPTrunc = false;
- if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
- ArgSize = 8;
- NeedFPTrunc = true;
- }
- // Increment the pointer, VAList, to the next vaarg
- SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getConstant(ArgSize, DL, PtrVT));
- VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT);
- // Store the incremented VAList to the legalized pointer
- SDValue APStore =
- DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));
- // Load the actual argument out of the pointer VAList
- if (NeedFPTrunc) {
- // Load the value as an f64.
- SDValue WideFP =
- DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
- // Round the value down to an f32.
- SDValue NarrowFP =
- DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
- DAG.getIntPtrConstant(1, DL, /*isTarget=*/true));
- SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
- // Merge the rounded value with the chain output of the load.
- return DAG.getMergeValues(Ops, DL);
- }
- return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
- }
- SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
- SelectionDAG &DAG) const {
- MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
- MFI.setFrameAddressIsTaken(true);
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- SDValue FrameAddr =
- DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
- while (Depth--)
- FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo());
- if (Subtarget->isTargetILP32())
- FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr,
- DAG.getValueType(VT));
- return FrameAddr;
- }
- SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
- SelectionDAG &DAG) const {
- MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
- EVT VT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
- int FI = MFI.CreateFixedObject(4, 0, false);
- return DAG.getFrameIndex(FI, VT);
- }
- #define GET_REGISTER_MATCHER
- #include "AArch64GenAsmMatcher.inc"
- // FIXME? Maybe this could be a TableGen attribute on some registers and
- // this table could be generated automatically from RegInfo.
- Register AArch64TargetLowering::
- getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {
- Register Reg = MatchRegisterName(RegName);
- if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
- const MCRegisterInfo *MRI = Subtarget->getRegisterInfo();
- unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false);
- if (!Subtarget->isXRegisterReserved(DwarfRegNum))
- Reg = 0;
- }
- if (Reg)
- return Reg;
- report_fatal_error(Twine("Invalid register name \""
- + StringRef(RegName) + "\"."));
- }
- SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
- SelectionDAG &DAG) const {
- DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- SDValue FrameAddr =
- DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
- SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
- return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset);
- }
- SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
- SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- MFI.setReturnAddressIsTaken(true);
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- SDValue ReturnAddress;
- if (Depth) {
- SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
- ReturnAddress = DAG.getLoad(
- VT, DL, DAG.getEntryNode(),
- DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), MachinePointerInfo());
- } else {
- // Return LR, which contains the return address. Mark it an implicit
- // live-in.
- Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
- ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
- }
- // The XPACLRI instruction assembles to a hint-space instruction before
- // Armv8.3-A, so it can be safely used on any pre-Armv8.3-A architecture.
- // On Armv8.3-A and onwards XPACI is available, so use that instead.
- SDNode *St;
- if (Subtarget->hasPAuth()) {
- St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress);
- } else {
- // XPACLRI operates on LR therefore we must move the operand accordingly.
- SDValue Chain =
- DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress);
- St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain);
- }
- return SDValue(St, 0);
- }
- /// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which return two
- /// i64 values and take a 2 x i64 value to shift plus a shift amount.
- SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue Lo, Hi;
- expandShiftParts(Op.getNode(), Lo, Hi, DAG);
- return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
- }
- bool AArch64TargetLowering::isOffsetFoldingLegal(
- const GlobalAddressSDNode *GA) const {
- // Offsets are folded in the DAG combine rather than here so that we can
- // intelligently choose an offset based on the uses.
- return false;
- }
- bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
- bool OptForSize) const {
- bool IsLegal = false;
- // We can materialize #0.0 as fmov $Rd, XZR for the 64-bit and 32-bit cases,
- // and for the 16-bit case when the target has full fp16 support.
- // FIXME: We should be able to handle f128 as well with a clever lowering.
- const APInt ImmInt = Imm.bitcastToAPInt();
- if (VT == MVT::f64)
- IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
- else if (VT == MVT::f32)
- IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
- else if (VT == MVT::f16 && Subtarget->hasFullFP16())
- IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero();
- // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
- // generate that fmov.
- // If we can not materialize in immediate field for fmov, check if the
- // value can be encoded as the immediate operand of a logical instruction.
- // The immediate value will be created with either MOVZ, MOVN, or ORR.
- if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) {
- // The cost is actually exactly the same for mov+fmov vs. adrp+ldr;
- // however the mov+fmov sequence is always better because of the reduced
- // cache pressure. The timings are still the same if you consider
- // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
- // movw+movk is fused). So we limit ourselves to at most 2 instructions.
- SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
- AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(),
- Insn);
- unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2));
- IsLegal = Insn.size() <= Limit;
- }
- LLVM_DEBUG(dbgs() << (IsLegal ? "Legal " : "Illegal ") << VT.getEVTString()
- << " imm value: "; Imm.dump(););
- return IsLegal;
- }
- //===----------------------------------------------------------------------===//
- // AArch64 Optimization Hooks
- //===----------------------------------------------------------------------===//
- static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
- SDValue Operand, SelectionDAG &DAG,
- int &ExtraSteps) {
- EVT VT = Operand.getValueType();
- if ((ST->hasNEON() &&
- (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
- VT == MVT::f32 || VT == MVT::v1f32 || VT == MVT::v2f32 ||
- VT == MVT::v4f32)) ||
- (ST->hasSVE() &&
- (VT == MVT::nxv8f16 || VT == MVT::nxv4f32 || VT == MVT::nxv2f64))) {
- if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
- // For the reciprocal estimates, convergence is quadratic, so the number
- // of digits is doubled after each iteration. In ARMv8, the accuracy of
- // the initial estimate is 2^-8. Thus the number of extra steps to refine
- // the result for float (23 mantissa bits) is 2 and for double (52
- // mantissa bits) is 3.
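- // Concretely: 2^-8 doubles to 2^-16 after one step, 2^-32 after two
- // (covering the 23-bit float mantissa) and 2^-64 after three (covering
- // the 52-bit double mantissa).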
- ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;
- return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
- }
- return SDValue();
- }
- SDValue
- AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
- const DenormalMode &Mode) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
- SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
- return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
- }
- SDValue
- AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
- SelectionDAG &DAG) const {
- return Op;
- }
- SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
- SelectionDAG &DAG, int Enabled,
- int &ExtraSteps,
- bool &UseOneConst,
- bool Reciprocal) const {
- if (Enabled == ReciprocalEstimate::Enabled ||
- (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
- if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
- DAG, ExtraSteps)) {
- SDLoc DL(Operand);
- EVT VT = Operand.getValueType();
- SDNodeFlags Flags;
- Flags.setAllowReassociation(true);
- // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
- // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
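- // One refinement step below therefore computes (illustrative):
- //   Step = FRSQRTS(X, E*E) = 0.5 * (3 - X * E * E);  E = E * Step;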
- for (int i = ExtraSteps; i > 0; --i) {
- SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
- Flags);
- Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
- Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
- }
- if (!Reciprocal)
- Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
- ExtraSteps = 0;
- return Estimate;
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
- SelectionDAG &DAG, int Enabled,
- int &ExtraSteps) const {
- if (Enabled == ReciprocalEstimate::Enabled)
- if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
- DAG, ExtraSteps)) {
- SDLoc DL(Operand);
- EVT VT = Operand.getValueType();
- SDNodeFlags Flags;
- Flags.setAllowReassociation(true);
- // Newton reciprocal iteration: E * (2 - X * E)
- // AArch64 reciprocal iteration instruction: (2 - M * N)
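- // One refinement step below therefore computes (illustrative):
- //   Step = FRECPS(X, E) = 2 - X * E;  E = E * Step;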
- for (int i = ExtraSteps; i > 0; --i) {
- SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
- Estimate, Flags);
- Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
- }
- ExtraSteps = 0;
- return Estimate;
- }
- return SDValue();
- }
- //===----------------------------------------------------------------------===//
- // AArch64 Inline Assembly Support
- //===----------------------------------------------------------------------===//
- // Table of Constraints
- // TODO: This is the current set of constraints supported by ARM for the
- // compiler, not all of them may make sense.
- //
- // r - A general register
- // w - An FP/SIMD register of some size in the range v0-v31
- // x - An FP/SIMD register of some size in the range v0-v15
- // I - Constant that can be used with an ADD instruction
- // J - Constant that can be used with a SUB instruction
- // K - Constant that can be used with a 32-bit logical instruction
- // L - Constant that can be used with a 64-bit logical instruction
- // M - Constant that can be used as a 32-bit MOV immediate
- // N - Constant that can be used as a 64-bit MOV immediate
- // Q - A memory reference with base register and no offset
- // S - A symbolic address
- // Y - Floating point constant zero
- // Z - Integer constant zero
- //
- // Note that general register operands will be output using their 64-bit x
- // register name, whatever the size of the variable, unless the asm operand
- // is prefixed by the %w modifier. Floating-point and SIMD register operands
- // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
- // %q modifier.
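- //
- // Illustrative (hypothetical) C-level uses of some of these constraints:
- //   asm("add %x0, %x1, %2"   : "=r"(res) : "r"(a), "I"(4095));
- //   asm("fadd %s0, %s1, %s2" : "=w"(f)   : "w"(x), "w"(y));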
- const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
- // At this point, we have to lower this constraint to something else, so we
- // lower it to an "r" or "w". However, by doing this we will force the result
- // to be in register, while the X constraint is much more permissive.
- //
- // Although we are correct (we are free to emit anything, without
- // constraints), we might break use cases that would expect us to be more
- // efficient and emit something else.
- if (!Subtarget->hasFPARMv8())
- return "r";
- if (ConstraintVT.isFloatingPoint())
- return "w";
- if (ConstraintVT.isVector() &&
- (ConstraintVT.getSizeInBits() == 64 ||
- ConstraintVT.getSizeInBits() == 128))
- return "w";
- return "r";
- }
- enum PredicateConstraint {
- Upl,
- Upa,
- Invalid
- };
- static PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
- PredicateConstraint P = PredicateConstraint::Invalid;
- if (Constraint == "Upa")
- P = PredicateConstraint::Upa;
- if (Constraint == "Upl")
- P = PredicateConstraint::Upl;
- return P;
- }
- /// getConstraintType - Given a constraint letter, return the type of
- /// constraint it is for this target.
- AArch64TargetLowering::ConstraintType
- AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default:
- break;
- case 'x':
- case 'w':
- case 'y':
- return C_RegisterClass;
- // An address with a single base register. Due to the way we
- // currently handle addresses it is the same as 'r'.
- case 'Q':
- return C_Memory;
- case 'I':
- case 'J':
- case 'K':
- case 'L':
- case 'M':
- case 'N':
- case 'Y':
- case 'Z':
- return C_Immediate;
- case 'z':
- case 'S': // A symbolic address
- return C_Other;
- }
- } else if (parsePredicateConstraint(Constraint) !=
- PredicateConstraint::Invalid)
- return C_RegisterClass;
- return TargetLowering::getConstraintType(Constraint);
- }
- /// Examine constraint type and operand type and determine a weight value.
- /// This object must already have been set up with the operand type
- /// and the current alternative constraint selected.
- TargetLowering::ConstraintWeight
- AArch64TargetLowering::getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const {
- ConstraintWeight weight = CW_Invalid;
- Value *CallOperandVal = info.CallOperandVal;
- // If we don't have a value, we can't do a match,
- // but allow it at the lowest weight.
- if (!CallOperandVal)
- return CW_Default;
- Type *type = CallOperandVal->getType();
- // Look at the constraint type.
- switch (*constraint) {
- default:
- weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
- break;
- case 'x':
- case 'w':
- case 'y':
- if (type->isFloatingPointTy() || type->isVectorTy())
- weight = CW_Register;
- break;
- case 'z':
- weight = CW_Constant;
- break;
- case 'U':
- if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
- weight = CW_Register;
- break;
- }
- return weight;
- }
- std::pair<unsigned, const TargetRegisterClass *>
- AArch64TargetLowering::getRegForInlineAsmConstraint(
- const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'r':
- if (VT.isScalableVector())
- return std::make_pair(0U, nullptr);
- if (Subtarget->hasLS64() && VT.getSizeInBits() == 512)
- return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass);
- if (VT.getFixedSizeInBits() == 64)
- return std::make_pair(0U, &AArch64::GPR64commonRegClass);
- return std::make_pair(0U, &AArch64::GPR32commonRegClass);
- case 'w': {
- if (!Subtarget->hasFPARMv8())
- break;
- if (VT.isScalableVector()) {
- if (VT.getVectorElementType() != MVT::i1)
- return std::make_pair(0U, &AArch64::ZPRRegClass);
- return std::make_pair(0U, nullptr);
- }
- uint64_t VTSize = VT.getFixedSizeInBits();
- if (VTSize == 16)
- return std::make_pair(0U, &AArch64::FPR16RegClass);
- if (VTSize == 32)
- return std::make_pair(0U, &AArch64::FPR32RegClass);
- if (VTSize == 64)
- return std::make_pair(0U, &AArch64::FPR64RegClass);
- if (VTSize == 128)
- return std::make_pair(0U, &AArch64::FPR128RegClass);
- break;
- }
- // The instructions that this constraint is designed for can
- // only take 128-bit registers so just use that regclass.
- case 'x':
- if (!Subtarget->hasFPARMv8())
- break;
- if (VT.isScalableVector())
- return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
- if (VT.getSizeInBits() == 128)
- return std::make_pair(0U, &AArch64::FPR128_loRegClass);
- break;
- case 'y':
- if (!Subtarget->hasFPARMv8())
- break;
- if (VT.isScalableVector())
- return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
- break;
- }
- } else {
- PredicateConstraint PC = parsePredicateConstraint(Constraint);
- if (PC != PredicateConstraint::Invalid) {
- if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
- return std::make_pair(0U, nullptr);
- bool restricted = (PC == PredicateConstraint::Upl);
- return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
- : std::make_pair(0U, &AArch64::PPRRegClass);
- }
- }
- if (StringRef("{cc}").equals_insensitive(Constraint))
- return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
- // Use the default implementation in TargetLowering to convert the register
- // constraint into a member of a register class.
- std::pair<unsigned, const TargetRegisterClass *> Res;
- Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
- // Not found as a standard register?
- if (!Res.second) {
- unsigned Size = Constraint.size();
- if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
- tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
- int RegNo;
- bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
- if (!Failed && RegNo >= 0 && RegNo <= 31) {
- // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
- // By default we'll emit v0-v31 for this unless there's a modifier where
- // we'll emit the correct register as well.
- if (VT != MVT::Other && VT.getSizeInBits() == 64) {
- Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
- Res.second = &AArch64::FPR64RegClass;
- } else {
- Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
- Res.second = &AArch64::FPR128RegClass;
- }
- }
- }
- }
- if (Res.second && !Subtarget->hasFPARMv8() &&
- !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
- !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
- return std::make_pair(0U, nullptr);
- return Res;
- }
- EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
- llvm::Type *Ty,
- bool AllowUnknown) const {
- if (Subtarget->hasLS64() && Ty->isIntegerTy(512))
- return EVT(MVT::i64x8);
- return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
- }
- /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops.
- void AArch64TargetLowering::LowerAsmOperandForConstraint(
- SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const {
- SDValue Result;
- // Currently only support length 1 constraints.
- if (Constraint.length() != 1)
- return;
- char ConstraintLetter = Constraint[0];
- switch (ConstraintLetter) {
- default:
- break;
- // This set of constraints deals with valid constants for various instructions.
- // Validate and return a target constant for them if we can.
- case 'z': {
- // 'z' maps to xzr or wzr so it needs an input of 0.
- if (!isNullConstant(Op))
- return;
- if (Op.getValueType() == MVT::i64)
- Result = DAG.getRegister(AArch64::XZR, MVT::i64);
- else
- Result = DAG.getRegister(AArch64::WZR, MVT::i32);
- break;
- }
- case 'S': {
- // An absolute symbolic address or label reference.
- if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
- Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
- GA->getValueType(0));
- } else if (const BlockAddressSDNode *BA =
- dyn_cast<BlockAddressSDNode>(Op)) {
- Result =
- DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
- } else
- return;
- break;
- }
- case 'I':
- case 'J':
- case 'K':
- case 'L':
- case 'M':
- case 'N':
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
- if (!C)
- return;
- // Grab the value and do some validation.
- uint64_t CVal = C->getZExtValue();
- switch (ConstraintLetter) {
- // The I constraint applies only to simple ADD or SUB immediate operands:
- // i.e. 0 to 4095 with optional shift by 12
- // The J constraint applies only to ADD or SUB immediates that would be
- // valid when negated, i.e. if [an add pattern] were to be output as a SUB
- // instruction [or vice versa], in other words -1 to -4095 with optional
- // left shift by 12.
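- // For example (illustrative): 4095 and 0x123000 (0x123 << 12) satisfy 'I',
- // whereas 0x1001 does not; likewise -1 and -(0x123 << 12) satisfy 'J'.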
- case 'I':
- if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
- break;
- return;
- case 'J': {
- uint64_t NVal = -C->getSExtValue();
- if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
- CVal = C->getSExtValue();
- break;
- }
- return;
- }
- // The K and L constraints apply *only* to logical immediates, including
- // what used to be the MOVI alias for ORR (though the MOVI alias has now
- // been removed and MOV should be used). So these constraints have to
- // distinguish between bit patterns that are valid 32-bit or 64-bit
- // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
- // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
- // versa.
- case 'K':
- if (AArch64_AM::isLogicalImmediate(CVal, 32))
- break;
- return;
- case 'L':
- if (AArch64_AM::isLogicalImmediate(CVal, 64))
- break;
- return;
- // The M and N constraints are a superset of K and L respectively, for use
- // with the MOV (immediate) alias. As well as the logical immediates they
- // also match 32 or 64-bit immediates that can be loaded either using a
- // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
- // (M) or 64-bit 0x1234000000000000 (N) etc.
- // As a note, some of this code is liberally stolen from the asm parser.
- case 'M': {
- if (!isUInt<32>(CVal))
- return;
- if (AArch64_AM::isLogicalImmediate(CVal, 32))
- break;
- if ((CVal & 0xFFFF) == CVal)
- break;
- if ((CVal & 0xFFFF0000ULL) == CVal)
- break;
- uint64_t NCVal = ~(uint32_t)CVal;
- if ((NCVal & 0xFFFFULL) == NCVal)
- break;
- if ((NCVal & 0xFFFF0000ULL) == NCVal)
- break;
- return;
- }
- case 'N': {
- if (AArch64_AM::isLogicalImmediate(CVal, 64))
- break;
- if ((CVal & 0xFFFFULL) == CVal)
- break;
- if ((CVal & 0xFFFF0000ULL) == CVal)
- break;
- if ((CVal & 0xFFFF00000000ULL) == CVal)
- break;
- if ((CVal & 0xFFFF000000000000ULL) == CVal)
- break;
- uint64_t NCVal = ~CVal;
- if ((NCVal & 0xFFFFULL) == NCVal)
- break;
- if ((NCVal & 0xFFFF0000ULL) == NCVal)
- break;
- if ((NCVal & 0xFFFF00000000ULL) == NCVal)
- break;
- if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
- break;
- return;
- }
- default:
- return;
- }
- // All assembler immediates are 64-bit integers.
- Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
- break;
- }
- if (Result.getNode()) {
- Ops.push_back(Result);
- return;
- }
- return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
- }
- //===----------------------------------------------------------------------===//
- // AArch64 Advanced SIMD Support
- //===----------------------------------------------------------------------===//
- /// WidenVector - Given a value in the V64 register class, produce the
- /// equivalent value in the V128 register class.
- static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
- EVT VT = V64Reg.getValueType();
- unsigned NarrowSize = VT.getVectorNumElements();
- MVT EltTy = VT.getVectorElementType().getSimpleVT();
- MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
- SDLoc DL(V64Reg);
- return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
- V64Reg, DAG.getConstant(0, DL, MVT::i64));
- }
- /// getExtFactor - Determine the adjustment factor for the position when
- /// generating an "extract from vector registers" instruction.
- static unsigned getExtFactor(SDValue &V) {
- EVT EltType = V.getValueType().getVectorElementType();
- return EltType.getSizeInBits() / 8;
- }
- /// NarrowVector - Given a value in the V128 register class, produce the
- /// equivalent value in the V64 register class.
- static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
- EVT VT = V128Reg.getValueType();
- unsigned WideSize = VT.getVectorNumElements();
- MVT EltTy = VT.getVectorElementType().getSimpleVT();
- MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
- SDLoc DL(V128Reg);
- return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
- }
- // Gather data to see if the operation can be modelled as a
- // shuffle in combination with VEXTs.
- SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
- LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
- SDLoc dl(Op);
- EVT VT = Op.getValueType();
- assert(!VT.isScalableVector() &&
- "Scalable vectors cannot be used with ISD::BUILD_VECTOR");
- unsigned NumElts = VT.getVectorNumElements();
- struct ShuffleSourceInfo {
- SDValue Vec;
- unsigned MinElt;
- unsigned MaxElt;
- // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
- // be compatible with the shuffle we intend to construct. As a result
- // ShuffleVec will be some sliding window into the original Vec.
- SDValue ShuffleVec;
- // Code should guarantee that element i in Vec starts at element
- // "WindowBase + i * WindowScale" in ShuffleVec.
- int WindowBase;
- int WindowScale;
- ShuffleSourceInfo(SDValue Vec)
- : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
- ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}
- bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
- };
- // First gather all vectors used as an immediate source for this BUILD_VECTOR
- // node.
- SmallVector<ShuffleSourceInfo, 2> Sources;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- if (V.isUndef())
- continue;
- else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- !isa<ConstantSDNode>(V.getOperand(1)) ||
- V.getOperand(0).getValueType().isScalableVector()) {
- LLVM_DEBUG(
- dbgs() << "Reshuffle failed: "
- "a shuffle can only come from building a vector from "
- "various elements of other fixed-width vectors, provided "
- "their indices are constant\n");
- return SDValue();
- }
- // Add this element source to the list if it's not already there.
- SDValue SourceVec = V.getOperand(0);
- auto Source = find(Sources, SourceVec);
- if (Source == Sources.end())
- Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
- // Update the minimum and maximum lane number seen.
- unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
- Source->MinElt = std::min(Source->MinElt, EltNo);
- Source->MaxElt = std::max(Source->MaxElt, EltNo);
- }
- // If we have 3 or 4 sources, try to generate a TBL, which will at least be
- // better than moving to/from gpr registers for larger vectors.
- if ((Sources.size() == 3 || Sources.size() == 4) && NumElts > 4) {
- // Construct a mask for the tbl. We may need to adjust the index for types
- // larger than i8.
- SmallVector<unsigned, 16> Mask;
- unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
- for (unsigned I = 0; I < NumElts; ++I) {
- SDValue V = Op.getOperand(I);
- if (V.isUndef()) {
- for (unsigned OF = 0; OF < OutputFactor; OF++)
- Mask.push_back(-1);
- continue;
- }
- // Set the Mask lanes adjusted for the size of the input and output
- // lanes. The Mask is always i8, so it will set OutputFactor lanes per
- // output element, adjusted in their positions per input and output types.
- unsigned Lane = V.getConstantOperandVal(1);
- for (unsigned S = 0; S < Sources.size(); S++) {
- if (V.getOperand(0) == Sources[S].Vec) {
- unsigned InputSize = Sources[S].Vec.getScalarValueSizeInBits();
- unsigned InputBase = 16 * S + Lane * InputSize / 8;
- for (unsigned OF = 0; OF < OutputFactor; OF++)
- Mask.push_back(InputBase + OF);
- break;
- }
- }
- }
- // Construct the tbl3/tbl4 out of an intrinsic, the sources converted to
- // v16i8, and the TBLMask
- SmallVector<SDValue, 16> TBLOperands;
- TBLOperands.push_back(DAG.getConstant(Sources.size() == 3
- ? Intrinsic::aarch64_neon_tbl3
- : Intrinsic::aarch64_neon_tbl4,
- dl, MVT::i32));
- for (unsigned i = 0; i < Sources.size(); i++) {
- SDValue Src = Sources[i].Vec;
- EVT SrcVT = Src.getValueType();
- Src = DAG.getBitcast(SrcVT.is64BitVector() ? MVT::v8i8 : MVT::v16i8, Src);
- assert((SrcVT.is64BitVector() || SrcVT.is128BitVector()) &&
- "Expected a legally typed vector");
- if (SrcVT.is64BitVector())
- Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, Src,
- DAG.getUNDEF(MVT::v8i8));
- TBLOperands.push_back(Src);
- }
- SmallVector<SDValue, 16> TBLMask;
- for (unsigned i = 0; i < Mask.size(); i++)
- TBLMask.push_back(DAG.getConstant(Mask[i], dl, MVT::i32));
- assert((Mask.size() == 8 || Mask.size() == 16) &&
- "Expected a v8i8 or v16i8 Mask");
- TBLOperands.push_back(
- DAG.getBuildVector(Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, dl, TBLMask));
- SDValue Shuffle =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
- Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, TBLOperands);
- return DAG.getBitcast(VT, Shuffle);
- }
- if (Sources.size() > 2) {
- LLVM_DEBUG(dbgs() << "Reshuffle failed: currently only do something "
- << "sensible when at most two source vectors are "
- << "involved\n");
- return SDValue();
- }
- // Find out the smallest element size among result and two sources, and use
- // it as element size to build the shuffle_vector.
- EVT SmallestEltTy = VT.getVectorElementType();
- for (auto &Source : Sources) {
- EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
- if (SrcEltTy.bitsLT(SmallestEltTy)) {
- SmallestEltTy = SrcEltTy;
- }
- }
- unsigned ResMultiplier =
- VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits();
- uint64_t VTSize = VT.getFixedSizeInBits();
- NumElts = VTSize / SmallestEltTy.getFixedSizeInBits();
- EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
- // If the source vector is too wide or too narrow, we may nevertheless be able
- // to construct a compatible shuffle either by concatenating it with UNDEF or
- // extracting a suitable range of elements.
- for (auto &Src : Sources) {
- EVT SrcVT = Src.ShuffleVec.getValueType();
- TypeSize SrcVTSize = SrcVT.getSizeInBits();
- if (SrcVTSize == TypeSize::Fixed(VTSize))
- continue;
- // This stage of the search produces a source with the same element type as
- // the original, but with a total width matching the BUILD_VECTOR output.
- EVT EltVT = SrcVT.getVectorElementType();
- unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
- EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
- if (SrcVTSize.getFixedValue() < VTSize) {
- assert(2 * SrcVTSize == VTSize);
- // We can pad out the smaller vector for free, so if it's part of a
- // shuffle...
- Src.ShuffleVec =
- DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
- DAG.getUNDEF(Src.ShuffleVec.getValueType()));
- continue;
- }
- if (SrcVTSize.getFixedValue() != 2 * VTSize) {
- LLVM_DEBUG(
- dbgs() << "Reshuffle failed: result vector too small to extract\n");
- return SDValue();
- }
- if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
- LLVM_DEBUG(
- dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
- return SDValue();
- }
- if (Src.MinElt >= NumSrcElts) {
- // The extraction can just take the second half
- Src.ShuffleVec =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(NumSrcElts, dl, MVT::i64));
- Src.WindowBase = -NumSrcElts;
- } else if (Src.MaxElt < NumSrcElts) {
- // The extraction can just take the first half
- Src.ShuffleVec =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(0, dl, MVT::i64));
- } else {
- // An actual VEXT is needed
- SDValue VEXTSrc1 =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(0, dl, MVT::i64));
- SDValue VEXTSrc2 =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(NumSrcElts, dl, MVT::i64));
- unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
- if (!SrcVT.is64BitVector()) {
- LLVM_DEBUG(
- dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT "
- "for SVE vectors.");
- return SDValue();
- }
- Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
- VEXTSrc2,
- DAG.getConstant(Imm, dl, MVT::i32));
- Src.WindowBase = -Src.MinElt;
- }
- }
- // Another possible incompatibility occurs from the vector element types. We
- // can fix this by bitcasting the source vectors to the same type we intend
- // for the shuffle.
- for (auto &Src : Sources) {
- EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
- if (SrcEltTy == SmallestEltTy)
- continue;
- assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
- Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
- Src.WindowScale =
- SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits();
- Src.WindowBase *= Src.WindowScale;
- }
- // Final check before we try to actually produce a shuffle.
- LLVM_DEBUG(for (auto Src
- : Sources)
- assert(Src.ShuffleVec.getValueType() == ShuffleVT););
- // The stars all align, our next step is to produce the mask for the shuffle.
- SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
- int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
- for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
- SDValue Entry = Op.getOperand(i);
- if (Entry.isUndef())
- continue;
- auto Src = find(Sources, Entry.getOperand(0));
- int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
- // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
- // trunc. So only std::min(SrcBits, DestBits) bits actually get defined in
- // this segment.
- EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
- int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
- VT.getScalarSizeInBits());
- int LanesDefined = BitsDefined / BitsPerShuffleLane;
- // This source is expected to fill ResMultiplier lanes of the final shuffle,
- // starting at the appropriate offset.
- int *LaneMask = &Mask[i * ResMultiplier];
- int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
- ExtractBase += NumElts * (Src - Sources.begin());
- for (int j = 0; j < LanesDefined; ++j)
- LaneMask[j] = ExtractBase + j;
- }
- // Final check before we try to produce nonsense...
- if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
- LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
- return SDValue();
- }
- SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
- for (unsigned i = 0; i < Sources.size(); ++i)
- ShuffleOps[i] = Sources[i].ShuffleVec;
- SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
- ShuffleOps[1], Mask);
- SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
- LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
- dbgs() << "Reshuffle, creating node: "; V.dump(););
- return V;
- }
- // check if an EXT instruction can handle the shuffle mask when the
- // vector sources of the shuffle are the same.
- static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
- unsigned NumElts = VT.getVectorNumElements();
- // Assume that the first shuffle index is not UNDEF. Fail if it is.
- if (M[0] < 0)
- return false;
- Imm = M[0];
- // If this is a VEXT shuffle, the immediate value is the index of the first
- // element. The other shuffle indices must be the successive elements after
- // the first one.
- unsigned ExpectedElt = Imm;
- for (unsigned i = 1; i < NumElts; ++i) {
- // Increment the expected index. If it wraps around, just follow it
- // back to index zero and keep going.
- ++ExpectedElt;
- if (ExpectedElt == NumElts)
- ExpectedElt = 0;
- if (M[i] < 0)
- continue; // ignore UNDEF indices
- if (ExpectedElt != static_cast<unsigned>(M[i]))
- return false;
- }
- return true;
- }
- // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
- // v4i32s. This is really a truncate, which we can construct out of (legal)
- // concats and truncate nodes.
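- // For example (illustrative): a BUILD_VECTOR collecting all 16 lanes of four
- // v4i32 sources a, b, c, d in order becomes
- //   concat(trunc(concat(trunc(a), trunc(b))), trunc(concat(trunc(c), trunc(d))))
- // which then selects to uzip1s, as noted further below.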
- static SDValue ReconstructTruncateFromBuildVector(SDValue V, SelectionDAG &DAG) {
- if (V.getValueType() != MVT::v16i8)
- return SDValue();
- assert(V.getNumOperands() == 16 && "Expected 16 operands on the BUILDVECTOR");
- for (unsigned X = 0; X < 4; X++) {
- // Check the first item in each group is an extract from lane 0 of a v4i32
- // or v4i16.
- SDValue BaseExt = V.getOperand(X * 4);
- if (BaseExt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- (BaseExt.getOperand(0).getValueType() != MVT::v4i16 &&
- BaseExt.getOperand(0).getValueType() != MVT::v4i32) ||
- !isa<ConstantSDNode>(BaseExt.getOperand(1)) ||
- BaseExt.getConstantOperandVal(1) != 0)
- return SDValue();
- SDValue Base = BaseExt.getOperand(0);
- // And check the other items are extracts from the same vector.
- for (unsigned Y = 1; Y < 4; Y++) {
- SDValue Ext = V.getOperand(X * 4 + Y);
- if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- Ext.getOperand(0) != Base ||
- !isa<ConstantSDNode>(Ext.getOperand(1)) ||
- Ext.getConstantOperandVal(1) != Y)
- return SDValue();
- }
- }
- // Turn the buildvector into a series of truncates and concats, which will
- // become uzip1's. Any v4i32s we found get truncated to v4i16, which are
- // concatenated together to produce 2 v8i16. These are both truncated and
- // concatenated together.
- SDLoc DL(V);
- SDValue Trunc[4] = {
- V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0),
- V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)};
- for (SDValue &V : Trunc)
- if (V.getValueType() == MVT::v4i32)
- V = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i16, V);
- SDValue Concat0 =
- DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]);
- SDValue Concat1 =
- DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[2], Trunc[3]);
- SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat0);
- SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat1);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Trunc0, Trunc1);
- }
- /// Check if a vector shuffle corresponds to a DUP instructions with a larger
- /// element width than the vector lane type. If that is the case the function
- /// returns true and writes the value of the DUP instruction lane operand into
- /// DupLaneOp
- static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
- unsigned &DupLaneOp) {
- assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
- "Only possible block sizes for wide DUP are: 16, 32, 64");
- if (BlockSize <= VT.getScalarSizeInBits())
- return false;
- if (BlockSize % VT.getScalarSizeInBits() != 0)
- return false;
- if (VT.getSizeInBits() % BlockSize != 0)
- return false;
- size_t SingleVecNumElements = VT.getVectorNumElements();
- size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits();
- size_t NumBlocks = VT.getSizeInBits() / BlockSize;
- // We are looking for masks like
- // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element
- // might be replaced by 'undefined'. BlockIndices will eventually contain
- // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7]
- // for the above examples)
- SmallVector<int, 8> BlockElts(NumEltsPerBlock, -1);
- for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++)
- for (size_t I = 0; I < NumEltsPerBlock; I++) {
- int Elt = M[BlockIndex * NumEltsPerBlock + I];
- if (Elt < 0)
- continue;
- // For now we don't support shuffles that use the second operand
- if ((unsigned)Elt >= SingleVecNumElements)
- return false;
- if (BlockElts[I] < 0)
- BlockElts[I] = Elt;
- else if (BlockElts[I] != Elt)
- return false;
- }
- // We found a candidate block (possibly with some undefs). It must be a
- // sequence of consecutive integers starting with a value divisible by
- // NumEltsPerBlock, with some values possibly replaced by undefs.
- // Find first non-undef element
- auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; });
- assert(FirstRealEltIter != BlockElts.end() &&
- "Shuffle with all-undefs must have been caught by previous cases, "
- "e.g. isSplat()");
- if (FirstRealEltIter == BlockElts.end()) {
- DupLaneOp = 0;
- return true;
- }
- // Index of FirstRealElt in BlockElts
- size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin();
- if ((unsigned)*FirstRealEltIter < FirstRealIndex)
- return false;
- // BlockElts[0] must have the following value if it isn't undef:
- size_t Elt0 = *FirstRealEltIter - FirstRealIndex;
- // Check the first element
- if (Elt0 % NumEltsPerBlock != 0)
- return false;
- // Check that the sequence indeed consists of consecutive integers (modulo
- // undefs)
- for (size_t I = 0; I < NumEltsPerBlock; I++)
- if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I)
- return false;
- DupLaneOp = Elt0 / NumEltsPerBlock;
- return true;
- }
- // check if an EXT instruction can handle the shuffle mask when the
- // vector sources of the shuffle are different.
- static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
- unsigned &Imm) {
- // Look for the first non-undef element.
- const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
- // Benefit from APInt to handle overflow when calculating the expected element.
- unsigned NumElts = VT.getVectorNumElements();
- unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
- APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
- // The following shuffle indices must be the successive elements after the
- // first real element.
- bool FoundWrongElt = std::any_of(FirstRealElt + 1, M.end(), [&](int Elt) {
- return Elt != ExpectedElt++ && Elt != -1;
- });
- if (FoundWrongElt)
- return false;
- // The index of an EXT is the first element if it is not UNDEF.
- // Watch out for the beginning UNDEFs. The EXT index should be the expected
- // value of the first element. E.g.
- // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
- // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
- // ExpectedElt is the last mask index plus 1.
- Imm = ExpectedElt.getZExtValue();
- // There are two different cases that require reversing the input vectors.
- // For example, for vector <4 x i32> we have the following cases,
- // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
- // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
- // For both cases, we finally use mask <5, 6, 7, 0>, which requires
- // to reverse two input vectors.
- if (Imm < NumElts)
- ReverseEXT = true;
- else
- Imm -= NumElts;
- return true;
- }
- /// isREVMask - Check if a vector shuffle corresponds to a REV
- /// instruction with the specified blocksize. (The order of the elements
- /// within each block of the vector is reversed.)
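- /// For example (illustrative): for v8i16, a REV32 (BlockSize == 32) mask is
- /// <1,0,3,2,5,4,7,6> and a REV64 (BlockSize == 64) mask is <3,2,1,0,7,6,5,4>,
- /// with any element optionally replaced by undef (-1).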
- static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
- assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64 ||
- BlockSize == 128) &&
- "Only possible block sizes for REV are: 16, 32, 64, 128");
- unsigned EltSz = VT.getScalarSizeInBits();
- unsigned NumElts = VT.getVectorNumElements();
- unsigned BlockElts = M[0] + 1;
- // If the first shuffle index is UNDEF, be optimistic.
- if (M[0] < 0)
- BlockElts = BlockSize / EltSz;
- if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
- return false;
- for (unsigned i = 0; i < NumElts; ++i) {
- if (M[i] < 0)
- continue; // ignore UNDEF indices
- if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
- return false;
- }
- return true;
- }
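- /// isZIPMask - Check whether a shuffle mask matches a ZIP1/ZIP2 interleave.
- /// For example (illustrative), for v4i32: <0,4,1,5> gives WhichResult == 0
- /// (ZIP1) and <2,6,3,7> gives WhichResult == 1 (ZIP2).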
- static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts % 2 != 0)
- return false;
- WhichResult = (M[0] == 0 ? 0 : 1);
- unsigned Idx = WhichResult * NumElts / 2;
- for (unsigned i = 0; i != NumElts; i += 2) {
- if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
- (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
- return false;
- Idx += 1;
- }
- return true;
- }
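- /// isUZPMask - Check whether a shuffle mask matches a UZP1/UZP2 de-interleave.
- /// For example (illustrative), for v4i32: <0,2,4,6> gives WhichResult == 0
- /// (UZP1) and <1,3,5,7> gives WhichResult == 1 (UZP2).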
- static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned NumElts = VT.getVectorNumElements();
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned i = 0; i != NumElts; ++i) {
- if (M[i] < 0)
- continue; // ignore UNDEF indices
- if ((unsigned)M[i] != 2 * i + WhichResult)
- return false;
- }
- return true;
- }
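- /// isTRNMask - Check whether a shuffle mask matches a TRN1/TRN2 transpose.
- /// For example (illustrative), for v4i32: <0,4,2,6> gives WhichResult == 0
- /// (TRN1) and <1,5,3,7> gives WhichResult == 1 (TRN2).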
- static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts % 2 != 0)
- return false;
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned i = 0; i < NumElts; i += 2) {
- if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
- (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
- return false;
- }
- return true;
- }
- /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
- /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
- /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
- static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts % 2 != 0)
- return false;
- WhichResult = (M[0] == 0 ? 0 : 1);
- unsigned Idx = WhichResult * NumElts / 2;
- for (unsigned i = 0; i != NumElts; i += 2) {
- if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
- (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
- return false;
- Idx += 1;
- }
- return true;
- }
- /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
- /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
- /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
- static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned Half = VT.getVectorNumElements() / 2;
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned j = 0; j != 2; ++j) {
- unsigned Idx = WhichResult;
- for (unsigned i = 0; i != Half; ++i) {
- int MIdx = M[i + j * Half];
- if (MIdx >= 0 && (unsigned)MIdx != Idx)
- return false;
- Idx += 2;
- }
- }
- return true;
- }
- /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
- /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
- /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
- static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts % 2 != 0)
- return false;
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned i = 0; i < NumElts; i += 2) {
- if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
- (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
- return false;
- }
- return true;
- }
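- /// isINSMask - Detect a shuffle that is an identity copy of one source except
- /// for a single element taken from elsewhere, which maps to an INS. For
- /// example, with 4 elements per source, <0, 1, 6, 3> keeps the LHS except for
- /// lane 2, so DstIsLeft is true, Anomaly == 2, and the inserted value is
- /// lane 2 of the RHS.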
- static bool isINSMask(ArrayRef<int> M, int NumInputElements,
- bool &DstIsLeft, int &Anomaly) {
- if (M.size() != static_cast<size_t>(NumInputElements))
- return false;
- int NumLHSMatch = 0, NumRHSMatch = 0;
- int LastLHSMismatch = -1, LastRHSMismatch = -1;
- for (int i = 0; i < NumInputElements; ++i) {
- if (M[i] == -1) {
- ++NumLHSMatch;
- ++NumRHSMatch;
- continue;
- }
- if (M[i] == i)
- ++NumLHSMatch;
- else
- LastLHSMismatch = i;
- if (M[i] == i + NumInputElements)
- ++NumRHSMatch;
- else
- LastRHSMismatch = i;
- }
- if (NumLHSMatch == NumInputElements - 1) {
- DstIsLeft = true;
- Anomaly = LastLHSMismatch;
- return true;
- } else if (NumRHSMatch == NumInputElements - 1) {
- DstIsLeft = false;
- Anomaly = LastRHSMismatch;
- return true;
- }
- return false;
- }
- static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {
- if (VT.getSizeInBits() != 128)
- return false;
- unsigned NumElts = VT.getVectorNumElements();
- for (int I = 0, E = NumElts / 2; I != E; I++) {
- if (Mask[I] != I)
- return false;
- }
- int Offset = NumElts / 2;
- for (int I = NumElts / 2, E = NumElts; I != E; I++) {
- if (Mask[I] != I + SplitLHS * Offset)
- return false;
- }
- return true;
- }
- static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- SDValue V0 = Op.getOperand(0);
- SDValue V1 = Op.getOperand(1);
- ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
- if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
- VT.getVectorElementType() != V1.getValueType().getVectorElementType())
- return SDValue();
- bool SplitV0 = V0.getValueSizeInBits() == 128;
- if (!isConcatMask(Mask, VT, SplitV0))
- return SDValue();
- EVT CastVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
- if (SplitV0) {
- V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
- DAG.getConstant(0, DL, MVT::i64));
- }
- if (V1.getValueSizeInBits() == 128) {
- V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
- DAG.getConstant(0, DL, MVT::i64));
- }
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
- }
- /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
- /// the specified operations to build the shuffle. ID is the perfect-shuffle
- /// ID, V1 and V2 are the original shuffle inputs. PFEntry is the perfect-shuffle
- /// table entry and LHS/RHS are the immediate inputs for this stage of the
- /// shuffle.
- static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
- SDValue V2, unsigned PFEntry, SDValue LHS,
- SDValue RHS, SelectionDAG &DAG,
- const SDLoc &dl) {
- unsigned OpNum = (PFEntry >> 26) & 0x0F;
- unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
- unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
- enum {
- OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
- OP_VREV,
- OP_VDUP0,
- OP_VDUP1,
- OP_VDUP2,
- OP_VDUP3,
- OP_VEXT1,
- OP_VEXT2,
- OP_VEXT3,
- OP_VUZPL, // VUZP, left result
- OP_VUZPR, // VUZP, right result
- OP_VZIPL, // VZIP, left result
- OP_VZIPR, // VZIP, right result
- OP_VTRNL, // VTRN, left result
- OP_VTRNR, // VTRN, right result
- OP_MOVLANE // Move lane. RHSID is the lane to move into
- };
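- // A perfect-shuffle ID encodes its four result lanes as base-9 digits (the
- // most significant digit is lane 0), with the digit 8 standing for undef. For
- // example, (1*9+2)*9+3 encodes <0, 1, 2, 3>, i.e. the LHS unchanged, and
- // ((4*9+5)*9+6)*9+7 encodes <4, 5, 6, 7>, i.e. the RHS unchanged; these are
- // the two legal OP_COPY IDs checked below.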
- if (OpNum == OP_COPY) {
- if (LHSID == (1 * 9 + 2) * 9 + 3)
- return LHS;
- assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
- return RHS;
- }
- if (OpNum == OP_MOVLANE) {
- // Decompose a PerfectShuffle ID to get the Mask for lane Elt
- auto getPFIDLane = [](unsigned ID, int Elt) -> int {
- assert(Elt < 4 && "Expected Perfect Lanes to be less than 4");
- Elt = 3 - Elt;
- while (Elt > 0) {
- ID /= 9;
- Elt--;
- }
- return (ID % 9 == 8) ? -1 : ID % 9;
- };
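- // For illustration: if ID encodes the lanes <0, 4, 1, 5>, i.e.
- // ID == ((0*9+4)*9+1)*9+5, then getPFIDLane(ID, 2) drops one base-9 digit and
- // returns 1, while a digit of 8 would have produced -1 (undef).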
- // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. We
- // get the lane to move from the PFID, which is always from the
- // original vectors (V1 or V2).
- SDValue OpLHS = GeneratePerfectShuffle(
- LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
- EVT VT = OpLHS.getValueType();
- assert(RHSID < 8 && "Expected a lane index for RHSID!");
- unsigned ExtLane = 0;
- SDValue Input;
- // OP_MOVLANE is either a D mov (if bit 0x4 of RHSID is set) or an S mov. D
- // movs move a pair of adjacent elements at once, so the operands are bitcast
- // to a wider element type below.
- if (RHSID & 0x4) {
- int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1;
- if (MaskElt == -1)
- MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1;
- assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
- ExtLane = MaskElt < 2 ? MaskElt : (MaskElt - 2);
- Input = MaskElt < 2 ? V1 : V2;
- if (VT.getScalarSizeInBits() == 16) {
- Input = DAG.getBitcast(MVT::v2f32, Input);
- OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
- } else {
- assert(VT.getScalarSizeInBits() == 32 &&
- "Expected 16 or 32 bit shuffle elemements");
- Input = DAG.getBitcast(MVT::v2f64, Input);
- OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
- }
- } else {
- int MaskElt = getPFIDLane(ID, RHSID);
- assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
- ExtLane = MaskElt < 4 ? MaskElt : (MaskElt - 4);
- Input = MaskElt < 4 ? V1 : V2;
- // Be careful about creating illegal types. Use f16 instead of i16.
- if (VT == MVT::v4i16) {
- Input = DAG.getBitcast(MVT::v4f16, Input);
- OpLHS = DAG.getBitcast(MVT::v4f16, OpLHS);
- }
- }
- SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- Input.getValueType().getVectorElementType(),
- Input, DAG.getVectorIdxConstant(ExtLane, dl));
- SDValue Ins =
- DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Input.getValueType(), OpLHS,
- Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl));
- return DAG.getBitcast(VT, Ins);
- }
- SDValue OpLHS, OpRHS;
- OpLHS = GeneratePerfectShuffle(LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS,
- RHS, DAG, dl);
- OpRHS = GeneratePerfectShuffle(RHSID, V1, V2, PerfectShuffleTable[RHSID], LHS,
- RHS, DAG, dl);
- EVT VT = OpLHS.getValueType();
- switch (OpNum) {
- default:
- llvm_unreachable("Unknown shuffle opcode!");
- case OP_VREV:
- // VREV divides the vector in half and swaps within the half.
- if (VT.getVectorElementType() == MVT::i32 ||
- VT.getVectorElementType() == MVT::f32)
- return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
- // vrev <4 x i16> -> REV32
- if (VT.getVectorElementType() == MVT::i16 ||
- VT.getVectorElementType() == MVT::f16 ||
- VT.getVectorElementType() == MVT::bf16)
- return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
- // vrev <4 x i8> -> REV16
- assert(VT.getVectorElementType() == MVT::i8);
- return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS);
- case OP_VDUP0:
- case OP_VDUP1:
- case OP_VDUP2:
- case OP_VDUP3: {
- EVT EltTy = VT.getVectorElementType();
- unsigned Opcode;
- if (EltTy == MVT::i8)
- Opcode = AArch64ISD::DUPLANE8;
- else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16)
- Opcode = AArch64ISD::DUPLANE16;
- else if (EltTy == MVT::i32 || EltTy == MVT::f32)
- Opcode = AArch64ISD::DUPLANE32;
- else if (EltTy == MVT::i64 || EltTy == MVT::f64)
- Opcode = AArch64ISD::DUPLANE64;
- else
- llvm_unreachable("Invalid vector element type?");
- if (VT.getSizeInBits() == 64)
- OpLHS = WidenVector(OpLHS, DAG);
- SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
- return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
- }
- case OP_VEXT1:
- case OP_VEXT2:
- case OP_VEXT3: {
- unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
- return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
- DAG.getConstant(Imm, dl, MVT::i32));
- }
- case OP_VUZPL:
- return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
- OpRHS);
- case OP_VUZPR:
- return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS,
- OpRHS);
- case OP_VZIPL:
- return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS,
- OpRHS);
- case OP_VZIPR:
- return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS,
- OpRHS);
- case OP_VTRNL:
- return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS,
- OpRHS);
- case OP_VTRNR:
- return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS,
- OpRHS);
- }
- }
- static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
- SelectionDAG &DAG) {
- // Check to see if we can use the TBL instruction.
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- SDLoc DL(Op);
- EVT EltVT = Op.getValueType().getVectorElementType();
- unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
- bool Swap = false;
- if (V1.isUndef() || isZerosVector(V1.getNode())) {
- std::swap(V1, V2);
- Swap = true;
- }
- // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
- // out of range values with 0s. We do need to make sure that any out-of-range
- // values are really out-of-range for a v16i8 vector.
- bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
- MVT IndexVT = MVT::v8i8;
- unsigned IndexLen = 8;
- if (Op.getValueSizeInBits() == 128) {
- IndexVT = MVT::v16i8;
- IndexLen = 16;
- }
- SmallVector<SDValue, 8> TBLMask;
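- // The loop below expands each shuffle index into BytesPerElt byte indices.
- // For illustration (assuming no swap): with v8i16 elements (BytesPerElt == 2)
- // a shuffle index of 3 expands to byte indices {6, 7}; an index into the
- // second source such as 9 expands to {18, 19}, which is either left for the
- // tbl2 path below or, when V2 is undef/zero, clamped to 255 so tbl1 fills
- // those bytes with zeros.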
- for (int Val : ShuffleMask) {
- for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
- unsigned Offset = Byte + Val * BytesPerElt;
- if (Swap)
- Offset = Offset < IndexLen ? Offset + IndexLen : Offset - IndexLen;
- if (IsUndefOrZero && Offset >= IndexLen)
- Offset = 255;
- TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32));
- }
- }
- SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1);
- SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
- SDValue Shuffle;
- if (IsUndefOrZero) {
- if (IndexLen == 8)
- V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
- Shuffle = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
- DAG.getBuildVector(IndexVT, DL, ArrayRef(TBLMask.data(), IndexLen)));
- } else {
- if (IndexLen == 8) {
- V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
- Shuffle = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
- DAG.getBuildVector(IndexVT, DL, ArrayRef(TBLMask.data(), IndexLen)));
- } else {
- // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
- // cannot currently represent the register constraints on the input
- // table registers.
- // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
- // DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
- // IndexLen));
- Shuffle = DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
- V2Cst,
- DAG.getBuildVector(IndexVT, DL, ArrayRef(TBLMask.data(), IndexLen)));
- }
- }
- return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
- }
- static unsigned getDUPLANEOp(EVT EltType) {
- if (EltType == MVT::i8)
- return AArch64ISD::DUPLANE8;
- if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16)
- return AArch64ISD::DUPLANE16;
- if (EltType == MVT::i32 || EltType == MVT::f32)
- return AArch64ISD::DUPLANE32;
- if (EltType == MVT::i64 || EltType == MVT::f64)
- return AArch64ISD::DUPLANE64;
- llvm_unreachable("Invalid vector element type?");
- }
- static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
- unsigned Opcode, SelectionDAG &DAG) {
- // Try to eliminate a bitcasted extract subvector before a DUPLANE.
- auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) {
- // Match: dup (bitcast (extract_subv X, C)), LaneC
- if (BitCast.getOpcode() != ISD::BITCAST ||
- BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR)
- return false;
- // The extract index must align in the destination type. That may not
- // happen if the bitcast is from a narrow to a wide type.
- SDValue Extract = BitCast.getOperand(0);
- unsigned ExtIdx = Extract.getConstantOperandVal(1);
- unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits();
- unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth;
- unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits();
- if (ExtIdxInBits % CastedEltBitWidth != 0)
- return false;
- // Can't handle cases where vector size is not 128-bit
- if (!Extract.getOperand(0).getValueType().is128BitVector())
- return false;
- // Update the lane value by offsetting with the scaled extract index.
- LaneC += ExtIdxInBits / CastedEltBitWidth;
- // Determine the casted vector type of the wide vector input.
- // dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC'
- // Examples:
- // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3
- // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5
- unsigned SrcVecNumElts =
- Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth;
- CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(),
- SrcVecNumElts);
- return true;
- };
- MVT CastVT;
- if (getScaledOffsetDup(V, Lane, CastVT)) {
- V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
- } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- V.getOperand(0).getValueType().is128BitVector()) {
- // The lane is incremented by the index of the extract.
- // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
- Lane += V.getConstantOperandVal(1);
- V = V.getOperand(0);
- } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
- // The lane is decremented if we are splatting from the 2nd operand.
- // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
- unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
- Lane -= Idx * VT.getVectorNumElements() / 2;
- V = WidenVector(V.getOperand(Idx), DAG);
- } else if (VT.getSizeInBits() == 64) {
- // Widen the operand to 128-bit register with undef.
- V = WidenVector(V, DAG);
- }
- return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64));
- }
- // Return true if we can form a new, half-length shuffle mask by checking that
- // every pair of adjacent mask values in the parameter array refers to two
- // consecutive elements starting at an even index (undef entries are allowed).
- static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
- SmallVectorImpl<int> &NewMask) {
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts % 2 != 0)
- return false;
- NewMask.clear();
- for (unsigned i = 0; i < NumElts; i += 2) {
- int M0 = M[i];
- int M1 = M[i + 1];
- // If both elements are undef, new mask is undef too.
- if (M0 == -1 && M1 == -1) {
- NewMask.push_back(-1);
- continue;
- }
- if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
- NewMask.push_back(M1 / 2);
- continue;
- }
- if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
- NewMask.push_back(M0 / 2);
- continue;
- }
- NewMask.clear();
- return false;
- }
- assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
- return true;
- }
- // Try to widen element type to get a new mask value for a better permutation
- // sequence, so that we can use NEON shuffle instructions, such as zip1/2,
- // UZP1/2, TRN1/2, REV, INS, etc.
- // For example:
- // shufflevector <4 x i32> %a, <4 x i32> %b,
- // <4 x i32> <i32 6, i32 7, i32 2, i32 3>
- // is equivalent to:
- // shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
- // Finally, we can get:
- // mov v0.d[0], v1.d[1]
- static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- EVT ScalarVT = VT.getVectorElementType();
- unsigned ElementSize = ScalarVT.getFixedSizeInBits();
- SDValue V0 = Op.getOperand(0);
- SDValue V1 = Op.getOperand(1);
- ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
- // If combining adjacent elements, like two i16's -> i32, two i32's -> i64 ...
- // We need to make sure the wider element type is legal. Thus, ElementSize
- // should not be larger than 32 bits, and the i1 type should also be excluded.
- if (ElementSize > 32 || ElementSize == 1)
- return SDValue();
- SmallVector<int, 8> NewMask;
- if (isWideTypeMask(Mask, VT, NewMask)) {
- MVT NewEltVT = VT.isFloatingPoint()
- ? MVT::getFloatingPointVT(ElementSize * 2)
- : MVT::getIntegerVT(ElementSize * 2);
- MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
- if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
- V0 = DAG.getBitcast(NewVT, V0);
- V1 = DAG.getBitcast(NewVT, V1);
- return DAG.getBitcast(VT,
- DAG.getVectorShuffle(NewVT, DL, V0, V1, NewMask));
- }
- }
- return SDValue();
- }
- // Try to fold shuffle (tbl2, tbl2) into a single tbl4.
- static SDValue tryToConvertShuffleOfTbl2ToTbl4(SDValue Op,
- ArrayRef<int> ShuffleMask,
- SelectionDAG &DAG) {
- SDValue Tbl1 = Op->getOperand(0);
- SDValue Tbl2 = Op->getOperand(1);
- SDLoc dl(Op);
- SDValue Tbl2ID =
- DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl2, dl, MVT::i64);
- EVT VT = Op.getValueType();
- if (Tbl1->getOpcode() != ISD::INTRINSIC_WO_CHAIN ||
- Tbl1->getOperand(0) != Tbl2ID ||
- Tbl2->getOpcode() != ISD::INTRINSIC_WO_CHAIN ||
- Tbl2->getOperand(0) != Tbl2ID)
- return SDValue();
- if (Tbl1->getValueType(0) != MVT::v16i8 ||
- Tbl2->getValueType(0) != MVT::v16i8)
- return SDValue();
- SDValue Mask1 = Tbl1->getOperand(3);
- SDValue Mask2 = Tbl2->getOperand(3);
- SmallVector<SDValue, 16> TBLMaskParts(16, SDValue());
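- // Build the combined mask byte by byte. Entries taken from the first tbl2 are
- // reused as-is; entries taken from the second tbl2 indexed that tbl2's own
- // 32-byte table, so they are rebased by +32 to address the upper pair of
- // table registers in the resulting tbl4.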
- for (unsigned I = 0; I < 16; I++) {
- if (ShuffleMask[I] < 16)
- TBLMaskParts[I] = Mask1->getOperand(ShuffleMask[I]);
- else {
- auto *C =
- dyn_cast<ConstantSDNode>(Mask2->getOperand(ShuffleMask[I] - 16));
- if (!C)
- return SDValue();
- TBLMaskParts[I] = DAG.getConstant(C->getSExtValue() + 32, dl, MVT::i32);
- }
- }
- SDValue TBLMask = DAG.getBuildVector(VT, dl, TBLMaskParts);
- SDValue ID =
- DAG.getTargetConstant(Intrinsic::aarch64_neon_tbl4, dl, MVT::i64);
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v16i8,
- {ID, Tbl1->getOperand(1), Tbl1->getOperand(2),
- Tbl2->getOperand(1), Tbl2->getOperand(2), TBLMask});
- }
- // Baseline legalization for ZERO_EXTEND_VECTOR_INREG will blend in zeros,
- // but we don't have an appropriate instruction,
- // so custom-lower it as ZIP1-with-zeros.
- SDValue
- AArch64TargetLowering::LowerZERO_EXTEND_VECTOR_INREG(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc dl(Op);
- EVT VT = Op.getValueType();
- SDValue SrcOp = Op.getOperand(0);
- EVT SrcVT = SrcOp.getValueType();
- assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
- "Unexpected extension factor.");
- unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
- // FIXME: support multi-step zipping?
- if (Scale != 2)
- return SDValue();
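- // For the usual little-endian layout, interleaving the low half of SrcOp with
- // zero elements and bitcasting the result doubles each element width with
- // zero extension; e.g. zip1 of a v16i8 source with zeros, viewed as v8i16, is
- // the zero extension of the low eight bytes.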
- SDValue Zeros = DAG.getConstant(0, dl, SrcVT);
- return DAG.getBitcast(VT,
- DAG.getNode(AArch64ISD::ZIP1, dl, SrcVT, SrcOp, Zeros));
- }
- SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc dl(Op);
- EVT VT = Op.getValueType();
- ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
- // Convert shuffles that are directly supported on NEON to target-specific
- // DAG nodes, instead of keeping them as shuffles and matching them again
- // during code selection. This is more efficient and avoids the possibility
- // of inconsistencies between legalization and selection.
- ArrayRef<int> ShuffleMask = SVN->getMask();
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!");
- assert(ShuffleMask.size() == VT.getVectorNumElements() &&
- "Unexpected VECTOR_SHUFFLE mask size!");
- if (SDValue Res = tryToConvertShuffleOfTbl2ToTbl4(Op, ShuffleMask, DAG))
- return Res;
- if (SVN->isSplat()) {
- int Lane = SVN->getSplatIndex();
- // If this is undef splat, generate it via "just" vdup, if possible.
- if (Lane == -1)
- Lane = 0;
- if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
- return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
- V1.getOperand(0));
- // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
- // constant. If so, we can just reference the lane's definition directly.
- if (V1.getOpcode() == ISD::BUILD_VECTOR &&
- !isa<ConstantSDNode>(V1.getOperand(Lane)))
- return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
- // Otherwise, duplicate from the lane of the input vector.
- unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());
- return constructDup(V1, Lane, dl, VT, Opcode, DAG);
- }
- // Check if the mask matches a DUP for a wider element
- for (unsigned LaneSize : {64U, 32U, 16U}) {
- unsigned Lane = 0;
- if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) {
- unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64
- : LaneSize == 32 ? AArch64ISD::DUPLANE32
- : AArch64ISD::DUPLANE16;
- // Cast V1 to an integer vector with required lane size
- MVT NewEltTy = MVT::getIntegerVT(LaneSize);
- unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
- MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
- V1 = DAG.getBitcast(NewVecTy, V1);
- // Construct the DUP instruction
- V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
- // Cast back to the original type
- return DAG.getBitcast(VT, V1);
- }
- }
- if (isREVMask(ShuffleMask, VT, 64))
- return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
- if (isREVMask(ShuffleMask, VT, 32))
- return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
- if (isREVMask(ShuffleMask, VT, 16))
- return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);
- if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) ||
- (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) &&
- ShuffleVectorInst::isReverseMask(ShuffleMask)) {
- SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1);
- return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev,
- DAG.getConstant(8, dl, MVT::i32));
- }
- bool ReverseEXT = false;
- unsigned Imm;
- if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
- if (ReverseEXT)
- std::swap(V1, V2);
- Imm *= getExtFactor(V1);
- return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
- DAG.getConstant(Imm, dl, MVT::i32));
- } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
- Imm *= getExtFactor(V1);
- return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
- DAG.getConstant(Imm, dl, MVT::i32));
- }
- unsigned WhichResult;
- if (isZIPMask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
- return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
- }
- if (isUZPMask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
- return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
- }
- if (isTRNMask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
- return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
- }
- if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
- return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
- }
- if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
- return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
- }
- if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
- return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
- }
- if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
- return Concat;
- bool DstIsLeft;
- int Anomaly;
- int NumInputElements = V1.getValueType().getVectorNumElements();
- if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
- SDValue DstVec = DstIsLeft ? V1 : V2;
- SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64);
- SDValue SrcVec = V1;
- int SrcLane = ShuffleMask[Anomaly];
- if (SrcLane >= NumInputElements) {
- SrcVec = V2;
- SrcLane -= VT.getVectorNumElements();
- }
- SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64);
- EVT ScalarVT = VT.getVectorElementType();
- if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger())
- ScalarVT = MVT::i32;
- return DAG.getNode(
- ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV),
- DstLaneV);
- }
- if (SDValue NewSD = tryWidenMaskForShuffle(Op, DAG))
- return NewSD;
- // If the shuffle is not directly supported and it has 4 elements, use
- // the PerfectShuffle-generated table to synthesize it from other shuffles.
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts == 4) {
- unsigned PFIndexes[4];
- for (unsigned i = 0; i != 4; ++i) {
- if (ShuffleMask[i] < 0)
- PFIndexes[i] = 8;
- else
- PFIndexes[i] = ShuffleMask[i];
- }
- // Compute the index in the perfect shuffle table.
- unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
- PFIndexes[2] * 9 + PFIndexes[3];
- unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
- return GeneratePerfectShuffle(PFTableIndex, V1, V2, PFEntry, V1, V2, DAG,
- dl);
- }
- return GenerateTBL(Op, ShuffleMask, DAG);
- }
- SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerToScalableOp(Op, DAG);
- assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 &&
- "Unexpected vector type!");
- // We can handle the constant cases during isel.
- if (isa<ConstantSDNode>(Op.getOperand(0)))
- return Op;
- // There isn't a natural way to handle the general i1 case, so we use some
- // trickery with whilelo.
- SDLoc DL(Op);
- SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64);
- SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, SplatVal,
- DAG.getValueType(MVT::i1));
- SDValue ID =
- DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- if (VT == MVT::nxv1i1)
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::nxv1i1,
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv2i1, ID,
- Zero, SplatVal),
- Zero);
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Zero, SplatVal);
- }
- SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- if (!isTypeLegal(VT) || !VT.isScalableVector())
- return SDValue();
- // Current lowering only supports the SVE-ACLE types.
- if (VT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
- return SDValue();
- // The DUPQ operation is independent of element type, so normalise to i64s.
- SDValue Idx128 = Op.getOperand(2);
- // DUPQ can be used when idx is in range.
- auto *CIdx = dyn_cast<ConstantSDNode>(Idx128);
- if (CIdx && (CIdx->getZExtValue() <= 3)) {
- SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64);
- return DAG.getNode(AArch64ISD::DUPLANE128, DL, VT, Op.getOperand(1), CI);
- }
- SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1));
- // The ACLE says this must produce the same result as:
- // svtbl(data, svadd_x(svptrue_b64(),
- // svand_x(svptrue_b64(), svindex_u64(0, 1), 1),
- // index * 2))
- SDValue One = DAG.getConstant(1, DL, MVT::i64);
- SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One);
- // create the vector 0,1,0,1,...
- SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64);
- SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne);
- // create the vector idx64,idx64+1,idx64,idx64+1,...
- SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128);
- SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64);
- SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64);
- // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],...
- SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask);
- return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
- }
- static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
- APInt &UndefBits) {
- EVT VT = BVN->getValueType(0);
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
- unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;
- for (unsigned i = 0; i < NumSplats; ++i) {
- CnstBits <<= SplatBitSize;
- UndefBits <<= SplatBitSize;
- CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
- UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
- }
- return true;
- }
- return false;
- }
- // Try 64-bit splatted SIMD immediate.
- static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
- const APInt &Bits) {
- if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
- uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
- EVT VT = Op.getValueType();
- MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;
- if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
- Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);
- SDLoc dl(Op);
- SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
- DAG.getConstant(Value, dl, MVT::i32));
- return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
- }
- }
- return SDValue();
- }
- // Try 32-bit splatted SIMD immediate.
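- // These match an 8-bit value placed at one of the four byte positions of
- // every 32-bit lane; e.g. a constant that repeats 0x0000nn00 in each 32-bit
- // lane should be matched below and emitted with Shift == 8.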
- static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
- const APInt &Bits,
- const SDValue *LHS = nullptr) {
- EVT VT = Op.getValueType();
- if (VT.isFixedLengthVector() &&
- DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())
- return SDValue();
- if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
- uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
- MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
- bool isAdvSIMDModImm = false;
- uint64_t Shift;
- if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
- Shift = 0;
- }
- else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
- Shift = 8;
- }
- else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
- Shift = 16;
- }
- else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
- Shift = 24;
- }
- if (isAdvSIMDModImm) {
- SDLoc dl(Op);
- SDValue Mov;
- if (LHS)
- Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
- DAG.getConstant(Value, dl, MVT::i32),
- DAG.getConstant(Shift, dl, MVT::i32));
- else
- Mov = DAG.getNode(NewOp, dl, MovTy,
- DAG.getConstant(Value, dl, MVT::i32),
- DAG.getConstant(Shift, dl, MVT::i32));
- return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
- }
- }
- return SDValue();
- }
- // Try 16-bit splatted SIMD immediate.
- static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
- const APInt &Bits,
- const SDValue *LHS = nullptr) {
- EVT VT = Op.getValueType();
- if (VT.isFixedLengthVector() &&
- DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())
- return SDValue();
- if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
- uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
- MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
- bool isAdvSIMDModImm = false;
- uint64_t Shift;
- if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
- Shift = 0;
- }
- else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
- Shift = 8;
- }
- if (isAdvSIMDModImm) {
- SDLoc dl(Op);
- SDValue Mov;
- if (LHS)
- Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
- DAG.getConstant(Value, dl, MVT::i32),
- DAG.getConstant(Shift, dl, MVT::i32));
- else
- Mov = DAG.getNode(NewOp, dl, MovTy,
- DAG.getConstant(Value, dl, MVT::i32),
- DAG.getConstant(Shift, dl, MVT::i32));
- return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
- }
- }
- return SDValue();
- }
- // Try 32-bit splatted SIMD immediate with shifted ones.
- static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
- SelectionDAG &DAG, const APInt &Bits) {
- if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
- uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
- EVT VT = Op.getValueType();
- MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
- bool isAdvSIMDModImm = false;
- uint64_t Shift;
- if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
- Shift = 264;
- }
- else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
- Shift = 272;
- }
- if (isAdvSIMDModImm) {
- SDLoc dl(Op);
- SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
- DAG.getConstant(Value, dl, MVT::i32),
- DAG.getConstant(Shift, dl, MVT::i32));
- return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
- }
- }
- return SDValue();
- }
- // Try 8-bit splatted SIMD immediate.
- static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
- const APInt &Bits) {
- if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
- uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
- EVT VT = Op.getValueType();
- MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
- if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
- Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);
- SDLoc dl(Op);
- SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
- DAG.getConstant(Value, dl, MVT::i32));
- return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
- }
- }
- return SDValue();
- }
- // Try FP splatted SIMD immediate.
- static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
- const APInt &Bits) {
- if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
- uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
- EVT VT = Op.getValueType();
- bool isWide = (VT.getSizeInBits() == 128);
- MVT MovTy;
- bool isAdvSIMDModImm = false;
- if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
- MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
- }
- else if (isWide &&
- (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
- Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
- MovTy = MVT::v2f64;
- }
- if (isAdvSIMDModImm) {
- SDLoc dl(Op);
- SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
- DAG.getConstant(Value, dl, MVT::i32));
- return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
- }
- }
- return SDValue();
- }
- // Specialized code to quickly find if PotentialBVec is a BuildVector that
- // consists of only the same constant integer value, which is returned in the
- // reference argument ConstVal.
- static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
- uint64_t &ConstVal) {
- BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
- if (!Bvec)
- return false;
- ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
- if (!FirstElt)
- return false;
- EVT VT = Bvec->getValueType(0);
- unsigned NumElts = VT.getVectorNumElements();
- for (unsigned i = 1; i < NumElts; ++i)
- if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
- return false;
- ConstVal = FirstElt->getZExtValue();
- return true;
- }
- // Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
- // to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
- // BUILD_VECTORs with constant element C1, C2 is a constant, and:
- // - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
- // - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
- // The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
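- // For example, on v4i32 with C2 == 8 the SLI case is
- // (or (and X, <0x000000FF, ...>), (AArch64ISD::VSHL Y, 8)), which becomes
- // (VSLI X, Y, 8): the low 8 bits of each lane are kept from X and the
- // remaining bits are taken from Y shifted left by 8.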
- static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- if (!VT.isVector())
- return SDValue();
- SDLoc DL(N);
- SDValue And;
- SDValue Shift;
- SDValue FirstOp = N->getOperand(0);
- unsigned FirstOpc = FirstOp.getOpcode();
- SDValue SecondOp = N->getOperand(1);
- unsigned SecondOpc = SecondOp.getOpcode();
- // Is one of the operands an AND or a BICi? The AND may have been optimised to
- // a BICi in order to use an immediate instead of a register.
- // Is the other operand a shl or lshr? This will have been turned into:
- // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift.
- if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) &&
- (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) {
- And = FirstOp;
- Shift = SecondOp;
- } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) &&
- (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) {
- And = SecondOp;
- Shift = FirstOp;
- } else
- return SDValue();
- bool IsAnd = And.getOpcode() == ISD::AND;
- bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR;
- // Is the shift amount constant?
- ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
- if (!C2node)
- return SDValue();
- uint64_t C1;
- if (IsAnd) {
- // Is the and mask vector all constant?
- if (!isAllConstantBuildVector(And.getOperand(1), C1))
- return SDValue();
- } else {
- // Reconstruct the corresponding AND immediate from the two BICi immediates.
- ConstantSDNode *C1nodeImm = dyn_cast<ConstantSDNode>(And.getOperand(1));
- ConstantSDNode *C1nodeShift = dyn_cast<ConstantSDNode>(And.getOperand(2));
- assert(C1nodeImm && C1nodeShift);
- C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue());
- }
- // Is C1 == ~(Ones(ElemSizeInBits) << C2) or
- // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account
- // how much one can shift elements of a particular size?
- uint64_t C2 = C2node->getZExtValue();
- unsigned ElemSizeInBits = VT.getScalarSizeInBits();
- if (C2 > ElemSizeInBits)
- return SDValue();
- APInt C1AsAPInt(ElemSizeInBits, C1);
- APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2)
- : APInt::getLowBitsSet(ElemSizeInBits, C2);
- if (C1AsAPInt != RequiredC1)
- return SDValue();
- SDValue X = And.getOperand(0);
- SDValue Y = Shift.getOperand(0);
- unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
- SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1));
- LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
- LLVM_DEBUG(N->dump(&DAG));
- LLVM_DEBUG(dbgs() << "into: \n");
- LLVM_DEBUG(ResultSLI->dump(&DAG));
- ++NumShiftInserts;
- return ResultSLI;
- }
- SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
- SelectionDAG &DAG) const {
- if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerToScalableOp(Op, DAG);
- // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
- if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
- return Res;
- EVT VT = Op.getValueType();
- SDValue LHS = Op.getOperand(0);
- BuildVectorSDNode *BVN =
- dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
- if (!BVN) {
- // OR commutes, so try swapping the operands.
- LHS = Op.getOperand(1);
- BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
- }
- if (!BVN)
- return Op;
- APInt DefBits(VT.getSizeInBits(), 0);
- APInt UndefBits(VT.getSizeInBits(), 0);
- if (resolveBuildVector(BVN, DefBits, UndefBits)) {
- SDValue NewOp;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
- DefBits, &LHS)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
- DefBits, &LHS)))
- return NewOp;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
- UndefBits, &LHS)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
- UndefBits, &LHS)))
- return NewOp;
- }
- // We can always fall back to a non-immediate OR.
- return Op;
- }
- // Normalize the operands of BUILD_VECTOR. The value of constant operands will
- // be truncated to fit element width.
- static SDValue NormalizeBuildVector(SDValue Op,
- SelectionDAG &DAG) {
- assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
- SDLoc dl(Op);
- EVT VT = Op.getValueType();
- EVT EltTy = VT.getVectorElementType();
- if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
- return Op;
- SmallVector<SDValue, 16> Ops;
- for (SDValue Lane : Op->ops()) {
- // For integer vectors, type legalization would have promoted the
- // operands already. Otherwise, if Op is a floating-point splat
- // (with operands cast to integers), then the only possibilities
- // are constants and UNDEFs.
- if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
- APInt LowBits(EltTy.getSizeInBits(),
- CstLane->getZExtValue());
- Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
- } else if (Lane.getNode()->isUndef()) {
- Lane = DAG.getUNDEF(MVT::i32);
- } else {
- assert(Lane.getValueType() == MVT::i32 &&
- "Unexpected BUILD_VECTOR operand type");
- }
- Ops.push_back(Lane);
- }
- return DAG.getBuildVector(VT, dl, Ops);
- }
- static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
- APInt DefBits(VT.getSizeInBits(), 0);
- APInt UndefBits(VT.getSizeInBits(), 0);
- BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
- if (resolveBuildVector(BVN, DefBits, UndefBits)) {
- SDValue NewOp;
- if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
- return NewOp;
- DefBits = ~DefBits;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
- return NewOp;
- DefBits = UndefBits;
- if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
- return NewOp;
- DefBits = ~UndefBits;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
- return NewOp;
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE())) {
- if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) {
- SDLoc DL(Op);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT);
- SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second);
- SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);
- return convertFromScalableVector(DAG, Op.getValueType(), Seq);
- }
- // Revert to common legalisation for all other variants.
- return SDValue();
- }
- // Try to build a simple constant vector.
- Op = NormalizeBuildVector(Op, DAG);
- // Though this might return a non-BUILD_VECTOR (e.g. CONCAT_VECTORS); if so,
- // abort.
- if (Op.getOpcode() != ISD::BUILD_VECTOR)
- return SDValue();
- if (VT.isInteger()) {
- // Certain vector constants, used to express things like logical NOT and
- // arithmetic NEG, are passed through unmodified. This allows special
- // patterns for these operations to match, which will lower these constants
- // to whatever is proven necessary.
- BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
- if (BVN->isConstant())
- if (ConstantSDNode *Const = BVN->getConstantSplatNode()) {
- unsigned BitSize = VT.getVectorElementType().getSizeInBits();
- APInt Val(BitSize,
- Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
- if (Val.isZero() || Val.isAllOnes())
- return Op;
- }
- }
- if (SDValue V = ConstantBuildVector(Op, DAG))
- return V;
- // Scan through the operands to find some interesting properties we can
- // exploit:
- // 1) If only one value is used, we can use a DUP, or
- // 2) if only the low element is not undef, we can just insert that, or
- // 3) if only one constant value is used (w/ some non-constant lanes),
- // we can splat the constant value into the whole vector then fill
- // in the non-constant lanes.
- // 4) FIXME: If different constant values are used, but we can intelligently
- // select the values we'll be overwriting for the non-constant
- // lanes such that we can directly materialize the vector
- // some other way (MOVI, e.g.), we can be sneaky.
- // 5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP.
- SDLoc dl(Op);
- unsigned NumElts = VT.getVectorNumElements();
- bool isOnlyLowElement = true;
- bool usesOnlyOneValue = true;
- bool usesOnlyOneConstantValue = true;
- bool isConstant = true;
- bool AllLanesExtractElt = true;
- unsigned NumConstantLanes = 0;
- unsigned NumDifferentLanes = 0;
- unsigned NumUndefLanes = 0;
- SDValue Value;
- SDValue ConstantValue;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- AllLanesExtractElt = false;
- if (V.isUndef()) {
- ++NumUndefLanes;
- continue;
- }
- if (i > 0)
- isOnlyLowElement = false;
- if (!isIntOrFPConstant(V))
- isConstant = false;
- if (isIntOrFPConstant(V)) {
- ++NumConstantLanes;
- if (!ConstantValue.getNode())
- ConstantValue = V;
- else if (ConstantValue != V)
- usesOnlyOneConstantValue = false;
- }
- if (!Value.getNode())
- Value = V;
- else if (V != Value) {
- usesOnlyOneValue = false;
- ++NumDifferentLanes;
- }
- }
- if (!Value.getNode()) {
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
- return DAG.getUNDEF(VT);
- }
- // Convert BUILD_VECTOR where all elements but the lowest are undef into
- // SCALAR_TO_VECTOR, except for when we have a single-element constant vector
- // as SimplifyDemandedBits will just turn that back into BUILD_VECTOR.
- if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) {
- LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
- "SCALAR_TO_VECTOR node\n");
- return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
- }
- if (AllLanesExtractElt) {
- SDNode *Vector = nullptr;
- bool Even = false;
- bool Odd = false;
- // Check whether the extract elements match the Even pattern <0,2,4,...> or
- // the Odd pattern <1,3,5,...>.
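- // For example, a v4i16 built from lanes <v[0], v[2], v[4], v[6]> of a v8i16
- // %v matches the Even pattern and becomes UZP1 of the two halves of %v, while
- // <v[1], v[3], v[5], v[7]> matches the Odd pattern and becomes UZP2.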
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- const SDNode *N = V.getNode();
- if (!isa<ConstantSDNode>(N->getOperand(1)))
- break;
- SDValue N0 = N->getOperand(0);
- // All elements are extracted from the same vector.
- if (!Vector) {
- Vector = N0.getNode();
- // Check that the type of EXTRACT_VECTOR_ELT matches the type of
- // BUILD_VECTOR.
- if (VT.getVectorElementType() !=
- N0.getValueType().getVectorElementType())
- break;
- } else if (Vector != N0.getNode()) {
- Odd = false;
- Even = false;
- break;
- }
- // Extracted values are either at Even indices <0,2,4,...> or at Odd
- // indices <1,3,5,...>.
- uint64_t Val = N->getConstantOperandVal(1);
- if (Val == 2 * i) {
- Even = true;
- continue;
- }
- if (Val - 1 == 2 * i) {
- Odd = true;
- continue;
- }
- // Something does not match: abort.
- Odd = false;
- Even = false;
- break;
- }
- if (Even || Odd) {
- SDValue LHS =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
- DAG.getConstant(0, dl, MVT::i64));
- SDValue RHS =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
- DAG.getConstant(NumElts, dl, MVT::i64));
- if (Even && !Odd)
- return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
- RHS);
- if (Odd && !Even)
- return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
- RHS);
- }
- }
- // Use DUP for non-constant splats. For f32 constant splats, reduce to
- // i32 and try again.
- if (usesOnlyOneValue) {
- if (!isConstant) {
- if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- Value.getValueType() != VT) {
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
- return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
- }
- // This is actually a DUPLANExx operation, which keeps everything vectory.
- SDValue Lane = Value.getOperand(1);
- Value = Value.getOperand(0);
- if (Value.getValueSizeInBits() == 64) {
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
- "widening it\n");
- Value = WidenVector(Value, DAG);
- }
- unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
- return DAG.getNode(Opcode, dl, VT, Value, Lane);
- }
- if (VT.getVectorElementType().isFloatingPoint()) {
- SmallVector<SDValue, 8> Ops;
- EVT EltTy = VT.getVectorElementType();
- assert((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 ||
- EltTy == MVT::f64) && "Unsupported floating-point vector type");
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
- "BITCASTS, and try again\n");
- MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
- for (unsigned i = 0; i < NumElts; ++i)
- Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
- EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
- SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
- LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
- Val.dump(););
- Val = LowerBUILD_VECTOR(Val, DAG);
- if (Val.getNode())
- return DAG.getNode(ISD::BITCAST, dl, VT, Val);
- }
- }
- // If we need to insert a small number of different non-constant elements and
- // the vector width is sufficiently large, prefer using DUP with the common
- // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred,
- // skip the constant lane handling below.
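- // For example, a fully-defined v8i16 in which seven lanes share one
- // non-constant value and one lane differs is cheaper as a single DUP plus one
- // INSERT_VECTOR_ELT than as eight per-lane inserts.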
- bool PreferDUPAndInsert =
- !isConstant && NumDifferentLanes >= 1 &&
- NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) &&
- NumDifferentLanes >= NumConstantLanes;
- // If only one constant value was used, and it was used for more than one
- // lane, start by splatting that value and then replace the non-constant
- // lanes. This is better than the default, which performs a separate
- // initialization for each lane.
- if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) {
- // Firstly, try to materialize the splat constant.
- SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue),
- Val = ConstantBuildVector(Vec, DAG);
- if (!Val) {
- // Otherwise, materialize the constant and splat it.
- Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
- DAG.ReplaceAllUsesWith(Vec.getNode(), &Val);
- }
- // Now insert the non-constant lanes.
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
- if (!isIntOrFPConstant(V))
- // Note that type legalization likely mucked about with the VT of the
- // source operand, so we may have to convert it here before inserting.
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
- }
- return Val;
- }
- // This will generate a load from the constant pool.
- if (isConstant) {
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
- "expansion\n");
- return SDValue();
- }
- // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
- // v4i32s. This is really a truncate, which we can construct out of (legal)
- // concats and truncate nodes.
- if (SDValue M = ReconstructTruncateFromBuildVector(Op, DAG))
- return M;
- // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
- if (NumElts >= 4) {
- if (SDValue shuffle = ReconstructShuffle(Op, DAG))
- return shuffle;
- }
- if (PreferDUPAndInsert) {
- // First, build a constant vector with the common element.
- SmallVector<SDValue, 8> Ops(NumElts, Value);
- SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
- // Next, insert the elements that do not match the common value.
- for (unsigned I = 0; I < NumElts; ++I)
- if (Op.getOperand(I) != Value)
- NewVector =
- DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
- Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));
- return NewVector;
- }
- // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
- // know the default expansion would otherwise fall back on something even
- // worse. For a vector with one or two non-undef values, that's
- // scalar_to_vector for the elements followed by a shuffle (provided the
- // shuffle is valid for the target) and materialization element by element
- // on the stack followed by a load for everything else.
- if (!isConstant && !usesOnlyOneValue) {
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
- "of INSERT_VECTOR_ELT\n");
- SDValue Vec = DAG.getUNDEF(VT);
- SDValue Op0 = Op.getOperand(0);
- unsigned i = 0;
- // Use SCALAR_TO_VECTOR for lane zero to
- // a) Avoid a RMW dependency on the full vector register, and
- // b) Allow the register coalescer to fold away the copy if the
- // value is already in an S or D register, and we're forced to emit an
- // INSERT_SUBREG that we can't fold anywhere.
- //
- // We also allow types like i8 and i16 which are illegal scalar but legal
- // vector element types. After type-legalization the inserted value is
- // extended (i32) and it is safe to cast them to the vector type by ignoring
- // the upper bits of the lowest lane (e.g. v8i8, v4i16).
- if (!Op0.isUndef()) {
- LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
- Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
- ++i;
- }
- LLVM_DEBUG(if (i < NumElts) dbgs()
- << "Creating nodes for the other vector elements:\n";);
- for (; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- if (V.isUndef())
- continue;
- SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
- Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
- }
- return Vec;
- }
- LLVM_DEBUG(
- dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
- "better alternative\n");
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
- SelectionDAG &DAG) const {
- if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
- assert(Op.getValueType().isScalableVector() &&
- isTypeLegal(Op.getValueType()) &&
- "Expected legal scalable vector type!");
- if (isTypeLegal(Op.getOperand(0).getValueType())) {
- unsigned NumOperands = Op->getNumOperands();
- assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
- "Unexpected number of operands in CONCAT_VECTORS");
- if (NumOperands == 2)
- return Op;
- // Concat each pair of subvectors and pack into the lower half of the array.
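- // e.g. (concat a, b, c, d) becomes (concat (concat a, b), (concat c, d)).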
- SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end());
- while (ConcatOps.size() > 1) {
- for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) {
- SDValue V1 = ConcatOps[I];
- SDValue V2 = ConcatOps[I + 1];
- EVT SubVT = V1.getValueType();
- EVT PairVT = SubVT.getDoubleNumVectorElementsVT(*DAG.getContext());
- ConcatOps[I / 2] =
- DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), PairVT, V1, V2);
- }
- ConcatOps.resize(ConcatOps.size() / 2);
- }
- return ConcatOps[0];
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
- if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthInsertVectorElt(Op, DAG);
- // Check for non-constant or out of range lane.
- EVT VT = Op.getOperand(0).getValueType();
- if (VT.getScalarType() == MVT::i1) {
- EVT VectorVT = getPromotedVTForPredicate(VT);
- SDLoc DL(Op);
- SDValue ExtendedVector =
- DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT);
- SDValue ExtendedValue =
- DAG.getAnyExtOrTrunc(Op.getOperand(1), DL,
- VectorVT.getScalarType().getSizeInBits() < 32
- ? MVT::i32
- : VectorVT.getScalarType());
- ExtendedVector =
- DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector,
- ExtendedValue, Op.getOperand(2));
- return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT);
- }
- ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
- if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
- return SDValue();
- // Insertion/extraction are legal for V128 types.
- if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
- VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
- VT == MVT::v8f16 || VT == MVT::v8bf16)
- return Op;
- if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
- VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
- VT != MVT::v4bf16)
- return SDValue();
- // For V64 types, we perform the insertion by expanding the value
- // to a V128 type and performing the insertion on that.
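- // e.g. an insert into v4i16 is widened to v8i16, performed there, and the
- // result narrowed back to v4i16.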
- SDLoc DL(Op);
- SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
- EVT WideTy = WideVec.getValueType();
- SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec,
- Op.getOperand(1), Op.getOperand(2));
- // Re-narrow the resultant vector.
- return NarrowVector(Node, DAG);
- }
- SDValue
- AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
- EVT VT = Op.getOperand(0).getValueType();
- if (VT.getScalarType() == MVT::i1) {
- // We can't directly extract from an SVE predicate; extend it first.
- // (This isn't the only possible lowering, but it's straightforward.)
- EVT VectorVT = getPromotedVTForPredicate(VT);
- SDLoc DL(Op);
- SDValue Extend =
- DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0));
- MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32;
- SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy,
- Extend, Op.getOperand(1));
- return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
- }
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthExtractVectorElt(Op, DAG);
- // Check for non-constant or out of range lane.
- ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
- if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
- return SDValue();
- // Insertion/extraction are legal for V128 types.
- if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
- VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
- VT == MVT::v8f16 || VT == MVT::v8bf16)
- return Op;
- if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
- VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
- VT != MVT::v4bf16)
- return SDValue();
- // For V64 types, we perform the extraction by expanding the value
- // to a V128 type and performing the extraction on that.
- SDLoc DL(Op);
- SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
- EVT WideTy = WideVec.getValueType();
- EVT ExtrTy = WideTy.getVectorElementType();
- if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8)
- ExtrTy = MVT::i32;
- // For extractions, we just return the result directly.
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec,
- Op.getOperand(1));
- }
- SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getValueType().isFixedLengthVector() &&
- "Only cases that extract a fixed length vector are supported!");
- EVT InVT = Op.getOperand(0).getValueType();
- unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- unsigned Size = Op.getValueSizeInBits();
- // If we don't have legal types yet, do nothing
- if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
- return SDValue();
- if (InVT.isScalableVector()) {
- // This will be matched by custom code during ISelDAGToDAG.
- if (Idx == 0 && isPackedVectorType(InVT, DAG))
- return Op;
- return SDValue();
- }
- // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
- if (Idx == 0 && InVT.getSizeInBits() <= 128)
- return Op;
- // If this is extracting the upper 64-bits of a 128-bit vector, we match
- // that directly.
- if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
- InVT.getSizeInBits() == 128 && !Subtarget->forceStreamingCompatibleSVE())
- return Op;
- if (useSVEForFixedLengthVectorVT(InVT,
- Subtarget->forceStreamingCompatibleSVE())) {
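- // Extract from a non-zero index by splicing the vector with itself; this
- // rotates the desired elements down to the front of the container, from
- // which the fixed-length result is then taken.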
- SDLoc DL(Op);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
- SDValue NewInVec =
- convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
- SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec,
- NewInVec, DAG.getConstant(Idx, DL, MVT::i64));
- return convertFromScalableVector(DAG, Op.getValueType(), Splice);
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getValueType().isScalableVector() &&
- "Only expect to lower inserts into scalable vectors!");
- EVT InVT = Op.getOperand(1).getValueType();
- unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
- SDValue Vec0 = Op.getOperand(0);
- SDValue Vec1 = Op.getOperand(1);
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- if (InVT.isScalableVector()) {
- if (!isTypeLegal(VT))
- return SDValue();
- // Break down insert_subvector into simpler parts.
- if (VT.getVectorElementType() == MVT::i1) {
- unsigned NumElts = VT.getVectorMinNumElements();
- EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
- SDValue Lo, Hi;
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
- DAG.getVectorIdxConstant(0, DL));
- Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
- DAG.getVectorIdxConstant(NumElts / 2, DL));
- if (Idx < (NumElts / 2)) {
- SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1,
- DAG.getVectorIdxConstant(Idx, DL));
- return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi);
- } else {
- SDValue NewHi =
- DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1,
- DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL));
- return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi);
- }
- }
- // Ensure the subvector is half the size of the main vector.
- if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
- return SDValue();
- // Here narrow and wide refer to the vector element types. After "casting",
- // both vectors must have the same bit length, and because the subvector has
- // fewer elements, those elements need to be bigger.
- EVT NarrowVT = getPackedSVEVectorVT(VT.getVectorElementCount());
- EVT WideVT = getPackedSVEVectorVT(InVT.getVectorElementCount());
- // NOP cast operands to the largest legal vector of the same element count.
- if (VT.isFloatingPoint()) {
- Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG);
- Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG);
- } else {
- // Legal integer vectors are already at their largest element size, so Vec0
- // is fine as is.
- Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
- }
- // To replace the top/bottom half of vector V with vector SubV we widen the
- // preserved half of V, concatenate this to SubV (the order depending on the
- // half being replaced) and then narrow the result.
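- // e.g. inserting nxv4i16 into the low half of nxv8i16: Vec1 is any-extended
- // to nxv4i32, UUNPKHI widens the preserved high half of Vec0 to nxv4i32, and
- // UZP1 packs both back down into an nxv8i16 of <Vec1, high half of Vec0>.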
- SDValue Narrow;
- if (Idx == 0) {
- SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
- Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0);
- } else {
- assert(Idx == InVT.getVectorMinNumElements() &&
- "Invalid subvector index!");
- SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
- Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1);
- }
- return getSVESafeBitCast(VT, Narrow, DAG);
- }
- if (Idx == 0 && isPackedVectorType(VT, DAG)) {
- // This will be matched by custom code during ISelDAGToDAG.
- if (Vec0.isUndef())
- return Op;
- std::optional<unsigned> PredPattern =
- getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
- auto PredTy = VT.changeVectorElementType(MVT::i1);
- SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
- SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
- return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
- }
- return SDValue();
- }
- static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) {
- if (Op.getOpcode() != AArch64ISD::DUP &&
- Op.getOpcode() != ISD::SPLAT_VECTOR &&
- Op.getOpcode() != ISD::BUILD_VECTOR)
- return false;
- if (Op.getOpcode() == ISD::BUILD_VECTOR &&
- !isAllConstantBuildVector(Op, SplatVal))
- return false;
- if (Op.getOpcode() != ISD::BUILD_VECTOR &&
- !isa<ConstantSDNode>(Op->getOperand(0)))
- return false;
- SplatVal = Op->getConstantOperandVal(0);
- if (Op.getValueType().getVectorElementType() != MVT::i64)
- SplatVal = (int32_t)SplatVal;
- Negated = false;
- if (isPowerOf2_64(SplatVal))
- return true;
- Negated = true;
- if (isPowerOf2_64(-SplatVal)) {
- SplatVal = -SplatVal;
- return true;
- }
- return false;
- }
- SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- SDLoc dl(Op);
- if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
- return LowerFixedLengthVectorIntDivideToSVE(Op, DAG);
- assert(VT.isScalableVector() && "Expected a scalable vector.");
- bool Signed = Op.getOpcode() == ISD::SDIV;
- unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
- bool Negated;
- uint64_t SplatVal;
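- // e.g. a signed divide by a splat of 8 becomes SRAD_MERGE_OP1 with shift
- // amount 3; a divide by a splat of -8 additionally negates that result.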
- if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
- SDValue Pg = getPredicateForScalableVector(DAG, dl, VT);
- SDValue Res =
- DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0),
- DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32));
- if (Negated)
- Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
- return Res;
- }
- if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64)
- return LowerToPredicatedOp(Op, DAG, PredOpcode);
- // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit
- // operations, and truncate the result.
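- // e.g. an nxv8i16 udiv is unpacked (UUNPKLO/UUNPKHI) into two nxv4i32
- // halves, divided as 32-bit values, and the results packed back together
- // with UZP1.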
- EVT WidenedVT;
- if (VT == MVT::nxv16i8)
- WidenedVT = MVT::nxv8i16;
- else if (VT == MVT::nxv8i16)
- WidenedVT = MVT::nxv4i32;
- else
- llvm_unreachable("Unexpected Custom DIV operation");
- unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
- unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
- SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0));
- SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1));
- SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0));
- SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
- SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
- SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
- return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
- }
- bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
- // Currently no fixed length shuffles that require SVE are legal.
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
- return false;
- if (VT.getVectorNumElements() == 4 &&
- (VT.is128BitVector() || VT.is64BitVector())) {
- unsigned Cost = getPerfectShuffleCost(M);
- if (Cost <= 1)
- return true;
- }
- bool DummyBool;
- int DummyInt;
- unsigned DummyUnsigned;
- return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
- isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
- isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
- // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
- isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
- isZIPMask(M, VT, DummyUnsigned) ||
- isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
- isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
- isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
- isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
- isConcatMask(M, VT, VT.getSizeInBits() == 128));
- }
- bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
- EVT VT) const {
- // Just delegate to the generic legality; clear masks aren't special.
- return isShuffleMaskLegal(M, VT);
- }
- /// getVShiftImm - Check if this is a valid build_vector for the immediate
- /// operand of a vector shift operation, where all the elements of the
- /// build_vector must have the same constant integer value.
- static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
- // Ignore bit_converts.
- while (Op.getOpcode() == ISD::BITCAST)
- Op = Op.getOperand(0);
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
- HasAnyUndefs, ElementBits) ||
- SplatBitSize > ElementBits)
- return false;
- Cnt = SplatBits.getSExtValue();
- return true;
- }
- /// isVShiftLImm - Check if this is a valid build_vector for the immediate
- /// operand of a vector shift left operation. That value must be in the range:
- /// 0 <= Value < ElementBits for a left shift; or
- /// 0 <= Value <= ElementBits for a long left shift.
- static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
- assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getScalarSizeInBits();
- if (!getVShiftImm(Op, ElementBits, Cnt))
- return false;
- return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
- }
- /// isVShiftRImm - Check if this is a valid build_vector for the immediate
- /// operand of a vector shift right operation. The value must be in the range:
- /// 1 <= Value <= ElementBits for a right shift; or
- /// 1 <= Value <= ElementBits/2 for a narrowing right shift.
- static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
- assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getScalarSizeInBits();
- if (!getVShiftImm(Op, ElementBits, Cnt))
- return false;
- return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
- }
- SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- if (VT.getScalarType() == MVT::i1) {
- // Lower i1 truncate to `(x & 1) != 0`.
- SDLoc dl(Op);
- EVT OpVT = Op.getOperand(0).getValueType();
- SDValue Zero = DAG.getConstant(0, dl, OpVT);
- SDValue One = DAG.getConstant(1, dl, OpVT);
- SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One);
- return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE);
- }
- if (!VT.isVector() || VT.isScalableVector())
- return SDValue();
- if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- int64_t Cnt;
- if (!Op.getOperand(1).getValueType().isVector())
- return Op;
- unsigned EltSize = VT.getScalarSizeInBits();
- switch (Op.getOpcode()) {
- case ISD::SHL:
- if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
- if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
- return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
- DAG.getConstant(Cnt, DL, MVT::i32));
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
- MVT::i32),
- Op.getOperand(0), Op.getOperand(1));
- case ISD::SRA:
- case ISD::SRL:
- if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- VT, Subtarget->forceStreamingCompatibleSVE())) {
- unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
- : AArch64ISD::SRL_PRED;
- return LowerToPredicatedOp(Op, DAG, Opc);
- }
- // Right shift immediate
- if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
- unsigned Opc =
- (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
- return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
- DAG.getConstant(Cnt, DL, MVT::i32));
- }
- // Right shift register. Note that there is no shift-right-register
- // instruction, but the shift-left-register instruction takes a signed
- // value, where negative amounts specify a right shift.
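- // e.g. (srl x, y) is emitted as the ushl intrinsic with a negated shift
- // amount: ushl(x, sub(0, y)).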
- unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
- : Intrinsic::aarch64_neon_ushl;
- // negate the shift amount
- SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
- Op.getOperand(1));
- SDValue NegShiftLeft =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
- NegShift);
- return NegShiftLeft;
- }
- llvm_unreachable("unexpected shift opcode");
- }
- static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
- AArch64CC::CondCode CC, bool NoNans, EVT VT,
- const SDLoc &dl, SelectionDAG &DAG) {
- EVT SrcVT = LHS.getValueType();
- assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
- "function only supposed to emit natural comparisons");
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
- APInt CnstBits(VT.getSizeInBits(), 0);
- APInt UndefBits(VT.getSizeInBits(), 0);
- bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
- bool IsZero = IsCnst && (CnstBits == 0);
- if (SrcVT.getVectorElementType().isFloatingPoint()) {
- switch (CC) {
- default:
- return SDValue();
- case AArch64CC::NE: {
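- // NE is lowered as the inverse of EQ: emit FCMEQ (or FCMEQz against zero)
- // and then invert the result.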
- SDValue Fcmeq;
- if (IsZero)
- Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
- else
- Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
- return DAG.getNOT(dl, Fcmeq, VT);
- }
- case AArch64CC::EQ:
- if (IsZero)
- return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
- case AArch64CC::GE:
- if (IsZero)
- return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
- case AArch64CC::GT:
- if (IsZero)
- return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
- case AArch64CC::LE:
- if (!NoNans)
- return SDValue();
- // If we ignore NaNs then we can use the LS implementation.
- [[fallthrough]];
- case AArch64CC::LS:
- if (IsZero)
- return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
- case AArch64CC::LT:
- if (!NoNans)
- return SDValue();
- // If we ignore NaNs then we can use the MI implementation.
- [[fallthrough]];
- case AArch64CC::MI:
- if (IsZero)
- return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
- }
- }
- switch (CC) {
- default:
- return SDValue();
- case AArch64CC::NE: {
- SDValue Cmeq;
- if (IsZero)
- Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
- else
- Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
- return DAG.getNOT(dl, Cmeq, VT);
- }
- case AArch64CC::EQ:
- if (IsZero)
- return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
- case AArch64CC::GE:
- if (IsZero)
- return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
- case AArch64CC::GT:
- if (IsZero)
- return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
- case AArch64CC::LE:
- if (IsZero)
- return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
- case AArch64CC::LS:
- return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
- case AArch64CC::LO:
- return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
- case AArch64CC::LT:
- if (IsZero)
- return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
- return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
- case AArch64CC::HI:
- return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
- case AArch64CC::HS:
- return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
- }
- }
- SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
- SelectionDAG &DAG) const {
- if (Op.getValueType().isScalableVector())
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
- if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
- return LowerFixedLengthVectorSetccToSVE(Op, DAG);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
- SDLoc dl(Op);
- if (LHS.getValueType().getVectorElementType().isInteger()) {
- assert(LHS.getValueType() == RHS.getValueType());
- AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
- SDValue Cmp =
- EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
- return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
- }
- const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
- // Make v4f16 (only) fcmp operations utilise vector instructions
- // v8f16 support will be a little more complicated.
- if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) {
- if (LHS.getValueType().getVectorNumElements() == 4) {
- LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
- RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
- SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
- DAG.ReplaceAllUsesWith(Op, NewSetcc);
- CmpVT = MVT::v4i32;
- } else
- return SDValue();
- }
- assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) ||
- LHS.getValueType().getVectorElementType() != MVT::f128);
- // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
- // clean. Some of them require two comparisons to implement.
- AArch64CC::CondCode CC1, CC2;
- bool ShouldInvert;
- changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);
- bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs();
- SDValue Cmp =
- EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
- if (!Cmp.getNode())
- return SDValue();
- if (CC2 != AArch64CC::AL) {
- SDValue Cmp2 =
- EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
- if (!Cmp2.getNode())
- return SDValue();
- Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
- }
- Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
- if (ShouldInvert)
- Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
- return Cmp;
- }
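- // Helper that emits the given AArch64 reduction node on the vector operand
- // and extracts lane 0 as the scalar result,
- // e.g. vecreduce_add(v) -> extract_elt(UADDV(v), 0).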
- static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
- SelectionDAG &DAG) {
- SDValue VecOp = ScalarOp.getOperand(0);
- auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
- DAG.getConstant(0, DL, MVT::i64));
- }
- SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue Src = Op.getOperand(0);
- // Try to lower fixed length reductions to SVE.
- EVT SrcVT = Src.getValueType();
- bool OverrideNEON = Subtarget->forceStreamingCompatibleSVE() ||
- Op.getOpcode() == ISD::VECREDUCE_AND ||
- Op.getOpcode() == ISD::VECREDUCE_OR ||
- Op.getOpcode() == ISD::VECREDUCE_XOR ||
- Op.getOpcode() == ISD::VECREDUCE_FADD ||
- (Op.getOpcode() != ISD::VECREDUCE_ADD &&
- SrcVT.getVectorElementType() == MVT::i64);
- if (SrcVT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- SrcVT, OverrideNEON && Subtarget->useSVEForFixedLengthVectors())) {
- if (SrcVT.getVectorElementType() == MVT::i1)
- return LowerPredReductionToSVE(Op, DAG);
- switch (Op.getOpcode()) {
- case ISD::VECREDUCE_ADD:
- return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
- case ISD::VECREDUCE_AND:
- return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG);
- case ISD::VECREDUCE_OR:
- return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG);
- case ISD::VECREDUCE_SMAX:
- return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
- case ISD::VECREDUCE_SMIN:
- return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
- case ISD::VECREDUCE_UMAX:
- return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
- case ISD::VECREDUCE_UMIN:
- return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
- case ISD::VECREDUCE_XOR:
- return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
- case ISD::VECREDUCE_FADD:
- return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG);
- case ISD::VECREDUCE_FMAX:
- return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
- case ISD::VECREDUCE_FMIN:
- return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG);
- default:
- llvm_unreachable("Unhandled fixed length reduction");
- }
- }
- // Lower NEON reductions.
- SDLoc dl(Op);
- switch (Op.getOpcode()) {
- case ISD::VECREDUCE_ADD:
- return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
- case ISD::VECREDUCE_SMAX:
- return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
- case ISD::VECREDUCE_SMIN:
- return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
- case ISD::VECREDUCE_UMAX:
- return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
- case ISD::VECREDUCE_UMIN:
- return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
- case ISD::VECREDUCE_FMAX: {
- return DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
- DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
- Src);
- }
- case ISD::VECREDUCE_FMIN: {
- return DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
- DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
- Src);
- }
- default:
- llvm_unreachable("Unhandled reduction");
- }
- }
- SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
- SelectionDAG &DAG) const {
- auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
- return SDValue();
- // LSE has an atomic load-add instruction, but not a load-sub.
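- // Negate the RHS and emit ATOMIC_LOAD_ADD instead: x - v == x + (-v).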
- SDLoc dl(Op);
- MVT VT = Op.getSimpleValueType();
- SDValue RHS = Op.getOperand(2);
- AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
- RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
- return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
- Op.getOperand(0), Op.getOperand(1), RHS,
- AN->getMemOperand());
- }
- SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
- SelectionDAG &DAG) const {
- auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
- return SDValue();
- // LSE has an atomic load-clear instruction, but not a load-and.
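- // Invert the RHS and emit ATOMIC_LOAD_CLR instead: x & v == x CLR (~v).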
- SDLoc dl(Op);
- MVT VT = Op.getSimpleValueType();
- SDValue RHS = Op.getOperand(2);
- AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
- RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS);
- return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(),
- Op.getOperand(0), Op.getOperand(1), RHS,
- AN->getMemOperand());
- }
- SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
- SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDValue Callee = DAG.getTargetExternalSymbol(Subtarget->getChkStkName(),
- PtrVT, 0);
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask();
- if (Subtarget->hasCustomCallingConv())
- TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
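- // The stack probe helper expects the allocation size in X15, expressed in
- // units of 16 bytes, hence the shift right by 4 before the call and the
- // shift left by 4 afterwards.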
- Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size,
- DAG.getConstant(4, dl, MVT::i64));
- Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue());
- Chain =
- DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue),
- Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64),
- DAG.getRegisterMask(Mask), Chain.getValue(1));
- // To match the actual intent better, we should read the output from X15 here
- // again (instead of potentially spilling it to the stack), but rereading Size
- // from X15 doesn't work at -O0, since X15 is treated as undefined at that
- // point.
- Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size,
- DAG.getConstant(4, dl, MVT::i64));
- return Chain;
- }
- SDValue
- AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Subtarget->isTargetWindows() &&
- "Only Windows alloca probing supported");
- SDLoc dl(Op);
- // Get the inputs.
- SDNode *Node = Op.getNode();
- SDValue Chain = Op.getOperand(0);
- SDValue Size = Op.getOperand(1);
- MaybeAlign Align =
- cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
- EVT VT = Node->getValueType(0);
- if (DAG.getMachineFunction().getFunction().hasFnAttribute(
- "no-stack-arg-probe")) {
- SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
- Chain = SP.getValue(1);
- SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
- if (Align)
- SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
- DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
- Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
- SDValue Ops[2] = {SP, Chain};
- return DAG.getMergeValues(Ops, dl);
- }
- Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
- Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);
- SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
- Chain = SP.getValue(1);
- SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
- if (Align)
- SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
- DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
- Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
- Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
- SDValue Ops[2] = {SP, Chain};
- return DAG.getMergeValues(Ops, dl);
- }
- SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT != MVT::i64 && "Expected illegal VSCALE node");
- SDLoc DL(Op);
- APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
- return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
- VT);
- }
- /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
- template <unsigned NumVecs>
- static bool
- setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
- AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
- Info.opc = ISD::INTRINSIC_VOID;
- // Retrieve EC from first vector argument.
- const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType());
- ElementCount EC = VT.getVectorElementCount();
- #ifndef NDEBUG
- // Check the assumption that all input vectors are the same type.
- for (unsigned I = 0; I < NumVecs; ++I)
- assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) &&
- "Invalid type.");
- #endif
- // memVT is `NumVecs * VT`.
- Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
- EC * NumVecs);
- Info.ptrVal = CI.getArgOperand(CI.arg_size() - 1);
- Info.offset = 0;
- Info.align.reset();
- Info.flags = MachineMemOperand::MOStore;
- return true;
- }
- /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
- /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
- /// specified in the intrinsic calls.
- bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallInst &I,
- MachineFunction &MF,
- unsigned Intrinsic) const {
- auto &DL = I.getModule()->getDataLayout();
- switch (Intrinsic) {
- case Intrinsic::aarch64_sve_st2:
- return setInfoSVEStN<2>(*this, DL, Info, I);
- case Intrinsic::aarch64_sve_st3:
- return setInfoSVEStN<3>(*this, DL, Info, I);
- case Intrinsic::aarch64_sve_st4:
- return setInfoSVEStN<4>(*this, DL, Info, I);
- case Intrinsic::aarch64_neon_ld2:
- case Intrinsic::aarch64_neon_ld3:
- case Intrinsic::aarch64_neon_ld4:
- case Intrinsic::aarch64_neon_ld1x2:
- case Intrinsic::aarch64_neon_ld1x3:
- case Intrinsic::aarch64_neon_ld1x4:
- case Intrinsic::aarch64_neon_ld2lane:
- case Intrinsic::aarch64_neon_ld3lane:
- case Intrinsic::aarch64_neon_ld4lane:
- case Intrinsic::aarch64_neon_ld2r:
- case Intrinsic::aarch64_neon_ld3r:
- case Intrinsic::aarch64_neon_ld4r: {
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- // Conservatively set memVT to the entire set of vectors loaded.
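- // e.g. an ld3 of v4i32 returns three 128-bit vectors (384 bits in total),
- // which is modelled here as v6i64.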
- uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
- Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
- Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
- Info.offset = 0;
- Info.align.reset();
- // volatile loads with NEON intrinsics not supported
- Info.flags = MachineMemOperand::MOLoad;
- return true;
- }
- case Intrinsic::aarch64_neon_st2:
- case Intrinsic::aarch64_neon_st3:
- case Intrinsic::aarch64_neon_st4:
- case Intrinsic::aarch64_neon_st1x2:
- case Intrinsic::aarch64_neon_st1x3:
- case Intrinsic::aarch64_neon_st1x4:
- case Intrinsic::aarch64_neon_st2lane:
- case Intrinsic::aarch64_neon_st3lane:
- case Intrinsic::aarch64_neon_st4lane: {
- Info.opc = ISD::INTRINSIC_VOID;
- // Conservatively set memVT to the entire set of vectors stored.
- unsigned NumElts = 0;
- for (const Value *Arg : I.args()) {
- Type *ArgTy = Arg->getType();
- if (!ArgTy->isVectorTy())
- break;
- NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
- }
- Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
- Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
- Info.offset = 0;
- Info.align.reset();
- // volatile stores with NEON intrinsics not supported
- Info.flags = MachineMemOperand::MOStore;
- return true;
- }
- case Intrinsic::aarch64_ldaxr:
- case Intrinsic::aarch64_ldxr: {
- Type *ValTy = I.getParamElementType(0);
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(ValTy);
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.align = DL.getABITypeAlign(ValTy);
- Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
- return true;
- }
- case Intrinsic::aarch64_stlxr:
- case Intrinsic::aarch64_stxr: {
- Type *ValTy = I.getParamElementType(1);
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(ValTy);
- Info.ptrVal = I.getArgOperand(1);
- Info.offset = 0;
- Info.align = DL.getABITypeAlign(ValTy);
- Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
- return true;
- }
- case Intrinsic::aarch64_ldaxp:
- case Intrinsic::aarch64_ldxp:
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::i128;
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.align = Align(16);
- Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
- return true;
- case Intrinsic::aarch64_stlxp:
- case Intrinsic::aarch64_stxp:
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::i128;
- Info.ptrVal = I.getArgOperand(2);
- Info.offset = 0;
- Info.align = Align(16);
- Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
- return true;
- case Intrinsic::aarch64_sve_ldnt1: {
- Type *ElTy = cast<VectorType>(I.getType())->getElementType();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(I.getType());
- Info.ptrVal = I.getArgOperand(1);
- Info.offset = 0;
- Info.align = DL.getABITypeAlign(ElTy);
- Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
- return true;
- }
- case Intrinsic::aarch64_sve_stnt1: {
- Type *ElTy =
- cast<VectorType>(I.getArgOperand(0)->getType())->getElementType();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(I.getOperand(0)->getType());
- Info.ptrVal = I.getArgOperand(2);
- Info.offset = 0;
- Info.align = DL.getABITypeAlign(ElTy);
- Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
- return true;
- }
- case Intrinsic::aarch64_mops_memset_tag: {
- Value *Dst = I.getArgOperand(0);
- Value *Val = I.getArgOperand(1);
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(Val->getType());
- Info.ptrVal = Dst;
- Info.offset = 0;
- Info.align = I.getParamAlign(0).valueOrOne();
- Info.flags = MachineMemOperand::MOStore;
- // The size of the memory being operated on is unknown at this point
- Info.size = MemoryLocation::UnknownSize;
- return true;
- }
- default:
- break;
- }
- return false;
- }
- bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
- ISD::LoadExtType ExtTy,
- EVT NewVT) const {
- // TODO: This may be worth removing. Check regression tests for diffs.
- if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
- return false;
- // If we're reducing the load width in order to avoid having to use an extra
- // instruction to do extension then it's probably a good idea.
- if (ExtTy != ISD::NON_EXTLOAD)
- return true;
- // Don't reduce load width if it would prevent us from combining a shift into
- // the offset.
- MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
- assert(Mem);
- const SDValue &Base = Mem->getBasePtr();
- if (Base.getOpcode() == ISD::ADD &&
- Base.getOperand(1).getOpcode() == ISD::SHL &&
- Base.getOperand(1).hasOneUse() &&
- Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
- // It's unknown whether a scalable vector has a power-of-2 bitwidth.
- if (Mem->getMemoryVT().isScalableVector())
- return false;
- // The shift can be combined if it matches the size of the value being
- // loaded (and so reducing the width would make it not match).
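- // e.g. for an i64 load at (add x, (shl y, 3)) the shift amount matches the
- // 8-byte access size, so it can fold into a scaled register-offset address;
- // keep the load width so that fold is not lost.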
- uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
- uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
- if (ShiftAmount == Log2_32(LoadBytes))
- return false;
- }
- // We have no reason to disallow reducing the load width, so allow it.
- return true;
- }
- // Truncations from 64-bit GPR to 32-bit GPR are free.
- bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
- if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
- return false;
- uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedValue();
- uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedValue();
- return NumBits1 > NumBits2;
- }
- bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
- if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
- return false;
- uint64_t NumBits1 = VT1.getFixedSizeInBits();
- uint64_t NumBits2 = VT2.getFixedSizeInBits();
- return NumBits1 > NumBits2;
- }
- /// Check if it is profitable to hoist instruction in then/else to if.
- /// Not profitable if I and its user can form an FMA instruction
- /// because we prefer FMSUB/FMADD.
- bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
- if (I->getOpcode() != Instruction::FMul)
- return true;
- if (!I->hasOneUse())
- return true;
- Instruction *User = I->user_back();
- if (!(User->getOpcode() == Instruction::FSub ||
- User->getOpcode() == Instruction::FAdd))
- return true;
- const TargetOptions &Options = getTargetMachine().Options;
- const Function *F = I->getFunction();
- const DataLayout &DL = F->getParent()->getDataLayout();
- Type *Ty = User->getOperand(0)->getType();
- return !(isFMAFasterThanFMulAndFAdd(*F, Ty) &&
- isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
- (Options.AllowFPOpFusion == FPOpFusion::Fast ||
- Options.UnsafeFPMath));
- }
- // All 32-bit GPR operations implicitly zero the high-half of the corresponding
- // 64-bit GPR.
- bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
- if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
- return false;
- unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
- unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
- return NumBits1 == 32 && NumBits2 == 64;
- }
- bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
- if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
- return false;
- unsigned NumBits1 = VT1.getSizeInBits();
- unsigned NumBits2 = VT2.getSizeInBits();
- return NumBits1 == 32 && NumBits2 == 64;
- }
- bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
- EVT VT1 = Val.getValueType();
- if (isZExtFree(VT1, VT2)) {
- return true;
- }
- if (Val.getOpcode() != ISD::LOAD)
- return false;
- // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
- return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
- VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
- VT1.getSizeInBits() <= 32);
- }
- bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
- if (isa<FPExtInst>(Ext))
- return false;
- // Vector types are not free.
- if (Ext->getType()->isVectorTy())
- return false;
- for (const Use &U : Ext->uses()) {
- // The extension is free if we can fold it with a left shift in an
- // addressing mode or an arithmetic operation: add, sub, and cmp.
- // Is there a shift?
- const Instruction *Instr = cast<Instruction>(U.getUser());
- // Is this a constant shift?
- switch (Instr->getOpcode()) {
- case Instruction::Shl:
- if (!isa<ConstantInt>(Instr->getOperand(1)))
- return false;
- break;
- case Instruction::GetElementPtr: {
- gep_type_iterator GTI = gep_type_begin(Instr);
- auto &DL = Ext->getModule()->getDataLayout();
- std::advance(GTI, U.getOperandNo()-1);
- Type *IdxTy = GTI.getIndexedType();
- // This extension will end up with a shift because of the scaling factor.
- // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
- // Get the shift amount based on the scaling factor:
- // log2(sizeof(IdxTy)) - log2(8).
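- // e.g. indexing i32 elements gives ShiftAmt = log2(32) - 3 = 2, which fits
- // the addressing mode's shift range checked below.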
- uint64_t ShiftAmt =
- countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedValue()) -
- 3;
- // Is the constant foldable in the shift of the addressing mode?
- // I.e., shift amount is between 1 and 4 inclusive.
- if (ShiftAmt == 0 || ShiftAmt > 4)
- return false;
- break;
- }
- case Instruction::Trunc:
- // Check if this is a noop.
- // trunc(sext ty1 to ty2) to ty1.
- if (Instr->getType() == Ext->getOperand(0)->getType())
- continue;
- [[fallthrough]];
- default:
- return false;
- }
- // At this point we can use the bfm family, so this extension is free
- // for that use.
- }
- return true;
- }
- static bool isSplatShuffle(Value *V) {
- if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V))
- return all_equal(Shuf->getShuffleMask());
- return false;
- }
- /// Check if both Op1 and Op2 are shufflevector extracts of either the lower
- /// or upper half of the vector elements.
- static bool areExtractShuffleVectors(Value *Op1, Value *Op2,
- bool AllowSplat = false) {
- auto areTypesHalfed = [](Value *FullV, Value *HalfV) {
- auto *FullTy = FullV->getType();
- auto *HalfTy = HalfV->getType();
- return FullTy->getPrimitiveSizeInBits().getFixedValue() ==
- 2 * HalfTy->getPrimitiveSizeInBits().getFixedValue();
- };
- auto extractHalf = [](Value *FullV, Value *HalfV) {
- auto *FullVT = cast<FixedVectorType>(FullV->getType());
- auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
- return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
- };
- ArrayRef<int> M1, M2;
- Value *S1Op1 = nullptr, *S2Op1 = nullptr;
- if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
- !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
- return false;
- // If we allow splats, set S1Op1/S2Op1 to nullptr for the relevant arg so that
- // it is not checked as an extract below.
- if (AllowSplat && isSplatShuffle(Op1))
- S1Op1 = nullptr;
- if (AllowSplat && isSplatShuffle(Op2))
- S2Op1 = nullptr;
- // Check that the operands are half as wide as the result and we extract
- // half of the elements of the input vectors.
- if ((S1Op1 && (!areTypesHalfed(S1Op1, Op1) || !extractHalf(S1Op1, Op1))) ||
- (S2Op1 && (!areTypesHalfed(S2Op1, Op2) || !extractHalf(S2Op1, Op2))))
- return false;
- // Check the mask extracts either the lower or upper half of vector
- // elements.
- int M1Start = 0;
- int M2Start = 0;
- int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
- if ((S1Op1 &&
- !ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start)) ||
- (S2Op1 &&
- !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start)))
- return false;
- if ((M1Start != 0 && M1Start != (NumElements / 2)) ||
- (M2Start != 0 && M2Start != (NumElements / 2)))
- return false;
- if (S1Op1 && S2Op1 && M1Start != M2Start)
- return false;
- return true;
- }
- /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
- /// of the vector elements.
- static bool areExtractExts(Value *Ext1, Value *Ext2) {
- auto areExtDoubled = [](Instruction *Ext) {
- return Ext->getType()->getScalarSizeInBits() ==
- 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
- };
- if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
- !match(Ext2, m_ZExtOrSExt(m_Value())) ||
- !areExtDoubled(cast<Instruction>(Ext1)) ||
- !areExtDoubled(cast<Instruction>(Ext2)))
- return false;
- return true;
- }
- /// Check if Op could be used with vmull_high_p64 intrinsic.
- static bool isOperandOfVmullHighP64(Value *Op) {
- Value *VectorOperand = nullptr;
- ConstantInt *ElementIndex = nullptr;
- return match(Op, m_ExtractElt(m_Value(VectorOperand),
- m_ConstantInt(ElementIndex))) &&
- ElementIndex->getValue() == 1 &&
- isa<FixedVectorType>(VectorOperand->getType()) &&
- cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;
- }
- /// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
- static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {
- return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2);
- }
- /// Check if sinking \p I's operands to I's basic block is profitable, because
- /// the operands can be folded into a target instruction, e.g.
- /// shufflevector extracts and/or sext/zext can be folded into (u,s)subl(2).
- bool AArch64TargetLowering::shouldSinkOperands(
- Instruction *I, SmallVectorImpl<Use *> &Ops) const {
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- switch (II->getIntrinsicID()) {
- case Intrinsic::aarch64_neon_smull:
- case Intrinsic::aarch64_neon_umull:
- if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1),
- /*AllowSplat=*/true)) {
- Ops.push_back(&II->getOperandUse(0));
- Ops.push_back(&II->getOperandUse(1));
- return true;
- }
- [[fallthrough]];
- case Intrinsic::fma:
- if (isa<VectorType>(I->getType()) &&
- cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
- !Subtarget->hasFullFP16())
- return false;
- [[fallthrough]];
- case Intrinsic::aarch64_neon_sqdmull:
- case Intrinsic::aarch64_neon_sqdmulh:
- case Intrinsic::aarch64_neon_sqrdmulh:
- // Sink splats for index lane variants
- if (isSplatShuffle(II->getOperand(0)))
- Ops.push_back(&II->getOperandUse(0));
- if (isSplatShuffle(II->getOperand(1)))
- Ops.push_back(&II->getOperandUse(1));
- return !Ops.empty();
- case Intrinsic::aarch64_sve_ptest_first:
- case Intrinsic::aarch64_sve_ptest_last:
- if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0)))
- if (IIOp->getIntrinsicID() == Intrinsic::aarch64_sve_ptrue)
- Ops.push_back(&II->getOperandUse(0));
- return !Ops.empty();
- case Intrinsic::aarch64_sme_write_horiz:
- case Intrinsic::aarch64_sme_write_vert:
- case Intrinsic::aarch64_sme_writeq_horiz:
- case Intrinsic::aarch64_sme_writeq_vert: {
- auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
- if (!Idx || Idx->getOpcode() != Instruction::Add)
- return false;
- Ops.push_back(&II->getOperandUse(1));
- return true;
- }
- case Intrinsic::aarch64_sme_read_horiz:
- case Intrinsic::aarch64_sme_read_vert:
- case Intrinsic::aarch64_sme_readq_horiz:
- case Intrinsic::aarch64_sme_readq_vert:
- case Intrinsic::aarch64_sme_ld1b_vert:
- case Intrinsic::aarch64_sme_ld1h_vert:
- case Intrinsic::aarch64_sme_ld1w_vert:
- case Intrinsic::aarch64_sme_ld1d_vert:
- case Intrinsic::aarch64_sme_ld1q_vert:
- case Intrinsic::aarch64_sme_st1b_vert:
- case Intrinsic::aarch64_sme_st1h_vert:
- case Intrinsic::aarch64_sme_st1w_vert:
- case Intrinsic::aarch64_sme_st1d_vert:
- case Intrinsic::aarch64_sme_st1q_vert:
- case Intrinsic::aarch64_sme_ld1b_horiz:
- case Intrinsic::aarch64_sme_ld1h_horiz:
- case Intrinsic::aarch64_sme_ld1w_horiz:
- case Intrinsic::aarch64_sme_ld1d_horiz:
- case Intrinsic::aarch64_sme_ld1q_horiz:
- case Intrinsic::aarch64_sme_st1b_horiz:
- case Intrinsic::aarch64_sme_st1h_horiz:
- case Intrinsic::aarch64_sme_st1w_horiz:
- case Intrinsic::aarch64_sme_st1d_horiz:
- case Intrinsic::aarch64_sme_st1q_horiz: {
- auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
- if (!Idx || Idx->getOpcode() != Instruction::Add)
- return false;
- Ops.push_back(&II->getOperandUse(3));
- return true;
- }
- case Intrinsic::aarch64_neon_pmull:
- if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
- return false;
- Ops.push_back(&II->getOperandUse(0));
- Ops.push_back(&II->getOperandUse(1));
- return true;
- case Intrinsic::aarch64_neon_pmull64:
- if (!areOperandsOfVmullHighP64(II->getArgOperand(0),
- II->getArgOperand(1)))
- return false;
- Ops.push_back(&II->getArgOperandUse(0));
- Ops.push_back(&II->getArgOperandUse(1));
- return true;
- default:
- return false;
- }
- }
- if (!I->getType()->isVectorTy())
- return false;
- switch (I->getOpcode()) {
- case Instruction::Sub:
- case Instruction::Add: {
- if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
- return false;
- // If the exts' operands extract either the lower or upper elements, we
- // can sink them too.
- auto Ext1 = cast<Instruction>(I->getOperand(0));
- auto Ext2 = cast<Instruction>(I->getOperand(1));
- if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) {
- Ops.push_back(&Ext1->getOperandUse(0));
- Ops.push_back(&Ext2->getOperandUse(0));
- }
- Ops.push_back(&I->getOperandUse(0));
- Ops.push_back(&I->getOperandUse(1));
- return true;
- }
- case Instruction::Mul: {
- int NumZExts = 0, NumSExts = 0;
- for (auto &Op : I->operands()) {
- // Make sure we are not already sinking this operand
- if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
- continue;
- if (match(&Op, m_SExt(m_Value()))) {
- NumSExts++;
- continue;
- } else if (match(&Op, m_ZExt(m_Value()))) {
- NumZExts++;
- continue;
- }
- ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op);
- // If the Shuffle is a splat and the operand is a zext/sext, sinking the
- // operand and the s/zext can help create indexed s/umull. This is
- // especially useful to prevent i64 mul being scalarized.
- if (Shuffle && isSplatShuffle(Shuffle) &&
- match(Shuffle->getOperand(0), m_ZExtOrSExt(m_Value()))) {
- Ops.push_back(&Shuffle->getOperandUse(0));
- Ops.push_back(&Op);
- if (match(Shuffle->getOperand(0), m_SExt(m_Value())))
- NumSExts++;
- else
- NumZExts++;
- continue;
- }
- if (!Shuffle)
- continue;
- Value *ShuffleOperand = Shuffle->getOperand(0);
- InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
- if (!Insert)
- continue;
- Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1));
- if (!OperandInstr)
- continue;
- ConstantInt *ElementConstant =
- dyn_cast<ConstantInt>(Insert->getOperand(2));
- // Check that the insertelement is inserting into element 0
- if (!ElementConstant || ElementConstant->getZExtValue() != 0)
- continue;
- unsigned Opcode = OperandInstr->getOpcode();
- if (Opcode == Instruction::SExt)
- NumSExts++;
- else if (Opcode == Instruction::ZExt)
- NumZExts++;
- else {
- // If we find that the top bits are known 0, then we can sink and allow
- // the backend to generate a umull.
- unsigned Bitwidth = I->getType()->getScalarSizeInBits();
- APInt UpperMask = APInt::getHighBitsSet(Bitwidth, Bitwidth / 2);
- const DataLayout &DL = I->getFunction()->getParent()->getDataLayout();
- if (!MaskedValueIsZero(OperandInstr, UpperMask, DL))
- continue;
- NumZExts++;
- }
- Ops.push_back(&Shuffle->getOperandUse(0));
- Ops.push_back(&Op);
- }
- // It is only profitable to sink if we found two extends of the same type.
- return !Ops.empty() && (NumSExts == 2 || NumZExts == 2);
- }
- default:
- return false;
- }
- return false;
- }
- static void createTblShuffleForZExt(ZExtInst *ZExt, bool IsLittleEndian) {
- Value *Op = ZExt->getOperand(0);
- auto *SrcTy = cast<FixedVectorType>(Op->getType());
- auto *DstTy = cast<FixedVectorType>(ZExt->getType());
- auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
- auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
- assert(DstWidth % SrcWidth == 0 &&
- "TBL lowering is not supported for a ZExt instruction with this "
- "source & destination element type.");
- unsigned ZExtFactor = DstWidth / SrcWidth;
- unsigned NumElts = SrcTy->getNumElements();
- IRBuilder<> Builder(ZExt);
- SmallVector<int> Mask;
- // Create a mask that selects <0,...,Op[i]> for each lane of the destination
- // vector to replace the original ZExt. This can later be lowered to a set of
- // tbl instructions.
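- // e.g. a zext of <8 x i8> to <8 x i32> (ZExtFactor 4) on little-endian uses
- // the mask <0,8,8,8, 1,8,8,8, ...>, where index 8 (== NumElts) selects the
- // zero element inserted below.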
- for (unsigned i = 0; i < NumElts * ZExtFactor; i++) {
- if (IsLittleEndian) {
- if (i % ZExtFactor == 0)
- Mask.push_back(i / ZExtFactor);
- else
- Mask.push_back(NumElts);
- } else {
- if ((i + 1) % ZExtFactor == 0)
- Mask.push_back((i - ZExtFactor + 1) / ZExtFactor);
- else
- Mask.push_back(NumElts);
- }
- }
- auto *FirstEltZero = Builder.CreateInsertElement(
- PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0));
- Value *Result = Builder.CreateShuffleVector(Op, FirstEltZero, Mask);
- Result = Builder.CreateBitCast(Result, DstTy);
- ZExt->replaceAllUsesWith(Result);
- ZExt->eraseFromParent();
- }
- static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
- IRBuilder<> Builder(TI);
- SmallVector<Value *> Parts;
- int NumElements = cast<FixedVectorType>(TI->getType())->getNumElements();
- auto *SrcTy = cast<FixedVectorType>(TI->getOperand(0)->getType());
- auto *DstTy = cast<FixedVectorType>(TI->getType());
- assert(SrcTy->getElementType()->isIntegerTy() &&
- "Non-integer type source vector element is not supported");
- assert(DstTy->getElementType()->isIntegerTy(8) &&
- "Unsupported destination vector element type");
- unsigned SrcElemTySz =
- cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
- unsigned DstElemTySz =
- cast<IntegerType>(DstTy->getElementType())->getBitWidth();
- assert((SrcElemTySz % DstElemTySz == 0) &&
- "Cannot lower truncate to tbl instructions for a source element size "
- "that is not divisible by the destination element size");
- unsigned TruncFactor = SrcElemTySz / DstElemTySz;
- assert((SrcElemTySz == 16 || SrcElemTySz == 32 || SrcElemTySz == 64) &&
- "Unsupported source vector element type size");
- Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), 16);
- // Create a mask to choose every nth byte from the source vector table of
- // bytes to create the truncated destination vector, where 'n' is the truncate
- // ratio. For example, for a truncate from Yxi64 to Yxi8, choose bytes
- // 0, 8, 16, ..., (Y-1)*8 for the little-endian format.
- SmallVector<Constant *, 16> MaskConst;
- for (int Itr = 0; Itr < 16; Itr++) {
- if (Itr < NumElements)
- MaskConst.push_back(Builder.getInt8(
- IsLittleEndian ? Itr * TruncFactor
- : Itr * TruncFactor + (TruncFactor - 1)));
- else
- MaskConst.push_back(Builder.getInt8(255));
- }
- int MaxTblSz = 128 * 4;
- int MaxSrcSz = SrcElemTySz * NumElements;
- int ElemsPerTbl =
- (MaxTblSz > MaxSrcSz) ? NumElements : (MaxTblSz / SrcElemTySz);
- assert(ElemsPerTbl <= 16 &&
- "Maximum elements selected using TBL instruction cannot exceed 16!");
- int ShuffleCount = 128 / SrcElemTySz;
- SmallVector<int> ShuffleLanes;
- for (int i = 0; i < ShuffleCount; ++i)
- ShuffleLanes.push_back(i);
- // Create TBL's table of bytes in 1,2,3 or 4 FP/SIMD registers using shuffles
- // over the source vector. If TBL's maximum 4 FP/SIMD registers are saturated,
- // call TBL & save the result in a vector of TBL results for combining later.
- SmallVector<Value *> Results;
- while (ShuffleLanes.back() < NumElements) {
- Parts.push_back(Builder.CreateBitCast(
- Builder.CreateShuffleVector(TI->getOperand(0), ShuffleLanes), VecTy));
- if (Parts.size() == 4) {
- auto *F = Intrinsic::getDeclaration(TI->getModule(),
- Intrinsic::aarch64_neon_tbl4, VecTy);
- Parts.push_back(ConstantVector::get(MaskConst));
- Results.push_back(Builder.CreateCall(F, Parts));
- Parts.clear();
- }
- for (int i = 0; i < ShuffleCount; ++i)
- ShuffleLanes[i] += ShuffleCount;
- }
- assert((Parts.empty() || Results.empty()) &&
- "Lowering trunc for vectors requiring different TBL instructions is "
- "not supported!");
- // Call TBL for the residual table bytes present in 1,2, or 3 FP/SIMD
- // registers
- if (!Parts.empty()) {
- Intrinsic::ID TblID;
- switch (Parts.size()) {
- case 1:
- TblID = Intrinsic::aarch64_neon_tbl1;
- break;
- case 2:
- TblID = Intrinsic::aarch64_neon_tbl2;
- break;
- case 3:
- TblID = Intrinsic::aarch64_neon_tbl3;
- break;
- }
- auto *F = Intrinsic::getDeclaration(TI->getModule(), TblID, VecTy);
- Parts.push_back(ConstantVector::get(MaskConst));
- Results.push_back(Builder.CreateCall(F, Parts));
- }
- // Extract the destination vector from TBL result(s) after combining them
- // where applicable. Currently, at most two TBLs are supported.
- assert(Results.size() <= 2 && "Trunc lowering does not support generation of "
- "more than 2 tbl instructions!");
- Value *FinalResult = Results[0];
- if (Results.size() == 1) {
- if (ElemsPerTbl < 16) {
- SmallVector<int> FinalMask(ElemsPerTbl);
- std::iota(FinalMask.begin(), FinalMask.end(), 0);
- FinalResult = Builder.CreateShuffleVector(Results[0], FinalMask);
- }
- } else {
- SmallVector<int> FinalMask(ElemsPerTbl * Results.size());
- if (ElemsPerTbl < 16) {
- std::iota(FinalMask.begin(), FinalMask.begin() + ElemsPerTbl, 0);
- std::iota(FinalMask.begin() + ElemsPerTbl, FinalMask.end(), 16);
- } else {
- std::iota(FinalMask.begin(), FinalMask.end(), 0);
- }
- FinalResult =
- Builder.CreateShuffleVector(Results[0], Results[1], FinalMask);
- }
- TI->replaceAllUsesWith(FinalResult);
- TI->eraseFromParent();
- }
- bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(Instruction *I,
- Loop *L) const {
- // shuffle_vector instructions are serialized when targeting SVE,
- // see LowerSPLAT_VECTOR. This peephole is not beneficial.
- if (Subtarget->useSVEForFixedLengthVectors())
- return false;
- // Try to optimize conversions using tbl. This requires materializing constant
- // index vectors, which can increase code size and add loads. Skip the
- // transform unless the conversion is in a loop block guaranteed to execute
- // and we are not optimizing for size.
- Function *F = I->getParent()->getParent();
- if (!L || L->getHeader() != I->getParent() || F->hasMinSize() ||
- F->hasOptSize())
- return false;
- auto *SrcTy = dyn_cast<FixedVectorType>(I->getOperand(0)->getType());
- auto *DstTy = dyn_cast<FixedVectorType>(I->getType());
- if (!SrcTy || !DstTy)
- return false;
- // Convert 'zext <Y x i8> %x to <Y x i8X>' to a shuffle that can be
- // lowered to tbl instructions to insert the original i8 elements
- // into i8x lanes. This is enabled for cases where it is beneficial.
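- // With the width checks below this currently covers destination element
- // widths strictly between 16 and 64 bits (e.g. 'zext <8 x i8> %x to
- // <8 x i32>'); i8 -> i16 and i8 -> i64 extends are left to the default
- // lowering.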
- auto *ZExt = dyn_cast<ZExtInst>(I);
- if (ZExt && SrcTy->getElementType()->isIntegerTy(8)) {
- auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
- if (DstWidth % 8 == 0 && DstWidth > 16 && DstWidth < 64) {
- createTblShuffleForZExt(ZExt, Subtarget->isLittleEndian());
- return true;
- }
- }
- auto *UIToFP = dyn_cast<UIToFPInst>(I);
- if (UIToFP && SrcTy->getElementType()->isIntegerTy(8) &&
- DstTy->getElementType()->isFloatTy()) {
- IRBuilder<> Builder(I);
- auto *ZExt = cast<ZExtInst>(
- Builder.CreateZExt(I->getOperand(0), VectorType::getInteger(DstTy)));
- auto *UI = Builder.CreateUIToFP(ZExt, DstTy);
- I->replaceAllUsesWith(UI);
- I->eraseFromParent();
- createTblShuffleForZExt(ZExt, Subtarget->isLittleEndian());
- return true;
- }
- // Convert 'fptoui <(8|16) x float> to <(8|16) x i8>' to a wide fptoui
- // followed by a truncate lowered to using tbl.4.
- auto *FPToUI = dyn_cast<FPToUIInst>(I);
- if (FPToUI &&
- (SrcTy->getNumElements() == 8 || SrcTy->getNumElements() == 16) &&
- SrcTy->getElementType()->isFloatTy() &&
- DstTy->getElementType()->isIntegerTy(8)) {
- IRBuilder<> Builder(I);
- auto *WideConv = Builder.CreateFPToUI(FPToUI->getOperand(0),
- VectorType::getInteger(SrcTy));
- auto *TruncI = Builder.CreateTrunc(WideConv, DstTy);
- I->replaceAllUsesWith(TruncI);
- I->eraseFromParent();
- createTblForTrunc(cast<TruncInst>(TruncI), Subtarget->isLittleEndian());
- return true;
- }
- // Convert 'trunc <(8|16) x (i32|i64)> %x to <(8|16) x i8>' to an appropriate
- // tbl instruction selecting the lowest/highest (little/big endian) 8 bits
- // per lane of the input that is represented using 1,2,3 or 4 128-bit table
- // registers
- auto *TI = dyn_cast<TruncInst>(I);
- if (TI && DstTy->getElementType()->isIntegerTy(8) &&
- ((SrcTy->getElementType()->isIntegerTy(32) ||
- SrcTy->getElementType()->isIntegerTy(64)) &&
- (SrcTy->getNumElements() == 16 || SrcTy->getNumElements() == 8))) {
- createTblForTrunc(TI, Subtarget->isLittleEndian());
- return true;
- }
- return false;
- }
- bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
- Align &RequiredAligment) const {
- if (!LoadedType.isSimple() ||
- (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
- return false;
- // Cyclone supports unaligned accesses.
- RequiredAligment = Align(1);
- unsigned NumBits = LoadedType.getSizeInBits();
- return NumBits == 32 || NumBits == 64;
- }
- /// A helper function for determining the number of interleaved accesses we
- /// will generate when lowering accesses of the given type.
- unsigned AArch64TargetLowering::getNumInterleavedAccesses(
- VectorType *VecTy, const DataLayout &DL, bool UseScalable) const {
- unsigned VecSize = 128;
- if (UseScalable)
- VecSize = std::max(Subtarget->getMinSVEVectorSizeInBits(), 128u);
- return std::max<unsigned>(1, (DL.getTypeSizeInBits(VecTy) + 127) / VecSize);
- }
- MachineMemOperand::Flags
- AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {
- if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor &&
- I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr)
- return MOStridedAccess;
- return MachineMemOperand::MONone;
- }
- bool AArch64TargetLowering::isLegalInterleavedAccessType(
- VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const {
- unsigned VecSize = DL.getTypeSizeInBits(VecTy);
- unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
- unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
- UseScalable = false;
- // Ensure that the predicate for this number of elements is available.
- if (Subtarget->hasSVE() && !getSVEPredPatternFromNumElements(NumElements))
- return false;
- // Ensure the number of vector elements is greater than 1.
- if (NumElements < 2)
- return false;
- // Ensure the element type is legal.
- if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
- return false;
- if (Subtarget->forceStreamingCompatibleSVE() ||
- (Subtarget->useSVEForFixedLengthVectors() &&
- (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
- (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
- isPowerOf2_32(NumElements) && VecSize > 128)))) {
- UseScalable = true;
- return true;
- }
- // Ensure the total vector size is 64 or a multiple of 128. Types larger than
- // 128 will be split into multiple interleaved accesses.
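- // For example, on a plain NEON target <16 x i8> (128 bits) and <8 x i64>
- // (512 bits, later split into four accesses) are accepted here, while a
- // 96-bit <3 x i32> is rejected.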
- return VecSize == 64 || VecSize % 128 == 0;
- }
- static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) {
- if (VTy->getElementType() == Type::getDoubleTy(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 2);
- if (VTy->getElementType() == Type::getFloatTy(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 4);
- if (VTy->getElementType() == Type::getBFloatTy(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 8);
- if (VTy->getElementType() == Type::getHalfTy(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 8);
- if (VTy->getElementType() == Type::getInt64Ty(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 2);
- if (VTy->getElementType() == Type::getInt32Ty(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 4);
- if (VTy->getElementType() == Type::getInt16Ty(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 8);
- if (VTy->getElementType() == Type::getInt8Ty(VTy->getContext()))
- return ScalableVectorType::get(VTy->getElementType(), 16);
- llvm_unreachable("Cannot handle input vector type");
- }
- /// Lower an interleaved load into a ldN intrinsic.
- ///
- /// E.g. Lower an interleaved load (Factor = 2):
- /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
- /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements
- /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements
- ///
- /// Into:
- /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr)
- /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
- /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
- bool AArch64TargetLowering::lowerInterleavedLoad(
- LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
- ArrayRef<unsigned> Indices, unsigned Factor) const {
- assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
- "Invalid interleave factor");
- assert(!Shuffles.empty() && "Empty shufflevector input");
- assert(Shuffles.size() == Indices.size() &&
- "Unmatched number of shufflevectors and indices");
- const DataLayout &DL = LI->getModule()->getDataLayout();
- VectorType *VTy = Shuffles[0]->getType();
- // Skip if we do not have NEON or if the vector type is illegal. We can
- // "legalize" wide vector types into multiple interleaved accesses as long as
- // the vector types are divisible by 128.
- bool UseScalable;
- if (!Subtarget->hasNEON() ||
- !isLegalInterleavedAccessType(VTy, DL, UseScalable))
- return false;
- unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable);
- auto *FVTy = cast<FixedVectorType>(VTy);
- // A pointer vector can not be the return type of the ldN intrinsics. Need to
- // load integer vectors first and then convert to pointer vectors.
- Type *EltTy = FVTy->getElementType();
- if (EltTy->isPointerTy())
- FVTy =
- FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements());
- // If we're going to generate more than one load, reset the sub-vector type
- // to something legal.
- FVTy = FixedVectorType::get(FVTy->getElementType(),
- FVTy->getNumElements() / NumLoads);
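- // For instance, a factor-2 interleaved load whose de-interleaved results are
- // <8 x i32> needs NumLoads = 2, so the loop below emits two ld2 calls on
- // <4 x i32> sub-vectors and concatenates the per-shuffle halves afterwards.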
- auto *LDVTy =
- UseScalable ? cast<VectorType>(getSVEContainerIRType(FVTy)) : FVTy;
- IRBuilder<> Builder(LI);
- // The base address of the load.
- Value *BaseAddr = LI->getPointerOperand();
- if (NumLoads > 1) {
- // We will compute the pointer operand of each load from the original base
- // address using GEPs. Cast the base address to a pointer to the scalar
- // element type.
- BaseAddr = Builder.CreateBitCast(
- BaseAddr,
- LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
- }
- Type *PtrTy =
- UseScalable
- ? LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())
- : LDVTy->getPointerTo(LI->getPointerAddressSpace());
- Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()),
- LDVTy->getElementCount());
- static const Intrinsic::ID SVELoadIntrs[3] = {
- Intrinsic::aarch64_sve_ld2_sret, Intrinsic::aarch64_sve_ld3_sret,
- Intrinsic::aarch64_sve_ld4_sret};
- static const Intrinsic::ID NEONLoadIntrs[3] = {Intrinsic::aarch64_neon_ld2,
- Intrinsic::aarch64_neon_ld3,
- Intrinsic::aarch64_neon_ld4};
- Function *LdNFunc;
- if (UseScalable)
- LdNFunc = Intrinsic::getDeclaration(LI->getModule(),
- SVELoadIntrs[Factor - 2], {LDVTy});
- else
- LdNFunc = Intrinsic::getDeclaration(
- LI->getModule(), NEONLoadIntrs[Factor - 2], {LDVTy, PtrTy});
- // Holds sub-vectors extracted from the load intrinsic return values. The
- // sub-vectors are associated with the shufflevector instructions they will
- // replace.
- DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
- Value *PTrue = nullptr;
- if (UseScalable) {
- std::optional<unsigned> PgPattern =
- getSVEPredPatternFromNumElements(FVTy->getNumElements());
- if (Subtarget->getMinSVEVectorSizeInBits() ==
- Subtarget->getMaxSVEVectorSizeInBits() &&
- Subtarget->getMinSVEVectorSizeInBits() == DL.getTypeSizeInBits(FVTy))
- PgPattern = AArch64SVEPredPattern::all;
- auto *PTruePat =
- ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern);
- PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
- {PTruePat});
- }
- for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
- // If we're generating more than one load, compute the base address of
- // subsequent loads as an offset from the previous.
- if (LoadCount > 0)
- BaseAddr = Builder.CreateConstGEP1_32(LDVTy->getElementType(), BaseAddr,
- FVTy->getNumElements() * Factor);
- CallInst *LdN;
- if (UseScalable)
- LdN = Builder.CreateCall(
- LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN");
- else
- LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
- "ldN");
- // Extract and store the sub-vectors returned by the load intrinsic.
- for (unsigned i = 0; i < Shuffles.size(); i++) {
- ShuffleVectorInst *SVI = Shuffles[i];
- unsigned Index = Indices[i];
- Value *SubVec = Builder.CreateExtractValue(LdN, Index);
- if (UseScalable)
- SubVec = Builder.CreateExtractVector(
- FVTy, SubVec,
- ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0));
- // Convert the integer vector to pointer vector if the element is pointer.
- if (EltTy->isPointerTy())
- SubVec = Builder.CreateIntToPtr(
- SubVec, FixedVectorType::get(SVI->getType()->getElementType(),
- FVTy->getNumElements()));
- SubVecs[SVI].push_back(SubVec);
- }
- }
- // Replace uses of the shufflevector instructions with the sub-vectors
- // returned by the load intrinsic. If a shufflevector instruction is
- // associated with more than one sub-vector, those sub-vectors will be
- // concatenated into a single wide vector.
- for (ShuffleVectorInst *SVI : Shuffles) {
- auto &SubVec = SubVecs[SVI];
- auto *WideVec =
- SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
- SVI->replaceAllUsesWith(WideVec);
- }
- return true;
- }
- /// Lower an interleaved store into a stN intrinsic.
- ///
- /// E.g. Lower an interleaved store (Factor = 3):
- /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
- /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
- /// store <12 x i32> %i.vec, <12 x i32>* %ptr
- ///
- /// Into:
- /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
- /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
- /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
- /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
- ///
- /// Note that the new shufflevectors will be removed and we'll only generate one
- /// st3 instruction in CodeGen.
- ///
- /// Example for a more general valid mask (Factor 3). Lower:
- /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
- /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
- /// store <12 x i32> %i.vec, <12 x i32>* %ptr
- ///
- /// Into:
- /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
- /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
- /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
- /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
- bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
- ShuffleVectorInst *SVI,
- unsigned Factor) const {
- // Skip if streaming compatible SVE is enabled, because it generates invalid
- // code in streaming mode when SVE length is not specified.
- if (Subtarget->forceStreamingCompatibleSVE())
- return false;
- assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
- "Invalid interleave factor");
- auto *VecTy = cast<FixedVectorType>(SVI->getType());
- assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
- unsigned LaneLen = VecTy->getNumElements() / Factor;
- Type *EltTy = VecTy->getElementType();
- auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
- const DataLayout &DL = SI->getModule()->getDataLayout();
- bool UseScalable;
- // Skip if we do not have NEON or if the vector type is illegal. We can
- // "legalize" wide vector types into multiple interleaved accesses as long as
- // the vector types are divisible by 128.
- if (!Subtarget->hasNEON() ||
- !isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
- return false;
- unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
- Value *Op0 = SVI->getOperand(0);
- Value *Op1 = SVI->getOperand(1);
- IRBuilder<> Builder(SI);
- // StN intrinsics don't support pointer vectors as arguments. Convert pointer
- // vectors to integer vectors.
- if (EltTy->isPointerTy()) {
- Type *IntTy = DL.getIntPtrType(EltTy);
- unsigned NumOpElts =
- cast<FixedVectorType>(Op0->getType())->getNumElements();
- // Convert to the corresponding integer vector.
- auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts);
- Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
- Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
- SubVecTy = FixedVectorType::get(IntTy, LaneLen);
- }
- // If we're going to generate more than one store, reset the lane length
- // and sub-vector type to something legal.
- LaneLen /= NumStores;
- SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
- auto *STVTy = UseScalable ? cast<VectorType>(getSVEContainerIRType(SubVecTy))
- : SubVecTy;
- // The base address of the store.
- Value *BaseAddr = SI->getPointerOperand();
- if (NumStores > 1) {
- // We will compute the pointer operand of each store from the original base
- // address using GEPs. Cast the base address to a pointer to the scalar
- // element type.
- BaseAddr = Builder.CreateBitCast(
- BaseAddr,
- SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
- }
- auto Mask = SVI->getShuffleMask();
- // Bail out if none of the mask indices are in range: if the shuffle mask is
- // all `undef`/`poison`, `Mask` is a vector of -1s and using it below would
- // lead to an out-of-bounds read.
- if (llvm::all_of(Mask, [](int Idx) { return Idx == UndefMaskElem; })) {
- return false;
- }
- Type *PtrTy =
- UseScalable
- ? STVTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())
- : STVTy->getPointerTo(SI->getPointerAddressSpace());
- Type *PredTy = VectorType::get(Type::getInt1Ty(STVTy->getContext()),
- STVTy->getElementCount());
- static const Intrinsic::ID SVEStoreIntrs[3] = {Intrinsic::aarch64_sve_st2,
- Intrinsic::aarch64_sve_st3,
- Intrinsic::aarch64_sve_st4};
- static const Intrinsic::ID NEONStoreIntrs[3] = {Intrinsic::aarch64_neon_st2,
- Intrinsic::aarch64_neon_st3,
- Intrinsic::aarch64_neon_st4};
- Function *StNFunc;
- if (UseScalable)
- StNFunc = Intrinsic::getDeclaration(SI->getModule(),
- SVEStoreIntrs[Factor - 2], {STVTy});
- else
- StNFunc = Intrinsic::getDeclaration(
- SI->getModule(), NEONStoreIntrs[Factor - 2], {STVTy, PtrTy});
- Value *PTrue = nullptr;
- if (UseScalable) {
- std::optional<unsigned> PgPattern =
- getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
- if (Subtarget->getMinSVEVectorSizeInBits() ==
- Subtarget->getMaxSVEVectorSizeInBits() &&
- Subtarget->getMinSVEVectorSizeInBits() ==
- DL.getTypeSizeInBits(SubVecTy))
- PgPattern = AArch64SVEPredPattern::all;
- auto *PTruePat =
- ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern);
- PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
- {PTruePat});
- }
- for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
- SmallVector<Value *, 5> Ops;
- // Split the shufflevector operands into sub vectors for the new stN call.
- for (unsigned i = 0; i < Factor; i++) {
- Value *Shuffle;
- unsigned IdxI = StoreCount * LaneLen * Factor + i;
- if (Mask[IdxI] >= 0) {
- Shuffle = Builder.CreateShuffleVector(
- Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0));
- } else {
- unsigned StartMask = 0;
- for (unsigned j = 1; j < LaneLen; j++) {
- unsigned IdxJ = StoreCount * LaneLen * Factor + j * Factor + i;
- if (Mask[IdxJ] >= 0) {
- StartMask = Mask[IdxJ] - j;
- break;
- }
- }
- // Note: Filling undef gaps with random elements is ok, since
- // those elements were being written anyway (with undefs).
- // In the case of all undefs we're defaulting to using elems from 0
- // Note: StartMask cannot be negative, it's checked in
- // isReInterleaveMask
- Shuffle = Builder.CreateShuffleVector(
- Op0, Op1, createSequentialMask(StartMask, LaneLen, 0));
- }
- if (UseScalable)
- Shuffle = Builder.CreateInsertVector(
- STVTy, UndefValue::get(STVTy), Shuffle,
- ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0));
- Ops.push_back(Shuffle);
- }
- if (UseScalable)
- Ops.push_back(PTrue);
- // If we're generating more than one store, compute the base address of
- // subsequent stores as an offset from the previous one.
- if (StoreCount > 0)
- BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
- BaseAddr, LaneLen * Factor);
- Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
- Builder.CreateCall(StNFunc, Ops);
- }
- return true;
- }
- EVT AArch64TargetLowering::getOptimalMemOpType(
- const MemOp &Op, const AttributeList &FuncAttributes) const {
- bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
- bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
- bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
- // Only use AdvSIMD to implement memsets of 32 bytes and above. Below that, it
- // would take one instruction to materialize the v2i64 zero plus one store
- // (with a restrictive addressing mode), so plain i64 stores are just as good.
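- // For instance, assuming NEON and implicit FP use are allowed and misaligned
- // stores are fast, a 64-byte memset is lowered with v16i8 stores, while a
- // 16-byte memset is treated as small and uses i64 stores.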
- bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
- auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
- if (Op.isAligned(AlignCheck))
- return true;
- unsigned Fast;
- return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
- MachineMemOperand::MONone, &Fast) &&
- Fast;
- };
- if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
- AlignmentIsAcceptable(MVT::v16i8, Align(16)))
- return MVT::v16i8;
- if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
- return MVT::f128;
- if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
- return MVT::i64;
- if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
- return MVT::i32;
- return MVT::Other;
- }
- LLT AArch64TargetLowering::getOptimalMemOpLLT(
- const MemOp &Op, const AttributeList &FuncAttributes) const {
- bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
- bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
- bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
- // Only use AdvSIMD to implement memsets of 32 bytes and above. Below that, it
- // would take one instruction to materialize the v2i64 zero plus one store
- // (with a restrictive addressing mode), so plain i64 stores are just as good.
- bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
- auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
- if (Op.isAligned(AlignCheck))
- return true;
- unsigned Fast;
- return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
- MachineMemOperand::MONone, &Fast) &&
- Fast;
- };
- if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
- AlignmentIsAcceptable(MVT::v2i64, Align(16)))
- return LLT::fixed_vector(2, 64);
- if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
- return LLT::scalar(128);
- if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
- return LLT::scalar(64);
- if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
- return LLT::scalar(32);
- return LLT();
- }
- // 12-bit optionally shifted immediates are legal for adds.
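- // For example, 0xfff and 0x123000 are legal add immediates (and so are their
- // negations), while 0x123001 is not because it needs both the low and the
- // shifted 12-bit field.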
- bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
- if (Immed == std::numeric_limits<int64_t>::min()) {
- LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
- << ": avoid UB for INT64_MIN\n");
- return false;
- }
- // Same encoding for add/sub, just flip the sign.
- Immed = std::abs(Immed);
- bool IsLegal = ((Immed >> 12) == 0 ||
- ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
- LLVM_DEBUG(dbgs() << "Is " << Immed
- << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
- return IsLegal;
- }
- // Return false to prevent folding
- // (mul (add x, c1), c2) -> (add (mul x, c2), c2*c1) in DAGCombine,
- // if the folding leads to worse code.
- bool AArch64TargetLowering::isMulAddWithConstProfitable(
- SDValue AddNode, SDValue ConstNode) const {
- // Let the DAGCombiner decide for vector types and large types.
- const EVT VT = AddNode.getValueType();
- if (VT.isVector() || VT.getScalarSizeInBits() > 64)
- return true;
- // The fold is worse if c1 is a legal add immediate but c1*c2 is not, and
- // c1*c2 has to be materialized with at least two instructions.
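- // For example, (add x, 1) * 16 is left to the generic fold since 16 is a
- // legal add immediate, but (add x, 1) * 0x1234567 is rejected because
- // 0x1234567 needs a MOVZ plus a MOVK to materialize.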
- const ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
- const ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
- const int64_t C1 = C1Node->getSExtValue();
- const APInt C1C2 = C1Node->getAPIntValue() * C2Node->getAPIntValue();
- if (!isLegalAddImmediate(C1) || isLegalAddImmediate(C1C2.getSExtValue()))
- return true;
- SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
- AArch64_IMM::expandMOVImm(C1C2.getZExtValue(), VT.getSizeInBits(), Insn);
- if (Insn.size() > 1)
- return false;
- // Default to true and let the DAGCombiner decide.
- return true;
- }
- // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
- // immediates is the same as for an add or a sub.
- bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
- return isLegalAddImmediate(Immed);
- }
- /// isLegalAddressingMode - Return true if the addressing mode represented
- /// by AM is legal for this target, for a load/store of the specified type.
- bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
- const AddrMode &AM, Type *Ty,
- unsigned AS, Instruction *I) const {
- // AArch64 has five basic addressing modes:
- // reg
- // reg + 9-bit signed offset
- // reg + SIZE_IN_BYTES * 12-bit unsigned offset
- // reg1 + reg2
- // reg + SIZE_IN_BYTES * reg
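- // For an i64 load, for example, [x0], [x0, #8] and [x0, x1, lsl #3] are all
- // legal, while [x0, x1, lsl #2] is not because the scale does not match the
- // 8-byte access size.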
- // No global is ever allowed as a base.
- if (AM.BaseGV)
- return false;
- // No reg+reg+imm addressing.
- if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
- return false;
- // FIXME: Update this method to support scalable addressing modes.
- if (isa<ScalableVectorType>(Ty)) {
- uint64_t VecElemNumBytes =
- DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
- return AM.HasBaseReg && !AM.BaseOffs &&
- (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
- }
- // check reg + imm case:
- // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
- uint64_t NumBytes = 0;
- if (Ty->isSized()) {
- uint64_t NumBits = DL.getTypeSizeInBits(Ty);
- NumBytes = NumBits / 8;
- if (!isPowerOf2_64(NumBits))
- NumBytes = 0;
- }
- if (!AM.Scale) {
- int64_t Offset = AM.BaseOffs;
- // 9-bit signed offset
- if (isInt<9>(Offset))
- return true;
- // 12-bit unsigned offset
- unsigned shift = Log2_64(NumBytes);
- if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
- // Must be a multiple of NumBytes (NumBytes is a power of 2)
- (Offset >> shift) << shift == Offset)
- return true;
- return false;
- }
- // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
- return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
- }
- bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {
- // Consider splitting large offset of struct or array.
- return true;
- }
- bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
- const MachineFunction &MF, EVT VT) const {
- VT = VT.getScalarType();
- if (!VT.isSimple())
- return false;
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::f16:
- return Subtarget->hasFullFP16();
- case MVT::f32:
- case MVT::f64:
- return true;
- default:
- break;
- }
- return false;
- }
- bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
- Type *Ty) const {
- switch (Ty->getScalarType()->getTypeID()) {
- case Type::FloatTyID:
- case Type::DoubleTyID:
- return true;
- default:
- return false;
- }
- }
- bool AArch64TargetLowering::generateFMAsInMachineCombiner(
- EVT VT, CodeGenOpt::Level OptLevel) const {
- return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector() &&
- !useSVEForFixedLengthVectorVT(VT);
- }
- const MCPhysReg *
- AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
- // LR is a callee-save register, but we must treat it as clobbered by any call
- // site. Hence we include LR in the scratch registers, which are in turn added
- // as implicit-defs for stackmaps and patchpoints.
- static const MCPhysReg ScratchRegs[] = {
- AArch64::X16, AArch64::X17, AArch64::LR, 0
- };
- return ScratchRegs;
- }
- bool
- AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
- CombineLevel Level) const {
- assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
- N->getOpcode() == ISD::SRL) &&
- "Expected shift op");
- SDValue ShiftLHS = N->getOperand(0);
- EVT VT = N->getValueType(0);
- // If ShiftLHS is an unsigned bit extraction ((x >> C) & mask), do not combine
- // it with shift 'N' so that it can still be lowered to UBFX, except in the
- // ((x >> C) & mask) << C case.
- if (ShiftLHS.getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
- isa<ConstantSDNode>(ShiftLHS.getOperand(1))) {
- uint64_t TruncMask = ShiftLHS.getConstantOperandVal(1);
- if (isMask_64(TruncMask)) {
- SDValue AndLHS = ShiftLHS.getOperand(0);
- if (AndLHS.getOpcode() == ISD::SRL) {
- if (auto *SRLC = dyn_cast<ConstantSDNode>(AndLHS.getOperand(1))) {
- if (N->getOpcode() == ISD::SHL)
- if (auto *SHLC = dyn_cast<ConstantSDNode>(N->getOperand(1)))
- return SRLC->getZExtValue() == SHLC->getZExtValue();
- return false;
- }
- }
- }
- }
- return true;
- }
- bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
- const SDNode *N) const {
- assert(N->getOpcode() == ISD::XOR &&
- (N->getOperand(0).getOpcode() == ISD::SHL ||
- N->getOperand(0).getOpcode() == ISD::SRL) &&
- "Expected XOR(SHIFT) pattern");
- // Only commute if the entire NOT mask is a hidden shifted mask.
- auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
- auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
- if (XorC && ShiftC) {
- unsigned MaskIdx, MaskLen;
- if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
- unsigned ShiftAmt = ShiftC->getZExtValue();
- unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
- if (N->getOperand(0).getOpcode() == ISD::SHL)
- return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
- return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
- }
- }
- return false;
- }
- bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
- const SDNode *N, CombineLevel Level) const {
- assert(((N->getOpcode() == ISD::SHL &&
- N->getOperand(0).getOpcode() == ISD::SRL) ||
- (N->getOpcode() == ISD::SRL &&
- N->getOperand(0).getOpcode() == ISD::SHL)) &&
- "Expected shift-shift mask");
- // Don't allow multiuse shift folding with the same shift amount.
- if (!N->getOperand(0)->hasOneUse())
- return false;
- // Only fold srl(shl(x,c1),c2) if C1 >= C2, to prevent loss of UBFX patterns.
- EVT VT = N->getValueType(0);
- if (N->getOpcode() == ISD::SRL && (VT == MVT::i32 || VT == MVT::i64)) {
- auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
- auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
- return (!C1 || !C2 || C1->getZExtValue() >= C2->getZExtValue());
- }
- return true;
- }
- bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
- Type *Ty) const {
- assert(Ty->isIntegerTy());
- unsigned BitSize = Ty->getPrimitiveSizeInBits();
- if (BitSize == 0)
- return false;
- int64_t Val = Imm.getSExtValue();
- if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize))
- return true;
- if ((int64_t)Val < 0)
- Val = ~Val;
- if (BitSize == 32)
- Val &= (1LL << 32) - 1;
- unsigned LZ = countLeadingZeros((uint64_t)Val);
- unsigned Shift = (63 - LZ) / 16;
- // MOVZ is free so return true for one or fewer MOVK.
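- // For example, 0x123456789abc materializes with MOVZ + 2 MOVKs (Shift == 2)
- // and is converted, while a constant needing 3 MOVKs stays as a load.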
- return Shift < 3;
- }
- bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
- unsigned Index) const {
- if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
- return false;
- return (Index == 0 || Index == ResVT.getVectorMinNumElements());
- }
- /// Turn vector tests of the signbit in the form of:
- /// xor (sra X, elt_size(X)-1), -1
- /// into:
- /// cmge X, X, #0
- static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
- if (!Subtarget->hasNEON() || !VT.isVector())
- return SDValue();
- // There must be a shift right algebraic before the xor, and the xor must be a
- // 'not' operation.
- SDValue Shift = N->getOperand(0);
- SDValue Ones = N->getOperand(1);
- if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() ||
- !ISD::isBuildVectorAllOnes(Ones.getNode()))
- return SDValue();
- // The shift should be smearing the sign bit across each vector element.
- auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
- EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
- if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
- return SDValue();
- return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0));
- }
- // Given a vecreduce_add node, detect the below pattern and convert it to the
- // node sequence with UABDL, [S|U]ABD and UADDLP.
- //
- // i32 vecreduce_add(
- // v16i32 abs(
- // v16i32 sub(
- // v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b))))
- // =================>
- // i32 vecreduce_add(
- // v4i32 UADDLP(
- // v8i16 add(
- // v8i16 zext(
- // v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b
- // v8i16 zext(
- // v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b
- static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
- SelectionDAG &DAG) {
- // Assumed i32 vecreduce_add
- if (N->getValueType(0) != MVT::i32)
- return SDValue();
- SDValue VecReduceOp0 = N->getOperand(0);
- unsigned Opcode = VecReduceOp0.getOpcode();
- // Assumed v16i32 abs
- if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32)
- return SDValue();
- SDValue ABS = VecReduceOp0;
- // Assumed v16i32 sub
- if (ABS->getOperand(0)->getOpcode() != ISD::SUB ||
- ABS->getOperand(0)->getValueType(0) != MVT::v16i32)
- return SDValue();
- SDValue SUB = ABS->getOperand(0);
- unsigned Opcode0 = SUB->getOperand(0).getOpcode();
- unsigned Opcode1 = SUB->getOperand(1).getOpcode();
- // Assumed v16i32 type
- if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 ||
- SUB->getOperand(1)->getValueType(0) != MVT::v16i32)
- return SDValue();
- // Assumed zext or sext
- bool IsZExt = false;
- if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) {
- IsZExt = true;
- } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) {
- IsZExt = false;
- } else
- return SDValue();
- SDValue EXT0 = SUB->getOperand(0);
- SDValue EXT1 = SUB->getOperand(1);
- // Assumed zext's operand has v16i8 type
- if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 ||
- EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
- return SDValue();
- // The pattern is detected. Convert it to a sequence of nodes.
- SDLoc DL(N);
- // First, create the node pattern of UABD/SABD.
- SDValue UABDHigh8Op0 =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
- DAG.getConstant(8, DL, MVT::i64));
- SDValue UABDHigh8Op1 =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
- DAG.getConstant(8, DL, MVT::i64));
- SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
- UABDHigh8Op0, UABDHigh8Op1);
- SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8);
- // Second, create the node pattern of UABAL.
- SDValue UABDLo8Op0 =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
- DAG.getConstant(0, DL, MVT::i64));
- SDValue UABDLo8Op1 =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
- DAG.getConstant(0, DL, MVT::i64));
- SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
- UABDLo8Op0, UABDLo8Op1);
- SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8);
- SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD);
- // Third, create the node of UADDLP.
- SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL);
- // Fourth, create the node of VECREDUCE_ADD.
- return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
- }
- // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
- // vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one))
- // vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B))
- static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *ST) {
- if (!ST->hasDotProd())
- return performVecReduceAddCombineWithUADDLP(N, DAG);
- SDValue Op0 = N->getOperand(0);
- if (N->getValueType(0) != MVT::i32 ||
- Op0.getValueType().getVectorElementType() != MVT::i32)
- return SDValue();
- unsigned ExtOpcode = Op0.getOpcode();
- SDValue A = Op0;
- SDValue B;
- if (ExtOpcode == ISD::MUL) {
- A = Op0.getOperand(0);
- B = Op0.getOperand(1);
- if (A.getOpcode() != B.getOpcode() ||
- A.getOperand(0).getValueType() != B.getOperand(0).getValueType())
- return SDValue();
- ExtOpcode = A.getOpcode();
- }
- if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND)
- return SDValue();
- EVT Op0VT = A.getOperand(0).getValueType();
- if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8)
- return SDValue();
- SDLoc DL(Op0);
- // For non-mla reductions B can be set to 1. For MLA we take the operand of
- // the extend B.
- if (!B)
- B = DAG.getConstant(1, DL, Op0VT);
- else
- B = B.getOperand(0);
- SDValue Zeros =
- DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32);
- auto DotOpcode =
- (ExtOpcode == ISD::ZERO_EXTEND) ? AArch64ISD::UDOT : AArch64ISD::SDOT;
- SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros,
- A.getOperand(0), B);
- return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
- }
- // Given an (integer) vecreduce, we know the order of the inputs does not
- // matter. We can convert UADDV(add(zext(extract_lo(x)), zext(extract_hi(x))))
- // into UADDV(UADDLP(x)). This can also happen through an extra add, where we
- // transform UADDV(add(y, add(zext(extract_lo(x)), zext(extract_hi(x))))).
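- // For example, UADDV(add(zext(extract_lo(<16 x i8> %x)),
- //                        zext(extract_hi(<16 x i8> %x))))
- // becomes UADDV(UADDLP(%x)), where UADDLP yields the <8 x i16> pairwise sums.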
- static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
- auto DetectAddExtract = [&](SDValue A) {
- // Look for add(zext(extract_lo(x)), zext(extract_hi(x))), returning
- // UADDLP(x) if found.
- if (A.getOpcode() != ISD::ADD)
- return SDValue();
- EVT VT = A.getValueType();
- SDValue Op0 = A.getOperand(0);
- SDValue Op1 = A.getOperand(1);
- if (Op0.getOpcode() != Op1.getOpcode() ||
- (Op0.getOpcode() != ISD::ZERO_EXTEND &&
- Op0.getOpcode() != ISD::SIGN_EXTEND))
- return SDValue();
- SDValue Ext0 = Op0.getOperand(0);
- SDValue Ext1 = Op1.getOperand(0);
- if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
- Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
- Ext0.getOperand(0) != Ext1.getOperand(0))
- return SDValue();
- // Check that the type is twice the add types, and the extract are from
- // upper/lower parts of the same source.
- if (Ext0.getOperand(0).getValueType().getVectorNumElements() !=
- VT.getVectorNumElements() * 2)
- return SDValue();
- if ((Ext0.getConstantOperandVal(1) != 0 &&
- Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) &&
- (Ext1.getConstantOperandVal(1) != 0 &&
- Ext0.getConstantOperandVal(1) != VT.getVectorNumElements()))
- return SDValue();
- unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP
- : AArch64ISD::SADDLP;
- return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0));
- };
- SDValue A = N->getOperand(0);
- if (SDValue R = DetectAddExtract(A))
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R);
- if (A.getOpcode() == ISD::ADD) {
- if (SDValue R = DetectAddExtract(A.getOperand(0)))
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
- DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
- A.getOperand(1)));
- if (SDValue R = DetectAddExtract(A.getOperand(1)))
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
- DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
- A.getOperand(0)));
- }
- return SDValue();
- }
- static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- return foldVectorXorShiftIntoCmp(N, DAG, Subtarget);
- }
- SDValue
- AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
- SelectionDAG &DAG,
- SmallVectorImpl<SDNode *> &Created) const {
- AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
- if (isIntDivCheap(N->getValueType(0), Attr))
- return SDValue(N,0); // Lower SDIV as SDIV
- EVT VT = N->getValueType(0);
- // For scalable and fixed types, mark them as cheap so we can handle it much
- // later. This allows us to handle larger than legal types.
- if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
- return SDValue(N, 0);
- // fold (sdiv X, pow2)
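- // For example, 'sdiv %x, 8' becomes:
- //   add = %x + 7; sel = (%x < 0) ? add : %x; res = sra sel, 3
- // and for a negated power-of-two divisor the sra result is subtracted from 0.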
- if ((VT != MVT::i32 && VT != MVT::i64) ||
- !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
- return SDValue();
- SDLoc DL(N);
- SDValue N0 = N->getOperand(0);
- unsigned Lg2 = Divisor.countTrailingZeros();
- SDValue Zero = DAG.getConstant(0, DL, VT);
- SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
- // Add (N0 < 0) ? Pow2 - 1 : 0;
- SDValue CCVal;
- SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL);
- SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
- SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp);
- Created.push_back(Cmp.getNode());
- Created.push_back(Add.getNode());
- Created.push_back(CSel.getNode());
- // Divide by pow2.
- SDValue SRA =
- DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));
- // If we're dividing by a positive value, we're done. Otherwise, we must
- // negate the result.
- if (Divisor.isNonNegative())
- return SRA;
- Created.push_back(SRA.getNode());
- return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
- }
- SDValue
- AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
- SelectionDAG &DAG,
- SmallVectorImpl<SDNode *> &Created) const {
- AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
- if (isIntDivCheap(N->getValueType(0), Attr))
- return SDValue(N, 0); // Lower SREM as SREM
- EVT VT = N->getValueType(0);
- // For scalable and fixed types, mark them as cheap so we can handle it much
- // later. This allows us to handle larger than legal types.
- if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
- return SDValue(N, 0);
- // fold (srem X, pow2)
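- // For example, 'srem %x, 16' becomes:
- //   negs = SUBS(0, %x); pos = and %x, 15; neg = and negs, 15
- //   res  = CSNEG(pos, neg, MI)
- // so the remainder computed on -%x is negated back when %x is negative.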
- if ((VT != MVT::i32 && VT != MVT::i64) ||
- !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
- return SDValue();
- unsigned Lg2 = Divisor.countTrailingZeros();
- if (Lg2 == 0)
- return SDValue();
- SDLoc DL(N);
- SDValue N0 = N->getOperand(0);
- SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
- SDValue Zero = DAG.getConstant(0, DL, VT);
- SDValue CCVal, CSNeg;
- if (Lg2 == 1) {
- SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETGE, CCVal, DAG, DL);
- SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
- CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, And, And, CCVal, Cmp);
- Created.push_back(Cmp.getNode());
- Created.push_back(And.getNode());
- } else {
- SDValue CCVal = DAG.getConstant(AArch64CC::MI, DL, MVT_CC);
- SDVTList VTs = DAG.getVTList(VT, MVT::i32);
- SDValue Negs = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Zero, N0);
- SDValue AndPos = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
- SDValue AndNeg = DAG.getNode(ISD::AND, DL, VT, Negs, Pow2MinusOne);
- CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, AndPos, AndNeg, CCVal,
- Negs.getValue(1));
- Created.push_back(Negs.getNode());
- Created.push_back(AndPos.getNode());
- Created.push_back(AndNeg.getNode());
- }
- return CSNeg;
- }
- static std::optional<unsigned> IsSVECntIntrinsic(SDValue S) {
- switch(getIntrinsicID(S.getNode())) {
- default:
- break;
- case Intrinsic::aarch64_sve_cntb:
- return 8;
- case Intrinsic::aarch64_sve_cnth:
- return 16;
- case Intrinsic::aarch64_sve_cntw:
- return 32;
- case Intrinsic::aarch64_sve_cntd:
- return 64;
- }
- return {};
- }
- /// Calculates what the pre-extend type is, based on the extension
- /// operation node provided by \p Extend.
- ///
- /// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the
- /// pre-extend type is pulled directly from the operand, while other extend
- /// operations need a bit more inspection to get this information.
- ///
- /// \param Extend The SDNode from the DAG that represents the extend operation
- ///
- /// \returns The type representing the \p Extend source type, or \p MVT::Other
- /// if no valid type can be determined
- static EVT calculatePreExtendType(SDValue Extend) {
- switch (Extend.getOpcode()) {
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- return Extend.getOperand(0).getValueType();
- case ISD::AssertSext:
- case ISD::AssertZext:
- case ISD::SIGN_EXTEND_INREG: {
- VTSDNode *TypeNode = dyn_cast<VTSDNode>(Extend.getOperand(1));
- if (!TypeNode)
- return MVT::Other;
- return TypeNode->getVT();
- }
- case ISD::AND: {
- ConstantSDNode *Constant =
- dyn_cast<ConstantSDNode>(Extend.getOperand(1).getNode());
- if (!Constant)
- return MVT::Other;
- uint32_t Mask = Constant->getZExtValue();
- if (Mask == UCHAR_MAX)
- return MVT::i8;
- else if (Mask == USHRT_MAX)
- return MVT::i16;
- else if (Mask == UINT_MAX)
- return MVT::i32;
- return MVT::Other;
- }
- default:
- return MVT::Other;
- }
- }
- /// Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern
- /// into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector
- /// SExt/ZExt rather than the scalar SExt/ZExt
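- /// For example, BUILD_VECTOR(sext i16 %a to i32, sext i16 %b to i32, ...)
- /// becomes sext(BUILD_VECTOR(%a, %b, ...)), which lets the surrounding mul be
- /// matched as an smull/umull by later combines.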
- static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
- EVT VT = BV.getValueType();
- if (BV.getOpcode() != ISD::BUILD_VECTOR &&
- BV.getOpcode() != ISD::VECTOR_SHUFFLE)
- return SDValue();
- // Use the first item in the buildvector/shuffle to get the size of the
- // extend, and make sure it looks valid.
- SDValue Extend = BV->getOperand(0);
- unsigned ExtendOpcode = Extend.getOpcode();
- bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND ||
- ExtendOpcode == ISD::SIGN_EXTEND_INREG ||
- ExtendOpcode == ISD::AssertSext;
- if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND &&
- ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND)
- return SDValue();
- // Shuffle inputs are vectors; limit to SIGN_EXTEND and ZERO_EXTEND to ensure
- // calculatePreExtendType will work without issue.
- if (BV.getOpcode() == ISD::VECTOR_SHUFFLE &&
- ExtendOpcode != ISD::SIGN_EXTEND && ExtendOpcode != ISD::ZERO_EXTEND)
- return SDValue();
- // Restrict valid pre-extend data type
- EVT PreExtendType = calculatePreExtendType(Extend);
- if (PreExtendType == MVT::Other ||
- PreExtendType.getScalarSizeInBits() != VT.getScalarSizeInBits() / 2)
- return SDValue();
- // Make sure all other operands are equally extended
- for (SDValue Op : drop_begin(BV->ops())) {
- if (Op.isUndef())
- continue;
- unsigned Opc = Op.getOpcode();
- bool OpcIsSExt = Opc == ISD::SIGN_EXTEND || Opc == ISD::SIGN_EXTEND_INREG ||
- Opc == ISD::AssertSext;
- if (OpcIsSExt != IsSExt || calculatePreExtendType(Op) != PreExtendType)
- return SDValue();
- }
- SDValue NBV;
- SDLoc DL(BV);
- if (BV.getOpcode() == ISD::BUILD_VECTOR) {
- EVT PreExtendVT = VT.changeVectorElementType(PreExtendType);
- EVT PreExtendLegalType =
- PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
- SmallVector<SDValue, 8> NewOps;
- for (SDValue Op : BV->ops())
- NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
- : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
- PreExtendLegalType));
- NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
- } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
- EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
- NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
- BV.getOperand(1).isUndef()
- ? DAG.getUNDEF(PreExtendVT)
- : BV.getOperand(1).getOperand(0),
- cast<ShuffleVectorSDNode>(BV)->getMask());
- }
- return DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, NBV);
- }
- /// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup))
- /// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
- static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {
- // If the value type isn't a vector, none of the operands are going to be dups
- EVT VT = Mul->getValueType(0);
- if (VT != MVT::v8i16 && VT != MVT::v4i32 && VT != MVT::v2i64)
- return SDValue();
- SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG);
- SDValue Op1 = performBuildShuffleExtendCombine(Mul->getOperand(1), DAG);
- // Neither operands have been changed, don't make any further changes
- if (!Op0 && !Op1)
- return SDValue();
- SDLoc DL(Mul);
- return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0),
- Op1 ? Op1 : Mul->getOperand(1));
- }
- // Combine v4i32 Mul(And(Srl(X, 15), 0x10001), 0xffff) -> v8i16 CMLTz
- // Same for other types with equivalent constants.
- static SDValue performMulVectorCmpZeroCombine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- if (VT != MVT::v2i64 && VT != MVT::v1i64 && VT != MVT::v2i32 &&
- VT != MVT::v4i32 && VT != MVT::v4i16 && VT != MVT::v8i16)
- return SDValue();
- if (N->getOperand(0).getOpcode() != ISD::AND ||
- N->getOperand(0).getOperand(0).getOpcode() != ISD::SRL)
- return SDValue();
- SDValue And = N->getOperand(0);
- SDValue Srl = And.getOperand(0);
- APInt V1, V2, V3;
- if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), V1) ||
- !ISD::isConstantSplatVector(And.getOperand(1).getNode(), V2) ||
- !ISD::isConstantSplatVector(Srl.getOperand(1).getNode(), V3))
- return SDValue();
- unsigned HalfSize = VT.getScalarSizeInBits() / 2;
- if (!V1.isMask(HalfSize) || V2 != (1ULL | 1ULL << HalfSize) ||
- V3 != (HalfSize - 1))
- return SDValue();
- EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
- EVT::getIntegerVT(*DAG.getContext(), HalfSize),
- VT.getVectorElementCount() * 2);
- SDLoc DL(N);
- SDValue In = DAG.getNode(AArch64ISD::NVCAST, DL, HalfVT, Srl.getOperand(0));
- SDValue CM = DAG.getNode(AArch64ISD::CMLTz, DL, HalfVT, In);
- return DAG.getNode(AArch64ISD::NVCAST, DL, VT, CM);
- }
- static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- if (SDValue Ext = performMulVectorExtendCombine(N, DAG))
- return Ext;
- if (SDValue Ext = performMulVectorCmpZeroCombine(N, DAG))
- return Ext;
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- // Canonicalize X*(Y+1) -> X*Y+X and (X+1)*Y -> X*Y+Y,
- // and in MachineCombiner pass, add+mul will be combined into madd.
- // Similarly, X*(1-Y) -> X - X*Y and (1-Y)*X -> X - Y*X.
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue MulOper;
- unsigned AddSubOpc;
- auto IsAddSubWith1 = [&](SDValue V) -> bool {
- AddSubOpc = V->getOpcode();
- if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
- SDValue Opnd = V->getOperand(1);
- MulOper = V->getOperand(0);
- if (AddSubOpc == ISD::SUB)
- std::swap(Opnd, MulOper);
- if (auto C = dyn_cast<ConstantSDNode>(Opnd))
- return C->isOne();
- }
- return false;
- };
- if (IsAddSubWith1(N0)) {
- SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
- return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
- }
- if (IsAddSubWith1(N1)) {
- SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
- return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
- }
- // The below optimizations require a constant RHS.
- if (!isa<ConstantSDNode>(N1))
- return SDValue();
- ConstantSDNode *C = cast<ConstantSDNode>(N1);
- const APInt &ConstValue = C->getAPIntValue();
- // Allow the scaling to be folded into the `cnt` instruction by preventing
- // the scaling from being obscured here. This makes it easier to pattern match.
- if (IsSVECntIntrinsic(N0) ||
- (N0->getOpcode() == ISD::TRUNCATE &&
- (IsSVECntIntrinsic(N0->getOperand(0)))))
- if (ConstValue.sge(1) && ConstValue.sle(16))
- return SDValue();
- // Multiplication of a power of two plus/minus one can be done more
- // cheaply as a shift+add/sub. For now, this is true unilaterally. If
- // future CPUs have a cheaper MADD instruction, this may need to be
- // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
- // 64-bit is 5 cycles, so this is always a win.
- // More aggressively, some multiplications N0 * C can be lowered to
- // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
- // e.g. 6=3*2=(2+1)*2, 45=(1+4)*(1+8)
- // TODO: lower more cases.
- // TrailingZeroes is used to test if the mul can be lowered to
- // shift+add+shift.
- unsigned TrailingZeroes = ConstValue.countTrailingZeros();
- if (TrailingZeroes) {
- // Conservatively do not lower to shift+add+shift if the mul might be
- // folded into smul or umul.
- if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
- isZeroExtended(N0.getNode(), DAG)))
- return SDValue();
- // Conservatively do not lower to shift+add+shift if the mul might be
- // folded into madd or msub.
- if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
- N->use_begin()->getOpcode() == ISD::SUB))
- return SDValue();
- }
- // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
- // and shift+add+shift.
- APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
- unsigned ShiftAmt;
- auto Shl = [&](SDValue N0, unsigned N1) {
- SDValue RHS = DAG.getConstant(N1, DL, MVT::i64);
- return DAG.getNode(ISD::SHL, DL, VT, N0, RHS);
- };
- auto Add = [&](SDValue N0, SDValue N1) {
- return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
- };
- auto Sub = [&](SDValue N0, SDValue N1) {
- return DAG.getNode(ISD::SUB, DL, VT, N0, N1);
- };
- auto Negate = [&](SDValue N) {
- SDValue Zero = DAG.getConstant(0, DL, VT);
- return DAG.getNode(ISD::SUB, DL, VT, Zero, N);
- };
- // Can the constant C be decomposed into (1+2^M1)*(1+2^N1)? E.g. C = 45 equals
- // (1+4)*(1+8); we do not decompose it into (1+2)*(16-1) because a (2^N - 1)
- // factor cannot be executed with a single instruction.
- auto isPowPlusPlusConst = [](APInt C, APInt &M, APInt &N) {
- unsigned BitWidth = C.getBitWidth();
- for (unsigned i = 1; i < BitWidth / 2; i++) {
- APInt Rem;
- APInt X(BitWidth, (1 << i) + 1);
- APInt::sdivrem(C, X, N, Rem);
- APInt NVMinus1 = N - 1;
- if (Rem == 0 && NVMinus1.isPowerOf2()) {
- M = X;
- return true;
- }
- }
- return false;
- };
- if (ConstValue.isNonNegative()) {
- // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
- // (mul x, 2^N - 1) => (sub (shl x, N), x)
- // (mul x, (2^(N-M) - 1) * 2^M) => (sub (shl x, N), (shl x, M))
- // (mul x, (2^M + 1) * (2^N + 1))
- // => MV = (add (shl x, M), x); (add (shl MV, N), MV)
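- // For example, 6 = (2^1 + 1) * 2^1 gives (shl (add (shl x, 1), x), 1), and
- // with LSLFast 45 = (2^2 + 1) * (2^3 + 1) gives
- //   MV = (add (shl x, 2), x); (add (shl MV, 3), MV).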
- APInt SCVMinus1 = ShiftedConstValue - 1;
- APInt SCVPlus1 = ShiftedConstValue + 1;
- APInt CVPlus1 = ConstValue + 1;
- APInt CVM, CVN;
- if (SCVMinus1.isPowerOf2()) {
- ShiftAmt = SCVMinus1.logBase2();
- return Shl(Add(Shl(N0, ShiftAmt), N0), TrailingZeroes);
- } else if (CVPlus1.isPowerOf2()) {
- ShiftAmt = CVPlus1.logBase2();
- return Sub(Shl(N0, ShiftAmt), N0);
- } else if (SCVPlus1.isPowerOf2()) {
- ShiftAmt = SCVPlus1.logBase2() + TrailingZeroes;
- return Sub(Shl(N0, ShiftAmt), Shl(N0, TrailingZeroes));
- } else if (Subtarget->hasLSLFast() &&
- isPowPlusPlusConst(ConstValue, CVM, CVN)) {
- APInt CVMMinus1 = CVM - 1;
- APInt CVNMinus1 = CVN - 1;
- unsigned ShiftM1 = CVMMinus1.logBase2();
- unsigned ShiftN1 = CVNMinus1.logBase2();
- // LSLFast implies that shifts of up to 3 places are fast.
- if (ShiftM1 <= 3 && ShiftN1 <= 3) {
- SDValue MVal = Add(Shl(N0, ShiftM1), N0);
- return Add(Shl(MVal, ShiftN1), MVal);
- }
- }
- } else {
- // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
- // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
- // (mul x, -(2^(N-M) - 1) * 2^M) => (sub (shl x, M), (shl x, N))
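- // Illustrative worked example: C = -7 gives CVNegPlus1 = 8, a power of
- // two, so the mul lowers to (sub x, (shl x, 3)) == x - 8 * x == -7 * x.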
- APInt SCVPlus1 = -ShiftedConstValue + 1;
- APInt CVNegPlus1 = -ConstValue + 1;
- APInt CVNegMinus1 = -ConstValue - 1;
- if (CVNegPlus1.isPowerOf2()) {
- ShiftAmt = CVNegPlus1.logBase2();
- return Sub(N0, Shl(N0, ShiftAmt));
- } else if (CVNegMinus1.isPowerOf2()) {
- ShiftAmt = CVNegMinus1.logBase2();
- return Negate(Add(Shl(N0, ShiftAmt), N0));
- } else if (SCVPlus1.isPowerOf2()) {
- ShiftAmt = SCVPlus1.logBase2() + TrailingZeroes;
- return Sub(Shl(N0, TrailingZeroes), Shl(N0, ShiftAmt));
- }
- }
- return SDValue();
- }
- static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
- SelectionDAG &DAG) {
- // Take advantage of vector comparisons producing 0 or -1 in each lane to
- // optimize away operation when it's from a constant.
- //
- // The general transformation is:
- // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
- // AND(VECTOR_CMP(x,y), constant2)
- // constant2 = UNARYOP(constant)
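- // For example (illustrative): sint_to_fp(and(setcc, splat(1))) can become
- // bitcast(and(setcc, bitcast(splat(1.0)))): each compare lane is 0 or -1,
- // so the AND yields either 0 or the constant, and sint_to_fp(0) is 0.0
- // (an all-zero bit pattern), making it safe to convert the constant up
- // front.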
- // Early exit if this isn't a vector operation, the operand of the
- // unary operation isn't a bitwise AND, or if the sizes of the operations
- // aren't the same.
- EVT VT = N->getValueType(0);
- if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
- N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
- VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
- return SDValue();
- // Now check that the other operand of the AND is a constant. We could
- // make the transformation for non-constant splats as well, but it's unclear
- // that would be a benefit as it would not eliminate any operations, just
- // perform one more step in scalar code before moving to the vector unit.
- if (BuildVectorSDNode *BV =
- dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
- // Bail out if the vector isn't a constant.
- if (!BV->isConstant())
- return SDValue();
- // Everything checks out. Build up the new and improved node.
- SDLoc DL(N);
- EVT IntVT = BV->getValueType(0);
- // Create a new constant of the appropriate type for the transformed
- // DAG.
- SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
- // The AND node needs bitcasts to/from an integer vector type around it.
- SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
- SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
- N->getOperand(0)->getOperand(0), MaskConst);
- SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
- return Res;
- }
- return SDValue();
- }
- static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- // First try to optimize away the conversion when it's conditionally from
- // a constant. Vectors only.
- if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
- return Res;
- EVT VT = N->getValueType(0);
- if (VT != MVT::f32 && VT != MVT::f64)
- return SDValue();
- // Only optimize when the source and destination types have the same width.
- if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
- return SDValue();
- // If the result of an integer load is only used by an integer-to-float
- // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
- // This eliminates an "integer-to-vector-move" UOP and improves throughput.
- SDValue N0 = N->getOperand(0);
- if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
- // Do not change the width of a volatile load.
- !cast<LoadSDNode>(N0)->isVolatile()) {
- LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
- LN0->getPointerInfo(), LN0->getAlign(),
- LN0->getMemOperand()->getFlags());
- // Make sure successors of the original load stay after it by updating them
- // to use the new Chain.
- DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
- unsigned Opcode =
- (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF;
- return DAG.getNode(Opcode, SDLoc(N), VT, Load);
- }
- return SDValue();
- }
- /// Fold a floating-point multiply by power of two into floating-point to
- /// fixed-point conversion.
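- /// For example (illustrative): (fp_to_sint (fmul v4f32 X, splat 16.0)) can
- /// roughly map to a fixed-point FCVTZS with 4 fractional bits, folding the
- /// multiply into the conversion.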
- static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- if (!Subtarget->hasNEON() || Subtarget->forceStreamingCompatibleSVE())
- return SDValue();
- if (!N->getValueType(0).isSimple())
- return SDValue();
- SDValue Op = N->getOperand(0);
- if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL)
- return SDValue();
- if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector())
- return SDValue();
- SDValue ConstVec = Op->getOperand(1);
- if (!isa<BuildVectorSDNode>(ConstVec))
- return SDValue();
- MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
- uint32_t FloatBits = FloatTy.getSizeInBits();
- if (FloatBits != 32 && FloatBits != 64 &&
- (FloatBits != 16 || !Subtarget->hasFullFP16()))
- return SDValue();
- MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
- uint32_t IntBits = IntTy.getSizeInBits();
- if (IntBits != 16 && IntBits != 32 && IntBits != 64)
- return SDValue();
- // Avoid conversions where iN is larger than the float (e.g., float -> i64).
- if (IntBits > FloatBits)
- return SDValue();
- BitVector UndefElements;
- BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
- int32_t Bits = IntBits == 64 ? 64 : 32;
- int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1);
- if (C == -1 || C == 0 || C > Bits)
- return SDValue();
- EVT ResTy = Op.getValueType().changeVectorElementTypeToInteger();
- if (!DAG.getTargetLoweringInfo().isTypeLegal(ResTy))
- return SDValue();
- if (N->getOpcode() == ISD::FP_TO_SINT_SAT ||
- N->getOpcode() == ISD::FP_TO_UINT_SAT) {
- EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
- if (SatVT.getScalarSizeInBits() != IntBits || IntBits != FloatBits)
- return SDValue();
- }
- SDLoc DL(N);
- bool IsSigned = (N->getOpcode() == ISD::FP_TO_SINT ||
- N->getOpcode() == ISD::FP_TO_SINT_SAT);
- unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
- : Intrinsic::aarch64_neon_vcvtfp2fxu;
- SDValue FixConv =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
- DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
- Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
- // We can handle smaller integers by generating an extra trunc.
- if (IntBits < FloatBits)
- FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);
- return FixConv;
- }
- /// Fold a floating-point divide by power of two into fixed-point to
- /// floating-point conversion.
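- /// For example (illustrative): (fdiv (sint_to_fp v4i32 X), splat 16.0) can
- /// roughly map to a fixed-point SCVTF with 4 fractional bits, folding the
- /// divide into the conversion.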
- static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- if (!Subtarget->hasNEON())
- return SDValue();
- SDValue Op = N->getOperand(0);
- unsigned Opc = Op->getOpcode();
- if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
- !Op.getOperand(0).getValueType().isSimple() ||
- (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP))
- return SDValue();
- SDValue ConstVec = N->getOperand(1);
- if (!isa<BuildVectorSDNode>(ConstVec))
- return SDValue();
- MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
- int32_t IntBits = IntTy.getSizeInBits();
- if (IntBits != 16 && IntBits != 32 && IntBits != 64)
- return SDValue();
- MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
- int32_t FloatBits = FloatTy.getSizeInBits();
- if (FloatBits != 32 && FloatBits != 64)
- return SDValue();
- // Avoid conversions where iN is larger than the float (e.g., i64 -> float).
- if (IntBits > FloatBits)
- return SDValue();
- BitVector UndefElements;
- BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
- int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1);
- if (C == -1 || C == 0 || C > FloatBits)
- return SDValue();
- MVT ResTy;
- unsigned NumLanes = Op.getValueType().getVectorNumElements();
- switch (NumLanes) {
- default:
- return SDValue();
- case 2:
- ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
- break;
- case 4:
- ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
- break;
- }
- if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
- return SDValue();
- SDLoc DL(N);
- SDValue ConvInput = Op.getOperand(0);
- bool IsSigned = Opc == ISD::SINT_TO_FP;
- if (IntBits < FloatBits)
- ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
- ResTy, ConvInput);
- unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
- : Intrinsic::aarch64_neon_vcvtfxu2fp;
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
- DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput,
- DAG.getConstant(C, DL, MVT::i32));
- }
- /// An EXTR instruction is made up of two shifts, ORed together. This helper
- /// searches for and classifies those shifts.
- static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
- bool &FromHi) {
- if (N.getOpcode() == ISD::SHL)
- FromHi = false;
- else if (N.getOpcode() == ISD::SRL)
- FromHi = true;
- else
- return false;
- if (!isa<ConstantSDNode>(N.getOperand(1)))
- return false;
- ShiftAmount = N->getConstantOperandVal(1);
- Src = N->getOperand(0);
- return true;
- }
- /// EXTR instruction extracts a contiguous chunk of bits from two existing
- /// registers viewed as a high/low pair. This function looks for the pattern:
- /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
- /// with an EXTR. Can't quite be done in TableGen because the two immediates
- /// aren't independent.
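- /// Illustrative example (i64): (or (shl x, 48), (srl y, 16)) becomes
- /// EXTR x, y, #16, which selects 64 bits starting at bit 16 of the
- /// concatenation x:y.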
- static SDValue tryCombineToEXTR(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- assert(N->getOpcode() == ISD::OR && "Unexpected root");
- if (VT != MVT::i32 && VT != MVT::i64)
- return SDValue();
- SDValue LHS;
- uint32_t ShiftLHS = 0;
- bool LHSFromHi = false;
- if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
- return SDValue();
- SDValue RHS;
- uint32_t ShiftRHS = 0;
- bool RHSFromHi = false;
- if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
- return SDValue();
- // If they're both trying to come from the high part of the register, they're
- // not really an EXTR.
- if (LHSFromHi == RHSFromHi)
- return SDValue();
- if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
- return SDValue();
- if (LHSFromHi) {
- std::swap(LHS, RHS);
- std::swap(ShiftLHS, ShiftRHS);
- }
- return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
- DAG.getConstant(ShiftRHS, DL, MVT::i64));
- }
- static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- const AArch64TargetLowering &TLI) {
- EVT VT = N->getValueType(0);
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- if (!VT.isVector())
- return SDValue();
- // The combining code currently only works for NEON vectors. In particular,
- // it does not work for SVE when dealing with vectors wider than 128 bits.
- // It also doesn't work in streaming mode because it would generate BSL
- // instructions that are invalid in streaming mode.
- if (TLI.useSVEForFixedLengthVectorVT(
- VT,
- DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE()))
- return SDValue();
- SDValue N0 = N->getOperand(0);
- if (N0.getOpcode() != ISD::AND)
- return SDValue();
- SDValue N1 = N->getOperand(1);
- if (N1.getOpcode() != ISD::AND)
- return SDValue();
- // InstCombine does (not (neg a)) => (add a -1).
- // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
- // Loop over all combinations of AND operands.
- for (int i = 1; i >= 0; --i) {
- for (int j = 1; j >= 0; --j) {
- SDValue O0 = N0->getOperand(i);
- SDValue O1 = N1->getOperand(j);
- SDValue Sub, Add, SubSibling, AddSibling;
- // Find a SUB and an ADD operand, one from each AND.
- if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
- Sub = O0;
- Add = O1;
- SubSibling = N0->getOperand(1 - i);
- AddSibling = N1->getOperand(1 - j);
- } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
- Add = O0;
- Sub = O1;
- AddSibling = N0->getOperand(1 - i);
- SubSibling = N1->getOperand(1 - j);
- } else
- continue;
- if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode()))
- continue;
- // The all-ones constant (-1) is always the right-hand operand of the ADD.
- if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode()))
- continue;
- if (Sub.getOperand(1) != Add.getOperand(0))
- continue;
- return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
- }
- }
- // (or (and a b) (and (not a) c)) => (bsl a b c)
- // We only have to look for constant vectors here since the general, variable
- // case can be handled in TableGen.
- unsigned Bits = VT.getScalarSizeInBits();
- uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
- for (int i = 1; i >= 0; --i)
- for (int j = 1; j >= 0; --j) {
- BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
- BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
- if (!BVN0 || !BVN1)
- continue;
- bool FoundMatch = true;
- for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
- ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
- ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
- if (!CN0 || !CN1 ||
- CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
- FoundMatch = false;
- break;
- }
- }
- if (FoundMatch)
- return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0),
- N0->getOperand(1 - i), N1->getOperand(1 - j));
- }
- return SDValue();
- }
- // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
- // convert to csel(ccmp(.., cc0)), depending on cc1:
- // (AND (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
- // =>
- // (CSET cc1 (CCMP x1 y1 !cc1 cc0 cmp0))
- //
- // (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
- // =>
- // (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0))
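- // Illustrative C-level example (not from the original comments): for
- // "a == 0 && b > 5" the AND of the two CSETs can lower to roughly
- // cmp a, #0; ccmp b, #5, #<nzcv>, eq; cset res, gt
- // where #<nzcv> encodes flags that fail the second condition whenever the
- // first compare does not hold.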
- static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- SDValue CSel0 = N->getOperand(0);
- SDValue CSel1 = N->getOperand(1);
- if (CSel0.getOpcode() != AArch64ISD::CSEL ||
- CSel1.getOpcode() != AArch64ISD::CSEL)
- return SDValue();
- if (!CSel0->hasOneUse() || !CSel1->hasOneUse())
- return SDValue();
- if (!isNullConstant(CSel0.getOperand(0)) ||
- !isOneConstant(CSel0.getOperand(1)) ||
- !isNullConstant(CSel1.getOperand(0)) ||
- !isOneConstant(CSel1.getOperand(1)))
- return SDValue();
- SDValue Cmp0 = CSel0.getOperand(3);
- SDValue Cmp1 = CSel1.getOperand(3);
- AArch64CC::CondCode CC0 = (AArch64CC::CondCode)CSel0.getConstantOperandVal(2);
- AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2);
- if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
- return SDValue();
- if (Cmp1.getOpcode() != AArch64ISD::SUBS &&
- Cmp0.getOpcode() == AArch64ISD::SUBS) {
- std::swap(Cmp0, Cmp1);
- std::swap(CC0, CC1);
- }
- if (Cmp1.getOpcode() != AArch64ISD::SUBS)
- return SDValue();
- SDLoc DL(N);
- SDValue CCmp, Condition;
- unsigned NZCV;
- if (N->getOpcode() == ISD::AND) {
- AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0);
- Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
- NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
- } else {
- AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1);
- Condition = DAG.getConstant(CC0, DL, MVT_CC);
- NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
- }
- SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
- auto *Op1 = dyn_cast<ConstantSDNode>(Cmp1.getOperand(1));
- if (Op1 && Op1->getAPIntValue().isNegative() &&
- Op1->getAPIntValue().sgt(-32)) {
- // CCMP accepts an immediate in the range [0, 31]. If Op1 is a constant
- // in the range [-31, -1], we can select CCMN with the absolute value
- // instead to avoid the extra mov needed to materialize the constant.
- SDValue AbsOp1 =
- DAG.getConstant(Op1->getAPIntValue().abs(), DL, Op1->getValueType(0));
- CCmp = DAG.getNode(AArch64ISD::CCMN, DL, MVT_CC, Cmp1.getOperand(0), AbsOp1,
- NZCVOp, Condition, Cmp0);
- } else {
- CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
- Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
- }
- return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0),
- CSel0.getOperand(1), DAG.getConstant(CC1, DL, MVT::i32),
- CCmp);
- }
- static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget,
- const AArch64TargetLowering &TLI) {
- SelectionDAG &DAG = DCI.DAG;
- EVT VT = N->getValueType(0);
- if (SDValue R = performANDORCSELCombine(N, DAG))
- return R;
- if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
- return SDValue();
- // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
- if (SDValue Res = tryCombineToEXTR(N, DCI))
- return Res;
- if (SDValue Res = tryCombineToBSL(N, DCI, TLI))
- return Res;
- return SDValue();
- }
- static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
- if (!MemVT.getVectorElementType().isSimple())
- return false;
- uint64_t MaskForTy = 0ull;
- switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
- case MVT::i8:
- MaskForTy = 0xffull;
- break;
- case MVT::i16:
- MaskForTy = 0xffffull;
- break;
- case MVT::i32:
- MaskForTy = 0xffffffffull;
- break;
- default:
- return false;
- }
- if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
- if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
- return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
- return false;
- }
- static bool isAllInactivePredicate(SDValue N) {
- // Look through cast.
- while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
- N = N.getOperand(0);
- return ISD::isConstantSplatVectorAllZeros(N.getNode());
- }
- static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
- unsigned NumElts = N.getValueType().getVectorMinNumElements();
- // Look through cast.
- while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
- N = N.getOperand(0);
- // When reinterpreting from a type with fewer elements the "new" elements
- // are not active, so bail if they're likely to be used.
- if (N.getValueType().getVectorMinNumElements() < NumElts)
- return false;
- }
- if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
- return true;
- // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
- // or smaller than the implicit element type represented by N.
- // NOTE: A larger element count implies a smaller element type.
- if (N.getOpcode() == AArch64ISD::PTRUE &&
- N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
- return N.getValueType().getVectorMinNumElements() >= NumElts;
- // If we're compiling for a specific vector-length, we can check if the
- // pattern's VL equals that of the scalable vector at runtime.
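- // Illustrative example: if the SVE vector length is known to be exactly
- // 256 bits, VScale = 2, so for an nxv4i1 operand (NumElts = 4) a
- // "ptrue p.s, vl8" pattern covers 8 == 4 * 2 elements and is all active.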
- if (N.getOpcode() == AArch64ISD::PTRUE) {
- const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
- unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
- if (MaxSVESize && MinSVESize == MaxSVESize) {
- unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
- unsigned PatNumElts =
- getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
- return PatNumElts == (NumElts * VScale);
- }
- }
- return false;
- }
- static SDValue performReinterpretCastCombine(SDNode *N) {
- SDValue LeafOp = SDValue(N, 0);
- SDValue Op = N->getOperand(0);
- while (Op.getOpcode() == AArch64ISD::REINTERPRET_CAST &&
- LeafOp.getValueType() != Op.getValueType())
- Op = Op->getOperand(0);
- if (LeafOp.getValueType() == Op.getValueType())
- return Op;
- return SDValue();
- }
- static SDValue performSVEAndCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- SelectionDAG &DAG = DCI.DAG;
- SDValue Src = N->getOperand(0);
- unsigned Opc = Src->getOpcode();
- // Zero/any extend of an unsigned unpack
- if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
- SDValue UnpkOp = Src->getOperand(0);
- SDValue Dup = N->getOperand(1);
- if (Dup.getOpcode() != ISD::SPLAT_VECTOR)
- return SDValue();
- SDLoc DL(N);
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0));
- if (!C)
- return SDValue();
- uint64_t ExtVal = C->getZExtValue();
- // If the mask is fully covered by the unpack, we don't need to push
- // a new AND onto the operand
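- // For example (illustrative): (and (uunpklo X:nxv16i8), splat(0xFF)) is
- // just (uunpklo X), because the unpack already zero-extends each i8 lane.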
- EVT EltTy = UnpkOp->getValueType(0).getVectorElementType();
- if ((ExtVal == 0xFF && EltTy == MVT::i8) ||
- (ExtVal == 0xFFFF && EltTy == MVT::i16) ||
- (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32))
- return Src;
- // Truncate to prevent a DUP with an over-wide constant
- APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits());
- // Otherwise, make sure we propagate the AND to the operand
- // of the unpack
- Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0),
- DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32));
- SDValue And = DAG.getNode(ISD::AND, DL,
- UnpkOp->getValueType(0), UnpkOp, Dup);
- return DAG.getNode(Opc, DL, N->getValueType(0), And);
- }
- // If either operand of the AND is an all-active predicate, the AND is a
- // no-op and we can return the other operand directly.
- if (isAllActivePredicate(DAG, N->getOperand(0)))
- return N->getOperand(1);
- if (isAllActivePredicate(DAG, N->getOperand(1)))
- return N->getOperand(0);
- if (!EnableCombineMGatherIntrinsics)
- return SDValue();
- SDValue Mask = N->getOperand(1);
- if (!Src.hasOneUse())
- return SDValue();
- EVT MemVT;
- // SVE load instructions perform an implicit zero-extend, which makes them
- // perfect candidates for combining.
- switch (Opc) {
- case AArch64ISD::LD1_MERGE_ZERO:
- case AArch64ISD::LDNF1_MERGE_ZERO:
- case AArch64ISD::LDFF1_MERGE_ZERO:
- MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
- break;
- case AArch64ISD::GLD1_MERGE_ZERO:
- case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
- case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
- case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1_IMM_MERGE_ZERO:
- case AArch64ISD::GLDFF1_MERGE_ZERO:
- case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
- case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
- case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
- case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
- case AArch64ISD::GLDNT1_MERGE_ZERO:
- MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
- break;
- default:
- return SDValue();
- }
- if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
- return Src;
- return SDValue();
- }
- static SDValue performANDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- SelectionDAG &DAG = DCI.DAG;
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- EVT VT = N->getValueType(0);
- if (SDValue R = performANDORCSELCombine(N, DAG))
- return R;
- if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
- return SDValue();
- if (VT.isScalableVector())
- return performSVEAndCombine(N, DCI);
- // The combining code below works only for NEON vectors. In particular, it
- // does not work for SVE when dealing with vectors wider than 128 bits.
- if (!VT.is64BitVector() && !VT.is128BitVector())
- return SDValue();
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
- if (!BVN)
- return SDValue();
- // AND does not accept an immediate, so check if we can use a BIC immediate
- // instruction instead. We do this here instead of using a (and x, (mvni imm))
- // pattern in isel, because some immediates may be lowered to the preferred
- // (and x, (movi imm)) form, even though an mvni representation also exists.
- APInt DefBits(VT.getSizeInBits(), 0);
- APInt UndefBits(VT.getSizeInBits(), 0);
- if (resolveBuildVector(BVN, DefBits, UndefBits)) {
- SDValue NewOp;
- DefBits = ~DefBits;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
- DefBits, &LHS)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
- DefBits, &LHS)))
- return NewOp;
- UndefBits = ~UndefBits;
- if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
- UndefBits, &LHS)) ||
- (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
- UndefBits, &LHS)))
- return NewOp;
- }
- return SDValue();
- }
- static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
- switch (Opcode) {
- case ISD::STRICT_FADD:
- case ISD::FADD:
- return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64;
- case ISD::ADD:
- return VT == MVT::i64;
- default:
- return false;
- }
- }
- static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
- AArch64CC::CondCode Cond);
- static bool isPredicateCCSettingOp(SDValue N) {
- if ((N.getOpcode() == ISD::SETCC) ||
- (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
- (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
- N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
- // get_active_lane_mask is lowered to a whilelo instruction.
- N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask)))
- return true;
- return false;
- }
- // Materialize : i1 = extract_vector_elt t37, Constant:i64<0>
- // ... into: "ptrue p, all" + PTEST
- static SDValue
- performFirstTrueTestVectorCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
- // Make sure PTEST can be legalised with illegal types.
- if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
- return SDValue();
- SDValue N0 = N->getOperand(0);
- EVT VT = N0.getValueType();
- if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1 ||
- !isNullConstant(N->getOperand(1)))
- return SDValue();
- // Restrict the DAG combine to only cases where we're extracting from a
- // flag-setting operation.
- if (!isPredicateCCSettingOp(N0))
- return SDValue();
- // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0
- SelectionDAG &DAG = DCI.DAG;
- SDValue Pg = getPTrue(DAG, SDLoc(N), VT, AArch64SVEPredPattern::all);
- return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE);
- }
- // Materialize : Idx = (add (mul vscale, NumEls), -1)
- // i1 = extract_vector_elt t37, Constant:i64<Idx>
- // ... into: "ptrue p, all" + PTEST
- static SDValue
- performLastTrueTestVectorCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
- // Make sure PTEST can be legalised with illegal types.
- if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
- return SDValue();
- SDValue N0 = N->getOperand(0);
- EVT OpVT = N0.getValueType();
- if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
- return SDValue();
- // Idx == (add (mul vscale, NumEls), -1)
- SDValue Idx = N->getOperand(1);
- if (Idx.getOpcode() != ISD::ADD || !isAllOnesConstant(Idx.getOperand(1)))
- return SDValue();
- SDValue VS = Idx.getOperand(0);
- if (VS.getOpcode() != ISD::VSCALE)
- return SDValue();
- unsigned NumEls = OpVT.getVectorElementCount().getKnownMinValue();
- if (VS.getConstantOperandVal(0) != NumEls)
- return SDValue();
- // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0
- SelectionDAG &DAG = DCI.DAG;
- SDValue Pg = getPTrue(DAG, SDLoc(N), OpVT, AArch64SVEPredPattern::all);
- return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
- }
- static SDValue
- performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
- if (SDValue Res = performFirstTrueTestVectorCombine(N, DCI, Subtarget))
- return Res;
- if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
- return Res;
- SelectionDAG &DAG = DCI.DAG;
- SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
- ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);
- EVT VT = N->getValueType(0);
- const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
- bool IsStrict = N0->isStrictFPOpcode();
- // extract(dup x) -> x
- if (N0.getOpcode() == AArch64ISD::DUP)
- return DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
- // Rewrite for pairwise fadd pattern
- // (f32 (extract_vector_elt
- // (fadd (vXf32 Other)
- // (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0))
- // ->
- // (f32 (fadd (extract_vector_elt (vXf32 Other) 0)
- // (extract_vector_elt (vXf32 Other) 1))
- // For strict_fadd we need to make sure the old strict_fadd can be deleted, so
- // we can only do this when it's used only by the extract_vector_elt.
- if (ConstantN1 && ConstantN1->getZExtValue() == 0 &&
- hasPairwiseAdd(N0->getOpcode(), VT, FullFP16) &&
- (!IsStrict || N0.hasOneUse())) {
- SDLoc DL(N0);
- SDValue N00 = N0->getOperand(IsStrict ? 1 : 0);
- SDValue N01 = N0->getOperand(IsStrict ? 2 : 1);
- ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(N01);
- SDValue Other = N00;
- // And handle the commutative case.
- if (!Shuffle) {
- Shuffle = dyn_cast<ShuffleVectorSDNode>(N00);
- Other = N01;
- }
- if (Shuffle && Shuffle->getMaskElt(0) == 1 &&
- Other == Shuffle->getOperand(0)) {
- SDValue Extract1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
- DAG.getConstant(0, DL, MVT::i64));
- SDValue Extract2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
- DAG.getConstant(1, DL, MVT::i64));
- if (!IsStrict)
- return DAG.getNode(N0->getOpcode(), DL, VT, Extract1, Extract2);
- // For strict_fadd we need uses of the final extract_vector to be replaced
- // with the strict_fadd, but we also need uses of the chain output of the
- // original strict_fadd to use the chain output of the new strict_fadd as
- // otherwise it may not be deleted.
- SDValue Ret = DAG.getNode(N0->getOpcode(), DL,
- {VT, MVT::Other},
- {N0->getOperand(0), Extract1, Extract2});
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret);
- DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Ret.getValue(1));
- return SDValue(N, 0);
- }
- }
- return SDValue();
- }
- static SDValue performConcatVectorsCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- SDLoc dl(N);
- EVT VT = N->getValueType(0);
- SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
- unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode();
- if (VT.isScalableVector())
- return SDValue();
- // Optimize concat_vectors of truncated vectors, where the intermediate
- // type is illegal, to avoid said illegality, e.g.,
- // (v4i16 (concat_vectors (v2i16 (truncate (v2i64))),
- // (v2i16 (truncate (v2i64)))))
- // ->
- // (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))),
- // (v4i32 (bitcast (v2i64))),
- // <0, 2, 4, 6>)))
- // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed
- // on both input and result type, so we might generate worse code.
- // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8.
- if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
- N1Opc == ISD::TRUNCATE) {
- SDValue N00 = N0->getOperand(0);
- SDValue N10 = N1->getOperand(0);
- EVT N00VT = N00.getValueType();
- if (N00VT == N10.getValueType() &&
- (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) &&
- N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) {
- MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16);
- SmallVector<int, 8> Mask(MidVT.getVectorNumElements());
- for (size_t i = 0; i < Mask.size(); ++i)
- Mask[i] = i * 2;
- return DAG.getNode(ISD::TRUNCATE, dl, VT,
- DAG.getVectorShuffle(
- MidVT, dl,
- DAG.getNode(ISD::BITCAST, dl, MidVT, N00),
- DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask));
- }
- }
- if (N->getOperand(0).getValueType() == MVT::v4i8) {
- // If we have a concat of v4i8 loads, convert them to a buildvector of f32
- // loads to prevent having to go through the v4i8 load legalization that
- // needs to extend each element into a larger type.
- if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) {
- if (V.getValueType() != MVT::v4i8)
- return false;
- if (V.isUndef())
- return true;
- LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
- return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
- LD->getExtensionType() == ISD::NON_EXTLOAD;
- })) {
- EVT NVT =
- EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands());
- SmallVector<SDValue> Ops;
- for (unsigned i = 0; i < N->getNumOperands(); i++) {
- SDValue V = N->getOperand(i);
- if (V.isUndef())
- Ops.push_back(DAG.getUNDEF(MVT::f32));
- else {
- LoadSDNode *LD = cast<LoadSDNode>(V);
- SDValue NewLoad =
- DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(),
- LD->getMemOperand());
- DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
- Ops.push_back(NewLoad);
- }
- }
- return DAG.getBitcast(N->getValueType(0),
- DAG.getBuildVector(NVT, dl, Ops));
- }
- }
- // Canonicalise concat_vectors to replace concatenations of truncated nots
- // with nots of concatenated truncates. This in some cases allows for multiple
- // redundant negations to be eliminated.
- // (concat_vectors (v4i16 (truncate (not (v4i32)))),
- // (v4i16 (truncate (not (v4i32)))))
- // ->
- // (not (concat_vectors (v4i16 (truncate (v4i32))),
- // (v4i16 (truncate (v4i32)))))
- if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
- N1Opc == ISD::TRUNCATE && N->isOnlyUserOf(N0.getNode()) &&
- N->isOnlyUserOf(N1.getNode())) {
- auto isBitwiseVectorNegate = [](SDValue V) {
- return V->getOpcode() == ISD::XOR &&
- ISD::isConstantSplatVectorAllOnes(V.getOperand(1).getNode());
- };
- SDValue N00 = N0->getOperand(0);
- SDValue N10 = N1->getOperand(0);
- if (isBitwiseVectorNegate(N00) && N0->isOnlyUserOf(N00.getNode()) &&
- isBitwiseVectorNegate(N10) && N1->isOnlyUserOf(N10.getNode())) {
- return DAG.getNOT(
- dl,
- DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- DAG.getNode(ISD::TRUNCATE, dl, N0.getValueType(),
- N00->getOperand(0)),
- DAG.getNode(ISD::TRUNCATE, dl, N1.getValueType(),
- N10->getOperand(0))),
- VT);
- }
- }
- // Wait till after everything is legalized to try this. That way we have
- // legal vector types and such.
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
- // extracted subvectors from the same original vectors. Combine these into a
- // single avg that operates on the two original vectors.
- // avgceil is the target-independent name for rhadd, avgfloor for hadd.
- // Example:
- // (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
- // extract_subvector (v16i8 OpB, <0>))),
- // (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
- // extract_subvector (v16i8 OpB, <8>)))))
- // ->
- // (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
- if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
- (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
- N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
- SDValue N00 = N0->getOperand(0);
- SDValue N01 = N0->getOperand(1);
- SDValue N10 = N1->getOperand(0);
- SDValue N11 = N1->getOperand(1);
- EVT N00VT = N00.getValueType();
- EVT N10VT = N10.getValueType();
- if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
- SDValue N00Source = N00->getOperand(0);
- SDValue N01Source = N01->getOperand(0);
- SDValue N10Source = N10->getOperand(0);
- SDValue N11Source = N11->getOperand(0);
- if (N00Source == N10Source && N01Source == N11Source &&
- N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
- assert(N0.getValueType() == N1.getValueType());
- uint64_t N00Index = N00.getConstantOperandVal(1);
- uint64_t N01Index = N01.getConstantOperandVal(1);
- uint64_t N10Index = N10.getConstantOperandVal(1);
- uint64_t N11Index = N11.getConstantOperandVal(1);
- if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
- N10Index == N00VT.getVectorNumElements())
- return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
- }
- }
- }
- // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
- // splat. The indexed instructions are going to be expecting a DUPLANE64, so
- // canonicalise to that.
- if (N->getNumOperands() == 2 && N0 == N1 && VT.getVectorNumElements() == 2) {
- assert(VT.getScalarSizeInBits() == 64);
- return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
- DAG.getConstant(0, dl, MVT::i64));
- }
- // Canonicalise concat_vectors so that the right-hand vector has as few
- // bit-casts as possible before its real operation. The primary matching
- // destination for these operations will be the narrowing "2" instructions,
- // which depend on the operation being performed on this right-hand vector.
- // For example,
- // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS))))
- // becomes
- // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))
- if (N->getNumOperands() != 2 || N1Opc != ISD::BITCAST)
- return SDValue();
- SDValue RHS = N1->getOperand(0);
- MVT RHSTy = RHS.getValueType().getSimpleVT();
- // If the RHS is not a vector, this is not the pattern we're looking for.
- if (!RHSTy.isVector())
- return SDValue();
- LLVM_DEBUG(
- dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
- MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
- RHSTy.getVectorNumElements() * 2);
- return DAG.getNode(ISD::BITCAST, dl, VT,
- DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
- DAG.getNode(ISD::BITCAST, dl, RHSTy, N0),
- RHS));
- }
- static SDValue
- performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- EVT VT = N->getValueType(0);
- if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
- return SDValue();
- SDValue V = N->getOperand(0);
- // NOTE: This combine exists in DAGCombiner, but that version's legality check
- // blocks this combine because the non-const case requires custom lowering.
- //
- // ty1 extract_vector(ty2 splat(const))) -> ty1 splat(const)
- if (V.getOpcode() == ISD::SPLAT_VECTOR)
- if (isa<ConstantSDNode>(V.getOperand(0)))
- return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0));
- return SDValue();
- }
- static SDValue
- performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Vec = N->getOperand(0);
- SDValue SubVec = N->getOperand(1);
- uint64_t IdxVal = N->getConstantOperandVal(2);
- EVT VecVT = Vec.getValueType();
- EVT SubVT = SubVec.getValueType();
- // Only do this for legal fixed vector types.
- if (!VecVT.isFixedLengthVector() ||
- !DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
- !DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
- return SDValue();
- // Ignore widening patterns.
- if (IdxVal == 0 && Vec.isUndef())
- return SDValue();
- // Subvector must be half the width and an "aligned" insertion.
- unsigned NumSubElts = SubVT.getVectorNumElements();
- if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
- (IdxVal != 0 && IdxVal != NumSubElts))
- return SDValue();
- // Fold insert_subvector -> concat_vectors
- // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
- // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
- SDValue Lo, Hi;
- if (IdxVal == 0) {
- Lo = SubVec;
- Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
- DAG.getVectorIdxConstant(NumSubElts, DL));
- } else {
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
- DAG.getVectorIdxConstant(0, DL));
- Hi = SubVec;
- }
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
- }
- static SDValue tryCombineFixedPointConvert(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- // Wait until after everything is legalized to try this. That way we have
- // legal vector types and such.
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- // Transform a scalar conversion of a value from a lane extract into a
- // lane extract of a vector conversion. E.g., from foo1 to foo2:
- // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
- // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
- //
- // The second form interacts better with instruction selection and the
- // register allocator to avoid cross-class register copies that aren't
- // coalescable due to a lane reference.
- // Check the operand and see if it originates from a lane extract.
- SDValue Op1 = N->getOperand(1);
- if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return SDValue();
- // Yep, no additional predication needed. Perform the transform.
- SDValue IID = N->getOperand(0);
- SDValue Shift = N->getOperand(2);
- SDValue Vec = Op1.getOperand(0);
- SDValue Lane = Op1.getOperand(1);
- EVT ResTy = N->getValueType(0);
- EVT VecResTy;
- SDLoc DL(N);
- // The vector width should be 128 bits by the time we get here, even
- // if it started as 64 bits (the extract_vector handling will have
- // done so). Bail if it is not.
- if (Vec.getValueSizeInBits() != 128)
- return SDValue();
- if (Vec.getValueType() == MVT::v4i32)
- VecResTy = MVT::v4f32;
- else if (Vec.getValueType() == MVT::v2i64)
- VecResTy = MVT::v2f64;
- else
- return SDValue();
- SDValue Convert =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
- }
- // AArch64 high-vector "long" operations are formed by performing the non-high
- // version on an extract_subvector of each operand which gets the high half:
- //
- // (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
- //
- // However, there are cases which don't have an extract_high explicitly, but
- // have another operation that can be made compatible with one for free. For
- // example:
- //
- // (dupv64 scalar) --> (extract_high (dup128 scalar))
- //
- // This routine does the actual conversion of such DUPs, once outer routines
- // have determined that everything else is in order.
- // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
- // similarly here.
- static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
- MVT VT = N.getSimpleValueType();
- if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- N.getConstantOperandVal(1) == 0)
- N = N.getOperand(0);
- switch (N.getOpcode()) {
- case AArch64ISD::DUP:
- case AArch64ISD::DUPLANE8:
- case AArch64ISD::DUPLANE16:
- case AArch64ISD::DUPLANE32:
- case AArch64ISD::DUPLANE64:
- case AArch64ISD::MOVI:
- case AArch64ISD::MOVIshift:
- case AArch64ISD::MOVIedit:
- case AArch64ISD::MOVImsl:
- case AArch64ISD::MVNIshift:
- case AArch64ISD::MVNImsl:
- break;
- default:
- // FMOV could be supported, but isn't very useful, as it would only occur
- // if you passed a bitcast floating-point immediate to an eligible long
- // integer op (addl, smull, ...).
- return SDValue();
- }
- if (!VT.is64BitVector())
- return SDValue();
- SDLoc DL(N);
- unsigned NumElems = VT.getVectorNumElements();
- if (N.getValueType().is64BitVector()) {
- MVT ElementTy = VT.getVectorElementType();
- MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
- N = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
- }
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N,
- DAG.getConstant(NumElems, DL, MVT::i64));
- }
- static bool isEssentiallyExtractHighSubvector(SDValue N) {
- if (N.getOpcode() == ISD::BITCAST)
- N = N.getOperand(0);
- if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR)
- return false;
- if (N.getOperand(0).getValueType().isScalableVector())
- return false;
- return cast<ConstantSDNode>(N.getOperand(1))->getAPIntValue() ==
- N.getOperand(0).getValueType().getVectorNumElements() / 2;
- }
- /// Helper structure to keep track of ISD::SET_CC operands.
- struct GenericSetCCInfo {
- const SDValue *Opnd0;
- const SDValue *Opnd1;
- ISD::CondCode CC;
- };
- /// Helper structure to keep track of a SET_CC lowered into AArch64 code.
- struct AArch64SetCCInfo {
- const SDValue *Cmp;
- AArch64CC::CondCode CC;
- };
- /// Helper structure to keep track of SetCC information.
- union SetCCInfo {
- GenericSetCCInfo Generic;
- AArch64SetCCInfo AArch64;
- };
- /// Helper structure used to read SetCC information. If the IsAArch64 field
- /// is set to true, Info is an AArch64SetCCInfo; otherwise Info is a
- /// GenericSetCCInfo.
- struct SetCCInfoAndKind {
- SetCCInfo Info;
- bool IsAArch64;
- };
- /// Check whether or not \p Op is a SET_CC operation, either a generic or an
- /// AArch64 lowered one.
- /// \p SetCCInfo is filled accordingly.
- /// \post SetCCInfo is meaningful only when this function returns true.
- /// \return True when Op is a kind of SET_CC operation.
- static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
- // If this is a setcc, this is straightforward.
- if (Op.getOpcode() == ISD::SETCC) {
- SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
- SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
- SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- SetCCInfo.IsAArch64 = false;
- return true;
- }
- // Otherwise, check if this is a matching csel instruction.
- // In other words:
- // - csel 1, 0, cc
- // - csel 0, 1, !cc
- if (Op.getOpcode() != AArch64ISD::CSEL)
- return false;
- // Set the information about the operands.
- // TODO: we want the operands of the Cmp not the csel
- SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
- SetCCInfo.IsAArch64 = true;
- SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
- cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
- // Check that the operands match the constraints:
- // (1) Both operands must be constants.
- // (2) One must be 1 and the other must be 0.
- ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
- ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));
- // Check (1).
- if (!TValue || !FValue)
- return false;
- // Check (2).
- if (!TValue->isOne()) {
- // Update the comparison when we are interested in !cc.
- std::swap(TValue, FValue);
- SetCCInfo.Info.AArch64.CC =
- AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
- }
- return TValue->isOne() && FValue->isZero();
- }
- // Returns true if Op is setcc or zext of setcc.
- static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {
- if (isSetCC(Op, Info))
- return true;
- return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
- isSetCC(Op->getOperand(0), Info));
- }
- // The folding we want to perform is:
- // (add x, [zext] (setcc cc ...) )
- // -->
- // (csel x, (add x, 1), !cc ...)
- //
- // The latter will get matched to a CSINC instruction.
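- // Illustrative C-level example: "x + (a < b)" can lower to
- // cmp a, b; cinc res, x, lt
- // since CINC (an alias of CSINC) adds one exactly when the condition holds.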
- static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
- assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
- SDValue LHS = Op->getOperand(0);
- SDValue RHS = Op->getOperand(1);
- SetCCInfoAndKind InfoAndKind;
- // If both operands are a SET_CC, then we don't want to perform this
- // folding and create another csel as this results in more instructions
- // (and higher register usage).
- if (isSetCCOrZExtSetCC(LHS, InfoAndKind) &&
- isSetCCOrZExtSetCC(RHS, InfoAndKind))
- return SDValue();
- // If neither operand is a SET_CC, give up.
- if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
- std::swap(LHS, RHS);
- if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
- return SDValue();
- }
- // FIXME: This could be generalized to work for FP comparisons.
- EVT CmpVT = InfoAndKind.IsAArch64
- ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
- : InfoAndKind.Info.Generic.Opnd0->getValueType();
- if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
- return SDValue();
- SDValue CCVal;
- SDValue Cmp;
- SDLoc dl(Op);
- if (InfoAndKind.IsAArch64) {
- CCVal = DAG.getConstant(
- AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
- MVT::i32);
- Cmp = *InfoAndKind.Info.AArch64.Cmp;
- } else
- Cmp = getAArch64Cmp(
- *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1,
- ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG,
- dl);
- EVT VT = Op->getValueType(0);
- LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
- return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
- }
- // ADD(UADDV a, UADDV b) --> UADDV(ADD a, b)
- static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- // Only scalar integer and vector types.
- if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger())
- return SDValue();
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT)
- return SDValue();
- auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
- auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
- if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isZero())
- return SDValue();
- SDValue Op1 = LHS->getOperand(0);
- SDValue Op2 = RHS->getOperand(0);
- EVT OpVT1 = Op1.getValueType();
- EVT OpVT2 = Op2.getValueType();
- if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 ||
- Op2.getOpcode() != AArch64ISD::UADDV ||
- OpVT1.getVectorElementType() != VT)
- return SDValue();
- SDValue Val1 = Op1.getOperand(0);
- SDValue Val2 = Op2.getOperand(0);
- EVT ValVT = Val1->getValueType(0);
- SDLoc DL(N);
- SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
- DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal),
- DAG.getConstant(0, DL, MVT::i64));
- }
- /// Perform the scalar expression combine in the form of:
- /// CSEL(c, 1, cc) + b => CSINC(b+c, b, cc)
- /// CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
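- /// Sanity check of the first identity (illustrative): CSEL(c, 1, cc) is c
- /// when cc holds and 1 otherwise, so adding b gives b + c or b + 1, which is
- /// exactly CSINC(b+c, b, cc).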
- static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- if (!VT.isScalarInteger() || N->getOpcode() != ISD::ADD)
- return SDValue();
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- // Handle commutativity.
- if (LHS.getOpcode() != AArch64ISD::CSEL &&
- LHS.getOpcode() != AArch64ISD::CSNEG) {
- std::swap(LHS, RHS);
- if (LHS.getOpcode() != AArch64ISD::CSEL &&
- LHS.getOpcode() != AArch64ISD::CSNEG) {
- return SDValue();
- }
- }
- if (!LHS.hasOneUse())
- return SDValue();
- AArch64CC::CondCode AArch64CC =
- static_cast<AArch64CC::CondCode>(LHS.getConstantOperandVal(2));
- // The CSEL should include a constant one operand, and the CSNEG should
- // include a one or negative-one operand.
- ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0));
- ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
- if (!CTVal || !CFVal)
- return SDValue();
- if (!(LHS.getOpcode() == AArch64ISD::CSEL &&
- (CTVal->isOne() || CFVal->isOne())) &&
- !(LHS.getOpcode() == AArch64ISD::CSNEG &&
- (CTVal->isOne() || CFVal->isAllOnes())))
- return SDValue();
- // Switch CSEL(1, c, cc) to CSEL(c, 1, !cc)
- if (LHS.getOpcode() == AArch64ISD::CSEL && CTVal->isOne() &&
- !CFVal->isOne()) {
- std::swap(CTVal, CFVal);
- AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
- }
- SDLoc DL(N);
- // Switch CSNEG(1, c, cc) to CSNEG(-c, -1, !cc)
- if (LHS.getOpcode() == AArch64ISD::CSNEG && CTVal->isOne() &&
- !CFVal->isAllOnes()) {
- APInt C = -1 * CFVal->getAPIntValue();
- CTVal = cast<ConstantSDNode>(DAG.getConstant(C, DL, VT));
- CFVal = cast<ConstantSDNode>(DAG.getAllOnesConstant(DL, VT));
- AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
- }
- // The transform might not be beneficial for larger constants, as the
- // immediate needs to be materialized in a register.
- APInt ADDC = CTVal->getAPIntValue();
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (!TLI.isLegalAddImmediate(ADDC.getSExtValue()))
- return SDValue();
- assert(((LHS.getOpcode() == AArch64ISD::CSEL && CFVal->isOne()) ||
- (LHS.getOpcode() == AArch64ISD::CSNEG && CFVal->isAllOnes())) &&
- "Unexpected constant value");
- SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0));
- SDValue CCVal = DAG.getConstant(AArch64CC, DL, MVT::i32);
- SDValue Cmp = LHS.getOperand(3);
- return DAG.getNode(AArch64ISD::CSINC, DL, VT, NewNode, RHS, CCVal, Cmp);
- }
- // ADD(UDOT(zero, x, y), A) --> UDOT(A, x, y)
- static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- if (N->getOpcode() != ISD::ADD)
- return SDValue();
- SDValue Dot = N->getOperand(0);
- SDValue A = N->getOperand(1);
- // Handle commutativity
- auto isZeroDot = [](SDValue Dot) {
- return (Dot.getOpcode() == AArch64ISD::UDOT ||
- Dot.getOpcode() == AArch64ISD::SDOT) &&
- isZerosVector(Dot.getOperand(0).getNode());
- };
- if (!isZeroDot(Dot))
- std::swap(Dot, A);
- if (!isZeroDot(Dot))
- return SDValue();
- return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
- Dot.getOperand(2));
- }
- static bool isNegatedInteger(SDValue Op) {
- return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
- }
- static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- SDValue Zero = DAG.getConstant(0, DL, VT);
- return DAG.getNode(ISD::SUB, DL, VT, Zero, Op);
- }
- // Try to fold
- //
- // (neg (csel X, Y)) -> (csel (neg X), (neg Y))
- //
- // The folding helps csel to be matched with csneg without generating
- // redundant neg instruction, which includes negation of the csel expansion
- // of abs node lowered by lowerABS.
- static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {
- if (!isNegatedInteger(SDValue(N, 0)))
- return SDValue();
- SDValue CSel = N->getOperand(1);
- if (CSel.getOpcode() != AArch64ISD::CSEL || !CSel->hasOneUse())
- return SDValue();
- SDValue N0 = CSel.getOperand(0);
- SDValue N1 = CSel.getOperand(1);
- // If neither of them is a negation, the fold is not worthwhile, as it
- // would introduce two additional negations while removing only one.
- if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
- return SDValue();
- SDValue N0N = getNegatedInteger(N0, DAG);
- SDValue N1N = getNegatedInteger(N1, DAG);
- SDLoc DL(N);
- EVT VT = CSel.getValueType();
- return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0N, N1N, CSel.getOperand(2),
- CSel.getOperand(3));
- }
- // The basic add/sub long vector instructions have variants with "2" on the end
- // which act on the high-half of their inputs. They are normally matched by
- // patterns like:
- //
- // (add (zeroext (extract_high LHS)),
- // (zeroext (extract_high RHS)))
- // -> uaddl2 vD, vN, vM
- //
- // However, if one of the extracts is something like a duplicate, this
- // instruction can still be used profitably. This function puts the DAG into a
- // more appropriate form for those patterns to trigger.
- static SDValue performAddSubLongCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- MVT VT = N->getSimpleValueType(0);
- if (!VT.is128BitVector()) {
- if (N->getOpcode() == ISD::ADD)
- return performSetccAddFolding(N, DAG);
- return SDValue();
- }
- // Make sure both branches are extended in the same way.
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
- LHS.getOpcode() != ISD::SIGN_EXTEND) ||
- LHS.getOpcode() != RHS.getOpcode())
- return SDValue();
- unsigned ExtType = LHS.getOpcode();
- // The transform is only worth doing if at least one of the inputs is
- // already an extract_high, but we don't know which one it'll be, so we
- // have to try both.
- if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
- RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
- if (!RHS.getNode())
- return SDValue();
- RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
- } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
- LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
- if (!LHS.getNode())
- return SDValue();
- LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
- }
- return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
- }
- static bool isCMP(SDValue Op) {
- return Op.getOpcode() == AArch64ISD::SUBS &&
- !Op.getNode()->hasAnyUseOfValue(0);
- }
- // (CSEL 1 0 CC Cond) => CC
- // (CSEL 0 1 CC Cond) => !CC
- static std::optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
- if (Op.getOpcode() != AArch64ISD::CSEL)
- return std::nullopt;
- auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
- if (CC == AArch64CC::AL || CC == AArch64CC::NV)
- return std::nullopt;
- SDValue OpLHS = Op.getOperand(0);
- SDValue OpRHS = Op.getOperand(1);
- if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
- return CC;
- if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
- return getInvertedCondCode(CC);
- return std::nullopt;
- }
- // (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
- // (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
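- //
- // (Sketch) These patterns arise when a carry that is already in NZCV has been
- // materialised with a cset and then re-checked with a compare; the fold feeds
- // the original flags straight into the adc/sbc instead.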
- static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
- SDValue CmpOp = Op->getOperand(2);
- if (!isCMP(CmpOp))
- return SDValue();
- if (IsAdd) {
- if (!isOneConstant(CmpOp.getOperand(1)))
- return SDValue();
- } else {
- if (!isNullConstant(CmpOp.getOperand(0)))
- return SDValue();
- }
- SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1);
- auto CC = getCSETCondCode(CsetOp);
- if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
- return SDValue();
- return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
- Op->getOperand(0), Op->getOperand(1),
- CsetOp.getOperand(3));
- }
- // (ADC x 0 cond) => (CINC x HS cond)
- static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- SDValue Cond = N->getOperand(2);
- if (!isNullConstant(RHS))
- return SDValue();
- EVT VT = N->getValueType(0);
- SDLoc DL(N);
- // (CINC x cc cond) <=> (CSINC x x !cc cond)
- SDValue CC = DAG.getConstant(AArch64CC::LO, DL, MVT::i32);
- return DAG.getNode(AArch64ISD::CSINC, DL, VT, LHS, LHS, CC, Cond);
- }
- // Transform vector add(zext i8 to i32, zext i8 to i32)
- // into sext(add(zext(i8 to i16), zext(i8 to i16)) to i32)
- // This allows extra uses of saddl/uaddl at the lower vector widths, and
- // fewer extends.
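- // For example (sketch), for v8i32:
- //   (add (zext v8i8 a), (zext v8i8 b))
- // becomes
- //   (sext (add (zext a to v8i16), (zext b to v8i16)) to v8i32)
- // so the add can use uaddl/uaddl2 at the v8i16 width.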
- static SDValue performVectorAddSubExtCombine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- if (!VT.isFixedLengthVector() || VT.getSizeInBits() <= 128 ||
- (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
- N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) ||
- (N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
- N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND) ||
- N->getOperand(0).getOperand(0).getValueType() !=
- N->getOperand(1).getOperand(0).getValueType())
- return SDValue();
- SDValue N0 = N->getOperand(0).getOperand(0);
- SDValue N1 = N->getOperand(1).getOperand(0);
- EVT InVT = N0.getValueType();
- EVT S1 = InVT.getScalarType();
- EVT S2 = VT.getScalarType();
- if ((S2 == MVT::i32 && S1 == MVT::i8) ||
- (S2 == MVT::i64 && (S1 == MVT::i8 || S1 == MVT::i16))) {
- SDLoc DL(N);
- EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
- S2.getHalfSizedIntegerVT(*DAG.getContext()),
- VT.getVectorElementCount());
- SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0);
- SDValue NewN1 = DAG.getNode(N->getOperand(1).getOpcode(), DL, HalfVT, N1);
- SDValue NewOp = DAG.getNode(N->getOpcode(), DL, HalfVT, NewN0, NewN1);
- return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewOp);
- }
- return SDValue();
- }
- static SDValue performBuildVectorCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- // A build vector of two extracted elements is equivalent to an
- // extract subvector where the inner vector is any-extended to the
- // extract_vector_elt VT.
- // (build_vector (extract_elt_iXX_to_i32 vec Idx+0)
- // (extract_elt_iXX_to_i32 vec Idx+1))
- // => (extract_subvector (anyext_iXX_to_i32 vec) Idx)
- // For now, only consider the v2i32 case, which arises as a result of
- // legalization.
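- // For instance (sketch, with a v4i16 source):
- //   (build_vector (extract_elt V, 2), (extract_elt V, 3))
- // => (extract_subvector (any_extend V to v4i32), 2)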
- if (VT != MVT::v2i32)
- return SDValue();
- SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1);
- // Reminder, EXTRACT_VECTOR_ELT has the effect of any-extending to its VT.
- if (Elt0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- Elt1->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- // Constant index.
- isa<ConstantSDNode>(Elt0->getOperand(1)) &&
- isa<ConstantSDNode>(Elt1->getOperand(1)) &&
- // Both EXTRACT_VECTOR_ELT from same vector...
- Elt0->getOperand(0) == Elt1->getOperand(0) &&
- // ... and contiguous. First element's index +1 == second element's index.
- Elt0->getConstantOperandVal(1) + 1 == Elt1->getConstantOperandVal(1) &&
- // EXTRACT_SUBVECTOR requires that Idx be a constant multiple of
- // ResultType's known minimum vector length.
- Elt0->getConstantOperandVal(1) % VT.getVectorMinNumElements() == 0) {
- SDValue VecToExtend = Elt0->getOperand(0);
- EVT ExtVT = VecToExtend.getValueType().changeVectorElementType(MVT::i32);
- if (!DAG.getTargetLoweringInfo().isTypeLegal(ExtVT))
- return SDValue();
- SDValue SubvectorIdx = DAG.getVectorIdxConstant(Elt0->getConstantOperandVal(1), DL);
- SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, DL, ExtVT, VecToExtend);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Ext,
- SubvectorIdx);
- }
- return SDValue();
- }
- static SDValue performTruncateCombine(SDNode *N,
- SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- SDValue N0 = N->getOperand(0);
- if (VT.isFixedLengthVector() && VT.is64BitVector() && N0.hasOneUse() &&
- N0.getOpcode() == AArch64ISD::DUP) {
- SDValue Op = N0.getOperand(0);
- if (VT.getScalarType() == MVT::i32 &&
- N0.getOperand(0).getValueType().getScalarType() == MVT::i64)
- Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i32, Op);
- return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, Op);
- }
- return SDValue();
- }
- // Check whether a node is an extend or shift operand.
- static bool isExtendOrShiftOperand(SDValue N) {
- unsigned Opcode = N.getOpcode();
- if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_INREG ||
- Opcode == ISD::ZERO_EXTEND || Opcode == ISD::ANY_EXTEND) {
- EVT SrcVT;
- if (Opcode == ISD::SIGN_EXTEND_INREG)
- SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
- else
- SrcVT = N.getOperand(0).getValueType();
- return SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8;
- } else if (Opcode == ISD::AND) {
- ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
- if (!CSD)
- return false;
- uint64_t AndMask = CSD->getZExtValue();
- return AndMask == 0xff || AndMask == 0xffff || AndMask == 0xffffffff;
- } else if (Opcode == ISD::SHL || Opcode == ISD::SRL || Opcode == ISD::SRA) {
- return isa<ConstantSDNode>(N.getOperand(1));
- }
- return false;
- }
- // (N - Y) + Z --> (Z - Y) + N
- // when N is an extend or shift operand
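- // Doing so makes the extend/shift a direct operand of the final add, where it
- // can typically be folded into an ADD (extended/shifted register) form.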
- static SDValue performAddCombineSubShift(SDNode *N, SDValue SUB, SDValue Z,
- SelectionDAG &DAG) {
- auto IsOneUseExtend = [](SDValue N) {
- return N.hasOneUse() && isExtendOrShiftOperand(N);
- };
- // DAGCombiner will revert the combination when Z is a constant, causing an
- // infinite loop, so don't perform the combination in that case.
- // Likewise, if Z is itself a one-use extend or shift, the optimization would
- // keep re-triggering and loop forever, so skip it too.
- if (isa<ConstantSDNode>(Z) || IsOneUseExtend(Z))
- return SDValue();
- if (SUB.getOpcode() != ISD::SUB || !SUB.hasOneUse())
- return SDValue();
- SDValue Shift = SUB.getOperand(0);
- if (!IsOneUseExtend(Shift))
- return SDValue();
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- SDValue Y = SUB.getOperand(1);
- SDValue NewSub = DAG.getNode(ISD::SUB, DL, VT, Z, Y);
- return DAG.getNode(ISD::ADD, DL, VT, NewSub, Shift);
- }
- static SDValue performAddCombineForShiftedOperands(SDNode *N,
- SelectionDAG &DAG) {
- // NOTE: Swapping LHS and RHS is not done for SUB, since SUB is not
- // commutative.
- if (N->getOpcode() != ISD::ADD)
- return SDValue();
- // Bail out when value type is not one of {i32, i64}, since AArch64 ADD with
- // shifted register is only available for i32 and i64.
- EVT VT = N->getValueType(0);
- if (VT != MVT::i32 && VT != MVT::i64)
- return SDValue();
- SDLoc DL(N);
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- if (SDValue Val = performAddCombineSubShift(N, LHS, RHS, DAG))
- return Val;
- if (SDValue Val = performAddCombineSubShift(N, RHS, LHS, DAG))
- return Val;
- uint64_t LHSImm = 0, RHSImm = 0;
- // If both operands are shifted by an immediate and one operand's shift amount
- // is no greater than 4, swap LHS and RHS to put the operand with the smaller
- // shift amount on the RHS.
- //
- // On many AArch64 processors (Cortex A78, Neoverse N1/N2/V1, etc.), ADD with
- // an LSL of at most 4 has lower latency and higher throughput than ADD with
- // an LSL greater than 4. On other processors the swap is a no-op for both
- // performance and correctness.
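- //
- // For example (sketch): (add (shl x, 2), (shl y, 6)) is rewritten as
- // (add (shl y, 6), (shl x, 2)), so the cheap LSL #2 lands in the
- // shifted-register slot of the ADD.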
- if (isOpcWithIntImmediate(LHS.getNode(), ISD::SHL, LHSImm) &&
- isOpcWithIntImmediate(RHS.getNode(), ISD::SHL, RHSImm) && LHSImm <= 4 &&
- RHSImm > 4 && LHS.hasOneUse())
- return DAG.getNode(ISD::ADD, DL, VT, RHS, LHS);
- return SDValue();
- }
- static SDValue performAddSubCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- // Try to change sum of two reductions.
- if (SDValue Val = performAddUADDVCombine(N, DAG))
- return Val;
- if (SDValue Val = performAddDotCombine(N, DAG))
- return Val;
- if (SDValue Val = performAddCSelIntoCSinc(N, DAG))
- return Val;
- if (SDValue Val = performNegCSelCombine(N, DAG))
- return Val;
- if (SDValue Val = performVectorAddSubExtCombine(N, DAG))
- return Val;
- if (SDValue Val = performAddCombineForShiftedOperands(N, DAG))
- return Val;
- return performAddSubLongCombine(N, DCI, DAG);
- }
- // Massage DAGs which we can use the high-half "long" operations on into
- // something isel will recognize better. E.g.
- //
- // (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
- //   (aarch64_neon_umull (extract_high (v2i64 vec))
- //                       (extract_high (v2i64 (dup128 scalar))))
- //
- static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
- SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
- assert(LHS.getValueType().is64BitVector() &&
- RHS.getValueType().is64BitVector() &&
- "unexpected shape for long operation");
- // Either node could be a DUP, but it's not worth doing both of them (you'd
- // just as well use the non-high version) so look for a corresponding extract
- // operation on the other "wing".
- if (isEssentiallyExtractHighSubvector(LHS)) {
- RHS = tryExtendDUPToExtractHigh(RHS, DAG);
- if (!RHS.getNode())
- return SDValue();
- } else if (isEssentiallyExtractHighSubvector(RHS)) {
- LHS = tryExtendDUPToExtractHigh(LHS, DAG);
- if (!LHS.getNode())
- return SDValue();
- }
- if (IID == Intrinsic::not_intrinsic)
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
- N->getOperand(0), LHS, RHS);
- }
- static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
- MVT ElemTy = N->getSimpleValueType(0).getScalarType();
- unsigned ElemBits = ElemTy.getSizeInBits();
- int64_t ShiftAmount;
- if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
- APInt SplatValue, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
- HasAnyUndefs, ElemBits) ||
- SplatBitSize != ElemBits)
- return SDValue();
- ShiftAmount = SplatValue.getSExtValue();
- } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
- ShiftAmount = CVN->getSExtValue();
- } else
- return SDValue();
- unsigned Opcode;
- bool IsRightShift;
- switch (IID) {
- default:
- llvm_unreachable("Unknown shift intrinsic");
- case Intrinsic::aarch64_neon_sqshl:
- Opcode = AArch64ISD::SQSHL_I;
- IsRightShift = false;
- break;
- case Intrinsic::aarch64_neon_uqshl:
- Opcode = AArch64ISD::UQSHL_I;
- IsRightShift = false;
- break;
- case Intrinsic::aarch64_neon_srshl:
- Opcode = AArch64ISD::SRSHR_I;
- IsRightShift = true;
- break;
- case Intrinsic::aarch64_neon_urshl:
- Opcode = AArch64ISD::URSHR_I;
- IsRightShift = true;
- break;
- case Intrinsic::aarch64_neon_sqshlu:
- Opcode = AArch64ISD::SQSHLU_I;
- IsRightShift = false;
- break;
- case Intrinsic::aarch64_neon_sshl:
- case Intrinsic::aarch64_neon_ushl:
- // ushl/sshl perform a regular left shift for positive shift amounts, so in
- // that case we can use SHL. Below, we only replace the current node with VSHL
- // if this condition is met.
- Opcode = AArch64ISD::VSHL;
- IsRightShift = false;
- break;
- }
- if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
- SDLoc dl(N);
- return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
- DAG.getConstant(-ShiftAmount, dl, MVT::i32));
- } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
- SDLoc dl(N);
- return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
- DAG.getConstant(ShiftAmount, dl, MVT::i32));
- }
- return SDValue();
- }
- // The CRC32[BH] instructions ignore the high bits of their data operand. Since
- // the intrinsics must be legal and take an i32, this means there's almost
- // certainly going to be a zext in the DAG which we can eliminate.
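- // For example (sketch), (crc32b acc, (and data, 0xff)) has the AND dropped,
- // since crc32b only reads the low 8 bits of its data operand anyway.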
- static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
- SDValue AndN = N->getOperand(2);
- if (AndN.getOpcode() != ISD::AND)
- return SDValue();
- ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
- if (!CMask || CMask->getZExtValue() != Mask)
- return SDValue();
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
- N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
- }
- static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
- SelectionDAG &DAG) {
- SDLoc dl(N);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
- DAG.getNode(Opc, dl,
- N->getOperand(1).getSimpleValueType(),
- N->getOperand(1)),
- DAG.getConstant(0, dl, MVT::i64));
- }
- static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Op1 = N->getOperand(1);
- SDValue Op2 = N->getOperand(2);
- EVT ScalarTy = Op2.getValueType();
- if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
- ScalarTy = MVT::i32;
- // Lower index_vector(base, step) to mul(step_vector(1), splat(step)) + splat(base).
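- // For example (sketch), index(2, 3) as an nxv4i32 produces <2, 5, 8, 11, ...>,
- // i.e. <0, 1, 2, 3, ...> * splat(3) + splat(2).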
- SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
- SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
- SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
- SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
- return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
- }
- static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
- SDLoc dl(N);
- SDValue Scalar = N->getOperand(3);
- EVT ScalarTy = Scalar.getValueType();
- if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
- Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
- SDValue Passthru = N->getOperand(1);
- SDValue Pred = N->getOperand(2);
- return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
- Pred, Scalar, Passthru);
- }
- static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
- SDLoc dl(N);
- LLVMContext &Ctx = *DAG.getContext();
- EVT VT = N->getValueType(0);
- assert(VT.isScalableVector() && "Expected a scalable vector.");
- // Current lowering only supports the SVE-ACLE types.
- if (VT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
- return SDValue();
- unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
- unsigned ByteSize = VT.getSizeInBits().getKnownMinValue() / 8;
- EVT ByteVT =
- EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));
- // Convert everything to the domain of EXT (i.e. bytes).
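- // For example (sketch), for nxv4i32 each element is 4 bytes, so an element
- // index of 1 becomes a byte offset of 4.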
- SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
- SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
- SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
- DAG.getConstant(ElemSize, dl, MVT::i32));
- SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
- return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
- }
- static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- if (DCI.isBeforeLegalize())
- return SDValue();
- SDValue Comparator = N->getOperand(3);
- if (Comparator.getOpcode() == AArch64ISD::DUP ||
- Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
- unsigned IID = getIntrinsicID(N);
- EVT VT = N->getValueType(0);
- EVT CmpVT = N->getOperand(2).getValueType();
- SDValue Pred = N->getOperand(1);
- SDValue Imm;
- SDLoc DL(N);
- switch (IID) {
- default:
- llvm_unreachable("Called with wrong intrinsic!");
- break;
- // Signed comparisons
- case Intrinsic::aarch64_sve_cmpeq_wide:
- case Intrinsic::aarch64_sve_cmpne_wide:
- case Intrinsic::aarch64_sve_cmpge_wide:
- case Intrinsic::aarch64_sve_cmpgt_wide:
- case Intrinsic::aarch64_sve_cmplt_wide:
- case Intrinsic::aarch64_sve_cmple_wide: {
- if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
- int64_t ImmVal = CN->getSExtValue();
- if (ImmVal >= -16 && ImmVal <= 15)
- Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
- else
- return SDValue();
- }
- break;
- }
- // Unsigned comparisons
- case Intrinsic::aarch64_sve_cmphs_wide:
- case Intrinsic::aarch64_sve_cmphi_wide:
- case Intrinsic::aarch64_sve_cmplo_wide:
- case Intrinsic::aarch64_sve_cmpls_wide: {
- if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
- uint64_t ImmVal = CN->getZExtValue();
- if (ImmVal <= 127)
- Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
- else
- return SDValue();
- }
- break;
- }
- }
- if (!Imm)
- return SDValue();
- SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
- N->getOperand(2), Splat, DAG.getCondCode(CC));
- }
- return SDValue();
- }
- static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
- AArch64CC::CondCode Cond) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- SDLoc DL(Op);
- assert(Op.getValueType().isScalableVector() &&
- TLI.isTypeLegal(Op.getValueType()) &&
- "Expected legal scalable vector type!");
- assert(Op.getValueType() == Pg.getValueType() &&
- "Expected same type for PTEST operands");
- // Ensure target-specific opcodes use a legal type.
- EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- SDValue TVal = DAG.getConstant(1, DL, OutVT);
- SDValue FVal = DAG.getConstant(0, DL, OutVT);
- // Ensure operands have type nxv16i1.
- if (Op.getValueType() != MVT::nxv16i1) {
- if ((Cond == AArch64CC::ANY_ACTIVE || Cond == AArch64CC::NONE_ACTIVE) &&
- isZeroingInactiveLanes(Op))
- Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pg);
- else
- Pg = getSVEPredicateBitCast(MVT::nxv16i1, Pg, DAG);
- Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Op);
- }
- // Set condition code (CC) flags.
- SDValue Test = DAG.getNode(
- Cond == AArch64CC::ANY_ACTIVE ? AArch64ISD::PTEST_ANY : AArch64ISD::PTEST,
- DL, MVT::Other, Pg, Op);
- // Convert CC to integer based on requested condition.
- // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare.
- SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32);
- SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test);
- return DAG.getZExtOrTrunc(Res, DL, VT);
- }
- static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
- SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Pred = N->getOperand(1);
- SDValue VecToReduce = N->getOperand(2);
- // NOTE: The integer reduction's result type is not always linked to the
- // operand's element type so we construct it from the intrinsic's result type.
- EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0));
- SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
- // SVE reductions set the whole vector register with the first element
- // containing the reduction result, which we'll now extract.
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
- Zero);
- }
- static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
- SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Pred = N->getOperand(1);
- SDValue VecToReduce = N->getOperand(2);
- EVT ReduceVT = VecToReduce.getValueType();
- SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
- // SVE reductions set the whole vector register with the first element
- // containing the reduction result, which we'll now extract.
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
- Zero);
- }
- static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
- SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Pred = N->getOperand(1);
- SDValue InitVal = N->getOperand(2);
- SDValue VecToReduce = N->getOperand(3);
- EVT ReduceVT = VecToReduce.getValueType();
- // Ordered reductions use the first lane of the result vector as the
- // reduction's initial value.
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT,
- DAG.getUNDEF(ReduceVT), InitVal, Zero);
- SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce);
- // SVE reductions set the whole vector register with the first element
- // containing the reduction result, which we'll now extract.
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
- Zero);
- }
- // If a merged operation has no inactive lanes we can relax it to a predicated
- // or unpredicated operation, which potentially allows better isel (perhaps
- // using immediate forms) or relaxing register reuse requirements.
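- //
- // For example (sketch), (aarch64_sve_add (ptrue all), x, y) can become a plain
- // ISD::ADD of x and y, while (aarch64_sve_mul (ptrue all), x, y) becomes
- // MUL_PRED, since there are no inactive lanes whose values must be preserved.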
- static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
- SelectionDAG &DAG, bool UnpredOp = false,
- bool SwapOperands = false) {
- assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!");
- assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!");
- SDValue Pg = N->getOperand(1);
- SDValue Op1 = N->getOperand(SwapOperands ? 3 : 2);
- SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3);
- // ISD way to specify an all active predicate.
- if (isAllActivePredicate(DAG, Pg)) {
- if (UnpredOp)
- return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2);
- return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2);
- }
- // FUTURE: SplatVector(true)
- return SDValue();
- }
- static SDValue performIntrinsicCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- SelectionDAG &DAG = DCI.DAG;
- unsigned IID = getIntrinsicID(N);
- switch (IID) {
- default:
- break;
- case Intrinsic::get_active_lane_mask: {
- SDValue Res = SDValue();
- EVT VT = N->getValueType(0);
- if (VT.isFixedLengthVector()) {
- // We can use the SVE whilelo instruction to lower this intrinsic by
- // creating the appropriate sequence of scalable vector operations and
- // then extracting a fixed-width subvector from the scalable vector.
- SDLoc DL(N);
- SDValue ID =
- DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
- EVT WhileVT = EVT::getVectorVT(
- *DAG.getContext(), MVT::i1,
- ElementCount::getScalable(VT.getVectorNumElements()));
- // Get promoted scalable vector VT, i.e. promote nxv4i1 -> nxv4i32.
- EVT PromVT = getPromotedVTForPredicate(WhileVT);
- // Get the fixed-width equivalent of PromVT for extraction.
- EVT ExtVT =
- EVT::getVectorVT(*DAG.getContext(), PromVT.getVectorElementType(),
- VT.getVectorElementCount());
- Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
- N->getOperand(1), N->getOperand(2));
- Res = DAG.getNode(ISD::SIGN_EXTEND, DL, PromVT, Res);
- Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, Res,
- DAG.getConstant(0, DL, MVT::i64));
- Res = DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
- }
- return Res;
- }
- case Intrinsic::aarch64_neon_vcvtfxs2fp:
- case Intrinsic::aarch64_neon_vcvtfxu2fp:
- return tryCombineFixedPointConvert(N, DCI, DAG);
- case Intrinsic::aarch64_neon_saddv:
- return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG);
- case Intrinsic::aarch64_neon_uaddv:
- return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG);
- case Intrinsic::aarch64_neon_sminv:
- return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG);
- case Intrinsic::aarch64_neon_uminv:
- return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG);
- case Intrinsic::aarch64_neon_smaxv:
- return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG);
- case Intrinsic::aarch64_neon_umaxv:
- return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG);
- case Intrinsic::aarch64_neon_fmax:
- return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_fmin:
- return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_fmaxnm:
- return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_fminnm:
- return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_smull:
- return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_umull:
- return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_pmull:
- return DAG.getNode(AArch64ISD::PMULL, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_neon_sqdmull:
- return tryCombineLongOpWithDup(IID, N, DCI, DAG);
- case Intrinsic::aarch64_neon_sqshl:
- case Intrinsic::aarch64_neon_uqshl:
- case Intrinsic::aarch64_neon_sqshlu:
- case Intrinsic::aarch64_neon_srshl:
- case Intrinsic::aarch64_neon_urshl:
- case Intrinsic::aarch64_neon_sshl:
- case Intrinsic::aarch64_neon_ushl:
- return tryCombineShiftImm(IID, N, DAG);
- case Intrinsic::aarch64_neon_rshrn: {
- EVT VT = N->getOperand(1).getValueType();
- SDLoc DL(N);
- SDValue Imm =
- DAG.getConstant(1LLU << (N->getConstantOperandVal(2) - 1), DL, VT);
- SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N->getOperand(1), Imm);
- SDValue Sht =
- DAG.getNode(ISD::SRL, DL, VT, Add,
- DAG.getConstant(N->getConstantOperandVal(2), DL, VT));
- return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), Sht);
- }
- case Intrinsic::aarch64_crc32b:
- case Intrinsic::aarch64_crc32cb:
- return tryCombineCRC32(0xff, N, DAG);
- case Intrinsic::aarch64_crc32h:
- case Intrinsic::aarch64_crc32ch:
- return tryCombineCRC32(0xffff, N, DAG);
- case Intrinsic::aarch64_sve_saddv:
- // There is no i64 version of SADDV because the sign is irrelevant.
- if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64)
- return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
- else
- return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG);
- case Intrinsic::aarch64_sve_uaddv:
- return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
- case Intrinsic::aarch64_sve_smaxv:
- return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG);
- case Intrinsic::aarch64_sve_umaxv:
- return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG);
- case Intrinsic::aarch64_sve_sminv:
- return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG);
- case Intrinsic::aarch64_sve_uminv:
- return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG);
- case Intrinsic::aarch64_sve_orv:
- return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG);
- case Intrinsic::aarch64_sve_eorv:
- return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG);
- case Intrinsic::aarch64_sve_andv:
- return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG);
- case Intrinsic::aarch64_sve_index:
- return LowerSVEIntrinsicIndex(N, DAG);
- case Intrinsic::aarch64_sve_dup:
- return LowerSVEIntrinsicDUP(N, DAG);
- case Intrinsic::aarch64_sve_dup_x:
- return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0),
- N->getOperand(1));
- case Intrinsic::aarch64_sve_ext:
- return LowerSVEIntrinsicEXT(N, DAG);
- case Intrinsic::aarch64_sve_mul:
- return convertMergedOpToPredOp(N, AArch64ISD::MUL_PRED, DAG);
- case Intrinsic::aarch64_sve_mul_u:
- return DAG.getNode(AArch64ISD::MUL_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_smulh:
- return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG);
- case Intrinsic::aarch64_sve_smulh_u:
- return DAG.getNode(AArch64ISD::MULHS_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_umulh:
- return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG);
- case Intrinsic::aarch64_sve_umulh_u:
- return DAG.getNode(AArch64ISD::MULHU_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_smin:
- return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG);
- case Intrinsic::aarch64_sve_smin_u:
- return DAG.getNode(AArch64ISD::SMIN_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_umin:
- return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG);
- case Intrinsic::aarch64_sve_umin_u:
- return DAG.getNode(AArch64ISD::UMIN_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_smax:
- return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG);
- case Intrinsic::aarch64_sve_smax_u:
- return DAG.getNode(AArch64ISD::SMAX_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_umax:
- return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG);
- case Intrinsic::aarch64_sve_umax_u:
- return DAG.getNode(AArch64ISD::UMAX_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_lsl:
- return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG);
- case Intrinsic::aarch64_sve_lsl_u:
- return DAG.getNode(AArch64ISD::SHL_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_lsr:
- return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG);
- case Intrinsic::aarch64_sve_lsr_u:
- return DAG.getNode(AArch64ISD::SRL_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_asr:
- return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG);
- case Intrinsic::aarch64_sve_asr_u:
- return DAG.getNode(AArch64ISD::SRA_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_fadd:
- return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG);
- case Intrinsic::aarch64_sve_fsub:
- return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG);
- case Intrinsic::aarch64_sve_fmul:
- return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG);
- case Intrinsic::aarch64_sve_add:
- return convertMergedOpToPredOp(N, ISD::ADD, DAG, true);
- case Intrinsic::aarch64_sve_add_u:
- return DAG.getNode(ISD::ADD, SDLoc(N), N->getValueType(0), N->getOperand(2),
- N->getOperand(3));
- case Intrinsic::aarch64_sve_sub:
- return convertMergedOpToPredOp(N, ISD::SUB, DAG, true);
- case Intrinsic::aarch64_sve_sub_u:
- return DAG.getNode(ISD::SUB, SDLoc(N), N->getValueType(0), N->getOperand(2),
- N->getOperand(3));
- case Intrinsic::aarch64_sve_subr:
- return convertMergedOpToPredOp(N, ISD::SUB, DAG, true, true);
- case Intrinsic::aarch64_sve_and:
- return convertMergedOpToPredOp(N, ISD::AND, DAG, true);
- case Intrinsic::aarch64_sve_bic:
- return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true);
- case Intrinsic::aarch64_sve_eor:
- return convertMergedOpToPredOp(N, ISD::XOR, DAG, true);
- case Intrinsic::aarch64_sve_orr:
- return convertMergedOpToPredOp(N, ISD::OR, DAG, true);
- case Intrinsic::aarch64_sve_sabd:
- return convertMergedOpToPredOp(N, ISD::ABDS, DAG, true);
- case Intrinsic::aarch64_sve_sabd_u:
- return DAG.getNode(ISD::ABDS, SDLoc(N), N->getValueType(0),
- N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_uabd:
- return convertMergedOpToPredOp(N, ISD::ABDU, DAG, true);
- case Intrinsic::aarch64_sve_uabd_u:
- return DAG.getNode(ISD::ABDU, SDLoc(N), N->getValueType(0),
- N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_sdiv_u:
- return DAG.getNode(AArch64ISD::SDIV_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_udiv_u:
- return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_sqadd:
- return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true);
- case Intrinsic::aarch64_sve_sqsub:
- return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true);
- case Intrinsic::aarch64_sve_uqadd:
- return convertMergedOpToPredOp(N, ISD::UADDSAT, DAG, true);
- case Intrinsic::aarch64_sve_uqsub:
- return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true);
- case Intrinsic::aarch64_sve_sqadd_x:
- return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_sve_sqsub_x:
- return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_sve_uqadd_x:
- return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_sve_uqsub_x:
- return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2));
- case Intrinsic::aarch64_sve_asrd:
- return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_cmphs:
- if (!N->getOperand(2).getValueType().isFloatingPoint())
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETUGE));
- break;
- case Intrinsic::aarch64_sve_cmphi:
- if (!N->getOperand(2).getValueType().isFloatingPoint())
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETUGT));
- break;
- case Intrinsic::aarch64_sve_fcmpge:
- case Intrinsic::aarch64_sve_cmpge:
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETGE));
- break;
- case Intrinsic::aarch64_sve_fcmpgt:
- case Intrinsic::aarch64_sve_cmpgt:
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETGT));
- break;
- case Intrinsic::aarch64_sve_fcmpeq:
- case Intrinsic::aarch64_sve_cmpeq:
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETEQ));
- break;
- case Intrinsic::aarch64_sve_fcmpne:
- case Intrinsic::aarch64_sve_cmpne:
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETNE));
- break;
- case Intrinsic::aarch64_sve_fcmpuo:
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
- N->getValueType(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), DAG.getCondCode(ISD::SETUO));
- break;
- case Intrinsic::aarch64_sve_fadda:
- return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
- case Intrinsic::aarch64_sve_faddv:
- return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG);
- case Intrinsic::aarch64_sve_fmaxnmv:
- return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG);
- case Intrinsic::aarch64_sve_fmaxv:
- return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG);
- case Intrinsic::aarch64_sve_fminnmv:
- return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG);
- case Intrinsic::aarch64_sve_fminv:
- return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG);
- case Intrinsic::aarch64_sve_sel:
- return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
- N->getOperand(1), N->getOperand(2), N->getOperand(3));
- case Intrinsic::aarch64_sve_cmpeq_wide:
- return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG);
- case Intrinsic::aarch64_sve_cmpne_wide:
- return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG);
- case Intrinsic::aarch64_sve_cmpge_wide:
- return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG);
- case Intrinsic::aarch64_sve_cmpgt_wide:
- return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG);
- case Intrinsic::aarch64_sve_cmplt_wide:
- return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG);
- case Intrinsic::aarch64_sve_cmple_wide:
- return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG);
- case Intrinsic::aarch64_sve_cmphs_wide:
- return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG);
- case Intrinsic::aarch64_sve_cmphi_wide:
- return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG);
- case Intrinsic::aarch64_sve_cmplo_wide:
- return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, DAG);
- case Intrinsic::aarch64_sve_cmpls_wide:
- return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG);
- case Intrinsic::aarch64_sve_ptest_any:
- return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
- AArch64CC::ANY_ACTIVE);
- case Intrinsic::aarch64_sve_ptest_first:
- return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
- AArch64CC::FIRST_ACTIVE);
- case Intrinsic::aarch64_sve_ptest_last:
- return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
- AArch64CC::LAST_ACTIVE);
- }
- return SDValue();
- }
- static bool isCheapToExtend(const SDValue &N) {
- unsigned OC = N->getOpcode();
- return OC == ISD::LOAD || OC == ISD::MLOAD ||
- ISD::isConstantSplatVectorAllZeros(N.getNode());
- }
- static SDValue
- performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- // If we have (sext (setcc A B)) and A and B are cheap to extend,
- // we can move the sext into the arguments and have the same result. For
- // example, if A and B are both loads, we can make those extending loads and
- // avoid an extra instruction. This pattern appears often in VLS code
- // generation where the inputs to the setcc have a different size to the
- // instruction that wants to use the result of the setcc.
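- // (Sketch) Signed comparisons widen the operands with sign_extend and all
- // other comparisons with zero_extend, then the setcc is redone at the wider
- // type.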
- assert(N->getOpcode() == ISD::SIGN_EXTEND &&
- N->getOperand(0)->getOpcode() == ISD::SETCC);
- const SDValue SetCC = N->getOperand(0);
- const SDValue CCOp0 = SetCC.getOperand(0);
- const SDValue CCOp1 = SetCC.getOperand(1);
- if (!CCOp0->getValueType(0).isInteger() ||
- !CCOp1->getValueType(0).isInteger())
- return SDValue();
- ISD::CondCode Code =
- cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get();
- ISD::NodeType ExtType =
- isSignedIntSetCC(Code) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- if (isCheapToExtend(SetCC.getOperand(0)) &&
- isCheapToExtend(SetCC.getOperand(1))) {
- const SDValue Ext1 =
- DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0);
- const SDValue Ext2 =
- DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1);
- return DAG.getSetCC(
- SDLoc(SetCC), N->getValueType(0), Ext1, Ext2,
- cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get());
- }
- return SDValue();
- }
- static SDValue performExtendCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then
- // we can convert that DUP into another extract_high (of a bigger DUP), which
- // helps the backend to decide that an sabdl2 would be useful, saving a real
- // extract_high operation.
- if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
- (N->getOperand(0).getOpcode() == ISD::ABDU ||
- N->getOperand(0).getOpcode() == ISD::ABDS)) {
- SDNode *ABDNode = N->getOperand(0).getNode();
- SDValue NewABD =
- tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG);
- if (!NewABD.getNode())
- return SDValue();
- return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD);
- }
- if (N->getValueType(0).isFixedLengthVector() &&
- N->getOpcode() == ISD::SIGN_EXTEND &&
- N->getOperand(0)->getOpcode() == ISD::SETCC)
- return performSignExtendSetCCCombine(N, DCI, DAG);
- return SDValue();
- }
- static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
- SDValue SplatVal, unsigned NumVecElts) {
- assert(!St.isTruncatingStore() && "cannot split truncating vector store");
- Align OrigAlignment = St.getAlign();
- unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
- // Create scalar stores. This is at least as good as the code sequence for a
- // split unaligned store which is a dup.s, ext.b, and two stores.
- // Most of the time the three stores should be replaced by store pair
- // instructions (stp).
- SDLoc DL(&St);
- SDValue BasePtr = St.getBasePtr();
- uint64_t BaseOffset = 0;
- const MachinePointerInfo &PtrInfo = St.getPointerInfo();
- SDValue NewST1 =
- DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
- OrigAlignment, St.getMemOperand()->getFlags());
- // As this is in ISel, we will not merge this add, which may degrade results.
- if (BasePtr->getOpcode() == ISD::ADD &&
- isa<ConstantSDNode>(BasePtr->getOperand(1))) {
- BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
- BasePtr = BasePtr->getOperand(0);
- }
- unsigned Offset = EltOffset;
- while (--NumVecElts) {
- Align Alignment = commonAlignment(OrigAlignment, Offset);
- SDValue OffsetPtr =
- DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
- DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
- NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
- PtrInfo.getWithOffset(Offset), Alignment,
- St.getMemOperand()->getFlags());
- Offset += EltOffset;
- }
- return NewST1;
- }
- // Returns an SVE type that ContentTy can be trivially sign or zero extended
- // into.
- static MVT getSVEContainerType(EVT ContentTy) {
- assert(ContentTy.isSimple() && "No SVE containers for extended types");
- switch (ContentTy.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("No known SVE container for this MVT type");
- case MVT::nxv2i8:
- case MVT::nxv2i16:
- case MVT::nxv2i32:
- case MVT::nxv2i64:
- case MVT::nxv2f32:
- case MVT::nxv2f64:
- return MVT::nxv2i64;
- case MVT::nxv4i8:
- case MVT::nxv4i16:
- case MVT::nxv4i32:
- case MVT::nxv4f32:
- return MVT::nxv4i32;
- case MVT::nxv8i8:
- case MVT::nxv8i16:
- case MVT::nxv8f16:
- case MVT::nxv8bf16:
- return MVT::nxv8i16;
- case MVT::nxv16i8:
- return MVT::nxv16i8;
- }
- }
- static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- if (VT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock)
- return SDValue();
- EVT ContainerVT = VT;
- if (ContainerVT.isInteger())
- ContainerVT = getSVEContainerType(ContainerVT);
- SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
- SDValue Ops[] = { N->getOperand(0), // Chain
- N->getOperand(2), // Pg
- N->getOperand(3), // Base
- DAG.getValueType(VT) };
- SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
- SDValue LoadChain = SDValue(Load.getNode(), 1);
- if (ContainerVT.isInteger() && (VT != ContainerVT))
- Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
- return DAG.getMergeValues({ Load, LoadChain }, DL);
- }
- static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- EVT PtrTy = N->getOperand(3).getValueType();
- EVT LoadVT = VT;
- if (VT.isFloatingPoint())
- LoadVT = VT.changeTypeToInteger();
- auto *MINode = cast<MemIntrinsicSDNode>(N);
- SDValue PassThru = DAG.getConstant(0, DL, LoadVT);
- SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(),
- MINode->getOperand(3), DAG.getUNDEF(PtrTy),
- MINode->getOperand(2), PassThru,
- MINode->getMemoryVT(), MINode->getMemOperand(),
- ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
- if (VT.isFloatingPoint()) {
- SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) };
- return DAG.getMergeValues(Ops, DL);
- }
- return L;
- }
- template <unsigned Opcode>
- static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
- static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO ||
- Opcode == AArch64ISD::LD1RO_MERGE_ZERO,
- "Unsupported opcode.");
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- EVT LoadVT = VT;
- if (VT.isFloatingPoint())
- LoadVT = VT.changeTypeToInteger();
- SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
- SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
- SDValue LoadChain = SDValue(Load.getNode(), 1);
- if (VT.isFloatingPoint())
- Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
- return DAG.getMergeValues({Load, LoadChain}, DL);
- }
- static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Data = N->getOperand(2);
- EVT DataVT = Data.getValueType();
- EVT HwSrcVt = getSVEContainerType(DataVT);
- SDValue InputVT = DAG.getValueType(DataVT);
- if (DataVT.isFloatingPoint())
- InputVT = DAG.getValueType(HwSrcVt);
- SDValue SrcNew;
- if (Data.getValueType().isFloatingPoint())
- SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data);
- else
- SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data);
- SDValue Ops[] = { N->getOperand(0), // Chain
- SrcNew,
- N->getOperand(4), // Base
- N->getOperand(3), // Pg
- InputVT
- };
- return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops);
- }
- static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Data = N->getOperand(2);
- EVT DataVT = Data.getValueType();
- EVT PtrTy = N->getOperand(4).getValueType();
- if (DataVT.isFloatingPoint())
- Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data);
- auto *MINode = cast<MemIntrinsicSDNode>(N);
- return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4),
- DAG.getUNDEF(PtrTy), MINode->getOperand(3),
- MINode->getMemoryVT(), MINode->getMemOperand(),
- ISD::UNINDEXED, false, false);
- }
- /// Replace a splat of zeros to a vector store by scalar stores of WZR/XZR. The
- /// load store optimizer pass will merge them to store pair stores. This should
- /// be better than a movi to create the vector zero followed by a vector store
- /// if the zero constant is not re-used, since one instruction and one register
- /// live range will be removed.
- ///
- /// For example, the final generated code should be:
- ///
- /// stp xzr, xzr, [x0]
- ///
- /// instead of:
- ///
- /// movi v0.2d, #0
- /// str q0, [x0]
- ///
- static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
- SDValue StVal = St.getValue();
- EVT VT = StVal.getValueType();
- // Avoid scalarizing zero splat stores for scalable vectors.
- if (VT.isScalableVector())
- return SDValue();
- // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or
- // 2, 3 or 4 i32 elements.
- int NumVecElts = VT.getVectorNumElements();
- if (!(((NumVecElts == 2 || NumVecElts == 3) &&
- VT.getVectorElementType().getSizeInBits() == 64) ||
- ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&
- VT.getVectorElementType().getSizeInBits() == 32)))
- return SDValue();
- if (StVal.getOpcode() != ISD::BUILD_VECTOR)
- return SDValue();
- // If the zero constant has more than one use then the vector store could be
- // better since the constant mov will be amortized and stp q instructions
- // should be able to be formed.
- if (!StVal.hasOneUse())
- return SDValue();
- // If the store is truncating then it's going down to i16 or smaller, which
- // means it can be implemented in a single store anyway.
- if (St.isTruncatingStore())
- return SDValue();
- // If the immediate offset of the address operand is too large for the stp
- // instruction, then bail out.
- if (DAG.isBaseWithConstantOffset(St.getBasePtr())) {
- int64_t Offset = St.getBasePtr()->getConstantOperandVal(1);
- if (Offset < -512 || Offset > 504)
- return SDValue();
- }
- for (int I = 0; I < NumVecElts; ++I) {
- SDValue EltVal = StVal.getOperand(I);
- if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal))
- return SDValue();
- }
- // Use a CopyFromReg WZR/XZR here to prevent
- // DAGCombiner::MergeConsecutiveStores from undoing this transformation.
- SDLoc DL(&St);
- unsigned ZeroReg;
- EVT ZeroVT;
- if (VT.getVectorElementType().getSizeInBits() == 32) {
- ZeroReg = AArch64::WZR;
- ZeroVT = MVT::i32;
- } else {
- ZeroReg = AArch64::XZR;
- ZeroVT = MVT::i64;
- }
- SDValue SplatVal =
- DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT);
- return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
- }
- /// Replace a splat of a scalar to a vector store by scalar stores of the scalar
- /// value. The load store optimizer pass will merge them to store pair stores.
- /// This has better performance than a splat of the scalar followed by a split
- /// vector store. Even if the stores are not merged it is four stores vs a dup,
- /// followed by an ext.b and two stores.
- static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
- SDValue StVal = St.getValue();
- EVT VT = StVal.getValueType();
- // Don't replace floating point stores, they possibly won't be transformed to
- // stp because of the store pair suppress pass.
- if (VT.isFloatingPoint())
- return SDValue();
- // We can express a splat as store pair(s) for 2 or 4 elements.
- unsigned NumVecElts = VT.getVectorNumElements();
- if (NumVecElts != 4 && NumVecElts != 2)
- return SDValue();
- // If the store is truncating then it's going down to i16 or smaller, which
- // means it can be implemented in a single store anyway.
- if (St.isTruncatingStore())
- return SDValue();
- // Check that this is a splat.
- // Make sure that each of the relevant vector element locations are inserted
- // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32.
- std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);
- SDValue SplatVal;
- for (unsigned I = 0; I < NumVecElts; ++I) {
- // Check for insert vector elements.
- if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT)
- return SDValue();
- // Check that same value is inserted at each vector element.
- if (I == 0)
- SplatVal = StVal.getOperand(1);
- else if (StVal.getOperand(1) != SplatVal)
- return SDValue();
- // Check insert element index.
- ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2));
- if (!CIndex)
- return SDValue();
- uint64_t IndexVal = CIndex->getZExtValue();
- if (IndexVal >= NumVecElts)
- return SDValue();
- IndexNotInserted.reset(IndexVal);
- StVal = StVal.getOperand(0);
- }
- // Check that all vector element locations were inserted to.
- if (IndexNotInserted.any())
- return SDValue();
- return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
- }
- static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- StoreSDNode *S = cast<StoreSDNode>(N);
- if (S->isVolatile() || S->isIndexed())
- return SDValue();
- SDValue StVal = S->getValue();
- EVT VT = StVal.getValueType();
- if (!VT.isFixedLengthVector())
- return SDValue();
- // If we get a splat of zeros, convert this vector store to a store of
- // scalars. They will be merged into store pairs of xzr thereby removing one
- // instruction and one register.
- if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S))
- return ReplacedZeroSplat;
- // FIXME: The logic for deciding if an unaligned store should be split should
- // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be
- // a call to that function here.
- if (!Subtarget->isMisaligned128StoreSlow())
- return SDValue();
- // Don't split at -Oz.
- if (DAG.getMachineFunction().getFunction().hasMinSize())
- return SDValue();
- // Don't split v2i64 vectors. Memcpy lowering produces those and splitting
- // those up regresses performance on micro-benchmarks and olden/bh.
- if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64)
- return SDValue();
- // Split unaligned 16B stores. They are terrible for performance.
- // Don't split stores with alignment of 1 or 2. Code that uses clang vector
- // extensions can use this to mark that it does not want splitting to happen
- // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
- // eliminating alignment hazards is only 1 in 8 for alignment of 2.
- if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) ||
- S->getAlign() <= Align(2))
- return SDValue();
- // If we get a splat of a scalar convert this vector store to a store of
- // scalars. They will be merged into store pairs thereby removing two
- // instructions.
- if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S))
- return ReplacedSplat;
- SDLoc DL(S);
- // Split VT into two.
- EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
- unsigned NumElts = HalfVT.getVectorNumElements();
- SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
- DAG.getConstant(0, DL, MVT::i64));
- SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
- DAG.getConstant(NumElts, DL, MVT::i64));
- SDValue BasePtr = S->getBasePtr();
- SDValue NewST1 =
- DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
- S->getAlign(), S->getMemOperand()->getFlags());
- SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
- DAG.getConstant(8, DL, MVT::i64));
- return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
- S->getPointerInfo(), S->getAlign(),
- S->getMemOperand()->getFlags());
- }
- static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
- assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
- // splice(pg, op1, undef) -> op1
- if (N->getOperand(2).isUndef())
- return N->getOperand(1);
- return SDValue();
- }
- static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- assert((N->getOpcode() == AArch64ISD::UUNPKHI ||
- N->getOpcode() == AArch64ISD::UUNPKLO) &&
- "Unexpected Opcode!");
- // uunpklo/hi undef -> undef
- if (N->getOperand(0).isUndef())
- return DAG.getUNDEF(N->getValueType(0));
- // If this is a masked load followed by an UUNPKLO, fold this into a masked
- // extending load. We can do this even if this is already a masked
- // {z,}extload.
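- // (Sketch) For example, uunpklo(masked_load nxv16i8) can instead be emitted as
- // a masked zero-extending load that produces nxv8i16 directly, provided the
- // number of active elements implied by the ptrue pattern still fits in a
- // vector of the wider element type at the minimum SVE vector length.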
- if (N->getOperand(0).getOpcode() == ISD::MLOAD &&
- N->getOpcode() == AArch64ISD::UUNPKLO) {
- MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N->getOperand(0));
- SDValue Mask = MLD->getMask();
- SDLoc DL(N);
- if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD &&
- SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE &&
- (MLD->getPassThru()->isUndef() ||
- isZerosVector(MLD->getPassThru().getNode()))) {
- unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
- unsigned PgPattern = Mask->getConstantOperandVal(0);
- EVT VT = N->getValueType(0);
- // Ensure we can double the size of the predicate pattern
- unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
- if (NumElts &&
- NumElts * VT.getVectorElementType().getSizeInBits() <= MinSVESize) {
- Mask =
- getPTrue(DAG, DL, VT.changeVectorElementType(MVT::i1), PgPattern);
- SDValue PassThru = DAG.getConstant(0, DL, VT);
- SDValue NewLoad = DAG.getMaskedLoad(
- VT, DL, MLD->getChain(), MLD->getBasePtr(), MLD->getOffset(), Mask,
- PassThru, MLD->getMemoryVT(), MLD->getMemOperand(),
- MLD->getAddressingMode(), ISD::ZEXTLOAD);
- DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), NewLoad.getValue(1));
- return NewLoad;
- }
- }
- }
- return SDValue();
- }
- static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Op0 = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
- EVT ResVT = N->getValueType(0);
- // uzp1(x, undef) -> concat(truncate(x), undef)
- if (Op1.getOpcode() == ISD::UNDEF) {
- EVT BCVT = MVT::Other, HalfVT = MVT::Other;
- switch (ResVT.getSimpleVT().SimpleTy) {
- default:
- break;
- case MVT::v16i8:
- BCVT = MVT::v8i16;
- HalfVT = MVT::v8i8;
- break;
- case MVT::v8i16:
- BCVT = MVT::v4i32;
- HalfVT = MVT::v4i16;
- break;
- case MVT::v4i32:
- BCVT = MVT::v2i64;
- HalfVT = MVT::v2i32;
- break;
- }
- if (BCVT != MVT::Other) {
- SDValue BC = DAG.getBitcast(BCVT, Op0);
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, BC);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Trunc,
- DAG.getUNDEF(HalfVT));
- }
- }
- // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z)
- if (Op0.getOpcode() == AArch64ISD::UUNPKLO) {
- if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
- SDValue X = Op0.getOperand(0).getOperand(0);
- return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
- }
- }
- // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z)
- if (Op1.getOpcode() == AArch64ISD::UUNPKHI) {
- if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
- SDValue Z = Op1.getOperand(0).getOperand(1);
- return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
- }
- }
- // uzp1(xtn x, xtn y) -> xtn(uzp1 (x, y))
- // Only implemented on little-endian subtargets.
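- // E.g. for a v8i8 result with v8i16 truncate sources, both sources are
- // bitcast to v16i8, combined with a single UZP1, bitcast back to v8i16 and
- // truncated to v8i8.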
- bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
- if (!IsLittleEndian)
- return SDValue();
- if (ResVT != MVT::v2i32 && ResVT != MVT::v4i16 && ResVT != MVT::v8i8)
- return SDValue();
- auto getSourceOp = [](SDValue Operand) -> SDValue {
- const unsigned Opcode = Operand.getOpcode();
- if (Opcode == ISD::TRUNCATE)
- return Operand->getOperand(0);
- if (Opcode == ISD::BITCAST &&
- Operand->getOperand(0).getOpcode() == ISD::TRUNCATE)
- return Operand->getOperand(0)->getOperand(0);
- return SDValue();
- };
- SDValue SourceOp0 = getSourceOp(Op0);
- SDValue SourceOp1 = getSourceOp(Op1);
- if (!SourceOp0 || !SourceOp1)
- return SDValue();
- if (SourceOp0.getValueType() != SourceOp1.getValueType() ||
- !SourceOp0.getValueType().isSimple())
- return SDValue();
- EVT ResultTy;
- switch (SourceOp0.getSimpleValueType().SimpleTy) {
- case MVT::v2i64:
- ResultTy = MVT::v4i32;
- break;
- case MVT::v4i32:
- ResultTy = MVT::v8i16;
- break;
- case MVT::v8i16:
- ResultTy = MVT::v16i8;
- break;
- default:
- return SDValue();
- }
- SDValue UzpOp0 = DAG.getNode(ISD::BITCAST, DL, ResultTy, SourceOp0);
- SDValue UzpOp1 = DAG.getNode(ISD::BITCAST, DL, ResultTy, SourceOp1);
- SDValue UzpResult =
- DAG.getNode(AArch64ISD::UZP1, DL, UzpOp0.getValueType(), UzpOp0, UzpOp1);
- EVT BitcastResultTy;
- switch (ResVT.getSimpleVT().SimpleTy) {
- case MVT::v2i32:
- BitcastResultTy = MVT::v2i64;
- break;
- case MVT::v4i16:
- BitcastResultTy = MVT::v4i32;
- break;
- case MVT::v8i8:
- BitcastResultTy = MVT::v8i16;
- break;
- default:
- llvm_unreachable("Should be one of {v2i32, v4i16, v8i8}");
- }
- return DAG.getNode(ISD::TRUNCATE, DL, ResVT,
- DAG.getNode(ISD::BITCAST, DL, BitcastResultTy, UzpResult));
- }
- static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) {
- unsigned Opc = N->getOpcode();
- assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads
- Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) ||
- (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads
- Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) &&
- "Invalid opcode.");
- const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO ||
- Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
- const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO ||
- Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
- const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO ||
- Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO ||
- Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO ||
- Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO;
- SDLoc DL(N);
- SDValue Chain = N->getOperand(0);
- SDValue Pg = N->getOperand(1);
- SDValue Base = N->getOperand(2);
- SDValue Offset = N->getOperand(3);
- SDValue Ty = N->getOperand(4);
- EVT ResVT = N->getValueType(0);
- const auto OffsetOpc = Offset.getOpcode();
- const bool OffsetIsZExt =
- OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU;
- const bool OffsetIsSExt =
- OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU;
- // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible.
- if (!Extended && (OffsetIsSExt || OffsetIsZExt)) {
- SDValue ExtPg = Offset.getOperand(0);
- VTSDNode *ExtFrom = cast<VTSDNode>(Offset.getOperand(2).getNode());
- EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType();
- // If the predicate for the sign- or zero-extended offset is the same as the
- // predicate used for this load and the sign-/zero-extension was from 32 bits,
- // the extension can be folded into the gather's addressing mode.
- if (ExtPg == Pg && ExtFromEVT == MVT::i32) {
- SDValue UnextendedOffset = Offset.getOperand(1);
- unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true);
- if (Signed)
- NewOpc = getSignExtendedGatherOpcode(NewOpc);
- return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other},
- {Chain, Pg, Base, UnextendedOffset, Ty});
- }
- }
- return SDValue();
- }
- /// Optimize a vector shift instruction and its operand if shifted out
- /// bits are not used.
- static SDValue performVectorShiftCombine(SDNode *N,
- const AArch64TargetLowering &TLI,
- TargetLowering::DAGCombinerInfo &DCI) {
- assert(N->getOpcode() == AArch64ISD::VASHR ||
- N->getOpcode() == AArch64ISD::VLSHR);
- SDValue Op = N->getOperand(0);
- unsigned OpScalarSize = Op.getScalarValueSizeInBits();
- unsigned ShiftImm = N->getConstantOperandVal(1);
- assert(OpScalarSize > ShiftImm && "Invalid shift imm");
- APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm);
- APInt DemandedMask = ~ShiftedOutBits;
- if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
- return SDValue(N, 0);
- return SDValue();
- }
- static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {
- // sunpklo(sext(pred)) -> sext(extract_low_half(pred))
- // This transform works in partnership with performSetCCPunpkCombine to
- // remove unnecessary transfer of predicates into standard registers and back
- if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND &&
- N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() ==
- MVT::i1) {
- SDValue CC = N->getOperand(0)->getOperand(0);
- auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext());
- SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC,
- DAG.getVectorIdxConstant(0, SDLoc(N)));
- return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk);
- }
- return SDValue();
- }
- /// Target-specific DAG combine function for post-increment LD1 (lane) and
- /// post-increment LD1R.
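- /// E.g. a single-element load that is inserted into a vector lane (or
- /// duplicated to all lanes) and whose address is also incremented by the
- /// element size can be selected as LD1LANEpost/LD1DUPpost, folding the
- /// pointer increment into the load.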
- static SDValue performPostLD1Combine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- bool IsLaneOp) {
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- SelectionDAG &DAG = DCI.DAG;
- EVT VT = N->getValueType(0);
- if (!VT.is128BitVector() && !VT.is64BitVector())
- return SDValue();
- unsigned LoadIdx = IsLaneOp ? 1 : 0;
- SDNode *LD = N->getOperand(LoadIdx).getNode();
- // If it is not a LOAD, we cannot do this combine.
- if (LD->getOpcode() != ISD::LOAD)
- return SDValue();
- // The vector lane must be a constant in the LD1LANE opcode.
- SDValue Lane;
- if (IsLaneOp) {
- Lane = N->getOperand(2);
- auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
- if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
- return SDValue();
- }
- LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
- EVT MemVT = LoadSDN->getMemoryVT();
- // Check if memory operand is the same type as the vector element.
- if (MemVT != VT.getVectorElementType())
- return SDValue();
- // Check if there are other uses. If so, do not combine as it will introduce
- // an extra load.
- for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
- ++UI) {
- if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
- continue;
- if (*UI != N)
- return SDValue();
- }
- SDValue Addr = LD->getOperand(1);
- SDValue Vector = N->getOperand(0);
- // Search for a use of the address operand that is an increment.
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
- Addr.getNode()->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- if (User->getOpcode() != ISD::ADD
- || UI.getUse().getResNo() != Addr.getResNo())
- continue;
- // If the increment is a constant, it must match the memory ref size.
- SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
- if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
- uint32_t IncVal = CInc->getZExtValue();
- unsigned NumBytes = VT.getScalarSizeInBits() / 8;
- if (IncVal != NumBytes)
- continue;
- Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
- }
- // To avoid constructing a cycle, make sure that neither the load nor the add
- // is a predecessor of the other or of the Vector.
- SmallPtrSet<const SDNode *, 32> Visited;
- SmallVector<const SDNode *, 16> Worklist;
- Visited.insert(Addr.getNode());
- Worklist.push_back(User);
- Worklist.push_back(LD);
- Worklist.push_back(Vector.getNode());
- if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) ||
- SDNode::hasPredecessorHelper(User, Visited, Worklist))
- continue;
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(LD->getOperand(0)); // Chain
- if (IsLaneOp) {
- Ops.push_back(Vector); // The vector to be inserted
- Ops.push_back(Lane); // The lane to be inserted in the vector
- }
- Ops.push_back(Addr);
- Ops.push_back(Inc);
- EVT Tys[3] = { VT, MVT::i64, MVT::Other };
- SDVTList SDTys = DAG.getVTList(Tys);
- unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
- SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
- MemVT,
- LoadSDN->getMemOperand());
- // Update the uses.
- SDValue NewResults[] = {
- SDValue(LD, 0), // The result of load
- SDValue(UpdN.getNode(), 2) // Chain
- };
- DCI.CombineTo(LD, NewResults);
- DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result
- DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register
- break;
- }
- return SDValue();
- }
- /// Simplify ``Addr`` given that the top byte of it is ignored by HW during
- /// address translation.
- static bool performTBISimplification(SDValue Addr,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- APInt DemandedMask = APInt::getLowBitsSet(64, 56);
- KnownBits Known;
- TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
- !DCI.isBeforeLegalizeOps());
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
- DCI.CommitTargetLoweringOpt(TLO);
- return true;
- }
- return false;
- }
- static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
- assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) &&
- "Expected STORE dag node in input!");
- if (auto Store = dyn_cast<StoreSDNode>(N)) {
- if (!Store->isTruncatingStore() || Store->isIndexed())
- return SDValue();
- SDValue Ext = Store->getValue();
- auto ExtOpCode = Ext.getOpcode();
- if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND &&
- ExtOpCode != ISD::ANY_EXTEND)
- return SDValue();
- SDValue Orig = Ext->getOperand(0);
- if (Store->getMemoryVT() != Orig.getValueType())
- return SDValue();
- return DAG.getStore(Store->getChain(), SDLoc(Store), Orig,
- Store->getBasePtr(), Store->getMemOperand());
- }
- return SDValue();
- }
- // Perform TBI simplification if supported by the target and try to break up
- // nontemporal loads larger than 256 bits for odd types, so that LDNPQ 256-bit
- // load instructions can be selected.
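- // E.g. a nontemporal v20i16 (320-bit) load is split into one 256-bit v16i16
- // load plus a v4i16 load at byte offset 32; the pieces are then reassembled
- // with CONCAT_VECTORS and the original type extracted via EXTRACT_SUBVECTOR.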
- static SDValue performLOADCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- if (Subtarget->supportsAddressTopByteIgnored())
- performTBISimplification(N->getOperand(1), DCI, DAG);
- LoadSDNode *LD = cast<LoadSDNode>(N);
- EVT MemVT = LD->getMemoryVT();
- if (LD->isVolatile() || !LD->isNonTemporal() || !Subtarget->isLittleEndian())
- return SDValue(N, 0);
- if (MemVT.isScalableVector() || MemVT.getSizeInBits() <= 256 ||
- MemVT.getSizeInBits() % 256 == 0 ||
- 256 % MemVT.getScalarSizeInBits() != 0)
- return SDValue(N, 0);
- SDLoc DL(LD);
- SDValue Chain = LD->getChain();
- SDValue BasePtr = LD->getBasePtr();
- SDNodeFlags Flags = LD->getFlags();
- SmallVector<SDValue, 4> LoadOps;
- SmallVector<SDValue, 4> LoadOpsChain;
- // Replace any nontemporal load over 256 bits with a series of 256-bit loads
- // and a scalar/vector load of fewer than 256 bits. This way we can utilize
- // 256-bit loads and reduce the number of load instructions generated.
- MVT NewVT =
- MVT::getVectorVT(MemVT.getVectorElementType().getSimpleVT(),
- 256 / MemVT.getVectorElementType().getSizeInBits());
- unsigned Num256Loads = MemVT.getSizeInBits() / 256;
- // Create all full 256-bit loads, starting at byte offset 0 and going up to
- // byte offset (Num256Loads - 1) * 32.
- for (unsigned I = 0; I < Num256Loads; I++) {
- unsigned PtrOffset = I * 32;
- SDValue NewPtr = DAG.getMemBasePlusOffset(
- BasePtr, TypeSize::Fixed(PtrOffset), DL, Flags);
- Align NewAlign = commonAlignment(LD->getAlign(), PtrOffset);
- SDValue NewLoad = DAG.getLoad(
- NewVT, DL, Chain, NewPtr, LD->getPointerInfo().getWithOffset(PtrOffset),
- NewAlign, LD->getMemOperand()->getFlags(), LD->getAAInfo());
- LoadOps.push_back(NewLoad);
- LoadOpsChain.push_back(SDValue(cast<SDNode>(NewLoad), 1));
- }
- // Process the remaining bits of the load operation.
- // This is done by creating an UNDEF vector to match the size of the
- // 256-bit loads and inserting the remaining load into it. We extract the
- // original load type at the end using an EXTRACT_SUBVECTOR instruction.
- unsigned BitsRemaining = MemVT.getSizeInBits() % 256;
- unsigned PtrOffset = (MemVT.getSizeInBits() - BitsRemaining) / 8;
- MVT RemainingVT = MVT::getVectorVT(
- MemVT.getVectorElementType().getSimpleVT(),
- BitsRemaining / MemVT.getVectorElementType().getSizeInBits());
- SDValue NewPtr =
- DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(PtrOffset), DL, Flags);
- Align NewAlign = commonAlignment(LD->getAlign(), PtrOffset);
- SDValue RemainingLoad =
- DAG.getLoad(RemainingVT, DL, Chain, NewPtr,
- LD->getPointerInfo().getWithOffset(PtrOffset), NewAlign,
- LD->getMemOperand()->getFlags(), LD->getAAInfo());
- SDValue UndefVector = DAG.getUNDEF(NewVT);
- SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL);
- SDValue ExtendedRemainingLoad =
- DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT,
- {UndefVector, RemainingLoad, InsertIdx});
- LoadOps.push_back(ExtendedRemainingLoad);
- LoadOpsChain.push_back(SDValue(cast<SDNode>(RemainingLoad), 1));
- EVT ConcatVT =
- EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
- LoadOps.size() * NewVT.getVectorNumElements());
- SDValue ConcatVectors =
- DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, LoadOps);
- // Extract the original vector type size.
- SDValue ExtractSubVector =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MemVT,
- {ConcatVectors, DAG.getVectorIdxConstant(0, DL)});
- SDValue TokenFactor =
- DAG.getNode(ISD::TokenFactor, DL, MVT::Other, LoadOpsChain);
- return DAG.getMergeValues({ExtractSubVector, TokenFactor}, DL);
- }
- static SDValue performSTORECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- StoreSDNode *ST = cast<StoreSDNode>(N);
- SDValue Chain = ST->getChain();
- SDValue Value = ST->getValue();
- SDValue Ptr = ST->getBasePtr();
- EVT ValueVT = Value.getValueType();
- auto hasValidElementTypeForFPTruncStore = [](EVT VT) {
- EVT EltVT = VT.getVectorElementType();
- return EltVT == MVT::f32 || EltVT == MVT::f64;
- };
- // If this is an FP_ROUND followed by a store, fold this into a truncating
- // store. We can do this even if this is already a truncstore.
- // We purposefully don't care about legality of the nodes here as we know
- // they can be split down into something legal.
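- // E.g. store(fp_round(v8f64 x to v8f32)) becomes a truncating store of x
- // with v8f32 memory type, when the fixed-length vector is wide enough to be
- // lowered via SVE.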
- if (DCI.isBeforeLegalizeOps() && Value.getOpcode() == ISD::FP_ROUND &&
- Value.getNode()->hasOneUse() && ST->isUnindexed() &&
- Subtarget->useSVEForFixedLengthVectors() &&
- ValueVT.isFixedLengthVector() &&
- ValueVT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits() &&
- hasValidElementTypeForFPTruncStore(Value.getOperand(0).getValueType()))
- return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
- ST->getMemoryVT(), ST->getMemOperand());
- if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
- return Split;
- if (Subtarget->supportsAddressTopByteIgnored() &&
- performTBISimplification(N->getOperand(2), DCI, DAG))
- return SDValue(N, 0);
- if (SDValue Store = foldTruncStoreOfExt(DAG, N))
- return Store;
- return SDValue();
- }
- static SDValue performMSTORECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
- SDValue Value = MST->getValue();
- SDValue Mask = MST->getMask();
- SDLoc DL(N);
- // If this is a UZP1 followed by a masked store, fold this into a masked
- // truncating store. We can do this even if this is already a masked
- // truncstore.
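- // E.g. when an nxv4i32 value is bitcast to nxv8i16 and narrowed with UZP1
- // before being stored, the masked store can instead truncate the nxv4i32
- // value directly, using an nxv4i1 ptrue with the same pattern.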
- if (Value.getOpcode() == AArch64ISD::UZP1 && Value->hasOneUse() &&
- MST->isUnindexed() && Mask->getOpcode() == AArch64ISD::PTRUE &&
- Value.getValueType().isInteger()) {
- Value = Value.getOperand(0);
- if (Value.getOpcode() == ISD::BITCAST) {
- EVT HalfVT =
- Value.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
- EVT InVT = Value.getOperand(0).getValueType();
- if (HalfVT.widenIntegerVectorElementType(*DAG.getContext()) == InVT) {
- unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
- unsigned PgPattern = Mask->getConstantOperandVal(0);
- // Ensure we can double the size of the predicate pattern
- unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
- if (NumElts && NumElts * InVT.getVectorElementType().getSizeInBits() <=
- MinSVESize) {
- Mask = getPTrue(DAG, DL, InVT.changeVectorElementType(MVT::i1),
- PgPattern);
- return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0),
- MST->getBasePtr(), MST->getOffset(), Mask,
- MST->getMemoryVT(), MST->getMemOperand(),
- MST->getAddressingMode(),
- /*IsTruncating=*/true);
- }
- }
- }
- }
- return SDValue();
- }
- /// \return true if part of the index was folded into the Base.
- static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
- SDLoc DL, SelectionDAG &DAG) {
- // This function assumes a vector of i64 indices.
- EVT IndexVT = Index.getValueType();
- if (!IndexVT.isVector() || IndexVT.getVectorElementType() != MVT::i64)
- return false;
- // Simplify:
- // BasePtr = Ptr
- // Index = X + splat(Offset)
- // ->
- // BasePtr = Ptr + Offset * scale.
- // Index = X
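- // E.g. a gather whose index is X + splat(16) with Scale == 4 becomes a
- // gather from BasePtr + 64 with index X.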
- if (Index.getOpcode() == ISD::ADD) {
- if (auto Offset = DAG.getSplatValue(Index.getOperand(1))) {
- Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
- BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
- Index = Index.getOperand(0);
- return true;
- }
- }
- // Simplify:
- // BasePtr = Ptr
- // Index = (X + splat(Offset)) << splat(Shift)
- // ->
- // BasePtr = Ptr + (Offset << Shift) * scale
- // Index = X << splat(Shift)
- if (Index.getOpcode() == ISD::SHL &&
- Index.getOperand(0).getOpcode() == ISD::ADD) {
- SDValue Add = Index.getOperand(0);
- SDValue ShiftOp = Index.getOperand(1);
- SDValue OffsetOp = Add.getOperand(1);
- if (auto Shift = DAG.getSplatValue(ShiftOp))
- if (auto Offset = DAG.getSplatValue(OffsetOp)) {
- Offset = DAG.getNode(ISD::SHL, DL, MVT::i64, Offset, Shift);
- Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
- BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
- Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
- Add.getOperand(0), ShiftOp);
- return true;
- }
- }
- return false;
- }
- // Analyse the specified address returning true if a more optimal addressing
- // mode is available. When returning true all parameters are updated to reflect
- // their recommended values.
- static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
- SDValue &BasePtr, SDValue &Index,
- SelectionDAG &DAG) {
- // Try to iteratively fold parts of the index into the base pointer to
- // simplify the index as much as possible.
- bool Changed = false;
- while (foldIndexIntoBase(BasePtr, Index, N->getScale(), SDLoc(N), DAG))
- Changed = true;
- // Only consider element types that are pointer sized as smaller types can
- // be easily promoted.
- EVT IndexVT = Index.getValueType();
- if (IndexVT.getVectorElementType() != MVT::i64 || IndexVT == MVT::nxv2i64)
- return Changed;
- // Can indices be trivially shrunk?
- EVT DataVT = N->getOperand(1).getValueType();
- // Don't attempt to shrink the index for fixed vectors of 64 bit data since it
- // will later be re-extended to 64 bits in legalization
- if (DataVT.isFixedLengthVector() && DataVT.getScalarSizeInBits() == 64)
- return Changed;
- if (ISD::isVectorShrinkable(Index.getNode(), 32, N->isIndexSigned())) {
- EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
- Index = DAG.getNode(ISD::TRUNCATE, SDLoc(N), NewIndexVT, Index);
- return true;
- }
- // Match:
- // Index = step(const)
- int64_t Stride = 0;
- if (Index.getOpcode() == ISD::STEP_VECTOR) {
- Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue();
- }
- // Match:
- // Index = step(const) << shift(const)
- else if (Index.getOpcode() == ISD::SHL &&
- Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) {
- SDValue RHS = Index.getOperand(1);
- if (auto *Shift =
- dyn_cast_or_null<ConstantSDNode>(DAG.getSplatValue(RHS))) {
- int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(1);
- Stride = Step << Shift->getZExtValue();
- }
- }
- // Return early because no supported pattern is found.
- if (Stride == 0)
- return Changed;
- if (Stride < std::numeric_limits<int32_t>::min() ||
- Stride > std::numeric_limits<int32_t>::max())
- return Changed;
- const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- unsigned MaxVScale =
- Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
- int64_t LastElementOffset =
- IndexVT.getVectorMinNumElements() * Stride * MaxVScale;
- if (LastElementOffset < std::numeric_limits<int32_t>::min() ||
- LastElementOffset > std::numeric_limits<int32_t>::max())
- return Changed;
- EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
- // Stride does not scale explicitly by 'Scale', because it happens in
- // the gather/scatter addressing mode.
- Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride));
- return true;
- }
- static SDValue performMaskedGatherScatterCombine(
- SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) {
- MaskedGatherScatterSDNode *MGS = cast<MaskedGatherScatterSDNode>(N);
- assert(MGS && "Can only combine gather load or scatter store nodes");
- if (!DCI.isBeforeLegalize())
- return SDValue();
- SDLoc DL(MGS);
- SDValue Chain = MGS->getChain();
- SDValue Scale = MGS->getScale();
- SDValue Index = MGS->getIndex();
- SDValue Mask = MGS->getMask();
- SDValue BasePtr = MGS->getBasePtr();
- ISD::MemIndexType IndexType = MGS->getIndexType();
- if (!findMoreOptimalIndexType(MGS, BasePtr, Index, DAG))
- return SDValue();
- // Here we catch such cases early and change MGATHER's IndexType to allow
- // the use of an Index that's more legalisation friendly.
- if (auto *MGT = dyn_cast<MaskedGatherSDNode>(MGS)) {
- SDValue PassThru = MGT->getPassThru();
- SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedGather(
- DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL,
- Ops, MGT->getMemOperand(), IndexType, MGT->getExtensionType());
- }
- auto *MSC = cast<MaskedScatterSDNode>(MGS);
- SDValue Data = MSC->getValue();
- SDValue Ops[] = {Chain, Data, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), MSC->getMemoryVT(), DL,
- Ops, MSC->getMemOperand(), IndexType,
- MSC->isTruncatingStore());
- }
- /// Target-specific DAG combine function for NEON load/store intrinsics
- /// to merge base address updates.
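- /// E.g. "ld2 {v0.4s, v1.4s}, [x0]" followed by "add x0, x0, #32" can be
- /// selected as the post-indexed "ld2 {v0.4s, v1.4s}, [x0], #32".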
- static SDValue performNEONPostLDSTCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
- return SDValue();
- unsigned AddrOpIdx = N->getNumOperands() - 1;
- SDValue Addr = N->getOperand(AddrOpIdx);
- // Search for a use of the address operand that is an increment.
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
- UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- if (User->getOpcode() != ISD::ADD ||
- UI.getUse().getResNo() != Addr.getResNo())
- continue;
- // Check that the add is independent of the load/store. Otherwise, folding
- // it would create a cycle.
- SmallPtrSet<const SDNode *, 32> Visited;
- SmallVector<const SDNode *, 16> Worklist;
- Visited.insert(Addr.getNode());
- Worklist.push_back(N);
- Worklist.push_back(User);
- if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
- SDNode::hasPredecessorHelper(User, Visited, Worklist))
- continue;
- // Find the new opcode for the updating load/store.
- bool IsStore = false;
- bool IsLaneOp = false;
- bool IsDupOp = false;
- unsigned NewOpc = 0;
- unsigned NumVecs = 0;
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- switch (IntNo) {
- default: llvm_unreachable("unexpected intrinsic for Neon base update");
- case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post;
- NumVecs = 2; break;
- case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post;
- NumVecs = 3; break;
- case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post;
- NumVecs = 4; break;
- case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post;
- NumVecs = 2; IsStore = true; break;
- case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post;
- NumVecs = 3; IsStore = true; break;
- case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post;
- NumVecs = 4; IsStore = true; break;
- case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post;
- NumVecs = 2; break;
- case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post;
- NumVecs = 3; break;
- case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post;
- NumVecs = 4; break;
- case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post;
- NumVecs = 2; IsStore = true; break;
- case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post;
- NumVecs = 3; IsStore = true; break;
- case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post;
- NumVecs = 4; IsStore = true; break;
- case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost;
- NumVecs = 2; IsDupOp = true; break;
- case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost;
- NumVecs = 3; IsDupOp = true; break;
- case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost;
- NumVecs = 4; IsDupOp = true; break;
- case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost;
- NumVecs = 2; IsLaneOp = true; break;
- case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost;
- NumVecs = 3; IsLaneOp = true; break;
- case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost;
- NumVecs = 4; IsLaneOp = true; break;
- case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost;
- NumVecs = 2; IsStore = true; IsLaneOp = true; break;
- case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost;
- NumVecs = 3; IsStore = true; IsLaneOp = true; break;
- case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost;
- NumVecs = 4; IsStore = true; IsLaneOp = true; break;
- }
- EVT VecTy;
- if (IsStore)
- VecTy = N->getOperand(2).getValueType();
- else
- VecTy = N->getValueType(0);
- // If the increment is a constant, it must match the memory ref size.
- SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
- if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
- uint32_t IncVal = CInc->getZExtValue();
- unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
- if (IsLaneOp || IsDupOp)
- NumBytes /= VecTy.getVectorNumElements();
- if (IncVal != NumBytes)
- continue;
- Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
- }
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(N->getOperand(0)); // Incoming chain
- // Load lane and store have vector list as input.
- if (IsLaneOp || IsStore)
- for (unsigned i = 2; i < AddrOpIdx; ++i)
- Ops.push_back(N->getOperand(i));
- Ops.push_back(Addr); // Base register
- Ops.push_back(Inc);
- // Return Types.
- EVT Tys[6];
- unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
- unsigned n;
- for (n = 0; n < NumResultVecs; ++n)
- Tys[n] = VecTy;
- Tys[n++] = MVT::i64; // Type of write back register
- Tys[n] = MVT::Other; // Type of the chain
- SDVTList SDTys = DAG.getVTList(ArrayRef(Tys, NumResultVecs + 2));
- MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
- SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
- MemInt->getMemoryVT(),
- MemInt->getMemOperand());
- // Update the uses.
- std::vector<SDValue> NewResults;
- for (unsigned i = 0; i < NumResultVecs; ++i) {
- NewResults.push_back(SDValue(UpdN.getNode(), i));
- }
- NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
- DCI.CombineTo(N, NewResults);
- DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
- break;
- }
- return SDValue();
- }
- // Checks to see if the value is the prescribed width and returns information
- // about its extension mode.
- static
- bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
- ExtType = ISD::NON_EXTLOAD;
- switch(V.getNode()->getOpcode()) {
- default:
- return false;
- case ISD::LOAD: {
- LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
- if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8)
- || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) {
- ExtType = LoadNode->getExtensionType();
- return true;
- }
- return false;
- }
- case ISD::AssertSext: {
- VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
- if ((TypeNode->getVT() == MVT::i8 && width == 8)
- || (TypeNode->getVT() == MVT::i16 && width == 16)) {
- ExtType = ISD::SEXTLOAD;
- return true;
- }
- return false;
- }
- case ISD::AssertZext: {
- VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
- if ((TypeNode->getVT() == MVT::i8 && width == 8)
- || (TypeNode->getVT() == MVT::i16 && width == 16)) {
- ExtType = ISD::ZEXTLOAD;
- return true;
- }
- return false;
- }
- case ISD::Constant:
- case ISD::TargetConstant: {
- return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
- 1LL << (width - 1);
- }
- }
- return true;
- }
- // This function does a whole lot of voodoo to determine if the tests are
- // equivalent without and with a mask. Essentially what happens is that given a
- // DAG resembling:
- //
- // +-------------+ +-------------+ +-------------+ +-------------+
- // | Input | | AddConstant | | CompConstant| | CC |
- // +-------------+ +-------------+ +-------------+ +-------------+
- // | | | |
- // V V | +----------+
- // +-------------+ +----+ | |
- // | ADD | |0xff| | |
- // +-------------+ +----+ | |
- // | | | |
- // V V | |
- // +-------------+ | |
- // | AND | | |
- // +-------------+ | |
- // | | |
- // +-----+ | |
- // | | |
- // V V V
- // +-------------+
- // | CMP |
- // +-------------+
- //
- // The AND node may be safely removed for some combinations of inputs. In
- // particular we need to take into account the extension type of the Input,
- // the exact values of AddConstant, CompConstant, and CC, along with the nominal
- // width of the input (this can work for any width inputs, the above graph is
- // specific to 8 bits).
- //
- // The specific equations were worked out by generating output tables for each
- // AArch64CC value in terms of an AddConstant (w1) and a CompConstant (w2). The
- // problem was simplified by working with 4 bit inputs, which means we only
- // needed to reason about 24 distinct bit patterns: 8 patterns unique to zero
- // extension (8,15), 8 patterns unique to sign extensions (-8,-1), and 8
- // patterns present in both extensions (0,7). For every distinct set of
- // AddConstant and CompConstants bit patterns we can consider the masked and
- // unmasked versions to be equivalent if the result of this function is true for
- // all 16 distinct bit patterns of for the current extension type of Input (w0).
- //
- // sub w8, w0, w1
- // and w10, w8, #0x0f
- // cmp w8, w2
- // cset w9, AArch64CC
- // cmp w10, w2
- // cset w11, AArch64CC
- // cmp w9, w11
- // cset w0, eq
- // ret
- //
- // Since the above function shows when the outputs are equivalent it defines
- // when it is safe to remove the AND. Unfortunately it only runs on AArch64 and
- // would be expensive to run during compiles. The equations below were written
- // in a test harness that confirmed they gave outputs equivalent to the
- // function above for all inputs, so they can be used to determine whether the
- // removal is legal instead.
- //
- // isEquivalentMaskless() is the code for testing whether the AND can be
- // removed, factored out of the DAG recognition because the DAG can take
- // several forms.
- static bool isEquivalentMaskless(unsigned CC, unsigned width,
- ISD::LoadExtType ExtType, int AddConstant,
- int CompConstant) {
- // By being careful about our equations and only writing them in terms of
- // symbolic values and well-known constants (0, 1, -1, MaxUInt) we can
- // make them generally applicable to all bit widths.
- int MaxUInt = (1 << width);
- // For the purposes of these comparisons sign extending the type is
- // equivalent to zero extending the add and displacing it by half the integer
- // width. Provided we are careful and make sure our equations are valid over
- // the whole range we can just adjust the input and avoid writing equations
- // for sign extended inputs.
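- // E.g. for width == 8 a sign-extended input is modelled as a zero-extended
- // one with AddConstant reduced by 128.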
- if (ExtType == ISD::SEXTLOAD)
- AddConstant -= (1 << (width-1));
- switch(CC) {
- case AArch64CC::LE:
- case AArch64CC::GT:
- if ((AddConstant == 0) ||
- (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
- (AddConstant >= 0 && CompConstant < 0) ||
- (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
- return true;
- break;
- case AArch64CC::LT:
- case AArch64CC::GE:
- if ((AddConstant == 0) ||
- (AddConstant >= 0 && CompConstant <= 0) ||
- (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
- return true;
- break;
- case AArch64CC::HI:
- case AArch64CC::LS:
- if ((AddConstant >= 0 && CompConstant < 0) ||
- (AddConstant <= 0 && CompConstant >= -1 &&
- CompConstant < AddConstant + MaxUInt))
- return true;
- break;
- case AArch64CC::PL:
- case AArch64CC::MI:
- if ((AddConstant == 0) ||
- (AddConstant > 0 && CompConstant <= 0) ||
- (AddConstant < 0 && CompConstant <= AddConstant))
- return true;
- break;
- case AArch64CC::LO:
- case AArch64CC::HS:
- if ((AddConstant >= 0 && CompConstant <= 0) ||
- (AddConstant <= 0 && CompConstant >= 0 &&
- CompConstant <= AddConstant + MaxUInt))
- return true;
- break;
- case AArch64CC::EQ:
- case AArch64CC::NE:
- if ((AddConstant > 0 && CompConstant < 0) ||
- (AddConstant < 0 && CompConstant >= 0 &&
- CompConstant < AddConstant + MaxUInt) ||
- (AddConstant >= 0 && CompConstant >= 0 &&
- CompConstant >= AddConstant) ||
- (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
- return true;
- break;
- case AArch64CC::VS:
- case AArch64CC::VC:
- case AArch64CC::AL:
- case AArch64CC::NV:
- return true;
- case AArch64CC::Invalid:
- break;
- }
- return false;
- }
- // (X & C) >u Mask --> ((X & (C & ~Mask)) != 0)
- // (X & C) <u Pow2 --> ((X & (C & ~(Pow2 - 1))) == 0)
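- // E.g. (X & 0xff) >u 0x0f becomes (X & 0xf0) != 0, and
- // (X & 0xff) <u 0x10 becomes (X & 0xf0) == 0.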
- static SDValue performSubsToAndsCombine(SDNode *N, SDNode *SubsNode,
- SDNode *AndNode, SelectionDAG &DAG,
- unsigned CCIndex, unsigned CmpIndex,
- unsigned CC) {
- ConstantSDNode *SubsC = dyn_cast<ConstantSDNode>(SubsNode->getOperand(1));
- if (!SubsC)
- return SDValue();
- APInt SubsAP = SubsC->getAPIntValue();
- if (CC == AArch64CC::HI) {
- if (!SubsAP.isMask())
- return SDValue();
- } else if (CC == AArch64CC::LO) {
- if (!SubsAP.isPowerOf2())
- return SDValue();
- } else
- return SDValue();
- ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(AndNode->getOperand(1));
- if (!AndC)
- return SDValue();
- APInt MaskAP = CC == AArch64CC::HI ? SubsAP : (SubsAP - 1);
- SDLoc DL(N);
- APInt AndSMask = (~MaskAP) & AndC->getAPIntValue();
- SDValue ANDS = DAG.getNode(
- AArch64ISD::ANDS, DL, SubsNode->getVTList(), AndNode->getOperand(0),
- DAG.getConstant(AndSMask, DL, SubsC->getValueType(0)));
- SDValue AArch64_CC =
- DAG.getConstant(CC == AArch64CC::HI ? AArch64CC::NE : AArch64CC::EQ, DL,
- N->getOperand(CCIndex)->getValueType(0));
- // For now, only performCSELCombine and performBRCONDCombine call this
- // function, and both pass 2 for CCIndex and 3 for CmpIndex on nodes with 4
- // operands, so just initialize the ops directly to keep the code simple. If
- // some other caller ever uses a different CCIndex or CmpIndex, this will need
- // to be rewritten with a loop.
- // TODO: Do we need to assert the number of operands is 4 here?
- assert((CCIndex == 2 && CmpIndex == 3) &&
- "Expected CCIndex to be 2 and CmpIndex to be 3.");
- SDValue Ops[] = {N->getOperand(0), N->getOperand(1), AArch64_CC,
- ANDS.getValue(1)};
- return DAG.getNode(N->getOpcode(), N, N->getVTList(), Ops);
- }
- static
- SDValue performCONDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG, unsigned CCIndex,
- unsigned CmpIndex) {
- unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();
- SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
- unsigned CondOpcode = SubsNode->getOpcode();
- if (CondOpcode != AArch64ISD::SUBS || SubsNode->hasAnyUseOfValue(0))
- return SDValue();
- // There is a SUBS feeding this condition. Is it fed by a mask we can
- // use?
- SDNode *AndNode = SubsNode->getOperand(0).getNode();
- unsigned MaskBits = 0;
- if (AndNode->getOpcode() != ISD::AND)
- return SDValue();
- if (SDValue Val = performSubsToAndsCombine(N, SubsNode, AndNode, DAG, CCIndex,
- CmpIndex, CC))
- return Val;
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) {
- uint32_t CNV = CN->getZExtValue();
- if (CNV == 255)
- MaskBits = 8;
- else if (CNV == 65535)
- MaskBits = 16;
- }
- if (!MaskBits)
- return SDValue();
- SDValue AddValue = AndNode->getOperand(0);
- if (AddValue.getOpcode() != ISD::ADD)
- return SDValue();
- // The basic dag structure is correct, grab the inputs and validate them.
- SDValue AddInputValue1 = AddValue.getNode()->getOperand(0);
- SDValue AddInputValue2 = AddValue.getNode()->getOperand(1);
- SDValue SubsInputValue = SubsNode->getOperand(1);
- // The mask is present and the provenance of all the values is a smaller type,
- // so let's see if the mask is superfluous.
- if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
- !isa<ConstantSDNode>(SubsInputValue.getNode()))
- return SDValue();
- ISD::LoadExtType ExtType;
- if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) ||
- !checkValueWidth(AddInputValue2, MaskBits, ExtType) ||
- !checkValueWidth(AddInputValue1, MaskBits, ExtType) )
- return SDValue();
- if(!isEquivalentMaskless(CC, MaskBits, ExtType,
- cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
- cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
- return SDValue();
- // The AND is not necessary, remove it.
- SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0),
- SubsNode->getValueType(1));
- SDValue Ops[] = { AddValue, SubsNode->getOperand(1) };
- SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops);
- DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode());
- return SDValue(N, 0);
- }
- // Optimize compare with zero and branch.
- static SDValue performBRCONDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- MachineFunction &MF = DAG.getMachineFunction();
- // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
- // will not be produced, as they are conditional branch instructions that do
- // not set flags.
- if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
- return SDValue();
- if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
- N = NV.getNode();
- SDValue Chain = N->getOperand(0);
- SDValue Dest = N->getOperand(1);
- SDValue CCVal = N->getOperand(2);
- SDValue Cmp = N->getOperand(3);
- assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
- unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
- if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
- return SDValue();
- unsigned CmpOpc = Cmp.getOpcode();
- if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
- return SDValue();
- // Only attempt folding if there is only one use of the flag and no use of the
- // value.
- if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
- return SDValue();
- SDValue LHS = Cmp.getOperand(0);
- SDValue RHS = Cmp.getOperand(1);
- assert(LHS.getValueType() == RHS.getValueType() &&
- "Expected the value type to be the same for both operands!");
- if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
- return SDValue();
- if (isNullConstant(LHS))
- std::swap(LHS, RHS);
- if (!isNullConstant(RHS))
- return SDValue();
- if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
- LHS.getOpcode() == ISD::SRL)
- return SDValue();
- // Fold the compare into the branch instruction.
- SDValue BR;
- if (CC == AArch64CC::EQ)
- BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
- else
- BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
- // Do not add new nodes to DAG combiner worklist.
- DCI.CombineTo(N, BR, false);
- return SDValue();
- }
- static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) {
- unsigned CC = N->getConstantOperandVal(2);
- SDValue SUBS = N->getOperand(3);
- SDValue Zero, CTTZ;
- if (CC == AArch64CC::EQ && SUBS.getOpcode() == AArch64ISD::SUBS) {
- Zero = N->getOperand(0);
- CTTZ = N->getOperand(1);
- } else if (CC == AArch64CC::NE && SUBS.getOpcode() == AArch64ISD::SUBS) {
- Zero = N->getOperand(1);
- CTTZ = N->getOperand(0);
- } else
- return SDValue();
- if ((CTTZ.getOpcode() != ISD::CTTZ && CTTZ.getOpcode() != ISD::TRUNCATE) ||
- (CTTZ.getOpcode() == ISD::TRUNCATE &&
- CTTZ.getOperand(0).getOpcode() != ISD::CTTZ))
- return SDValue();
- assert((CTTZ.getValueType() == MVT::i32 || CTTZ.getValueType() == MVT::i64) &&
- "Illegal type in CTTZ folding");
- if (!isNullConstant(Zero) || !isNullConstant(SUBS.getOperand(1)))
- return SDValue();
- SDValue X = CTTZ.getOpcode() == ISD::TRUNCATE
- ? CTTZ.getOperand(0).getOperand(0)
- : CTTZ.getOperand(0);
- if (X != SUBS.getOperand(0))
- return SDValue();
- unsigned BitWidth = CTTZ.getOpcode() == ISD::TRUNCATE
- ? CTTZ.getOperand(0).getValueSizeInBits()
- : CTTZ.getValueSizeInBits();
- SDValue BitWidthMinusOne =
- DAG.getConstant(BitWidth - 1, SDLoc(N), CTTZ.getValueType());
- return DAG.getNode(ISD::AND, SDLoc(N), CTTZ.getValueType(), CTTZ,
- BitWidthMinusOne);
- }
- // (CSEL l r EQ (CMP (CSEL x y cc2 cond) x)) => (CSEL l r cc2 cond)
- // (CSEL l r EQ (CMP (CSEL x y cc2 cond) y)) => (CSEL l r !cc2 cond)
- // Where x and y are constants and x != y
- // (CSEL l r NE (CMP (CSEL x y cc2 cond) x)) => (CSEL l r !cc2 cond)
- // (CSEL l r NE (CMP (CSEL x y cc2 cond) y)) => (CSEL l r cc2 cond)
- // Where x and y are constants and x != y
- static SDValue foldCSELOfCSEL(SDNode *Op, SelectionDAG &DAG) {
- SDValue L = Op->getOperand(0);
- SDValue R = Op->getOperand(1);
- AArch64CC::CondCode OpCC =
- static_cast<AArch64CC::CondCode>(Op->getConstantOperandVal(2));
- SDValue OpCmp = Op->getOperand(3);
- if (!isCMP(OpCmp))
- return SDValue();
- SDValue CmpLHS = OpCmp.getOperand(0);
- SDValue CmpRHS = OpCmp.getOperand(1);
- if (CmpRHS.getOpcode() == AArch64ISD::CSEL)
- std::swap(CmpLHS, CmpRHS);
- else if (CmpLHS.getOpcode() != AArch64ISD::CSEL)
- return SDValue();
- SDValue X = CmpLHS->getOperand(0);
- SDValue Y = CmpLHS->getOperand(1);
- if (!isa<ConstantSDNode>(X) || !isa<ConstantSDNode>(Y) || X == Y) {
- return SDValue();
- }
- // If one of the constants is an opaque constant, the x and y SDNodes can
- // still be different even though their real values are the same, so compare
- // the APInt values here to make sure the code is correct.
- ConstantSDNode *CX = cast<ConstantSDNode>(X);
- ConstantSDNode *CY = cast<ConstantSDNode>(Y);
- if (CX->getAPIntValue() == CY->getAPIntValue())
- return SDValue();
- AArch64CC::CondCode CC =
- static_cast<AArch64CC::CondCode>(CmpLHS->getConstantOperandVal(2));
- SDValue Cond = CmpLHS->getOperand(3);
- if (CmpRHS == Y)
- CC = AArch64CC::getInvertedCondCode(CC);
- else if (CmpRHS != X)
- return SDValue();
- if (OpCC == AArch64CC::NE)
- CC = AArch64CC::getInvertedCondCode(CC);
- else if (OpCC != AArch64CC::EQ)
- return SDValue();
- SDLoc DL(Op);
- EVT VT = Op->getValueType(0);
- SDValue CCValue = DAG.getConstant(CC, DL, MVT::i32);
- return DAG.getNode(AArch64ISD::CSEL, DL, VT, L, R, CCValue, Cond);
- }
- // Optimize CSEL instructions
- static SDValue performCSELCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- // CSEL x, x, cc -> x
- if (N->getOperand(0) == N->getOperand(1))
- return N->getOperand(0);
- if (SDValue R = foldCSELOfCSEL(N, DAG))
- return R;
- // CSEL 0, cttz(X), eq(X, 0) -> AND cttz bitwidth-1
- // CSEL cttz(X), 0, ne(X, 0) -> AND cttz bitwidth-1
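- // E.g. for a 64-bit X, (X == 0) ? 0 : cttz(X) becomes cttz(X) & 63: the
- // AArch64 expansion of cttz (RBIT + CLZ) produces 64 for a zero input, so
- // the mask yields 0 in exactly the case the CSEL would have selected 0.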
- if (SDValue Folded = foldCSELofCTTZ(N, DAG))
- return Folded;
- return performCONDCombine(N, DCI, DAG, 2, 3);
- }
- // Try to re-use an already extended operand of a vector SetCC feeding an
- // extended select. Doing so avoids requiring another full extension of the
- // SET_CC result when lowering the select.
- static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
- EVT Op0MVT = Op->getOperand(0).getValueType();
- if (!Op0MVT.isVector() || Op->use_empty())
- return SDValue();
- // Make sure that all uses of Op are VSELECTs with matching result types where
- // the result type has a larger element type than the SetCC operand.
- SDNode *FirstUse = *Op->use_begin();
- if (FirstUse->getOpcode() != ISD::VSELECT)
- return SDValue();
- EVT UseMVT = FirstUse->getValueType(0);
- if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits())
- return SDValue();
- if (any_of(Op->uses(), [&UseMVT](const SDNode *N) {
- return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT;
- }))
- return SDValue();
- APInt V;
- if (!ISD::isConstantSplatVector(Op->getOperand(1).getNode(), V))
- return SDValue();
- SDLoc DL(Op);
- SDValue Op0ExtV;
- SDValue Op1ExtV;
- ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(2))->get();
- // Check if the first operand of the SET_CC is already extended. If it is,
- // split the SET_CC and re-use the extended version of the operand.
- SDNode *Op0SExt = DAG.getNodeIfExists(ISD::SIGN_EXTEND, DAG.getVTList(UseMVT),
- Op->getOperand(0));
- SDNode *Op0ZExt = DAG.getNodeIfExists(ISD::ZERO_EXTEND, DAG.getVTList(UseMVT),
- Op->getOperand(0));
- if (Op0SExt && (isSignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
- Op0ExtV = SDValue(Op0SExt, 0);
- Op1ExtV = DAG.getNode(ISD::SIGN_EXTEND, DL, UseMVT, Op->getOperand(1));
- } else if (Op0ZExt && (isUnsignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
- Op0ExtV = SDValue(Op0ZExt, 0);
- Op1ExtV = DAG.getNode(ISD::ZERO_EXTEND, DL, UseMVT, Op->getOperand(1));
- } else
- return SDValue();
- return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1),
- Op0ExtV, Op1ExtV, Op->getOperand(2));
- }
- static SDValue performSETCCCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- if (SDValue V = tryToWidenSetCCOperands(N, DAG))
- return V;
- // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X
- if (Cond == ISD::SETNE && isOneConstant(RHS) &&
- LHS->getOpcode() == AArch64ISD::CSEL &&
- isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
- LHS->hasOneUse()) {
- // Invert CSEL's condition.
- auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2));
- auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue());
- auto NewCond = getInvertedCondCode(OldCond);
- // csel 0, 1, !cond, X
- SDValue CSEL =
- DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0),
- LHS.getOperand(1), DAG.getConstant(NewCond, DL, MVT::i32),
- LHS.getOperand(3));
- return DAG.getZExtOrTrunc(CSEL, DL, VT);
- }
- // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne
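- // E.g. for i32, (x >> 4) != 0 becomes (x & 0xfffffff0) != 0, which can
- // typically be selected as a single TST with a logical immediate.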
- if (Cond == ISD::SETNE && isNullConstant(RHS) &&
- LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) &&
- LHS->hasOneUse()) {
- EVT TstVT = LHS->getValueType(0);
- if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) {
- // This pattern will be optimized better in emitComparison.
- uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1);
- SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0),
- DAG.getConstant(TstImm, DL, TstVT));
- return DAG.getNode(ISD::SETCC, DL, VT, TST, RHS, N->getOperand(2));
- }
- }
- // setcc (iN (bitcast (vNi1 X))), 0, (eq|ne)
- // ==> setcc (iN (zext (i1 (vecreduce_or (vNi1 X))))), 0, (eq|ne)
- if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
- (Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) &&
- LHS->getOpcode() == ISD::BITCAST) {
- EVT ToVT = LHS->getValueType(0);
- EVT FromVT = LHS->getOperand(0).getValueType();
- if (FromVT.isFixedLengthVector() &&
- FromVT.getVectorElementType() == MVT::i1) {
- LHS = DAG.getNode(ISD::VECREDUCE_OR, DL, MVT::i1, LHS->getOperand(0));
- LHS = DAG.getNode(ISD::ZERO_EXTEND, DL, ToVT, LHS);
- return DAG.getSetCC(DL, VT, LHS, RHS, Cond);
- }
- }
- // Try to perform the memcmp when the result is tested for [in]equality with 0
- if (SDValue V = performOrXorChainCombine(N, DAG))
- return V;
- return SDValue();
- }
- // Replace a flag-setting operator (eg ANDS) with the generic version
- // (eg AND) if the flag is unused.
- static SDValue performFlagSettingCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- unsigned GenericOpcode) {
- SDLoc DL(N);
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
- EVT VT = N->getValueType(0);
- // If the flag result isn't used, convert back to a generic opcode.
- if (!N->hasAnyUseOfValue(1)) {
- SDValue Res = DCI.DAG.getNode(GenericOpcode, DL, VT, N->ops());
- return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)},
- DL);
- }
- // Combine identical generic nodes into this node, re-using the result.
- if (SDNode *Generic = DCI.DAG.getNodeIfExists(
- GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
- DCI.CombineTo(Generic, SDValue(N, 0));
- return SDValue();
- }
- static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) {
- // setcc_merge_zero pred
- // (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne
- // => extract_subvector (inner setcc_merge_zero)
- SDValue Pred = N->getOperand(0);
- SDValue LHS = N->getOperand(1);
- SDValue RHS = N->getOperand(2);
- ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
- if (Cond != ISD::SETNE || !isZerosVector(RHS.getNode()) ||
- LHS->getOpcode() != ISD::SIGN_EXTEND)
- return SDValue();
- SDValue Extract = LHS->getOperand(0);
- if (Extract->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
- Extract->getValueType(0) != N->getValueType(0) ||
- Extract->getConstantOperandVal(1) != 0)
- return SDValue();
- SDValue InnerSetCC = Extract->getOperand(0);
- if (InnerSetCC->getOpcode() != AArch64ISD::SETCC_MERGE_ZERO)
- return SDValue();
- // By this point we've effectively got
- // zero_inactive_lanes_and_trunc_i1(sext_i1(A)). If we can prove A's inactive
- // lanes are already zero then the trunc(sext()) sequence is redundant and we
- // can operate on A directly.
- SDValue InnerPred = InnerSetCC.getOperand(0);
- if (Pred.getOpcode() == AArch64ISD::PTRUE &&
- InnerPred.getOpcode() == AArch64ISD::PTRUE &&
- Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) &&
- Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 &&
- Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256)
- return Extract;
- return SDValue();
- }
- static SDValue
- performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
- assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
- "Unexpected opcode!");
- SelectionDAG &DAG = DCI.DAG;
- SDValue Pred = N->getOperand(0);
- SDValue LHS = N->getOperand(1);
- SDValue RHS = N->getOperand(2);
- ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
- if (SDValue V = performSetCCPunpkCombine(N, DAG))
- return V;
- if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
- LHS->getOpcode() == ISD::SIGN_EXTEND &&
- LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) {
- // setcc_merge_zero(
- // pred, extend(setcc_merge_zero(pred, ...)), != splat(0))
- // => setcc_merge_zero(pred, ...)
- if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
- LHS->getOperand(0)->getOperand(0) == Pred)
- return LHS->getOperand(0);
- // setcc_merge_zero(
- // all_active, extend(nxvNi1 ...), != splat(0))
- // -> nxvNi1 ...
- if (isAllActivePredicate(DAG, Pred))
- return LHS->getOperand(0);
- // setcc_merge_zero(
- // pred, extend(nxvNi1 ...), != splat(0))
- // -> nxvNi1 and(pred, ...)
- if (DCI.isAfterLegalizeDAG())
- // Do this after legalization to allow more folds on setcc_merge_zero
- // to be recognized.
- return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
- LHS->getOperand(0), Pred);
- }
- return SDValue();
- }
- // Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test
- // as well as whether the test should be inverted. This code is required to
- // catch these cases (as opposed to standard dag combines) because
- // AArch64ISD::TBZ is matched during legalization.
- static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
- SelectionDAG &DAG) {
- if (!Op->hasOneUse())
- return Op;
- // We don't handle undef/constant-fold cases below, as they should have
- // already been taken care of (e.g. and of 0, test of undefined shifted bits,
- // etc.)
- // (tbz (trunc x), b) -> (tbz x, b)
- // This case is just here to enable more of the below cases to be caught.
- if (Op->getOpcode() == ISD::TRUNCATE &&
- Bit < Op->getValueType(0).getSizeInBits()) {
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- }
- // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
- if (Op->getOpcode() == ISD::ANY_EXTEND &&
- Bit < Op->getOperand(0).getValueSizeInBits()) {
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- }
- if (Op->getNumOperands() != 2)
- return Op;
- auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
- if (!C)
- return Op;
- switch (Op->getOpcode()) {
- default:
- return Op;
- // (tbz (and x, m), b) -> (tbz x, b)
- case ISD::AND:
- if ((C->getZExtValue() >> Bit) & 1)
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- return Op;
- // (tbz (shl x, c), b) -> (tbz x, b-c)
- case ISD::SHL:
- if (C->getZExtValue() <= Bit &&
- (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
- Bit = Bit - C->getZExtValue();
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- }
- return Op;
- // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x
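- // E.g. (tbz (sra x:i32, 5), 30) becomes (tbz x, 31), a test of the sign bit.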
- case ISD::SRA:
- Bit = Bit + C->getZExtValue();
- if (Bit >= Op->getValueType(0).getSizeInBits())
- Bit = Op->getValueType(0).getSizeInBits() - 1;
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- // (tbz (srl x, c), b) -> (tbz x, b+c)
- case ISD::SRL:
- if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
- Bit = Bit + C->getZExtValue();
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- }
- return Op;
- // (tbz (xor x, -1), b) -> (tbnz x, b)
- case ISD::XOR:
- if ((C->getZExtValue() >> Bit) & 1)
- Invert = !Invert;
- return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
- }
- }
- // Optimize test single bit zero/non-zero and branch.
- static SDValue performTBZCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
- bool Invert = false;
- SDValue TestSrc = N->getOperand(1);
- SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG);
- if (TestSrc == NewTestSrc)
- return SDValue();
- unsigned NewOpc = N->getOpcode();
- if (Invert) {
- if (NewOpc == AArch64ISD::TBZ)
- NewOpc = AArch64ISD::TBNZ;
- else {
- assert(NewOpc == AArch64ISD::TBNZ);
- NewOpc = AArch64ISD::TBZ;
- }
- }
- SDLoc DL(N);
- return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc,
- DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3));
- }
- // Swap vselect operands where it may allow a predicated operation to achieve
- // the `sel`.
- //
- // (vselect (setcc ( condcode) (_) (_)) (a) (op (a) (b)))
- // => (vselect (setcc (!condcode) (_) (_)) (op (a) (b)) (a))
- static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) {
- auto SelectA = N->getOperand(1);
- auto SelectB = N->getOperand(2);
- auto NTy = N->getValueType(0);
- if (!NTy.isScalableVector())
- return SDValue();
- SDValue SetCC = N->getOperand(0);
- if (SetCC.getOpcode() != ISD::SETCC || !SetCC.hasOneUse())
- return SDValue();
- switch (SelectB.getOpcode()) {
- default:
- return SDValue();
- case ISD::FMUL:
- case ISD::FSUB:
- case ISD::FADD:
- break;
- }
- if (SelectA != SelectB.getOperand(0))
- return SDValue();
- ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
- ISD::CondCode InverseCC =
- ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType());
- auto InverseSetCC =
- DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0),
- SetCC.getOperand(1), InverseCC);
- return DAG.getNode(ISD::VSELECT, SDLoc(N), NTy,
- {InverseSetCC, SelectB, SelectA});
- }
- // vselect (v1i1 setcc) ->
- // vselect (v1iXX setcc) (XX is the size of the compared operand type)
- // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
- // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
- // such VSELECT.
- static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
- if (auto SwapResult = trySwapVSelectOperands(N, DAG))
- return SwapResult;
- SDValue N0 = N->getOperand(0);
- EVT CCVT = N0.getValueType();
- if (isAllActivePredicate(DAG, N0))
- return N->getOperand(1);
- if (isAllInactivePredicate(N0))
- return N->getOperand(2);
- // Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
- // into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
- // supported types.
- SDValue SetCC = N->getOperand(0);
- if (SetCC.getOpcode() == ISD::SETCC &&
- SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) {
- SDValue CmpLHS = SetCC.getOperand(0);
- EVT VT = CmpLHS.getValueType();
- SDNode *CmpRHS = SetCC.getOperand(1).getNode();
- SDNode *SplatLHS = N->getOperand(1).getNode();
- SDNode *SplatRHS = N->getOperand(2).getNode();
- APInt SplatLHSVal;
- if (CmpLHS.getValueType() == N->getOperand(1).getValueType() &&
- VT.isSimple() &&
- is_contained(ArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
- MVT::v2i32, MVT::v4i32, MVT::v2i64}),
- VT.getSimpleVT().SimpleTy) &&
- ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) &&
- SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
- ISD::isConstantSplatVectorAllOnes(SplatRHS)) {
- unsigned NumElts = VT.getVectorNumElements();
- SmallVector<SDValue, 8> Ops(
- NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N),
- VT.getScalarType()));
- SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops);
- auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val);
- auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1));
- return Or;
- }
- }
- if (N0.getOpcode() != ISD::SETCC ||
- CCVT.getVectorElementCount() != ElementCount::getFixed(1) ||
- CCVT.getVectorElementType() != MVT::i1)
- return SDValue();
- EVT ResVT = N->getValueType(0);
- EVT CmpVT = N0.getOperand(0).getValueType();
- // Only combine when the result type is of the same size as the compared
- // operands.
- if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
- return SDValue();
- SDValue IfTrue = N->getOperand(1);
- SDValue IfFalse = N->getOperand(2);
- SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
- N0.getOperand(0), N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
- return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
- IfTrue, IfFalse);
- }
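- // Sign-pattern example for the combine above: with v4i32 operands,
- // (vselect (setgt x, splat -1), splat 1, splat -1) becomes
- // (or (sra x, splat 31), splat 1), since the arithmetic shift yields 0 for
- // non-negative lanes and -1 for negative lanes.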
- /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with
- /// the compare-mask instructions rather than going via NZCV, even if LHS and
- /// RHS are really scalar. This replaces any scalar setcc in the above pattern
- /// with a vector one followed by a DUP shuffle on the result.
- static SDValue performSelectCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- SelectionDAG &DAG = DCI.DAG;
- SDValue N0 = N->getOperand(0);
- EVT ResVT = N->getValueType(0);
- if (N0.getOpcode() != ISD::SETCC)
- return SDValue();
- if (ResVT.isScalableVector())
- return SDValue();
- // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
- // scalar SetCCResultType. We also don't expect vectors, because we assume
- // that selects fed by vector SETCCs are canonicalized to VSELECT.
- assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
- "Scalar-SETCC feeding SELECT has unexpected result type!");
- // If NumMaskElts == 0, the comparison is larger than the select result. The
- // largest real NEON comparison is 64 bits per lane, which means the result is
- // at most 32 bits and an illegal vector. Just bail out for now.
- EVT SrcVT = N0.getOperand(0).getValueType();
- // Don't try to do this optimization when the setcc itself has i1 operands.
- // There are no legal vectors of i1, so this would be pointless.
- if (SrcVT == MVT::i1)
- return SDValue();
- int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
- if (!ResVT.isVector() || NumMaskElts == 0)
- return SDValue();
- SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts);
- EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
- // Also bail out if the vector CCVT isn't the same size as ResVT.
- // This can happen if the SETCC operand size doesn't divide the ResVT size
- // (e.g., f64 vs v3f32).
- if (CCVT.getSizeInBits() != ResVT.getSizeInBits())
- return SDValue();
- // Make sure we didn't create illegal types, if we're not supposed to.
- assert(DCI.isBeforeLegalize() ||
- DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
- // First perform a vector comparison, where lane 0 is the one we're interested
- // in.
- SDLoc DL(N0);
- SDValue LHS =
- DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));
- SDValue RHS =
- DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));
- SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2));
- // Now duplicate the comparison mask we want across all other lanes.
- SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
- SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask);
- Mask = DAG.getNode(ISD::BITCAST, DL,
- ResVT.changeVectorElementTypeToInteger(), Mask);
- return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2));
- }
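- // Example for performSelectCombine: for (select (setcc f64 a, b), v2f64 x, y)
- // the scalar compare is rebuilt as a v2f64 SETCC producing a v2i64 mask, lane
- // 0 of that mask is duplicated across the vector, and the select is then
- // emitted as a vector select on the duplicated mask.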
- static SDValue performDUPCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT VT = N->getValueType(0);
- // If "v2i32 DUP(x)" and "v4i32 DUP(x)" both exist, use an extract from the
- // 128-bit vector version.
- if (VT.is64BitVector() && DCI.isAfterLegalizeDAG()) {
- EVT LVT = VT.getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
- if (SDNode *LN = DCI.DAG.getNodeIfExists(
- N->getOpcode(), DCI.DAG.getVTList(LVT), {N->getOperand(0)})) {
- SDLoc DL(N);
- return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0),
- DCI.DAG.getConstant(0, DL, MVT::i64));
- }
- }
- return performPostLD1Combine(N, DCI, false);
- }
- /// Get rid of unnecessary NVCASTs (that don't change the type).
- static SDValue performNVCASTCombine(SDNode *N) {
- if (N->getValueType(0) == N->getOperand(0).getValueType())
- return N->getOperand(0);
- return SDValue();
- }
- // If all users of the globaladdr are of the form (globaladdr + constant), find
- // the smallest constant, fold it into the globaladdr's offset and rewrite the
- // globaladdr as (globaladdr + constant) - constant.
- static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget,
- const TargetMachine &TM) {
- auto *GN = cast<GlobalAddressSDNode>(N);
- if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) !=
- AArch64II::MO_NO_FLAG)
- return SDValue();
- uint64_t MinOffset = -1ull;
- for (SDNode *N : GN->uses()) {
- if (N->getOpcode() != ISD::ADD)
- return SDValue();
- auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0));
- if (!C)
- C = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (!C)
- return SDValue();
- MinOffset = std::min(MinOffset, C->getZExtValue());
- }
- uint64_t Offset = MinOffset + GN->getOffset();
- // Require that the new offset is larger than the existing one. Otherwise, we
- // can end up oscillating between two possible DAGs, for example,
- // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1).
- if (Offset <= uint64_t(GN->getOffset()))
- return SDValue();
- // Check whether folding this offset is legal. It must not go out of bounds of
- // the referenced object to avoid violating the code model, and must be
- // smaller than 2^20 because this is the largest offset expressible in all
- // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF
- // stores an immediate signed 21 bit offset.)
- //
- // This check also prevents us from folding negative offsets, which will end
- // up being treated in the same way as large positive ones. They could also
- // cause code model violations, and aren't really common enough to matter.
- if (Offset >= (1 << 20))
- return SDValue();
- const GlobalValue *GV = GN->getGlobal();
- Type *T = GV->getValueType();
- if (!T->isSized() ||
- Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T))
- return SDValue();
- SDLoc DL(GN);
- SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset);
- return DAG.getNode(ISD::SUB, DL, MVT::i64, Result,
- DAG.getConstant(MinOffset, DL, MVT::i64));
- }
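- // Example for the global-address combine above: if g is only used as
- // (add g, 8) and (add g, 16), MinOffset is 8 and g is rewritten as
- // (sub (globaladdr g + 8), 8); the surrounding adds then fold so one user
- // addresses g + 8 directly and the other adds 8 on top of it.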
- static SDValue performCTLZCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- SDValue BR = N->getOperand(0);
- if (!Subtarget->hasCSSC() || BR.getOpcode() != ISD::BITREVERSE ||
- !BR.getValueType().isScalarInteger())
- return SDValue();
- SDLoc DL(N);
- return DAG.getNode(ISD::CTTZ, DL, BR.getValueType(), BR.getOperand(0));
- }
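- // The combine above relies on ctlz(bitreverse(x)) == cttz(x); with CSSC the
- // resulting CTTZ selects to a single CTZ instruction.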
- // Turns the vector of indices into a vector of byte offsets by scaling Offset
- // by (BitWidth / 8).
- static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
- SDLoc DL, unsigned BitWidth) {
- assert(Offset.getValueType().isScalableVector() &&
- "This method is only for scalable vectors of offsets");
- SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64);
- SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift);
- return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift);
- }
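- // For getScaledOffsetForBitWidth, e.g. BitWidth == 32 shifts the indices left
- // by 2, turning each index into a byte offset of index * 4.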
- /// Check if the value of \p OffsetInBytes can be used as an immediate for
- /// the gather load/prefetch and scatter store instructions with vector base and
- /// immediate offset addressing mode:
- ///
- /// [<Zn>.[S|D]{, #<imm>}]
- ///
- /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
- inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
- unsigned ScalarSizeInBytes) {
- // The immediate is not a multiple of the scalar size.
- if (OffsetInBytes % ScalarSizeInBytes)
- return false;
- // The immediate is out of range.
- if (OffsetInBytes / ScalarSizeInBytes > 31)
- return false;
- return true;
- }
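- // E.g. for 4-byte elements the check above accepts 0, 4, 8, ..., 124
- // (= 31 * 4); 2 is rejected as misaligned and 128 as out of range.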
- /// Check if the value of \p Offset represents a valid immediate for the SVE
- /// gather load/prefetch and scatter store instructions with vector base and
- /// immediate offset addressing mode:
- ///
- /// [<Zn>.[S|D]{, #<imm>}]
- ///
- /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
- static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
- unsigned ScalarSizeInBytes) {
- ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
- return OffsetConst && isValidImmForSVEVecImmAddrMode(
- OffsetConst->getZExtValue(), ScalarSizeInBytes);
- }
- static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
- unsigned Opcode,
- bool OnlyPackedOffsets = true) {
- const SDValue Src = N->getOperand(2);
- const EVT SrcVT = Src->getValueType(0);
- assert(SrcVT.isScalableVector() &&
- "Scatter stores are only possible for SVE vectors");
- SDLoc DL(N);
- MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT();
- // Make sure that source data will fit into an SVE register
- if (SrcVT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock)
- return SDValue();
- // For FPs, ACLE only supports _packed_ single and double precision types.
- if (SrcElVT.isFloatingPoint())
- if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64))
- return SDValue();
- // Depending on the addressing mode, this is either a pointer or a vector of
- // pointers (that fits into one register)
- SDValue Base = N->getOperand(4);
- // Depending on the addressing mode, this is either a single offset or a
- // vector of offsets (that fits into one register)
- SDValue Offset = N->getOperand(5);
- // For "scalar + vector of indices", just scale the indices. This only
- // applies to non-temporal scatters because there's no instruction that takes
- // indices.
- if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) {
- Offset =
- getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits());
- Opcode = AArch64ISD::SSTNT1_PRED;
- }
- // In the case of non-temporal scatter stores there's only one SVE instruction
- // per data-size: "scalar + vector", i.e.
- // * stnt1{b|h|w|d} { z0.s }, p0, [z0.s, x0]
- // Since we do have intrinsics that allow the arguments to be in a different
- // order, we may need to swap them to match the spec.
- if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector())
- std::swap(Base, Offset);
- // SST1_IMM requires that the offset is an immediate that is:
- // * a multiple of #SizeInBytes,
- // * in the range [0, 31 x #SizeInBytes],
- // where #SizeInBytes is the size in bytes of the stored items. For
- // immediates outside that range and non-immediate scalar offsets use SST1 or
- // SST1_UXTW instead.
- if (Opcode == AArch64ISD::SST1_IMM_PRED) {
- if (!isValidImmForSVEVecImmAddrMode(Offset,
- SrcVT.getScalarSizeInBits() / 8)) {
- if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
- Opcode = AArch64ISD::SST1_UXTW_PRED;
- else
- Opcode = AArch64ISD::SST1_PRED;
- std::swap(Base, Offset);
- }
- }
- auto &TLI = DAG.getTargetLoweringInfo();
- if (!TLI.isTypeLegal(Base.getValueType()))
- return SDValue();
- // Some scatter store variants allow unpacked offsets, but only as nxv2i32
- // vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
- // nxv2i64. Legalize accordingly.
- if (!OnlyPackedOffsets &&
- Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
- Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
- if (!TLI.isTypeLegal(Offset.getValueType()))
- return SDValue();
- // Source value type that is representable in hardware
- EVT HwSrcVt = getSVEContainerType(SrcVT);
- // Keep the original type of the input data to store - this is needed to be
- // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For
- // FP values we want the integer equivalent, so just use HwSrcVt.
- SDValue InputVT = DAG.getValueType(SrcVT);
- if (SrcVT.isFloatingPoint())
- InputVT = DAG.getValueType(HwSrcVt);
- SDVTList VTs = DAG.getVTList(MVT::Other);
- SDValue SrcNew;
- if (Src.getValueType().isFloatingPoint())
- SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src);
- else
- SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src);
- SDValue Ops[] = {N->getOperand(0), // Chain
- SrcNew,
- N->getOperand(3), // Pg
- Base,
- Offset,
- InputVT};
- return DAG.getNode(Opcode, DL, VTs, Ops);
- }
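- // The vector-plus-immediate form handled above corresponds to e.g.
- // st1w { z0.s }, p0, [z1.s, #4], where the immediate must be a multiple of
- // the element size in the range [0, 31 * element size].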
- static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
- unsigned Opcode,
- bool OnlyPackedOffsets = true) {
- const EVT RetVT = N->getValueType(0);
- assert(RetVT.isScalableVector() &&
- "Gather loads are only possible for SVE vectors");
- SDLoc DL(N);
- // Make sure that the loaded data will fit into an SVE register
- if (RetVT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock)
- return SDValue();
- // Depending on the addressing mode, this is either a pointer or a vector of
- // pointers (that fits into one register)
- SDValue Base = N->getOperand(3);
- // Depending on the addressing mode, this is either a single offset or a
- // vector of offsets (that fits into one register)
- SDValue Offset = N->getOperand(4);
- // For "scalar + vector of indices", just scale the indices. This only
- // applies to non-temporal gathers because there's no instruction that takes
- // indices.
- if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) {
- Offset = getScaledOffsetForBitWidth(DAG, Offset, DL,
- RetVT.getScalarSizeInBits());
- Opcode = AArch64ISD::GLDNT1_MERGE_ZERO;
- }
- // In the case of non-temporal gather loads there's only one SVE instruction
- // per data-size: "scalar + vector", i.e.
- // * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
- // Since we do have intrinsics that allow the arguments to be in a different
- // order, we may need to swap them to match the spec.
- if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO &&
- Offset.getValueType().isVector())
- std::swap(Base, Offset);
- // GLD{FF}1_IMM requires that the offset is an immediate that is:
- // * a multiple of #SizeInBytes,
- // * in the range [0, 31 x #SizeInBytes],
- // where #SizeInBytes is the size in bytes of the loaded items. For
- // immediates outside that range and non-immediate scalar offsets use
- // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead.
- if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO ||
- Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) {
- if (!isValidImmForSVEVecImmAddrMode(Offset,
- RetVT.getScalarSizeInBits() / 8)) {
- if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
- Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
- ? AArch64ISD::GLD1_UXTW_MERGE_ZERO
- : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO;
- else
- Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
- ? AArch64ISD::GLD1_MERGE_ZERO
- : AArch64ISD::GLDFF1_MERGE_ZERO;
- std::swap(Base, Offset);
- }
- }
- auto &TLI = DAG.getTargetLoweringInfo();
- if (!TLI.isTypeLegal(Base.getValueType()))
- return SDValue();
- // Some gather load variants allow unpacked offsets, but only as nxv2i32
- // vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
- // nxv2i64. Legalize accordingly.
- if (!OnlyPackedOffsets &&
- Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
- Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
- // Return value type that is representable in hardware
- EVT HwRetVt = getSVEContainerType(RetVT);
- // Keep the original output value type around - this is needed to be able to
- // select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP
- // values we want the integer equivalent, so just use HwRetVt.
- SDValue OutVT = DAG.getValueType(RetVT);
- if (RetVT.isFloatingPoint())
- OutVT = DAG.getValueType(HwRetVt);
- SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other);
- SDValue Ops[] = {N->getOperand(0), // Chain
- N->getOperand(2), // Pg
- Base, Offset, OutVT};
- SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops);
- SDValue LoadChain = SDValue(Load.getNode(), 1);
- if (RetVT.isInteger() && (RetVT != HwRetVt))
- Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0));
- // If the original return value was FP, bitcast accordingly. Doing it here
- // means that we can avoid adding TableGen patterns for FPs.
- if (RetVT.isFloatingPoint())
- Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0));
- return DAG.getMergeValues({Load, LoadChain}, DL);
- }
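- // Example for the gather combine above: aarch64_sve_ld1_gather_scalar_offset
- // with an in-range immediate stays GLD1_IMM_MERGE_ZERO (e.g.
- // ld1w { z0.s }, p0/z, [z1.s, #4]); with an out-of-range or non-immediate
- // offset, the base and offset are swapped and the "scalar base + vector
- // offset" form is used instead.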
- static SDValue
- performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Src = N->getOperand(0);
- unsigned Opc = Src->getOpcode();
- // Sign extend of an unsigned unpack -> signed unpack
- if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
- unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? AArch64ISD::SUNPKHI
- : AArch64ISD::SUNPKLO;
- // Push the sign extend to the operand of the unpack
- // This is necessary where, for example, the operand of the unpack
- // is another unpack:
- // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8)
- // ->
- // 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8)
- // ->
- // 4i32 sunpklo(8i16 sunpklo(16i8 opnd))
- SDValue ExtOp = Src->getOperand(0);
- auto VT = cast<VTSDNode>(N->getOperand(1))->getVT();
- EVT EltTy = VT.getVectorElementType();
- (void)EltTy;
- assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
- "Sign extending from an invalid type");
- EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext());
- SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(),
- ExtOp, DAG.getValueType(ExtVT));
- return DAG.getNode(SOpc, DL, N->getValueType(0), Ext);
- }
- if (DCI.isBeforeLegalizeOps())
- return SDValue();
- if (!EnableCombineMGatherIntrinsics)
- return SDValue();
- // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
- // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
- unsigned NewOpc;
- unsigned MemVTOpNum = 4;
- switch (Opc) {
- case AArch64ISD::LD1_MERGE_ZERO:
- NewOpc = AArch64ISD::LD1S_MERGE_ZERO;
- MemVTOpNum = 3;
- break;
- case AArch64ISD::LDNF1_MERGE_ZERO:
- NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO;
- MemVTOpNum = 3;
- break;
- case AArch64ISD::LDFF1_MERGE_ZERO:
- NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO;
- MemVTOpNum = 3;
- break;
- case AArch64ISD::GLD1_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_MERGE_ZERO;
- break;
- case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
- break;
- case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
- break;
- case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
- break;
- case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
- break;
- case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
- break;
- case AArch64ISD::GLD1_IMM_MERGE_ZERO:
- NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO;
- break;
- case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO;
- break;
- case AArch64ISD::GLDNT1_MERGE_ZERO:
- NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO;
- break;
- default:
- return SDValue();
- }
- EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
- EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT();
- if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse())
- return SDValue();
- EVT DstVT = N->getValueType(0);
- SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
- SmallVector<SDValue, 5> Ops;
- for (unsigned I = 0; I < Src->getNumOperands(); ++I)
- Ops.push_back(Src->getOperand(I));
- SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
- DCI.CombineTo(N, ExtLoad);
- DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
- // Return N so it doesn't get rechecked
- return SDValue(N, 0);
- }
- /// Legalize the gather prefetch (scalar + vector addressing mode) when the
- /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset
- /// != nxv2i32) do not need legalization.
- static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
- const unsigned OffsetPos = 4;
- SDValue Offset = N->getOperand(OffsetPos);
- // Not an unpacked vector, bail out.
- if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32)
- return SDValue();
- // Extend the unpacked offset vector to 64-bit lanes.
- SDLoc DL(N);
- Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset);
- SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
- // Replace the offset operand with the 64-bit one.
- Ops[OffsetPos] = Offset;
- return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
- }
- /// Combines a node carrying the intrinsic
- /// `aarch64_sve_prf<T>_gather_scalar_offset` into a node that uses
- /// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to
- /// `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for the
- /// SVE gather prefetch instruction with vector plus immediate addressing mode.
- static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
- unsigned ScalarSizeInBytes) {
- const unsigned ImmPos = 4, OffsetPos = 3;
- // No need to combine the node if the immediate is valid...
- if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes))
- return SDValue();
- // ...otherwise swap the offset base with the offset...
- SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
- std::swap(Ops[ImmPos], Ops[OffsetPos]);
- // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to
- // `aarch64_sve_prfb_gather_uxtw_index`.
- SDLoc DL(N);
- Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL,
- MVT::i64);
- return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
- }
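- // Example for the prefetch combine above: an aarch64_sve_prfb gather with
- // scalar offset 37 cannot use the vector-plus-immediate form (the limit is 31
- // for byte elements), so the base and offset are swapped and the node is
- // remapped to the uxtw-index variant.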
- // Return true if the vector operation can guarantee only the first lane of its
- // result contains data, with all bits in other lanes set to zero.
- static bool isLanes1toNKnownZero(SDValue Op) {
- switch (Op.getOpcode()) {
- default:
- return false;
- case AArch64ISD::ANDV_PRED:
- case AArch64ISD::EORV_PRED:
- case AArch64ISD::FADDA_PRED:
- case AArch64ISD::FADDV_PRED:
- case AArch64ISD::FMAXNMV_PRED:
- case AArch64ISD::FMAXV_PRED:
- case AArch64ISD::FMINNMV_PRED:
- case AArch64ISD::FMINV_PRED:
- case AArch64ISD::ORV_PRED:
- case AArch64ISD::SADDV_PRED:
- case AArch64ISD::SMAXV_PRED:
- case AArch64ISD::SMINV_PRED:
- case AArch64ISD::UADDV_PRED:
- case AArch64ISD::UMAXV_PRED:
- case AArch64ISD::UMINV_PRED:
- return true;
- }
- }
- static SDValue removeRedundantInsertVectorElt(SDNode *N) {
- assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
- SDValue InsertVec = N->getOperand(0);
- SDValue InsertElt = N->getOperand(1);
- SDValue InsertIdx = N->getOperand(2);
- // We only care about inserts into the first element...
- if (!isNullConstant(InsertIdx))
- return SDValue();
- // ...of a zero'd vector...
- if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode()))
- return SDValue();
- // ...where the inserted data was previously extracted...
- if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return SDValue();
- SDValue ExtractVec = InsertElt.getOperand(0);
- SDValue ExtractIdx = InsertElt.getOperand(1);
- // ...from the first element of a vector.
- if (!isNullConstant(ExtractIdx))
- return SDValue();
- // If we get here we are effectively trying to zero lanes 1-N of a vector.
- // Ensure there's no type conversion going on.
- if (N->getValueType(0) != ExtractVec.getValueType())
- return SDValue();
- if (!isLanes1toNKnownZero(ExtractVec))
- return SDValue();
- // The explicit zeroing is redundant.
- return ExtractVec;
- }
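- // Example for the fold above: inserting
- // (extract_elt (AArch64ISD::UADDV_PRED pg, z0), 0) into lane 0 of a zeroed
- // vector is redundant, because the reduction already leaves lanes 1-N of its
- // result as zero.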
- static SDValue
- performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
- if (SDValue Res = removeRedundantInsertVectorElt(N))
- return Res;
- return performPostLD1Combine(N, DCI, true);
- }
- static SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) {
- EVT Ty = N->getValueType(0);
- if (Ty.isInteger())
- return SDValue();
- EVT IntTy = Ty.changeVectorElementTypeToInteger();
- EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount());
- if (ExtIntTy.getVectorElementType().getScalarSizeInBits() <
- IntTy.getVectorElementType().getScalarSizeInBits())
- return SDValue();
- SDLoc DL(N);
- SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)),
- DL, ExtIntTy);
- SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)),
- DL, ExtIntTy);
- SDValue Idx = N->getOperand(2);
- SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx);
- SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy);
- return DAG.getBitcast(Ty, Trunc);
- }
- static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- SDValue N0 = N->getOperand(0);
- EVT VT = N->getValueType(0);
- // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
- if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
- return SDValue();
- auto hasValidElementTypeForFPExtLoad = [](EVT VT) {
- EVT EltVT = VT.getVectorElementType();
- return EltVT == MVT::f32 || EltVT == MVT::f64;
- };
- // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
- // We purposefully don't care about legality of the nodes here as we know
- // they can be split down into something legal.
- if (DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(N0.getNode()) &&
- N0.hasOneUse() && Subtarget->useSVEForFixedLengthVectors() &&
- VT.isFixedLengthVector() && hasValidElementTypeForFPExtLoad(VT) &&
- VT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits()) {
- LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
- LN0->getChain(), LN0->getBasePtr(),
- N0.getValueType(), LN0->getMemOperand());
- DCI.CombineTo(N, ExtLoad);
- DCI.CombineTo(
- N0.getNode(),
- DAG.getNode(ISD::FP_ROUND, SDLoc(N0), N0.getValueType(), ExtLoad,
- DAG.getIntPtrConstant(1, SDLoc(N0), /*isTarget=*/true)),
- ExtLoad.getValue(1));
- return SDValue(N, 0); // Return N so it doesn't get rechecked!
- }
- return SDValue();
- }
- static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
- // Don't expand for NEON, SVE2 or SME
- if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
- return SDValue();
- SDLoc DL(N);
- SDValue Mask = N->getOperand(0);
- SDValue In1 = N->getOperand(1);
- SDValue In2 = N->getOperand(2);
- SDValue InvMask = DAG.getNOT(DL, Mask, VT);
- SDValue Sel = DAG.getNode(ISD::AND, DL, VT, Mask, In1);
- SDValue SelInv = DAG.getNode(ISD::AND, DL, VT, InvMask, In2);
- return DAG.getNode(ISD::OR, DL, VT, Sel, SelInv);
- }
- static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
- SDValue Insert = N->getOperand(0);
- if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR)
- return SDValue();
- if (!Insert.getOperand(0).isUndef())
- return SDValue();
- uint64_t IdxInsert = Insert.getConstantOperandVal(2);
- uint64_t IdxDupLane = N->getConstantOperandVal(1);
- if (IdxInsert != 0 || IdxDupLane != 0)
- return SDValue();
- SDValue Bitcast = Insert.getOperand(1);
- if (Bitcast.getOpcode() != ISD::BITCAST)
- return SDValue();
- SDValue Subvec = Bitcast.getOperand(0);
- EVT SubvecVT = Subvec.getValueType();
- if (!SubvecVT.is128BitVector())
- return SDValue();
- EVT NewSubvecVT =
- getPackedSVEVectorVT(Subvec.getValueType().getVectorElementType());
- SDLoc DL(N);
- SDValue NewInsert =
- DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewSubvecVT,
- DAG.getUNDEF(NewSubvecVT), Subvec, Insert->getOperand(2));
- SDValue NewDuplane128 = DAG.getNode(AArch64ISD::DUPLANE128, DL, NewSubvecVT,
- NewInsert, N->getOperand(1));
- return DAG.getNode(ISD::BITCAST, DL, VT, NewDuplane128);
- }
- SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- SelectionDAG &DAG = DCI.DAG;
- switch (N->getOpcode()) {
- default:
- LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
- break;
- case ISD::ADD:
- case ISD::SUB:
- return performAddSubCombine(N, DCI, DAG);
- case ISD::BUILD_VECTOR:
- return performBuildVectorCombine(N, DCI, DAG);
- case ISD::TRUNCATE:
- return performTruncateCombine(N, DAG);
- case AArch64ISD::ANDS:
- return performFlagSettingCombine(N, DCI, ISD::AND);
- case AArch64ISD::ADC:
- if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
- return R;
- return foldADCToCINC(N, DAG);
- case AArch64ISD::SBC:
- return foldOverflowCheck(N, DAG, /* IsAdd */ false);
- case AArch64ISD::ADCS:
- if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
- return R;
- return performFlagSettingCombine(N, DCI, AArch64ISD::ADC);
- case AArch64ISD::SBCS:
- if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ false))
- return R;
- return performFlagSettingCombine(N, DCI, AArch64ISD::SBC);
- case ISD::XOR:
- return performXorCombine(N, DAG, DCI, Subtarget);
- case ISD::MUL:
- return performMulCombine(N, DAG, DCI, Subtarget);
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- return performIntToFpCombine(N, DAG, Subtarget);
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- return performFpToIntCombine(N, DAG, DCI, Subtarget);
- case ISD::FDIV:
- return performFDivCombine(N, DAG, DCI, Subtarget);
- case ISD::OR:
- return performORCombine(N, DCI, Subtarget, *this);
- case ISD::AND:
- return performANDCombine(N, DCI);
- case ISD::INTRINSIC_WO_CHAIN:
- return performIntrinsicCombine(N, DCI, Subtarget);
- case ISD::ANY_EXTEND:
- case ISD::ZERO_EXTEND:
- case ISD::SIGN_EXTEND:
- return performExtendCombine(N, DCI, DAG);
- case ISD::SIGN_EXTEND_INREG:
- return performSignExtendInRegCombine(N, DCI, DAG);
- case ISD::CONCAT_VECTORS:
- return performConcatVectorsCombine(N, DCI, DAG);
- case ISD::EXTRACT_SUBVECTOR:
- return performExtractSubvectorCombine(N, DCI, DAG);
- case ISD::INSERT_SUBVECTOR:
- return performInsertSubvectorCombine(N, DCI, DAG);
- case ISD::SELECT:
- return performSelectCombine(N, DCI);
- case ISD::VSELECT:
- return performVSelectCombine(N, DCI.DAG);
- case ISD::SETCC:
- return performSETCCCombine(N, DCI, DAG);
- case ISD::LOAD:
- return performLOADCombine(N, DCI, DAG, Subtarget);
- case ISD::STORE:
- return performSTORECombine(N, DCI, DAG, Subtarget);
- case ISD::MSTORE:
- return performMSTORECombine(N, DCI, DAG, Subtarget);
- case ISD::MGATHER:
- case ISD::MSCATTER:
- return performMaskedGatherScatterCombine(N, DCI, DAG);
- case ISD::VECTOR_SPLICE:
- return performSVESpliceCombine(N, DAG);
- case ISD::FP_EXTEND:
- return performFPExtendCombine(N, DAG, DCI, Subtarget);
- case AArch64ISD::BRCOND:
- return performBRCONDCombine(N, DCI, DAG);
- case AArch64ISD::TBNZ:
- case AArch64ISD::TBZ:
- return performTBZCombine(N, DCI, DAG);
- case AArch64ISD::CSEL:
- return performCSELCombine(N, DCI, DAG);
- case AArch64ISD::DUP:
- return performDUPCombine(N, DCI);
- case AArch64ISD::DUPLANE128:
- return performDupLane128Combine(N, DAG);
- case AArch64ISD::NVCAST:
- return performNVCASTCombine(N);
- case AArch64ISD::SPLICE:
- return performSpliceCombine(N, DAG);
- case AArch64ISD::UUNPKLO:
- case AArch64ISD::UUNPKHI:
- return performUnpackCombine(N, DAG, Subtarget);
- case AArch64ISD::UZP1:
- return performUzpCombine(N, DAG);
- case AArch64ISD::SETCC_MERGE_ZERO:
- return performSetccMergeZeroCombine(N, DCI);
- case AArch64ISD::REINTERPRET_CAST:
- return performReinterpretCastCombine(N);
- case AArch64ISD::GLD1_MERGE_ZERO:
- case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
- case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
- case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1_IMM_MERGE_ZERO:
- case AArch64ISD::GLD1S_MERGE_ZERO:
- case AArch64ISD::GLD1S_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1S_UXTW_MERGE_ZERO:
- case AArch64ISD::GLD1S_SXTW_MERGE_ZERO:
- case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO:
- case AArch64ISD::GLD1S_IMM_MERGE_ZERO:
- return performGLD1Combine(N, DAG);
- case AArch64ISD::VASHR:
- case AArch64ISD::VLSHR:
- return performVectorShiftCombine(N, *this, DCI);
- case AArch64ISD::SUNPKLO:
- return performSunpkloCombine(N, DAG);
- case AArch64ISD::BSP:
- return performBSPExpandForSVE(N, DAG, Subtarget);
- case ISD::INSERT_VECTOR_ELT:
- return performInsertVectorEltCombine(N, DCI);
- case ISD::EXTRACT_VECTOR_ELT:
- return performExtractVectorEltCombine(N, DCI, Subtarget);
- case ISD::VECREDUCE_ADD:
- return performVecReduceAddCombine(N, DCI.DAG, Subtarget);
- case AArch64ISD::UADDV:
- return performUADDVCombine(N, DAG);
- case AArch64ISD::SMULL:
- case AArch64ISD::UMULL:
- case AArch64ISD::PMULL:
- return tryCombineLongOpWithDup(Intrinsic::not_intrinsic, N, DCI, DAG);
- case ISD::INTRINSIC_VOID:
- case ISD::INTRINSIC_W_CHAIN:
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
- case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
- return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/);
- case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
- return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/);
- case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
- return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/);
- case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
- return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/);
- case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
- case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
- case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
- case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
- case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
- case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
- case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
- case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
- return legalizeSVEGatherPrefetchOffsVec(N, DAG);
- case Intrinsic::aarch64_neon_ld2:
- case Intrinsic::aarch64_neon_ld3:
- case Intrinsic::aarch64_neon_ld4:
- case Intrinsic::aarch64_neon_ld1x2:
- case Intrinsic::aarch64_neon_ld1x3:
- case Intrinsic::aarch64_neon_ld1x4:
- case Intrinsic::aarch64_neon_ld2lane:
- case Intrinsic::aarch64_neon_ld3lane:
- case Intrinsic::aarch64_neon_ld4lane:
- case Intrinsic::aarch64_neon_ld2r:
- case Intrinsic::aarch64_neon_ld3r:
- case Intrinsic::aarch64_neon_ld4r:
- case Intrinsic::aarch64_neon_st2:
- case Intrinsic::aarch64_neon_st3:
- case Intrinsic::aarch64_neon_st4:
- case Intrinsic::aarch64_neon_st1x2:
- case Intrinsic::aarch64_neon_st1x3:
- case Intrinsic::aarch64_neon_st1x4:
- case Intrinsic::aarch64_neon_st2lane:
- case Intrinsic::aarch64_neon_st3lane:
- case Intrinsic::aarch64_neon_st4lane:
- return performNEONPostLDSTCombine(N, DCI, DAG);
- case Intrinsic::aarch64_sve_ldnt1:
- return performLDNT1Combine(N, DAG);
- case Intrinsic::aarch64_sve_ld1rq:
- return performLD1ReplicateCombine<AArch64ISD::LD1RQ_MERGE_ZERO>(N, DAG);
- case Intrinsic::aarch64_sve_ld1ro:
- return performLD1ReplicateCombine<AArch64ISD::LD1RO_MERGE_ZERO>(N, DAG);
- case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldnt1_gather:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldnt1_gather_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDNT1_INDEX_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ld1:
- return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldnf1:
- return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldff1:
- return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_st1:
- return performST1Combine(N, DAG);
- case Intrinsic::aarch64_sve_stnt1:
- return performSTNT1Combine(N, DAG);
- case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
- case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
- case Intrinsic::aarch64_sve_stnt1_scatter:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
- case Intrinsic::aarch64_sve_stnt1_scatter_index:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED);
- case Intrinsic::aarch64_sve_ld1_gather:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ld1_gather_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLD1_SCALED_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ld1_gather_sxtw:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ld1_gather_uxtw:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldff1_gather:
- return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldff1_gather_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDFF1_SCALED_MERGE_ZERO);
- case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDFF1_SXTW_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDFF1_UXTW_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
- return performGatherLoadCombine(N, DAG,
- AArch64ISD::GLDFF1_IMM_MERGE_ZERO);
- case Intrinsic::aarch64_sve_st1_scatter:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED);
- case Intrinsic::aarch64_sve_st1_scatter_index:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED);
- case Intrinsic::aarch64_sve_st1_scatter_sxtw:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_st1_scatter_uxtw:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
- return performScatterStoreCombine(N, DAG,
- AArch64ISD::SST1_SXTW_SCALED_PRED,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
- return performScatterStoreCombine(N, DAG,
- AArch64ISD::SST1_UXTW_SCALED_PRED,
- /*OnlyPackedOffsets=*/false);
- case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
- return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
- case Intrinsic::aarch64_rndr:
- case Intrinsic::aarch64_rndrrs: {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- auto Register =
- (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
- : AArch64SysReg::RNDRRS);
- SDLoc DL(N);
- SDValue A = DAG.getNode(
- AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
- N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64));
- SDValue B = DAG.getNode(
- AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
- DAG.getConstant(0, DL, MVT::i32),
- DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
- return DAG.getMergeValues(
- {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
- }
- default:
- break;
- }
- break;
- case ISD::GlobalAddress:
- return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine());
- case ISD::CTLZ:
- return performCTLZCombine(N, DAG, Subtarget);
- }
- return SDValue();
- }
- // Check if the return value is used as only a return value, as otherwise
- // we can't perform a tail-call. In particular, we need to check for
- // target ISD nodes that are returns and any other "odd" constructs
- // that the generic analysis code won't necessarily catch.
- bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
- SDValue &Chain) const {
- if (N->getNumValues() != 1)
- return false;
- if (!N->hasNUsesOfValue(1, 0))
- return false;
- SDValue TCChain = Chain;
- SDNode *Copy = *N->use_begin();
- if (Copy->getOpcode() == ISD::CopyToReg) {
- // If the copy has a glue operand, we conservatively assume it isn't safe to
- // perform a tail call.
- if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
- MVT::Glue)
- return false;
- TCChain = Copy->getOperand(0);
- } else if (Copy->getOpcode() != ISD::FP_EXTEND)
- return false;
- bool HasRet = false;
- for (SDNode *Node : Copy->uses()) {
- if (Node->getOpcode() != AArch64ISD::RET_FLAG)
- return false;
- HasRet = true;
- }
- if (!HasRet)
- return false;
- Chain = TCChain;
- return true;
- }
- // Return whether an instruction can potentially be optimized to a tail
- // call. This will cause the optimizers to attempt to move, or duplicate,
- // return instructions to help enable tail call optimizations for this
- // instruction.
- bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
- return CI->isTailCall();
- }
- bool AArch64TargetLowering::getIndexedAddressParts(
- SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
- ISD::MemIndexedMode &AM, bool &IsInc, SelectionDAG &DAG) const {
- if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
- return false;
- // Non-null if there is exactly one user of the loaded value (ignoring chain).
- SDNode *ValOnlyUser = nullptr;
- for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
- ++UI) {
- if (UI.getUse().getResNo() == 1)
- continue; // Ignore chain.
- if (ValOnlyUser == nullptr)
- ValOnlyUser = *UI;
- else {
- ValOnlyUser = nullptr; // Multiple non-chain uses, bail out.
- break;
- }
- }
- auto IsUndefOrZero = [](SDValue V) {
- return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
- };
- // If the only user of the value is a scalable vector splat, it is
- // preferable to do a replicating load (ld1r*).
- if (ValOnlyUser && ValOnlyUser->getValueType(0).isScalableVector() &&
- (ValOnlyUser->getOpcode() == ISD::SPLAT_VECTOR ||
- (ValOnlyUser->getOpcode() == AArch64ISD::DUP_MERGE_PASSTHRU &&
- IsUndefOrZero(ValOnlyUser->getOperand(2)))))
- return false;
- Base = Op->getOperand(0);
- // All of the indexed addressing mode instructions take a signed
- // 9 bit immediate offset.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
- int64_t RHSC = RHS->getSExtValue();
- if (Op->getOpcode() == ISD::SUB)
- RHSC = -(uint64_t)RHSC;
- if (!isInt<9>(RHSC))
- return false;
- IsInc = (Op->getOpcode() == ISD::ADD);
- Offset = Op->getOperand(1);
- return true;
- }
- return false;
- }
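- // For the check above: a signed 9-bit immediate covers [-256, 255], matching
- // the writeback forms, e.g. ldr x0, [x1, #16]! (pre-index) and
- // ldr x0, [x1], #16 (post-index).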
- bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const {
- EVT VT;
- SDValue Ptr;
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- VT = LD->getMemoryVT();
- Ptr = LD->getBasePtr();
- } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- VT = ST->getMemoryVT();
- Ptr = ST->getBasePtr();
- } else
- return false;
- bool IsInc;
- if (!getIndexedAddressParts(N, Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
- return false;
- AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
- return true;
- }
- bool AArch64TargetLowering::getPostIndexedAddressParts(
- SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
- ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {
- EVT VT;
- SDValue Ptr;
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- VT = LD->getMemoryVT();
- Ptr = LD->getBasePtr();
- } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- VT = ST->getMemoryVT();
- Ptr = ST->getBasePtr();
- } else
- return false;
- bool IsInc;
- if (!getIndexedAddressParts(N, Op, Base, Offset, AM, IsInc, DAG))
- return false;
- // Post-indexing updates the base, so it's not a valid transform
- // if that's not the same as the load's pointer.
- if (Ptr != Base)
- return false;
- AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
- return true;
- }
- void AArch64TargetLowering::ReplaceBITCASTResults(
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
- SDLoc DL(N);
- SDValue Op = N->getOperand(0);
- EVT VT = N->getValueType(0);
- EVT SrcVT = Op.getValueType();
- if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) {
- assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
- "Expected fp->int bitcast!");
- // Bitcasting between unpacked vector types of different element counts is
- // not a NOP because the live elements are laid out differently.
- // 01234567
- // e.g. nxv2i32 = XX??XX??
- // nxv4f16 = X?X?X?X?
- if (VT.getVectorElementCount() != SrcVT.getVectorElementCount())
- return;
- SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG);
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult));
- return;
- }
- if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16))
- return;
- Op = SDValue(
- DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
- DAG.getUNDEF(MVT::i32), Op,
- DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
- 0);
- Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
- }
- static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
- if (!VT.is256BitVector() ||
- (VT.getScalarType().isFloatingPoint() &&
- !N->getFlags().hasAllowReassociation()) ||
- (VT.getScalarType() == MVT::f16 && !Subtarget->hasFullFP16()))
- return;
- SDValue X = N->getOperand(0);
- auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(1));
- if (!Shuf) {
- Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0));
- X = N->getOperand(1);
- if (!Shuf)
- return;
- }
- if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
- return;
- // Check the mask is 1,0,3,2,5,4,...
- ArrayRef<int> Mask = Shuf->getMask();
- for (int I = 0, E = Mask.size(); I < E; I++)
- if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1))
- return;
- SDLoc DL(N);
- auto LoHi = DAG.SplitVector(X, DL);
- assert(LoHi.first.getValueType() == LoHi.second.getValueType());
- SDValue Addp = DAG.getNode(AArch64ISD::ADDP, N, LoHi.first.getValueType(),
- LoHi.first, LoHi.second);
- // Shuffle the elements back into order.
- SmallVector<int> NMask;
- for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) {
- NMask.push_back(I);
- NMask.push_back(I);
- }
- Results.push_back(
- DAG.getVectorShuffle(VT, DL,
- DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Addp,
- DAG.getUNDEF(LoHi.first.getValueType())),
- DAG.getUNDEF(VT), NMask));
- }
- static void ReplaceReductionResults(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG, unsigned InterOp,
- unsigned AcrossOp) {
- EVT LoVT, HiVT;
- SDValue Lo, Hi;
- SDLoc dl(N);
- std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
- std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
- SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi);
- SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal);
- Results.push_back(SplitVal);
- }
- static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) {
- SDLoc DL(N);
- SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N);
- SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64,
- DAG.getNode(ISD::SRL, DL, MVT::i128, N,
- DAG.getConstant(64, DL, MVT::i64)));
- return std::make_pair(Lo, Hi);
- }
- void AArch64TargetLowering::ReplaceExtractSubVectorResults(
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
- SDValue In = N->getOperand(0);
- EVT InVT = In.getValueType();
- // Common code will handle these just fine.
- if (!InVT.isScalableVector() || !InVT.isInteger())
- return;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- // The following checks bail if this is not a halving operation.
- ElementCount ResEC = VT.getVectorElementCount();
- if (InVT.getVectorElementCount() != (ResEC * 2))
- return;
- auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (!CIndex)
- return;
- unsigned Index = CIndex->getZExtValue();
- if ((Index != 0) && (Index != ResEC.getKnownMinValue()))
- return;
- unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI;
- EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext());
- SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0));
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half));
- }
- // Create an even/odd pair of X registers holding integer value V.
- static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
- SDLoc dl(V.getNode());
- SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64);
- SDValue VHi = DAG.getAnyExtOrTrunc(
- DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)),
- dl, MVT::i64);
- if (DAG.getDataLayout().isBigEndian())
- std::swap(VLo, VHi);
- SDValue RegClass =
- DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32);
- SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32);
- SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32);
- const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
- return SDValue(
- DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
- }
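- // CASP takes its compare and new values in even/odd register pairs such as
- // x0/x1; the XSeqPairs REG_SEQUENCE built above provides that pairing, with
- // the halves swapped on big-endian targets.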
- static void ReplaceCMP_SWAP_128Results(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- assert(N->getValueType(0) == MVT::i128 &&
- "AtomicCmpSwap on types less than 128 should be legal");
- MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
- if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) {
- // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type,
- // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG.
- SDValue Ops[] = {
- createGPRPairNode(DAG, N->getOperand(2)), // Compare value
- createGPRPairNode(DAG, N->getOperand(3)), // Store value
- N->getOperand(1), // Ptr
- N->getOperand(0), // Chain in
- };
- unsigned Opcode;
- switch (MemOp->getMergedOrdering()) {
- case AtomicOrdering::Monotonic:
- Opcode = AArch64::CASPX;
- break;
- case AtomicOrdering::Acquire:
- Opcode = AArch64::CASPAX;
- break;
- case AtomicOrdering::Release:
- Opcode = AArch64::CASPLX;
- break;
- case AtomicOrdering::AcquireRelease:
- case AtomicOrdering::SequentiallyConsistent:
- Opcode = AArch64::CASPALX;
- break;
- default:
- llvm_unreachable("Unexpected ordering!");
- }
- MachineSDNode *CmpSwap = DAG.getMachineNode(
- Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops);
- DAG.setNodeMemRefs(CmpSwap, {MemOp});
- unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64;
- if (DAG.getDataLayout().isBigEndian())
- std::swap(SubReg1, SubReg2);
- SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64,
- SDValue(CmpSwap, 0));
- SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64,
- SDValue(CmpSwap, 0));
- Results.push_back(
- DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi));
- Results.push_back(SDValue(CmpSwap, 1)); // Chain out
- return;
- }
- unsigned Opcode;
- switch (MemOp->getMergedOrdering()) {
- case AtomicOrdering::Monotonic:
- Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
- break;
- case AtomicOrdering::Acquire:
- Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
- break;
- case AtomicOrdering::Release:
- Opcode = AArch64::CMP_SWAP_128_RELEASE;
- break;
- case AtomicOrdering::AcquireRelease:
- case AtomicOrdering::SequentiallyConsistent:
- Opcode = AArch64::CMP_SWAP_128;
- break;
- default:
- llvm_unreachable("Unexpected ordering!");
- }
- auto Desired = splitInt128(N->getOperand(2), DAG);
- auto New = splitInt128(N->getOperand(3), DAG);
- SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second,
- New.first, New.second, N->getOperand(0)};
- SDNode *CmpSwap = DAG.getMachineNode(
- Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other),
- Ops);
- DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
- Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
- SDValue(CmpSwap, 0), SDValue(CmpSwap, 1)));
- Results.push_back(SDValue(CmpSwap, 3));
- }
- void AArch64TargetLowering::ReplaceNodeResults(
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
- switch (N->getOpcode()) {
- default:
- llvm_unreachable("Don't know how to custom expand this");
- case ISD::BITCAST:
- ReplaceBITCASTResults(N, Results, DAG);
- return;
- case ISD::VECREDUCE_ADD:
- case ISD::VECREDUCE_SMAX:
- case ISD::VECREDUCE_SMIN:
- case ISD::VECREDUCE_UMAX:
- case ISD::VECREDUCE_UMIN:
- Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
- return;
- case ISD::ADD:
- case ISD::FADD:
- ReplaceAddWithADDP(N, Results, DAG, Subtarget);
- return;
- case ISD::CTPOP:
- case ISD::PARITY:
- if (SDValue Result = LowerCTPOP_PARITY(SDValue(N, 0), DAG))
- Results.push_back(Result);
- return;
- case AArch64ISD::SADDV:
- ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV);
- return;
- case AArch64ISD::UADDV:
- ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV);
- return;
- case AArch64ISD::SMINV:
- ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV);
- return;
- case AArch64ISD::UMINV:
- ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV);
- return;
- case AArch64ISD::SMAXV:
- ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV);
- return;
- case AArch64ISD::UMAXV:
- ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV);
- return;
- case ISD::FP_TO_UINT:
- case ISD::FP_TO_SINT:
- case ISD::STRICT_FP_TO_SINT:
- case ISD::STRICT_FP_TO_UINT:
- assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
- // Let normal code take care of it by not adding anything to Results.
- return;
- case ISD::ATOMIC_CMP_SWAP:
- ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget);
- return;
- case ISD::ATOMIC_LOAD:
- case ISD::LOAD: {
- MemSDNode *LoadNode = cast<MemSDNode>(N);
- EVT MemVT = LoadNode->getMemoryVT();
- // Handle lowering 256-bit non-temporal loads into LDNP for little-endian
- // targets.
- if (LoadNode->isNonTemporal() && Subtarget->isLittleEndian() &&
- MemVT.getSizeInBits() == 256u &&
- (MemVT.getScalarSizeInBits() == 8u ||
- MemVT.getScalarSizeInBits() == 16u ||
- MemVT.getScalarSizeInBits() == 32u ||
- MemVT.getScalarSizeInBits() == 64u)) {
- SDValue Result = DAG.getMemIntrinsicNode(
- AArch64ISD::LDNP, SDLoc(N),
- DAG.getVTList({MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
- MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
- MVT::Other}),
- {LoadNode->getChain(), LoadNode->getBasePtr()},
- LoadNode->getMemoryVT(), LoadNode->getMemOperand());
- SDValue Pair = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), MemVT,
- Result.getValue(0), Result.getValue(1));
- Results.append({Pair, Result.getValue(2) /* Chain */});
- return;
- }
- if ((!LoadNode->isVolatile() && !LoadNode->isAtomic()) ||
- LoadNode->getMemoryVT() != MVT::i128) {
- // Loads that are neither volatile nor atomic are optimized later by AArch64's
- // load/store optimizer.
- return;
- }
- if (SDValue(N, 0).getValueType() == MVT::i128) {
- SDValue Result = DAG.getMemIntrinsicNode(
- AArch64ISD::LDP, SDLoc(N),
- DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}),
- {LoadNode->getChain(), LoadNode->getBasePtr()},
- LoadNode->getMemoryVT(), LoadNode->getMemOperand());
- SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
- Result.getValue(0), Result.getValue(1));
- Results.append({Pair, Result.getValue(2) /* Chain */});
- }
- return;
- }
- case ISD::EXTRACT_SUBVECTOR:
- ReplaceExtractSubVectorResults(N, Results, DAG);
- return;
- case ISD::INSERT_SUBVECTOR:
- case ISD::CONCAT_VECTORS:
- // Custom lowering has been requested for INSERT_SUBVECTOR and
- // CONCAT_VECTORS -- but delegate to common code for result type
- // legalisation
- return;
- case ISD::INTRINSIC_WO_CHAIN: {
- EVT VT = N->getValueType(0);
- assert((VT == MVT::i8 || VT == MVT::i16) &&
- "custom lowering for unexpected type");
- ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0));
- Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
- switch (IntID) {
- default:
- return;
- case Intrinsic::aarch64_sve_clasta_n: {
- SDLoc DL(N);
- auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
- auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32,
- N->getOperand(1), Op2, N->getOperand(3));
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
- return;
- }
- case Intrinsic::aarch64_sve_clastb_n: {
- SDLoc DL(N);
- auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
- auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32,
- N->getOperand(1), Op2, N->getOperand(3));
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
- return;
- }
- case Intrinsic::aarch64_sve_lasta: {
- SDLoc DL(N);
- auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32,
- N->getOperand(1), N->getOperand(2));
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
- return;
- }
- case Intrinsic::aarch64_sve_lastb: {
- SDLoc DL(N);
- auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32,
- N->getOperand(1), N->getOperand(2));
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
- return;
- }
- }
- }
- case ISD::READ_REGISTER: {
- SDLoc DL(N);
- assert(N->getValueType(0) == MVT::i128 &&
- "READ_REGISTER custom lowering is only for 128-bit sysregs");
- SDValue Chain = N->getOperand(0);
- SDValue SysRegName = N->getOperand(1);
- SDValue Result = DAG.getNode(
- AArch64ISD::MRRS, DL, DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}),
- Chain, SysRegName);
- // Sysregs are not endian. Result.getValue(0) always contains the lower half
- // of the 128-bit System Register value.
- SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
- Result.getValue(0), Result.getValue(1));
- Results.push_back(Pair);
- Results.push_back(Result.getValue(2)); // Chain
- return;
- }
- }
- }
- bool AArch64TargetLowering::useLoadStackGuardNode() const {
- if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
- return TargetLowering::useLoadStackGuardNode();
- return true;
- }
- unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
- // Combine multiple FDIVs with the same divisor into multiple FMULs by the
- // reciprocal if there are three or more FDIVs.
- return 3;
- }
- TargetLoweringBase::LegalizeTypeAction
- AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {
- // During type legalization, we prefer to widen v1i8, v1i16, v1i32 and v1f32
- // to v8i8, v4i16, v2i32 and v2f32 instead of promoting.
- if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 ||
- VT == MVT::v1f32)
- return TypeWidenVector;
- return TargetLoweringBase::getPreferredVectorAction(VT);
- }
- // In v8.4a, ldp and stp instructions are guaranteed to be single-copy atomic
- // provided the address is 16-byte aligned.
- bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {
- if (!Subtarget->hasLSE2())
- return false;
- if (auto LI = dyn_cast<LoadInst>(I))
- return LI->getType()->getPrimitiveSizeInBits() == 128 &&
- LI->getAlign() >= Align(16);
- if (auto SI = dyn_cast<StoreInst>(I))
- return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128 &&
- SI->getAlign() >= Align(16);
- return false;
- }
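- // For example, with LSE2 an aligned atomic i128 load or store can then be
- // selected to a plain LDP/STP (e.g. "ldp x0, x1, [x2]") rather than being
- // expanded, relying on the single-copy atomicity guarantee noted above.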
- bool AArch64TargetLowering::shouldInsertFencesForAtomic(
- const Instruction *I) const {
- return isOpSuitableForLDPSTP(I);
- }
- bool AArch64TargetLowering::shouldInsertTrailingFenceForAtomicStore(
- const Instruction *I) const {
- // Store-Release instructions only provide seq_cst guarantees when paired with
- // Load-Acquire instructions. MSVC CRT does not use these instructions to
- // implement seq_cst loads and stores, so we need additional explicit fences
- // after memory writes.
- if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
- return false;
- switch (I->getOpcode()) {
- default:
- return false;
- case Instruction::AtomicCmpXchg:
- return cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() ==
- AtomicOrdering::SequentiallyConsistent;
- case Instruction::AtomicRMW:
- return cast<AtomicRMWInst>(I)->getOrdering() ==
- AtomicOrdering::SequentiallyConsistent;
- case Instruction::Store:
- return cast<StoreInst>(I)->getOrdering() ==
- AtomicOrdering::SequentiallyConsistent;
- }
- }
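- // For illustration, a seq_cst store on an MSVC target therefore ends up
- // roughly as a store-release followed by a trailing fence, e.g.:
- //   stlr x1, [x0]
- //   dmb  ish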
- // Loads and stores less than 128-bits are already atomic; ones above that
- // are doomed anyway, so defer to the default libcall and blame the OS when
- // things go wrong.
- TargetLoweringBase::AtomicExpansionKind
- AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
- unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
- if (Size != 128 || isOpSuitableForLDPSTP(SI))
- return AtomicExpansionKind::None;
- return AtomicExpansionKind::Expand;
- }
- // Loads and stores less than 128-bits are already atomic; ones above that
- // are doomed anyway, so defer to the default libcall and blame the OS when
- // things go wrong.
- TargetLowering::AtomicExpansionKind
- AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
- unsigned Size = LI->getType()->getPrimitiveSizeInBits();
- if (Size != 128 || isOpSuitableForLDPSTP(LI))
- return AtomicExpansionKind::None;
- // At -O0, fast-regalloc cannot cope with the live vregs necessary to
- // implement atomicrmw without spilling. If the target address is also on the
- // stack and close enough to the spill slot, this can lead to a situation
- // where the monitor always gets cleared and the atomic operation can never
- // succeed. So at -O0 lower this operation to a CAS loop.
- if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
- return AtomicExpansionKind::CmpXChg;
- // Using CAS for an atomic load has a better chance of succeeding under high
- // contention situations. So use it if available.
- return Subtarget->hasLSE() ? AtomicExpansionKind::CmpXChg
- : AtomicExpansionKind::LLSC;
- }
- // For the real atomic operations, we have ldxr/stxr up to 128 bits,
- TargetLowering::AtomicExpansionKind
- AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
- if (AI->isFloatingPointOperation())
- return AtomicExpansionKind::CmpXChg;
- unsigned Size = AI->getType()->getPrimitiveSizeInBits();
- if (Size > 128) return AtomicExpansionKind::None;
- // Nand is not supported in LSE.
- // Leave 128 bits to LLSC or CmpXChg.
- if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) {
- if (Subtarget->hasLSE())
- return AtomicExpansionKind::None;
- if (Subtarget->outlineAtomics()) {
- // [U]Min/[U]Max RMW atomics are used in __sync_fetch_ libcalls so far.
- // Don't outline them unless
- // (1) high level <atomic> support approved:
- // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf
- // (2) low level libgcc and compiler-rt support implemented by:
- // min/max outline atomics helpers
- if (AI->getOperation() != AtomicRMWInst::Min &&
- AI->getOperation() != AtomicRMWInst::Max &&
- AI->getOperation() != AtomicRMWInst::UMin &&
- AI->getOperation() != AtomicRMWInst::UMax) {
- return AtomicExpansionKind::None;
- }
- }
- }
- // At -O0, fast-regalloc cannot cope with the live vregs necessary to
- // implement atomicrmw without spilling. If the target address is also on the
- // stack and close enough to the spill slot, this can lead to a situation
- // where the monitor always gets cleared and the atomic operation can never
- // succeed. So at -O0 lower this operation to a CAS loop. Also worthwhile if
- // we have a single CAS instruction that can replace the loop.
- if (getTargetMachine().getOptLevel() == CodeGenOpt::None ||
- Subtarget->hasLSE())
- return AtomicExpansionKind::CmpXChg;
- return AtomicExpansionKind::LLSC;
- }
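- // Illustrative examples of the policy above (not exhaustive): with +lse an
- // "atomicrmw add i32" is left intact and selected to an LSE instruction such
- // as LDADD*, whereas without LSE it is expanded in IR to an LDXR/STXR loop;
- // Nand and 128-bit operations fall back to a CAS loop when LSE is available.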
- TargetLowering::AtomicExpansionKind
- AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
- AtomicCmpXchgInst *AI) const {
- // If subtarget has LSE, leave cmpxchg intact for codegen.
- if (Subtarget->hasLSE() || Subtarget->outlineAtomics())
- return AtomicExpansionKind::None;
- // At -O0, fast-regalloc cannot cope with the live vregs necessary to
- // implement cmpxchg without spilling. If the address being exchanged is also
- // on the stack and close enough to the spill slot, this can lead to a
- // situation where the monitor always gets cleared and the atomic operation
- // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
- if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
- return AtomicExpansionKind::None;
- // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand
- // it.
- unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
- if (Size > 64)
- return AtomicExpansionKind::None;
- return AtomicExpansionKind::LLSC;
- }
- Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
- Type *ValueTy, Value *Addr,
- AtomicOrdering Ord) const {
- Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- bool IsAcquire = isAcquireOrStronger(Ord);
- // Since i128 isn't legal and intrinsics don't get type-lowered, the ldxp/ldaxp
- // intrinsics must return {i64, i64} and we have to recombine the halves into a
- // single i128 here.
- if (ValueTy->getPrimitiveSizeInBits() == 128) {
- Intrinsic::ID Int =
- IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
- Function *Ldxr = Intrinsic::getDeclaration(M, Int);
- Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
- Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
- Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
- Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
- Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
- Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
- return Builder.CreateOr(
- Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64");
- }
- Type *Tys[] = { Addr->getType() };
- Intrinsic::ID Int =
- IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
- Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys);
- const DataLayout &DL = M->getDataLayout();
- IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
- CallInst *CI = Builder.CreateCall(Ldxr, Addr);
- CI->addParamAttr(
- 0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
- Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
- return Builder.CreateBitCast(Trunc, ValueTy);
- }
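- // For illustration, the 128-bit acquire case above emits IR along the lines
- // of:
- //   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(i8* %addr)
- //   %lo   = extractvalue { i64, i64 } %lohi, 0
- //   %hi   = extractvalue { i64, i64 } %lohi, 1
- //   %val  = or i128 (zext %lo), (shl (zext %hi), 64)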
- void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
- IRBuilderBase &Builder) const {
- Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
- }
- Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
- Value *Val, Value *Addr,
- AtomicOrdering Ord) const {
- Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- bool IsRelease = isReleaseOrStronger(Ord);
- // Since the intrinsics must have legal type, the i128 intrinsics take two
- // parameters: "i64, i64". We must marshal Val into the appropriate form
- // before the call.
- if (Val->getType()->getPrimitiveSizeInBits() == 128) {
- Intrinsic::ID Int =
- IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
- Function *Stxr = Intrinsic::getDeclaration(M, Int);
- Type *Int64Ty = Type::getInt64Ty(M->getContext());
- Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
- Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
- Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
- return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
- }
- Intrinsic::ID Int =
- IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
- Type *Tys[] = { Addr->getType() };
- Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
- const DataLayout &DL = M->getDataLayout();
- IntegerType *IntValTy = Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
- Val = Builder.CreateBitCast(Val, IntValTy);
- CallInst *CI = Builder.CreateCall(
- Stxr, {Builder.CreateZExtOrBitCast(
- Val, Stxr->getFunctionType()->getParamType(0)),
- Addr});
- CI->addParamAttr(1, Attribute::get(Builder.getContext(),
- Attribute::ElementType, Val->getType()));
- return CI;
- }
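- // For illustration, the i128 path above produces roughly:
- //   %lo = trunc i128 %val to i64
- //   %hi = trunc i128 (lshr i128 %val, 64) to i64
- //   %ok = call i32 @llvm.aarch64.stlxp(i64 %lo, i64 %hi, i8* %addr)
- // where a zero return value means the store-exclusive succeeded.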
- bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
- Type *Ty, CallingConv::ID CallConv, bool isVarArg,
- const DataLayout &DL) const {
- if (!Ty->isArrayTy()) {
- const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
- return TySize.isScalable() && TySize.getKnownMinValue() > 128;
- }
- // All non-aggregate members of the type must have the same type.
- SmallVector<EVT> ValueVTs;
- ComputeValueVTs(*this, DL, Ty, ValueVTs);
- return all_equal(ValueVTs);
- }
- bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
- EVT) const {
- return false;
- }
- static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
- Module *M = IRB.GetInsertBlock()->getParent()->getParent();
- Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
- return IRB.CreatePointerCast(
- IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
- Offset),
- IRB.getInt8PtrTy()->getPointerTo(0));
- }
- Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
- // Android provides a fixed TLS slot for the stack cookie. See the definition
- // of TLS_SLOT_STACK_GUARD in
- // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
- if (Subtarget->isTargetAndroid())
- return UseTlsOffset(IRB, 0x28);
- // Fuchsia is similar.
- // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
- if (Subtarget->isTargetFuchsia())
- return UseTlsOffset(IRB, -0x10);
- return TargetLowering::getIRStackGuard(IRB);
- }
- void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
- // MSVC CRT provides functionalities for stack protection.
- if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {
- // MSVC CRT has a global variable holding security cookie.
- M.getOrInsertGlobal("__security_cookie",
- Type::getInt8PtrTy(M.getContext()));
- // MSVC CRT has a function to validate security cookie.
- FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
- Subtarget->getSecurityCheckCookieName(),
- Type::getVoidTy(M.getContext()), Type::getInt8PtrTy(M.getContext()));
- if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
- F->setCallingConv(CallingConv::Win64);
- F->addParamAttr(0, Attribute::AttrKind::InReg);
- }
- return;
- }
- TargetLowering::insertSSPDeclarations(M);
- }
- Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
- // MSVC CRT has a global variable holding security cookie.
- if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
- return M.getGlobalVariable("__security_cookie");
- return TargetLowering::getSDagStackGuard(M);
- }
- Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
- // MSVC CRT has a function to validate security cookie.
- if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
- return M.getFunction(Subtarget->getSecurityCheckCookieName());
- return TargetLowering::getSSPStackGuardCheck(M);
- }
- Value *
- AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
- // Android provides a fixed TLS slot for the SafeStack pointer. See the
- // definition of TLS_SLOT_SAFESTACK in
- // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
- if (Subtarget->isTargetAndroid())
- return UseTlsOffset(IRB, 0x48);
- // Fuchsia is similar.
- // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
- if (Subtarget->isTargetFuchsia())
- return UseTlsOffset(IRB, -0x8);
- return TargetLowering::getSafeStackPointerLocation(IRB);
- }
- bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
- const Instruction &AndI) const {
- // Only sink 'and' mask to cmp use block if it is masking a single bit, since
- // the and/cmp/br is then likely to fold into a single tbz instruction. It
- // may be beneficial to sink in other cases, but we would have to check that
- // the cmp would not get folded into the br to form a cbz for these to be
- // beneficial.
- ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
- if (!Mask)
- return false;
- return Mask->getValue().isPowerOf2();
- }
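- // Example of the intended fold: "and x, #0x8" + compare-equal-to-zero + branch
- // becomes a single "tbz x0, #3, <target>" once the single-bit mask is sunk
- // next to its compare-and-branch user.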
- bool AArch64TargetLowering::
- shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
- SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
- unsigned OldShiftOpcode, unsigned NewShiftOpcode,
- SelectionDAG &DAG) const {
- // Does baseline recommend not to perform the fold by default?
- if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
- X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
- return false;
- // Else, if this is a vector shift, prefer 'shl'.
- return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL;
- }
- TargetLowering::ShiftLegalizationStrategy
- AArch64TargetLowering::preferredShiftLegalizationStrategy(
- SelectionDAG &DAG, SDNode *N, unsigned int ExpansionFactor) const {
- if (DAG.getMachineFunction().getFunction().hasMinSize() &&
- !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin())
- return ShiftLegalizationStrategy::LowerToLibcall;
- return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
- ExpansionFactor);
- }
- void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
- // Update IsSplitCSR in AArch64FunctionInfo.
- AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
- AFI->setIsSplitCSR(true);
- }
- void AArch64TargetLowering::insertCopiesSplitCSR(
- MachineBasicBlock *Entry,
- const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
- const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
- const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
- if (!IStart)
- return;
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
- MachineBasicBlock::iterator MBBI = Entry->begin();
- for (const MCPhysReg *I = IStart; *I; ++I) {
- const TargetRegisterClass *RC = nullptr;
- if (AArch64::GPR64RegClass.contains(*I))
- RC = &AArch64::GPR64RegClass;
- else if (AArch64::FPR64RegClass.contains(*I))
- RC = &AArch64::FPR64RegClass;
- else
- llvm_unreachable("Unexpected register class in CSRsViaCopy!");
- Register NewVR = MRI->createVirtualRegister(RC);
- // Create copy from CSR to a virtual register.
- // FIXME: this currently does not emit CFI pseudo-instructions, it works
- // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
- // nounwind. If we want to generalize this later, we may need to emit
- // CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction().hasFnAttribute(
- Attribute::NoUnwind) &&
- "Function should be nounwind in insertCopiesSplitCSR!");
- Entry->addLiveIn(*I);
- BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
- .addReg(*I);
- // Insert the copy-back instructions right before the terminator.
- for (auto *Exit : Exits)
- BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
- TII->get(TargetOpcode::COPY), *I)
- .addReg(NewVR);
- }
- }
- bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
- // Integer division on AArch64 is expensive. However, when aggressively
- // optimizing for code size, we prefer to use a div instruction, as it is
- // usually smaller than the alternative sequence.
- // The exception to this is vector division. Since AArch64 doesn't have vector
- // integer division, leaving the division as-is is a loss even in terms of
- // size, because it will have to be scalarized, while the alternative code
- // sequence can be performed in vector form.
- bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
- return OptSize && !VT.isVector();
- }
- bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
- // We want inc-of-add for scalars and sub-of-not for vectors.
- return VT.isScalarInteger();
- }
- bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
- EVT VT) const {
- // v8f16 without fp16 need to be extended to v8f32, which is more difficult to
- // legalize.
- if (FPVT == MVT::v8f16 && !Subtarget->hasFullFP16())
- return false;
- return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
- }
- bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
- return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
- }
- unsigned
- AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
- if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
- return getPointerTy(DL).getSizeInBits();
- return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
- }
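- // The 3 pointers + 2 ints figure corresponds to the AAPCS64 va_list layout
- // (__stack, __gr_top and __vr_top pointers plus the 32-bit __gr_offs and
- // __vr_offs), i.e. 3*64 + 2*32 = 256 bits on LP64 targets, whereas Darwin and
- // Windows use a single char* va_list.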
- void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
- MachineFrameInfo &MFI = MF.getFrameInfo();
- // If we have any vulnerable SVE stack objects then the stack protector
- // needs to be placed at the top of the SVE stack area, as the SVE locals
- // are placed above the other locals, so we allocate it as if it were a
- // scalable vector.
- // FIXME: It may be worthwhile having a specific interface for this rather
- // than doing it here in finalizeLowering.
- if (MFI.hasStackProtectorIndex()) {
- for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
- if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
- MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
- MFI.setStackID(MFI.getStackProtectorIndex(),
- TargetStackID::ScalableVector);
- MFI.setObjectAlignment(MFI.getStackProtectorIndex(), Align(16));
- break;
- }
- }
- }
- MFI.computeMaxCallFrameSize(MF);
- TargetLoweringBase::finalizeLowering(MF);
- }
- // Unlike X86, we let frame lowering assign offsets to all catch objects.
- bool AArch64TargetLowering::needsFixedCatchObjects() const {
- return false;
- }
- bool AArch64TargetLowering::shouldLocalize(
- const MachineInstr &MI, const TargetTransformInfo *TTI) const {
- auto &MF = *MI.getMF();
- auto &MRI = MF.getRegInfo();
- auto maxUses = [](unsigned RematCost) {
- // A cost of 1 means remats are basically free.
- if (RematCost == 1)
- return std::numeric_limits<unsigned>::max();
- if (RematCost == 2)
- return 2U;
- // Remat is too expensive, only sink if there's one user.
- if (RematCost > 2)
- return 1U;
- llvm_unreachable("Unexpected remat cost");
- };
- switch (MI.getOpcode()) {
- case TargetOpcode::G_GLOBAL_VALUE: {
- // On Darwin, TLS global vars get selected into function calls, which
- // we don't want localized, as they can get moved into the middle of
- // another call sequence.
- const GlobalValue &GV = *MI.getOperand(1).getGlobal();
- if (GV.isThreadLocal() && Subtarget->isTargetMachO())
- return false;
- break;
- }
- case TargetOpcode::G_CONSTANT: {
- auto *CI = MI.getOperand(1).getCImm();
- APInt Imm = CI->getValue();
- InstructionCost Cost = TTI->getIntImmCost(
- Imm, CI->getType(), TargetTransformInfo::TCK_CodeSize);
- assert(Cost.isValid() && "Expected a valid imm cost");
- unsigned RematCost = *Cost.getValue();
- Register Reg = MI.getOperand(0).getReg();
- unsigned MaxUses = maxUses(RematCost);
- // Don't pass the UINT_MAX sentinel value to hasAtMostUserInstrs().
- if (MaxUses == std::numeric_limits<unsigned>::max())
- --MaxUses;
- return MRI.hasAtMostUserInstrs(Reg, MaxUses);
- }
- // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being
- // localizable.
- case AArch64::ADRP:
- case AArch64::G_ADD_LOW:
- return true;
- default:
- break;
- }
- return TargetLoweringBase::shouldLocalize(MI, TTI);
- }
- bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
- if (isa<ScalableVectorType>(Inst.getType()))
- return true;
- for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
- if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
- return true;
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
- if (isa<ScalableVectorType>(AI->getAllocatedType()))
- return true;
- }
- // Checks to allow the use of SME instructions
- if (auto *Base = dyn_cast<CallBase>(&Inst)) {
- auto CallerAttrs = SMEAttrs(*Inst.getFunction());
- auto CalleeAttrs = SMEAttrs(*Base);
- if (CallerAttrs.requiresSMChange(CalleeAttrs,
- /*BodyOverridesInterface=*/false) ||
- CallerAttrs.requiresLazySave(CalleeAttrs))
- return true;
- }
- return false;
- }
- // Return the largest legal scalable vector type that matches VT's element type.
- static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
- assert(VT.isFixedLengthVector() &&
- DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
- "Expected legal fixed length vector!");
- switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("unexpected element type for SVE container");
- case MVT::i8:
- return EVT(MVT::nxv16i8);
- case MVT::i16:
- return EVT(MVT::nxv8i16);
- case MVT::i32:
- return EVT(MVT::nxv4i32);
- case MVT::i64:
- return EVT(MVT::nxv2i64);
- case MVT::f16:
- return EVT(MVT::nxv8f16);
- case MVT::f32:
- return EVT(MVT::nxv4f32);
- case MVT::f64:
- return EVT(MVT::nxv2f64);
- }
- }
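- // For example, a legal fixed-length v8i32 maps to the nxv4i32 container; the
- // mapping depends only on the element type, so every legal fixed-length
- // vector of i32 uses the same container.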
- // Return a PTRUE with active lanes corresponding to the extent of VT.
- static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
- EVT VT) {
- assert(VT.isFixedLengthVector() &&
- DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
- "Expected legal fixed length vector!");
- std::optional<unsigned> PgPattern =
- getSVEPredPatternFromNumElements(VT.getVectorNumElements());
- assert(PgPattern && "Unexpected element count for SVE predicate");
- // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
- // AArch64SVEPredPattern::all, which can enable the use of unpredicated
- // variants of instructions when available.
- const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
- unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
- if (MaxSVESize && MinSVESize == MaxSVESize &&
- MaxSVESize == VT.getSizeInBits())
- PgPattern = AArch64SVEPredPattern::all;
- MVT MaskVT;
- switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("unexpected element type for SVE predicate");
- case MVT::i8:
- MaskVT = MVT::nxv16i1;
- break;
- case MVT::i16:
- case MVT::f16:
- MaskVT = MVT::nxv8i1;
- break;
- case MVT::i32:
- case MVT::f32:
- MaskVT = MVT::nxv4i1;
- break;
- case MVT::i64:
- case MVT::f64:
- MaskVT = MVT::nxv2i1;
- break;
- }
- return getPTrue(DAG, DL, MaskVT, *PgPattern);
- }
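- // For example, a fixed-length v8i32 typically yields "ptrue pN.s, vl8"; when
- // the SVE register size is known exactly and matches the vector size, this
- // relaxes to "ptrue pN.s, all" so unpredicated instruction forms can be used.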
- static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
- EVT VT) {
- assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
- "Expected legal scalable vector!");
- auto PredTy = VT.changeVectorElementType(MVT::i1);
- return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
- }
- static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
- if (VT.isFixedLengthVector())
- return getPredicateForFixedLengthVector(DAG, DL, VT);
- return getPredicateForScalableVector(DAG, DL, VT);
- }
- // Grow V to consume an entire SVE register.
- static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
- assert(VT.isScalableVector() &&
- "Expected to convert into a scalable vector!");
- assert(V.getValueType().isFixedLengthVector() &&
- "Expected a fixed length vector operand!");
- SDLoc DL(V);
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
- }
- // Shrink V so it's just big enough to maintain a VT's worth of data.
- static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
- assert(VT.isFixedLengthVector() &&
- "Expected to convert into a fixed length vector!");
- assert(V.getValueType().isScalableVector() &&
- "Expected a scalable vector operand!");
- SDLoc DL(V);
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
- }
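- // In effect a fixed-length value just occupies the low lanes of an SVE
- // register: conversion to scalable is an INSERT_SUBVECTOR into undef at index
- // 0, conversion back is an EXTRACT_SUBVECTOR from index 0, and lanes beyond
- // the fixed width are undefined in the scalable form.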
- // Convert all fixed length vector loads larger than NEON to masked_loads.
- SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- auto Load = cast<LoadSDNode>(Op);
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- EVT LoadVT = ContainerVT;
- EVT MemVT = Load->getMemoryVT();
- auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
- if (VT.isFloatingPoint()) {
- LoadVT = ContainerVT.changeTypeToInteger();
- MemVT = MemVT.changeTypeToInteger();
- }
- SDValue NewLoad = DAG.getMaskedLoad(
- LoadVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Pg,
- DAG.getUNDEF(LoadVT), MemVT, Load->getMemOperand(),
- Load->getAddressingMode(), Load->getExtensionType());
- SDValue Result = NewLoad;
- if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
- EVT ExtendVT = ContainerVT.changeVectorElementType(
- Load->getMemoryVT().getVectorElementType());
- Result = getSVESafeBitCast(ExtendVT, Result, DAG);
- Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
- Pg, Result, DAG.getUNDEF(ContainerVT));
- } else if (VT.isFloatingPoint()) {
- Result = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Result);
- }
- Result = convertFromScalableVector(DAG, VT, Result);
- SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
- return DAG.getMergeValues(MergedValues, DL);
- }
- static SDValue convertFixedMaskToScalableVector(SDValue Mask,
- SelectionDAG &DAG) {
- SDLoc DL(Mask);
- EVT InVT = Mask.getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
- auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
- if (ISD::isBuildVectorAllOnes(Mask.getNode()))
- return Pg;
- auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
- auto Op2 = DAG.getConstant(0, DL, ContainerVT);
- return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, Pg.getValueType(),
- {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
- }
- // Convert all fixed length vector loads larger than NEON to masked_loads.
- SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- auto Load = cast<MaskedLoadSDNode>(Op);
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);
- SDValue PassThru;
- bool IsPassThruZeroOrUndef = false;
- if (Load->getPassThru()->isUndef()) {
- PassThru = DAG.getUNDEF(ContainerVT);
- IsPassThruZeroOrUndef = true;
- } else {
- if (ContainerVT.isInteger())
- PassThru = DAG.getConstant(0, DL, ContainerVT);
- else
- PassThru = DAG.getConstantFP(0, DL, ContainerVT);
- if (isZerosVector(Load->getPassThru().getNode()))
- IsPassThruZeroOrUndef = true;
- }
- SDValue NewLoad = DAG.getMaskedLoad(
- ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
- Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
- Load->getAddressingMode(), Load->getExtensionType());
- SDValue Result = NewLoad;
- if (!IsPassThruZeroOrUndef) {
- SDValue OldPassThru =
- convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
- Result = DAG.getSelect(DL, ContainerVT, Mask, Result, OldPassThru);
- }
- Result = convertFromScalableVector(DAG, VT, Result);
- SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
- return DAG.getMergeValues(MergedValues, DL);
- }
- // Convert all fixed length vector stores larger than NEON to masked_stores.
- SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- auto Store = cast<StoreSDNode>(Op);
- SDLoc DL(Op);
- EVT VT = Store->getValue().getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- EVT MemVT = Store->getMemoryVT();
- auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
- auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
- if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
- EVT TruncVT = ContainerVT.changeVectorElementType(
- Store->getMemoryVT().getVectorElementType());
- MemVT = MemVT.changeTypeToInteger();
- NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
- NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
- DAG.getUNDEF(TruncVT));
- NewValue =
- getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
- } else if (VT.isFloatingPoint()) {
- MemVT = MemVT.changeTypeToInteger();
- NewValue =
- getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
- }
- return DAG.getMaskedStore(Store->getChain(), DL, NewValue,
- Store->getBasePtr(), Store->getOffset(), Pg, MemVT,
- Store->getMemOperand(), Store->getAddressingMode(),
- Store->isTruncatingStore());
- }
- SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- auto *Store = cast<MaskedStoreSDNode>(Op);
- SDLoc DL(Op);
- EVT VT = Store->getValue().getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
- SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);
- return DAG.getMaskedStore(
- Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
- Mask, Store->getMemoryVT(), Store->getMemOperand(),
- Store->getAddressingMode(), Store->isTruncatingStore());
- }
- SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- EVT VT = Op.getValueType();
- EVT EltVT = VT.getVectorElementType();
- bool Signed = Op.getOpcode() == ISD::SDIV;
- unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
- bool Negated;
- uint64_t SplatVal;
- if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
- SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32);
- SDValue Pg = getPredicateForFixedLengthVector(DAG, dl, VT);
- SDValue Res =
- DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, ContainerVT, Pg, Op1, Op2);
- if (Negated)
- Res = DAG.getNode(ISD::SUB, dl, ContainerVT,
- DAG.getConstant(0, dl, ContainerVT), Res);
- return convertFromScalableVector(DAG, VT, Res);
- }
- // Scalable vector i32/i64 DIV is supported.
- if (EltVT == MVT::i32 || EltVT == MVT::i64)
- return LowerToPredicatedOp(Op, DAG, PredOpcode);
- // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
- EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
- EVT PromVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
- unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- // If the wider type is legal: extend, op, and truncate.
- EVT WideVT = VT.widenIntegerVectorElementType(*DAG.getContext());
- if (DAG.getTargetLoweringInfo().isTypeLegal(WideVT)) {
- SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WideVT, Op.getOperand(0));
- SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WideVT, Op.getOperand(1));
- SDValue Div = DAG.getNode(Op.getOpcode(), dl, WideVT, Op0, Op1);
- return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
- }
- auto HalveAndExtendVector = [&DAG, &dl, &HalfVT, &PromVT,
- &ExtendOpcode](SDValue Op) {
- SDValue IdxZero = DAG.getConstant(0, dl, MVT::i64);
- SDValue IdxHalf =
- DAG.getConstant(HalfVT.getVectorNumElements(), dl, MVT::i64);
- SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfVT, Op, IdxZero);
- SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfVT, Op, IdxHalf);
- return std::pair<SDValue, SDValue>(
- {DAG.getNode(ExtendOpcode, dl, PromVT, Lo),
- DAG.getNode(ExtendOpcode, dl, PromVT, Hi)});
- };
- // If wider type is not legal: split, extend, op, trunc and concat.
- auto [Op0LoExt, Op0HiExt] = HalveAndExtendVector(Op.getOperand(0));
- auto [Op1LoExt, Op1HiExt] = HalveAndExtendVector(Op.getOperand(1));
- SDValue Lo = DAG.getNode(Op.getOpcode(), dl, PromVT, Op0LoExt, Op1LoExt);
- SDValue Hi = DAG.getNode(Op.getOpcode(), dl, PromVT, Op0HiExt, Op1HiExt);
- SDValue LoTrunc = DAG.getNode(ISD::TRUNCATE, dl, HalfVT, Lo);
- SDValue HiTrunc = DAG.getNode(ISD::TRUNCATE, dl, HalfVT, Hi);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, {LoTrunc, HiTrunc});
- }
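- // Worked example (the exact path depends on which fixed-length types are
- // legal for the configured SVE width): a v8i16 sdiv may be sign-extended to
- // v8i32, divided via the predicated SDIV node, and truncated back; if v8i32
- // is not legal, the vector is instead split into two v4i16 halves that are
- // extended to v4i32, divided, truncated and re-concatenated.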
- SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- SDLoc DL(Op);
- SDValue Val = Op.getOperand(0);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
- Val = convertToScalableVector(DAG, ContainerVT, Val);
- bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
- unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
- // Repeatedly unpack Val until the result is of the desired element type.
- switch (ContainerVT.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("unimplemented container type");
- case MVT::nxv16i8:
- Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
- if (VT.getVectorElementType() == MVT::i16)
- break;
- [[fallthrough]];
- case MVT::nxv8i16:
- Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
- if (VT.getVectorElementType() == MVT::i32)
- break;
- [[fallthrough]];
- case MVT::nxv4i32:
- Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
- assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
- break;
- }
- return convertFromScalableVector(DAG, VT, Val);
- }
- SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- SDLoc DL(Op);
- SDValue Val = Op.getOperand(0);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
- Val = convertToScalableVector(DAG, ContainerVT, Val);
- // Repeatedly truncate Val until the result is of the desired element type.
- switch (ContainerVT.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("unimplemented container type");
- case MVT::nxv2i64:
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
- Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
- if (VT.getVectorElementType() == MVT::i32)
- break;
- [[fallthrough]];
- case MVT::nxv4i32:
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
- Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
- if (VT.getVectorElementType() == MVT::i16)
- break;
- [[fallthrough]];
- case MVT::nxv8i16:
- Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
- Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
- assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
- break;
- }
- return convertFromScalableVector(DAG, VT, Val);
- }
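- // Each step above halves the element width: the value is bitcast to the type
- // with twice as many half-sized elements and UZP1'd with itself, which packs
- // the even-numbered (little-endian low-half) sub-elements into the low part
- // of the register, i.e. an element-wise truncate for the lanes that matter.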
- SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
- SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- EVT InVT = Op.getOperand(0).getValueType();
- assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!");
- SDLoc DL(Op);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
- SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1));
- }
- SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
- SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- SDLoc DL(Op);
- EVT InVT = Op.getOperand(0).getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
- SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
- auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0,
- Op.getOperand(1), Op.getOperand(2));
- return convertFromScalableVector(DAG, VT, ScalableRes);
- }
- // Convert vector operation 'Op' to an equivalent predicated operation whereby
- // the original operation's type is used to construct a suitable predicate.
- // NOTE: The results for inactive lanes are undefined.
- SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
- SelectionDAG &DAG,
- unsigned NewOp) const {
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- auto Pg = getPredicateForVector(DAG, DL, VT);
- if (VT.isFixedLengthVector()) {
- assert(isTypeLegal(VT) && "Expected only legal fixed-width types");
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- // Create list of operands by converting existing ones to scalable types.
- SmallVector<SDValue, 4> Operands = {Pg};
- for (const SDValue &V : Op->op_values()) {
- if (isa<CondCodeSDNode>(V)) {
- Operands.push_back(V);
- continue;
- }
- if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
- EVT VTArg = VTNode->getVT().getVectorElementType();
- EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
- Operands.push_back(DAG.getValueType(NewVTArg));
- continue;
- }
- assert(isTypeLegal(V.getValueType()) &&
- "Expected only legal fixed-width types");
- Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
- }
- if (isMergePassthruOpcode(NewOp))
- Operands.push_back(DAG.getUNDEF(ContainerVT));
- auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
- return convertFromScalableVector(DAG, VT, ScalableRes);
- }
- assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
- SmallVector<SDValue, 4> Operands = {Pg};
- for (const SDValue &V : Op->op_values()) {
- assert((!V.getValueType().isVector() ||
- V.getValueType().isScalableVector()) &&
- "Only scalable vectors are supported!");
- Operands.push_back(V);
- }
- if (isMergePassthruOpcode(NewOp))
- Operands.push_back(DAG.getUNDEF(VT));
- return DAG.getNode(NewOp, DL, VT, Operands, Op->getFlags());
- }
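- // For example, a fixed-length v4f32 fdiv reaches here with
- // NewOp == AArch64ISD::FDIV_PRED: the operands are wrapped in nxv4f32, a
- // "ptrue ... vl4" predicate is prepended, and the result is unwrapped back to
- // v4f32, with inactive lanes left undefined as noted above.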
- // If a fixed length vector operation has no side effects when applied to
- // undefined elements, we can safely use scalable vectors to perform the same
- // operation without needing to worry about predication.
- SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && isTypeLegal(VT) &&
- "Only expected to lower fixed length vector operation!");
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- // Create list of operands by converting existing ones to scalable types.
- SmallVector<SDValue, 4> Ops;
- for (const SDValue &V : Op->op_values()) {
- assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
- // Pass through non-vector operands.
- if (!V.getValueType().isVector()) {
- Ops.push_back(V);
- continue;
- }
- // "cast" fixed length vector to a scalable vector.
- assert(V.getValueType().isFixedLengthVector() &&
- isTypeLegal(V.getValueType()) &&
- "Only fixed length vectors are supported!");
- Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
- }
- auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
- return convertFromScalableVector(DAG, VT, ScalableRes);
- }
- SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
- SelectionDAG &DAG) const {
- SDLoc DL(ScalarOp);
- SDValue AccOp = ScalarOp.getOperand(0);
- SDValue VecOp = ScalarOp.getOperand(1);
- EVT SrcVT = VecOp.getValueType();
- EVT ResVT = SrcVT.getVectorElementType();
- EVT ContainerVT = SrcVT;
- if (SrcVT.isFixedLengthVector()) {
- ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
- VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
- }
- SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
- SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
- // Convert operands to Scalable.
- AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
- DAG.getUNDEF(ContainerVT), AccOp, Zero);
- // Perform reduction.
- SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
- Pg, AccOp, VecOp);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
- }
- SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
- SelectionDAG &DAG) const {
- SDLoc DL(ReduceOp);
- SDValue Op = ReduceOp.getOperand(0);
- EVT OpVT = Op.getValueType();
- EVT VT = ReduceOp.getValueType();
- if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
- return SDValue();
- SDValue Pg = getPredicateForVector(DAG, DL, OpVT);
- switch (ReduceOp.getOpcode()) {
- default:
- return SDValue();
- case ISD::VECREDUCE_OR:
- if (isAllActivePredicate(DAG, Pg) && OpVT == MVT::nxv16i1)
- // The predicate can be 'Op' because
- // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
- return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
- else
- return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
- case ISD::VECREDUCE_AND: {
- Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
- return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
- }
- case ISD::VECREDUCE_XOR: {
- SDValue ID =
- DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
- if (OpVT == MVT::nxv1i1) {
- // Emulate a CNTP on .Q using .D and a different governing predicate.
- Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Pg);
- Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Op);
- }
- SDValue Cntp =
- DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
- return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
- }
- }
- return SDValue();
- }
- SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
- SDValue ScalarOp,
- SelectionDAG &DAG) const {
- SDLoc DL(ScalarOp);
- SDValue VecOp = ScalarOp.getOperand(0);
- EVT SrcVT = VecOp.getValueType();
- if (useSVEForFixedLengthVectorVT(
- SrcVT,
- /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
- VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
- }
- // UADDV always returns an i64 result.
- EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
- SrcVT.getVectorElementType();
- EVT RdxVT = SrcVT;
- if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
- RdxVT = getPackedSVEVectorVT(ResVT);
- SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
- SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
- SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
- Rdx, DAG.getConstant(0, DL, MVT::i64));
- // The VEC_REDUCE nodes expect an element-sized result.
- if (ResVT != ScalarOp.getValueType())
- Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
- return Res;
- }
- SDValue
- AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
- EVT InVT = Op.getOperand(1).getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
- SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
- SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));
- // Convert the mask to a predicate (NOTE: We don't need to worry about
- // inactive lanes since VSELECT is safe when given undefined elements).
- EVT MaskVT = Op.getOperand(0).getValueType();
- EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
- auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
- Mask = DAG.getNode(ISD::TRUNCATE, DL,
- MaskContainerVT.changeVectorElementType(MVT::i1), Mask);
- auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
- Mask, Op1, Op2);
- return convertFromScalableVector(DAG, VT, ScalableRes);
- }
- SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT InVT = Op.getOperand(0).getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
- assert(InVT.isFixedLengthVector() && isTypeLegal(InVT) &&
- "Only expected to lower fixed length vector operation!");
- assert(Op.getValueType() == InVT.changeTypeToInteger() &&
- "Expected integer result of the same bit length as the inputs!");
- auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
- auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
- auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
- EVT CmpVT = Pg.getValueType();
- auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
- {Pg, Op1, Op2, Op.getOperand(2)});
- EVT PromoteVT = ContainerVT.changeTypeToInteger();
- auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
- return convertFromScalableVector(DAG, Op.getValueType(), Promote);
- }
- SDValue
- AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- auto SrcOp = Op.getOperand(0);
- EVT VT = Op.getValueType();
- EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
- EVT ContainerSrcVT =
- getContainerForFixedLengthVector(DAG, SrcOp.getValueType());
- SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp);
- Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp);
- return convertFromScalableVector(DAG, VT, Op);
- }
- SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- unsigned NumOperands = Op->getNumOperands();
- assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
- "Unexpected number of operands in CONCAT_VECTORS");
- auto SrcOp1 = Op.getOperand(0);
- auto SrcOp2 = Op.getOperand(1);
- EVT VT = Op.getValueType();
- EVT SrcVT = SrcOp1.getValueType();
- if (NumOperands > 2) {
- SmallVector<SDValue, 4> Ops;
- EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
- for (unsigned I = 0; I < NumOperands; I += 2)
- Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT,
- Op->getOperand(I), Op->getOperand(I + 1)));
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
- }
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
- SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1);
- SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2);
- Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2);
- return convertFromScalableVector(DAG, VT, Op);
- }
- SDValue
- AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- SDLoc DL(Op);
- SDValue Val = Op.getOperand(0);
- SDValue Pg = getPredicateForVector(DAG, DL, VT);
- EVT SrcVT = Val.getValueType();
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- EVT ExtendVT = ContainerVT.changeVectorElementType(
- SrcVT.getVectorElementType());
- Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
- Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);
- Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val);
- Val = getSVESafeBitCast(ExtendVT, Val, DAG);
- Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
- Pg, Val, DAG.getUNDEF(ContainerVT));
- return convertFromScalableVector(DAG, VT, Val);
- }
- SDValue
- AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- SDLoc DL(Op);
- SDValue Val = Op.getOperand(0);
- EVT SrcVT = Val.getValueType();
- EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
- EVT RoundVT = ContainerSrcVT.changeVectorElementType(
- VT.getVectorElementType());
- SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);
- Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
- Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val,
- Op.getOperand(1), DAG.getUNDEF(RoundVT));
- Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG);
- Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
- Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
- return DAG.getNode(ISD::BITCAST, DL, VT, Val);
- }
- SDValue
- AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
- unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
- : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
- SDLoc DL(Op);
- SDValue Val = Op.getOperand(0);
- EVT SrcVT = Val.getValueType();
- EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
- EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
- if (VT.bitsGE(SrcVT)) {
- SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
- Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
- VT.changeTypeToInteger(), Val);
- // Safe to use a wider-than-specified operand because promoting the value
- // changes nothing from an arithmetic point of view.
- Val =
- convertToScalableVector(DAG, ContainerDstVT.changeTypeToInteger(), Val);
- Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
- DAG.getUNDEF(ContainerDstVT));
- return convertFromScalableVector(DAG, VT, Val);
- } else {
- EVT CvtVT = ContainerSrcVT.changeVectorElementType(
- ContainerDstVT.getVectorElementType());
- SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
- Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
- Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
- Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG);
- Val = convertFromScalableVector(DAG, SrcVT, Val);
- Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
- return DAG.getNode(ISD::BITCAST, DL, VT, Val);
- }
- }
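- // Lower fixed-length FP_TO_[SU]INT using FCVTZ[SU]_MERGE_PASSTHRU. A wider
- // result is produced by widening the input's bit pattern into the
- // destination container first; a narrower result is computed at the source
- // width and then truncated.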
- SDValue
- AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
- unsigned Opcode = IsSigned ? AArch64ISD::FCVTZS_MERGE_PASSTHRU
- : AArch64ISD::FCVTZU_MERGE_PASSTHRU;
- SDLoc DL(Op);
- SDValue Val = Op.getOperand(0);
- EVT SrcVT = Val.getValueType();
- EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
- EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
- if (VT.bitsGT(SrcVT)) {
- EVT CvtVT = ContainerDstVT.changeVectorElementType(
- ContainerSrcVT.getVectorElementType());
- SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
- Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
- Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val);
- Val = convertToScalableVector(DAG, ContainerDstVT, Val);
- Val = getSVESafeBitCast(CvtVT, Val, DAG);
- Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
- DAG.getUNDEF(ContainerDstVT));
- return convertFromScalableVector(DAG, VT, Val);
- } else {
- EVT CvtVT = ContainerSrcVT.changeTypeToInteger();
- SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
- // It is safe to use a wider result than specified since an fp_to_int whose
- // result doesn't fit into the destination is undefined.
- Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
- Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
- Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
- return DAG.getNode(ISD::TRUNCATE, DL, VT, Val);
- }
- }
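- // Lower a fixed-length VECTOR_SHUFFLE by matching the mask against SVE
- // operations: splats, INSR for rotate-by-one EXT patterns, REV/ZIP/TRN, and,
- // when the exact SVE register size is known, VECTOR_REVERSE, ZIP2 and UZP.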
- SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
- SDValue Op, SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
- auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
- auto ShuffleMask = SVN->getMask();
- SDLoc DL(Op);
- SDValue Op1 = Op.getOperand(0);
- SDValue Op2 = Op.getOperand(1);
- EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
- Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
- auto MinLegalExtractEltScalarTy = [](EVT ScalarTy) -> EVT {
- if (ScalarTy == MVT::i8 || ScalarTy == MVT::i16)
- return MVT::i32;
- return ScalarTy;
- };
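- // A splat shuffle becomes an EXTRACT_VECTOR_ELT of the splatted lane
- // followed by a SPLAT_VECTOR on the container type.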
- if (SVN->isSplat()) {
- unsigned Lane = std::max(0, SVN->getSplatIndex());
- EVT ScalarTy = MinLegalExtractEltScalarTy(VT.getVectorElementType());
- SDValue SplatEl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
- DAG.getConstant(Lane, DL, MVT::i64));
- Op = DAG.getNode(ISD::SPLAT_VECTOR, DL, ContainerVT, SplatEl);
- return convertFromScalableVector(DAG, VT, Op);
- }
- bool ReverseEXT = false;
- unsigned Imm;
- if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
- Imm == VT.getVectorNumElements() - 1) {
- if (ReverseEXT)
- std::swap(Op1, Op2);
- EVT ScalarTy = MinLegalExtractEltScalarTy(VT.getVectorElementType());
- SDValue Scalar = DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
- DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
- Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
- return convertFromScalableVector(DAG, VT, Op);
- }
- for (unsigned LaneSize : {64U, 32U, 16U}) {
- if (isREVMask(ShuffleMask, VT, LaneSize)) {
- EVT NewVT =
- getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), LaneSize));
- unsigned RevOp;
- unsigned EltSz = VT.getScalarSizeInBits();
- if (EltSz == 8)
- RevOp = AArch64ISD::BSWAP_MERGE_PASSTHRU;
- else if (EltSz == 16)
- RevOp = AArch64ISD::REVH_MERGE_PASSTHRU;
- else
- RevOp = AArch64ISD::REVW_MERGE_PASSTHRU;
- Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
- Op = LowerToPredicatedOp(Op, DAG, RevOp);
- Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
- return convertFromScalableVector(DAG, VT, Op);
- }
- }
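- // With SVE2.1, reversing 64-bit elements within 128-bit chunks can use REVD
- // directly; floating-point types go via an integer bitcast.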
- if (Subtarget->hasSVE2p1() && VT.getScalarSizeInBits() == 64 &&
- isREVMask(ShuffleMask, VT, 128)) {
- if (!VT.isFloatingPoint())
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::REVD_MERGE_PASSTHRU);
- EVT NewVT = getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), 64));
- Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
- Op = LowerToPredicatedOp(Op, DAG, AArch64ISD::REVD_MERGE_PASSTHRU);
- Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
- return convertFromScalableVector(DAG, VT, Op);
- }
- unsigned WhichResult;
- if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2));
- if (isTRNMask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
- }
- if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op1));
- if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
- }
- // Functions like isZIPMask return true when an ISD::VECTOR_SHUFFLE's mask
- // represents the same logical operation as performed by a ZIP instruction. In
- // isolation these functions do not mean the ISD::VECTOR_SHUFFLE is exactly
- // equivalent to an AArch64 instruction. There's the extra component of
- // ISD::VECTOR_SHUFFLE's value type to consider. Prior to SVE these functions
- // only operated on 64/128-bit vector types that have a direct mapping to a
- // target register, and so an exact mapping is implied.
- // However, when using SVE for fixed-length vectors, most legal vector types
- // are actually sub-vectors of a larger SVE register. When mapping
- // ISD::VECTOR_SHUFFLE to an SVE instruction, care must be taken to consider
- // how the mask's indices translate. Specifically, when the mapping requires
- // an exact meaning for a specific vector index (e.g. index X is the last
- // vector element in the register), such mappings are often only safe when
- // the exact SVE register size is known. The main exception to this is when
- // indices are logically relative to the first element of either
- // ISD::VECTOR_SHUFFLE operand, because these relative indices don't change
- // when converting from fixed-length to scalable vector types (i.e. the start
- // of a fixed-length vector is always the start of a scalable vector).
- unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
- unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
- if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
- if (ShuffleVectorInst::isReverseMask(ShuffleMask) && Op2.isUndef()) {
- Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
- return convertFromScalableVector(DAG, VT, Op);
- }
- if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2));
- if (isUZPMask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
- }
- if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op1));
- if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
- unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
- return convertFromScalableVector(
- DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
- }
- }
- return SDValue();
- }
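- // Bitcast between legal scalable vector types, going via the packed form of
- // each element type so that unpacked layouts are reinterpreted correctly.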
- SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT InVT = Op.getValueType();
- assert(VT.isScalableVector() && isTypeLegal(VT) &&
- InVT.isScalableVector() && isTypeLegal(InVT) &&
- "Only expect to cast between legal scalable vector types!");
- assert(VT.getVectorElementType() != MVT::i1 &&
- InVT.getVectorElementType() != MVT::i1 &&
- "For predicate bitcasts, use getSVEPredicateBitCast");
- if (InVT == VT)
- return Op;
- EVT PackedVT = getPackedSVEVectorVT(VT.getVectorElementType());
- EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType());
- // Safe bitcasting between unpacked vector types of different element counts
- // is currently unsupported because the code below lacks the work needed to
- // ensure the result's elements live where they're supposed to within an SVE
- // register.
- // 01234567
- // e.g. nxv2i32 = XX??XX??
- // nxv4f16 = X?X?X?X?
- assert((VT.getVectorElementCount() == InVT.getVectorElementCount() ||
- VT == PackedVT || InVT == PackedInVT) &&
- "Unexpected bitcast!");
- // Pack input if required.
- if (InVT != PackedInVT)
- Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);
- Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
- // Unpack result if required.
- if (VT != PackedVT)
- Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
- return Op;
- }
- bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
- SDValue N) const {
- return ::isAllActivePredicate(DAG, N);
- }
- EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
- return ::getPromotedVTForPredicate(VT);
- }
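- // Target hook for SimplifyDemandedBits: folds (VSHL (VLSHR Val X) X) to Val
- // when the bits cleared by the shift pair are not demanded, and records the
- // known-zero high bits of the results of the SVE element-count intrinsics.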
- bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
- SDValue Op, const APInt &OriginalDemandedBits,
- const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
- unsigned Depth) const {
- unsigned Opc = Op.getOpcode();
- switch (Opc) {
- case AArch64ISD::VSHL: {
- // Match (VSHL (VLSHR Val X) X)
- SDValue ShiftL = Op;
- SDValue ShiftR = Op->getOperand(0);
- if (ShiftR->getOpcode() != AArch64ISD::VLSHR)
- return false;
- if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse())
- return false;
- unsigned ShiftLBits = ShiftL->getConstantOperandVal(1);
- unsigned ShiftRBits = ShiftR->getConstantOperandVal(1);
- // Unequal shift amounts could be handled as well, but that is not
- // implemented.
- if (ShiftRBits != ShiftLBits)
- return false;
- unsigned ScalarSize = Op.getScalarValueSizeInBits();
- assert(ScalarSize > ShiftLBits && "Invalid shift imm");
- APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits);
- APInt UnusedBits = ~OriginalDemandedBits;
- if ((ZeroBits & UnusedBits) != ZeroBits)
- return false;
- // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not
- // used - simplify to just Val.
- return TLO.CombineTo(Op, ShiftR->getOperand(0));
- }
- case ISD::INTRINSIC_WO_CHAIN: {
- if (auto ElementSize = IsSVECntIntrinsic(Op)) {
- unsigned MaxSVEVectorSizeInBits = Subtarget->getMaxSVEVectorSizeInBits();
- if (!MaxSVEVectorSizeInBits)
- MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
- unsigned MaxElements = MaxSVEVectorSizeInBits / *ElementSize;
- // The SVE count intrinsics don't support the multiplier immediate so we
- // don't have to account for that here. The value returned may be slightly
- // over the true required bits, as this is based on the "ALL" pattern. The
- // other patterns are also exposed by these intrinsics, but they all
- // return a value that's strictly less than "ALL".
- unsigned RequiredBits = llvm::bit_width(MaxElements);
- unsigned BitWidth = Known.Zero.getBitWidth();
- if (RequiredBits < BitWidth)
- Known.Zero.setHighBits(BitWidth - RequiredBits);
- return false;
- }
- }
- }
- return TargetLowering::SimplifyDemandedBitsForTargetNode(
- Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
- }
- bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
- return Op.getOpcode() == AArch64ISD::DUP ||
- Op.getOpcode() == AArch64ISD::MOVI ||
- (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
- TargetLowering::isTargetCanonicalConstantNode(Op);
- }
- bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
- unsigned Opc, LLT Ty1, LLT Ty2) const {
- return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
- }
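- // Complex deinterleaving (FCMLA/FCADD codegen) requires the complex number
- // extension and is only used for 64-bit or power-of-2, at least 128-bit,
- // fixed vectors of f16 (with full FP16), f32 or f64.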
- bool AArch64TargetLowering::isComplexDeinterleavingSupported() const {
- return Subtarget->hasComplxNum();
- }
- bool AArch64TargetLowering::isComplexDeinterleavingOperationSupported(
- ComplexDeinterleavingOperation Operation, Type *Ty) const {
- auto *VTy = dyn_cast<FixedVectorType>(Ty);
- if (!VTy)
- return false;
- auto *ScalarTy = VTy->getScalarType();
- unsigned NumElements = VTy->getNumElements();
- unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements;
- if ((VTyWidth < 128 && VTyWidth != 64) || !llvm::isPowerOf2_32(VTyWidth))
- return false;
- return (ScalarTy->isHalfTy() && Subtarget->hasFullFP16()) ||
- ScalarTy->isFloatTy() || ScalarTy->isDoubleTy();
- }
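- // Emit the IR for one complex operation: vectors wider than 128 bits are
- // split in half, processed recursively and recombined with shuffles, while
- // 64/128-bit vectors map directly onto the NEON vcmla/vcadd intrinsics
- // selected by the rotation.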
- Value *AArch64TargetLowering::createComplexDeinterleavingIR(
- Instruction *I, ComplexDeinterleavingOperation OperationType,
- ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
- Value *Accumulator) const {
- FixedVectorType *Ty = cast<FixedVectorType>(InputA->getType());
- IRBuilder<> B(I);
- unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements();
- assert(((TyWidth >= 128 && llvm::isPowerOf2_32(TyWidth)) || TyWidth == 64) &&
- "Vector type must be either 64 bits or a power of 2 that is at least 128");
- if (TyWidth > 128) {
- int Stride = Ty->getNumElements() / 2;
- auto SplitSeq = llvm::seq<int>(0, Ty->getNumElements());
- auto SplitSeqVec = llvm::to_vector(SplitSeq);
- ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride);
- ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride);
- auto *LowerSplitA = B.CreateShuffleVector(InputA, LowerSplitMask);
- auto *LowerSplitB = B.CreateShuffleVector(InputB, LowerSplitMask);
- auto *UpperSplitA = B.CreateShuffleVector(InputA, UpperSplitMask);
- auto *UpperSplitB = B.CreateShuffleVector(InputB, UpperSplitMask);
- Value *LowerSplitAcc = nullptr;
- Value *UpperSplitAcc = nullptr;
- if (Accumulator) {
- LowerSplitAcc = B.CreateShuffleVector(Accumulator, LowerSplitMask);
- UpperSplitAcc = B.CreateShuffleVector(Accumulator, UpperSplitMask);
- }
- auto *LowerSplitInt = createComplexDeinterleavingIR(
- I, OperationType, Rotation, LowerSplitA, LowerSplitB, LowerSplitAcc);
- auto *UpperSplitInt = createComplexDeinterleavingIR(
- I, OperationType, Rotation, UpperSplitA, UpperSplitB, UpperSplitAcc);
- ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements());
- return B.CreateShuffleVector(LowerSplitInt, UpperSplitInt, JoinMask);
- }
- if (OperationType == ComplexDeinterleavingOperation::CMulPartial) {
- Intrinsic::ID IdMap[4] = {Intrinsic::aarch64_neon_vcmla_rot0,
- Intrinsic::aarch64_neon_vcmla_rot90,
- Intrinsic::aarch64_neon_vcmla_rot180,
- Intrinsic::aarch64_neon_vcmla_rot270};
- if (Accumulator == nullptr)
- Accumulator = ConstantFP::get(Ty, 0);
- return B.CreateIntrinsic(IdMap[(int)Rotation], Ty,
- {Accumulator, InputB, InputA});
- }
- if (OperationType == ComplexDeinterleavingOperation::CAdd) {
- Intrinsic::ID IntId = Intrinsic::not_intrinsic;
- if (Rotation == ComplexDeinterleavingRotation::Rotation_90)
- IntId = Intrinsic::aarch64_neon_vcadd_rot90;
- else if (Rotation == ComplexDeinterleavingRotation::Rotation_270)
- IntId = Intrinsic::aarch64_neon_vcadd_rot270;
- if (IntId == Intrinsic::not_intrinsic)
- return nullptr;
- return B.CreateIntrinsic(IntId, Ty, {InputA, InputB});
- }
- return nullptr;
- }