//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: they correspond to some
/// "generic" X86 CPU rather than a specific CPU model. Usually a number
/// corresponds to the CPU where the feature first appeared. For example, if
/// we check Subtarget.hasSSE42() in the lookups below, the cost is based on
/// Nehalem, as that was the first CPU to support that feature level and thus
/// most likely has the worst case cost, although we may discard an outlying
/// worst cost from one CPU (e.g. Atom).
///
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem / Silvermont
///   AVX     - Sandy Bridge / Jaguar / Bulldozer
///   AVX2    - Haswell / Ryzen
///   AVX-512 - Xeon Phi / Skylake
///
/// And some examples of target-dependent instruction costs (latency):
///                     divss     sqrtss     rsqrtss
///   AMD K7            11-16     19         3
///   Piledriver        9-24      13-15      5
///   Jaguar            14        16         2
///   Pentium II,III    18        30         2
///   Nehalem           7-14      7-18       3
///   Haswell           10-13     11         5
///
/// Interpreting the 4 TargetCostKind types:
/// TCK_RecipThroughput and TCK_Latency should try to match the worst case
/// values reported by the CPU scheduler models (and llvm-mca).
/// TCK_CodeSize should match the instruction count (e.g. divss = 1), NOT the
/// actual encoding size of the instruction.
/// TCK_SizeAndLatency should match the worst case micro-op counts reported by
/// the CPU scheduler models (and llvm-mca), to ensure that they are compatible
/// with the MicroOpBufferSize and LoopMicroOpBufferSize values, which are
/// often used as the cost thresholds where TCK_SizeAndLatency is requested.
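/// For example (illustrative only, using the Haswell row above): a single
/// divss would be reported as TCK_CodeSize = 1 and TCK_Latency ~= 13 (the
/// worst case latency), with TCK_RecipThroughput and TCK_SizeAndLatency taken
/// from the scheduler model's throughput and micro-op numbers respectively.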
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

// Helper struct to store/access costs for each cost kind.
// TODO: Move this to allow other targets to use it?
struct CostKindCosts {
  unsigned RecipThroughputCost = ~0U;
  unsigned LatencyCost = ~0U;
  unsigned CodeSizeCost = ~0U;
  unsigned SizeAndLatencyCost = ~0U;

  std::optional<unsigned>
  operator[](TargetTransformInfo::TargetCostKind Kind) const {
    unsigned Cost = ~0U;
    switch (Kind) {
    case TargetTransformInfo::TCK_RecipThroughput:
      Cost = RecipThroughputCost;
      break;
    case TargetTransformInfo::TCK_Latency:
      Cost = LatencyCost;
      break;
    case TargetTransformInfo::TCK_CodeSize:
      Cost = CodeSizeCost;
      break;
    case TargetTransformInfo::TCK_SizeAndLatency:
      Cost = SizeAndLatencyCost;
      break;
    }
    if (Cost == ~0U)
      return std::nullopt;
    return Cost;
  }
};
using CostKindTblEntry = CostTblEntryT<CostKindCosts>;
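// Example (illustrative only): a table entry such as
//   { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } }
// lists { RecipThroughput, Latency, CodeSize, SizeAndLatency } and is queried
// below as Entry->Cost[CostKind]; a kind left at ~0U yields std::nullopt, so
// the caller falls through to the next table or the base implementation.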
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

std::optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

std::optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    [[fallthrough]];
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}
unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedValue();
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let regular unrolling handle the loop instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // vXi8 multiplications are always promoted to vXi16.
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info);
  }
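  // (Illustrative note: the promotion above means a 'mul <16 x i8>' is costed
  // as zext v16i8 -> v16i16 + mul v16i16 + trunc v16i16 -> v16i8, mirroring
  // how codegen widens vXi8 multiplies.)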
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
      LT.second.getScalarType() == MVT::i32) {
    // Check if the operands can be represented as a smaller datatype.
    bool Op1Signed = false, Op2Signed = false;
    unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
    unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
    unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
    bool SignedMode = Op1Signed || Op2Signed;

    // If both are representable as i15 and at least one is constant,
    // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
    // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
    if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
      bool Op1Constant =
          isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
      bool Op2Constant =
          isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
      bool Op1Sext = isa<SExtInst>(Args[0]) &&
                     (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
      bool Op2Sext = isa<SExtInst>(Args[1]) &&
                     (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));

      bool IsZeroExtended = !Op1Signed || !Op2Signed;
      bool IsConstant = Op1Constant || Op2Constant;
      bool IsSext = Op1Sext || Op2Sext;
      if (IsConstant || IsZeroExtended || IsSext)
        LT.second =
            MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
    }

    // Check if the vXi32 operands can be shrunk into a smaller datatype.
    // This should match the codegen from reduceVMULWidth.
    // TODO: Make this generic (!ST->SSE41 || ST->isPMULLDSlow()).
    if (ST->useSLMArithCosts() && LT.second == MVT::v4i32) {
      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }
  }

  // Vector multiply by pow2 will be simplified to shifts.
  // Vector multiply by -pow2 will be simplified to shifts/negates.
  if (ISD == ISD::MUL && Op2Info.isConstant() &&
      (Op2Info.isPowerOf2() || Op2Info.isNegatedPowerOf2())) {
    InstructionCost Cost =
        getArithmeticInstrCost(Instruction::Shl, Ty, CostKind,
                               Op1Info.getNoProps(), Op2Info.getNoProps());
    if (Op2Info.isNegatedPowerOf2())
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
    return Cost;
  }

  // On X86, vector signed division by a constant power-of-two is normally
  // expanded to the sequence SRA + SRL + ADD + SRA.
  // The OperandValue properties may not be the same as those of the previous
  // operation; conservatively assume OP_None.
  if ((ISD == ISD::SDIV || ISD == ISD::SREM) &&
      Op2Info.isConstant() && Op2Info.isPowerOf2()) {
    InstructionCost Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                   Op1Info.getNoProps(), Op2Info.getNoProps());
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                   Op1Info.getNoProps(), Op2Info.getNoProps());
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                   Op1Info.getNoProps(), Op2Info.getNoProps());
    if (ISD == ISD::SREM) {
      // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
      Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                     Op1Info.getNoProps(), Op2Info.getNoProps());
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
                                     Op1Info.getNoProps(), Op2Info.getNoProps());
    }
    return Cost;
  }
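  // (Illustrative note: 'sdiv <4 x i32> %x, 4' lowers to roughly
  // sra + srl + add + sra, matching the 2*AShr + LShr + Add costing above;
  // SREM additionally pays for the Mul + Sub that rebuild the remainder.)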
  // Vector unsigned division/remainder will be simplified to shifts/masks.
  if ((ISD == ISD::UDIV || ISD == ISD::UREM) &&
      Op2Info.isConstant() && Op2Info.isPowerOf2()) {
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info.getNoProps(), Op2Info.getNoProps());
    // UREM
    return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                  Op1Info.getNoProps(), Op2Info.getNoProps());
  }
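  // (Illustrative note: 'udiv %x, 8' becomes a logical shift right by 3 and
  // 'urem %x, 8' becomes an 'and' with 7, hence the single LShr / And costs
  // above.)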
  305. static const CostKindTblEntry AVX512BWUniformConstCostTable[] = {
  306. { ISD::SHL, MVT::v16i8, { 1, 7, 2, 3 } }, // psllw + pand.
  307. { ISD::SRL, MVT::v16i8, { 1, 7, 2, 3 } }, // psrlw + pand.
  308. { ISD::SRA, MVT::v16i8, { 1, 8, 4, 5 } }, // psrlw, pand, pxor, psubb.
  309. { ISD::SHL, MVT::v32i8, { 1, 8, 2, 3 } }, // psllw + pand.
  310. { ISD::SRL, MVT::v32i8, { 1, 8, 2, 3 } }, // psrlw + pand.
  311. { ISD::SRA, MVT::v32i8, { 1, 9, 4, 5 } }, // psrlw, pand, pxor, psubb.
  312. { ISD::SHL, MVT::v64i8, { 1, 8, 2, 3 } }, // psllw + pand.
  313. { ISD::SRL, MVT::v64i8, { 1, 8, 2, 3 } }, // psrlw + pand.
  314. { ISD::SRA, MVT::v64i8, { 1, 9, 4, 6 } }, // psrlw, pand, pxor, psubb.
  315. { ISD::SHL, MVT::v16i16, { 1, 1, 1, 1 } }, // psllw
  316. { ISD::SRL, MVT::v16i16, { 1, 1, 1, 1 } }, // psrlw
317. { ISD::SRA, MVT::v16i16, { 1, 1, 1, 1 } }, // psraw
  318. { ISD::SHL, MVT::v32i16, { 1, 1, 1, 1 } }, // psllw
  319. { ISD::SRL, MVT::v32i16, { 1, 1, 1, 1 } }, // psrlw
320. { ISD::SRA, MVT::v32i16, { 1, 1, 1, 1 } }, // psraw
  321. };
  322. if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasBWI())
  323. if (const auto *Entry =
  324. CostTableLookup(AVX512BWUniformConstCostTable, ISD, LT.second))
  325. if (auto KindCost = Entry->Cost[CostKind])
  326. return LT.first * *KindCost;
  327. static const CostKindTblEntry AVX512UniformConstCostTable[] = {
  328. { ISD::SHL, MVT::v64i8, { 2, 12, 5, 6 } }, // psllw + pand.
  329. { ISD::SRL, MVT::v64i8, { 2, 12, 5, 6 } }, // psrlw + pand.
  330. { ISD::SRA, MVT::v64i8, { 3, 10, 12, 12 } }, // psrlw, pand, pxor, psubb.
  331. { ISD::SHL, MVT::v16i16, { 2, 7, 4, 4 } }, // psllw + split.
  332. { ISD::SRL, MVT::v16i16, { 2, 7, 4, 4 } }, // psrlw + split.
  333. { ISD::SRA, MVT::v16i16, { 2, 7, 4, 4 } }, // psraw + split.
  334. { ISD::SHL, MVT::v8i32, { 1, 1, 1, 1 } }, // pslld
  335. { ISD::SRL, MVT::v8i32, { 1, 1, 1, 1 } }, // psrld
  336. { ISD::SRA, MVT::v8i32, { 1, 1, 1, 1 } }, // psrad
  337. { ISD::SHL, MVT::v16i32, { 1, 1, 1, 1 } }, // pslld
  338. { ISD::SRL, MVT::v16i32, { 1, 1, 1, 1 } }, // psrld
  339. { ISD::SRA, MVT::v16i32, { 1, 1, 1, 1 } }, // psrad
  340. { ISD::SRA, MVT::v2i64, { 1, 1, 1, 1 } }, // psraq
  341. { ISD::SHL, MVT::v4i64, { 1, 1, 1, 1 } }, // psllq
  342. { ISD::SRL, MVT::v4i64, { 1, 1, 1, 1 } }, // psrlq
  343. { ISD::SRA, MVT::v4i64, { 1, 1, 1, 1 } }, // psraq
  344. { ISD::SHL, MVT::v8i64, { 1, 1, 1, 1 } }, // psllq
  345. { ISD::SRL, MVT::v8i64, { 1, 1, 1, 1 } }, // psrlq
  346. { ISD::SRA, MVT::v8i64, { 1, 1, 1, 1 } }, // psraq
  347. { ISD::SDIV, MVT::v16i32, { 6 } }, // pmuludq sequence
  348. { ISD::SREM, MVT::v16i32, { 8 } }, // pmuludq+mul+sub sequence
  349. { ISD::UDIV, MVT::v16i32, { 5 } }, // pmuludq sequence
  350. { ISD::UREM, MVT::v16i32, { 7 } }, // pmuludq+mul+sub sequence
  351. };
  352. if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX512())
  353. if (const auto *Entry =
  354. CostTableLookup(AVX512UniformConstCostTable, ISD, LT.second))
  355. if (auto KindCost = Entry->Cost[CostKind])
  356. return LT.first * *KindCost;
  357. static const CostKindTblEntry AVX2UniformConstCostTable[] = {
  358. { ISD::SHL, MVT::v16i8, { 1, 8, 2, 3 } }, // psllw + pand.
  359. { ISD::SRL, MVT::v16i8, { 1, 8, 2, 3 } }, // psrlw + pand.
  360. { ISD::SRA, MVT::v16i8, { 2, 10, 5, 6 } }, // psrlw, pand, pxor, psubb.
  361. { ISD::SHL, MVT::v32i8, { 2, 8, 2, 4 } }, // psllw + pand.
  362. { ISD::SRL, MVT::v32i8, { 2, 8, 2, 4 } }, // psrlw + pand.
  363. { ISD::SRA, MVT::v32i8, { 3, 10, 5, 9 } }, // psrlw, pand, pxor, psubb.
  364. { ISD::SHL, MVT::v8i16, { 1, 1, 1, 1 } }, // psllw
  365. { ISD::SRL, MVT::v8i16, { 1, 1, 1, 1 } }, // psrlw
  366. { ISD::SRA, MVT::v8i16, { 1, 1, 1, 1 } }, // psraw
  367. { ISD::SHL, MVT::v16i16,{ 2, 2, 1, 2 } }, // psllw
  368. { ISD::SRL, MVT::v16i16,{ 2, 2, 1, 2 } }, // psrlw
  369. { ISD::SRA, MVT::v16i16,{ 2, 2, 1, 2 } }, // psraw
  370. { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } }, // pslld
  371. { ISD::SRL, MVT::v4i32, { 1, 1, 1, 1 } }, // psrld
  372. { ISD::SRA, MVT::v4i32, { 1, 1, 1, 1 } }, // psrad
  373. { ISD::SHL, MVT::v8i32, { 2, 2, 1, 2 } }, // pslld
  374. { ISD::SRL, MVT::v8i32, { 2, 2, 1, 2 } }, // psrld
  375. { ISD::SRA, MVT::v8i32, { 2, 2, 1, 2 } }, // psrad
  376. { ISD::SHL, MVT::v2i64, { 1, 1, 1, 1 } }, // psllq
  377. { ISD::SRL, MVT::v2i64, { 1, 1, 1, 1 } }, // psrlq
  378. { ISD::SRA, MVT::v2i64, { 2, 3, 3, 3 } }, // psrad + shuffle.
  379. { ISD::SHL, MVT::v4i64, { 2, 2, 1, 2 } }, // psllq
  380. { ISD::SRL, MVT::v4i64, { 2, 2, 1, 2 } }, // psrlq
  381. { ISD::SRA, MVT::v4i64, { 4, 4, 3, 6 } }, // psrad + shuffle + split.
  382. { ISD::SDIV, MVT::v8i32, { 6 } }, // pmuludq sequence
  383. { ISD::SREM, MVT::v8i32, { 8 } }, // pmuludq+mul+sub sequence
  384. { ISD::UDIV, MVT::v8i32, { 5 } }, // pmuludq sequence
  385. { ISD::UREM, MVT::v8i32, { 7 } }, // pmuludq+mul+sub sequence
  386. };
  387. if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX2())
  388. if (const auto *Entry =
  389. CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second))
  390. if (auto KindCost = Entry->Cost[CostKind])
  391. return LT.first * *KindCost;
  392. static const CostKindTblEntry AVXUniformConstCostTable[] = {
  393. { ISD::SHL, MVT::v16i8, { 2, 7, 2, 3 } }, // psllw + pand.
  394. { ISD::SRL, MVT::v16i8, { 2, 7, 2, 3 } }, // psrlw + pand.
  395. { ISD::SRA, MVT::v16i8, { 3, 9, 5, 6 } }, // psrlw, pand, pxor, psubb.
  396. { ISD::SHL, MVT::v32i8, { 4, 7, 7, 8 } }, // 2*(psllw + pand) + split.
  397. { ISD::SRL, MVT::v32i8, { 4, 7, 7, 8 } }, // 2*(psrlw + pand) + split.
  398. { ISD::SRA, MVT::v32i8, { 7, 7, 12, 13 } }, // 2*(psrlw, pand, pxor, psubb) + split.
  399. { ISD::SHL, MVT::v8i16, { 1, 2, 1, 1 } }, // psllw.
  400. { ISD::SRL, MVT::v8i16, { 1, 2, 1, 1 } }, // psrlw.
  401. { ISD::SRA, MVT::v8i16, { 1, 2, 1, 1 } }, // psraw.
  402. { ISD::SHL, MVT::v16i16,{ 3, 6, 4, 5 } }, // psllw + split.
  403. { ISD::SRL, MVT::v16i16,{ 3, 6, 4, 5 } }, // psrlw + split.
  404. { ISD::SRA, MVT::v16i16,{ 3, 6, 4, 5 } }, // psraw + split.
  405. { ISD::SHL, MVT::v4i32, { 1, 2, 1, 1 } }, // pslld.
  406. { ISD::SRL, MVT::v4i32, { 1, 2, 1, 1 } }, // psrld.
  407. { ISD::SRA, MVT::v4i32, { 1, 2, 1, 1 } }, // psrad.
  408. { ISD::SHL, MVT::v8i32, { 3, 6, 4, 5 } }, // pslld + split.
  409. { ISD::SRL, MVT::v8i32, { 3, 6, 4, 5 } }, // psrld + split.
  410. { ISD::SRA, MVT::v8i32, { 3, 6, 4, 5 } }, // psrad + split.
  411. { ISD::SHL, MVT::v2i64, { 1, 2, 1, 1 } }, // psllq.
  412. { ISD::SRL, MVT::v2i64, { 1, 2, 1, 1 } }, // psrlq.
  413. { ISD::SRA, MVT::v2i64, { 2, 3, 3, 3 } }, // psrad + shuffle.
  414. { ISD::SHL, MVT::v4i64, { 3, 6, 4, 5 } }, // 2 x psllq + split.
415. { ISD::SRL, MVT::v4i64, { 3, 6, 4, 5 } }, // 2 x psrlq + split.
  416. { ISD::SRA, MVT::v4i64, { 5, 7, 8, 9 } }, // 2 x psrad + shuffle + split.
  417. { ISD::SDIV, MVT::v8i32, { 14 } }, // 2*pmuludq sequence + split.
  418. { ISD::SREM, MVT::v8i32, { 18 } }, // 2*pmuludq+mul+sub sequence + split.
  419. { ISD::UDIV, MVT::v8i32, { 12 } }, // 2*pmuludq sequence + split.
  420. { ISD::UREM, MVT::v8i32, { 16 } }, // 2*pmuludq+mul+sub sequence + split.
  421. };
  422. // XOP has faster vXi8 shifts.
  423. if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX() &&
  424. (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
  425. if (const auto *Entry =
  426. CostTableLookup(AVXUniformConstCostTable, ISD, LT.second))
  427. if (auto KindCost = Entry->Cost[CostKind])
  428. return LT.first * *KindCost;
  429. static const CostKindTblEntry SSE2UniformConstCostTable[] = {
  430. { ISD::SHL, MVT::v16i8, { 1, 7, 2, 3 } }, // psllw + pand.
  431. { ISD::SRL, MVT::v16i8, { 1, 7, 2, 3 } }, // psrlw + pand.
  432. { ISD::SRA, MVT::v16i8, { 3, 9, 5, 6 } }, // psrlw, pand, pxor, psubb.
  433. { ISD::SHL, MVT::v8i16, { 1, 1, 1, 1 } }, // psllw.
  434. { ISD::SRL, MVT::v8i16, { 1, 1, 1, 1 } }, // psrlw.
  435. { ISD::SRA, MVT::v8i16, { 1, 1, 1, 1 } }, // psraw.
  436. { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } }, // pslld
  437. { ISD::SRL, MVT::v4i32, { 1, 1, 1, 1 } }, // psrld.
  438. { ISD::SRA, MVT::v4i32, { 1, 1, 1, 1 } }, // psrad.
  439. { ISD::SHL, MVT::v2i64, { 1, 1, 1, 1 } }, // psllq.
  440. { ISD::SRL, MVT::v2i64, { 1, 1, 1, 1 } }, // psrlq.
  441. { ISD::SRA, MVT::v2i64, { 3, 5, 6, 6 } }, // 2 x psrad + shuffle.
  442. { ISD::SDIV, MVT::v4i32, { 6 } }, // pmuludq sequence
  443. { ISD::SREM, MVT::v4i32, { 8 } }, // pmuludq+mul+sub sequence
  444. { ISD::UDIV, MVT::v4i32, { 5 } }, // pmuludq sequence
  445. { ISD::UREM, MVT::v4i32, { 7 } }, // pmuludq+mul+sub sequence
  446. };
  447. // XOP has faster vXi8 shifts.
  448. if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasSSE2() &&
  449. (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
  450. if (const auto *Entry =
  451. CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
  452. if (auto KindCost = Entry->Cost[CostKind])
  453. return LT.first * *KindCost;
  454. static const CostKindTblEntry AVX512BWConstCostTable[] = {
  455. { ISD::SDIV, MVT::v64i8, { 14 } }, // 2*ext+2*pmulhw sequence
  456. { ISD::SREM, MVT::v64i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
  457. { ISD::UDIV, MVT::v64i8, { 14 } }, // 2*ext+2*pmulhw sequence
  458. { ISD::UREM, MVT::v64i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
  459. { ISD::SDIV, MVT::v32i16, { 6 } }, // vpmulhw sequence
  460. { ISD::SREM, MVT::v32i16, { 8 } }, // vpmulhw+mul+sub sequence
  461. { ISD::UDIV, MVT::v32i16, { 6 } }, // vpmulhuw sequence
  462. { ISD::UREM, MVT::v32i16, { 8 } }, // vpmulhuw+mul+sub sequence
  463. };
  464. if (Op2Info.isConstant() && ST->hasBWI())
  465. if (const auto *Entry =
  466. CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
  467. if (auto KindCost = Entry->Cost[CostKind])
  468. return LT.first * *KindCost;
  469. static const CostKindTblEntry AVX512ConstCostTable[] = {
  470. { ISD::SDIV, MVT::v64i8, { 28 } }, // 4*ext+4*pmulhw sequence
  471. { ISD::SREM, MVT::v64i8, { 32 } }, // 4*ext+4*pmulhw+mul+sub sequence
  472. { ISD::UDIV, MVT::v64i8, { 28 } }, // 4*ext+4*pmulhw sequence
  473. { ISD::UREM, MVT::v64i8, { 32 } }, // 4*ext+4*pmulhw+mul+sub sequence
  474. { ISD::SDIV, MVT::v32i16, { 12 } }, // 2*vpmulhw sequence
  475. { ISD::SREM, MVT::v32i16, { 16 } }, // 2*vpmulhw+mul+sub sequence
  476. { ISD::UDIV, MVT::v32i16, { 12 } }, // 2*vpmulhuw sequence
  477. { ISD::UREM, MVT::v32i16, { 16 } }, // 2*vpmulhuw+mul+sub sequence
  478. { ISD::SDIV, MVT::v16i32, { 15 } }, // vpmuldq sequence
  479. { ISD::SREM, MVT::v16i32, { 17 } }, // vpmuldq+mul+sub sequence
  480. { ISD::UDIV, MVT::v16i32, { 15 } }, // vpmuludq sequence
  481. { ISD::UREM, MVT::v16i32, { 17 } }, // vpmuludq+mul+sub sequence
  482. };
  483. if (Op2Info.isConstant() && ST->hasAVX512())
  484. if (const auto *Entry =
  485. CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
  486. if (auto KindCost = Entry->Cost[CostKind])
  487. return LT.first * *KindCost;
  488. static const CostKindTblEntry AVX2ConstCostTable[] = {
  489. { ISD::SDIV, MVT::v32i8, { 14 } }, // 2*ext+2*pmulhw sequence
  490. { ISD::SREM, MVT::v32i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
  491. { ISD::UDIV, MVT::v32i8, { 14 } }, // 2*ext+2*pmulhw sequence
  492. { ISD::UREM, MVT::v32i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
  493. { ISD::SDIV, MVT::v16i16, { 6 } }, // vpmulhw sequence
  494. { ISD::SREM, MVT::v16i16, { 8 } }, // vpmulhw+mul+sub sequence
  495. { ISD::UDIV, MVT::v16i16, { 6 } }, // vpmulhuw sequence
  496. { ISD::UREM, MVT::v16i16, { 8 } }, // vpmulhuw+mul+sub sequence
  497. { ISD::SDIV, MVT::v8i32, { 15 } }, // vpmuldq sequence
  498. { ISD::SREM, MVT::v8i32, { 19 } }, // vpmuldq+mul+sub sequence
  499. { ISD::UDIV, MVT::v8i32, { 15 } }, // vpmuludq sequence
  500. { ISD::UREM, MVT::v8i32, { 19 } }, // vpmuludq+mul+sub sequence
  501. };
  502. if (Op2Info.isConstant() && ST->hasAVX2())
  503. if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
  504. if (auto KindCost = Entry->Cost[CostKind])
  505. return LT.first * *KindCost;
  506. static const CostKindTblEntry AVXConstCostTable[] = {
  507. { ISD::SDIV, MVT::v32i8, { 30 } }, // 4*ext+4*pmulhw sequence + split.
  508. { ISD::SREM, MVT::v32i8, { 34 } }, // 4*ext+4*pmulhw+mul+sub sequence + split.
  509. { ISD::UDIV, MVT::v32i8, { 30 } }, // 4*ext+4*pmulhw sequence + split.
  510. { ISD::UREM, MVT::v32i8, { 34 } }, // 4*ext+4*pmulhw+mul+sub sequence + split.
  511. { ISD::SDIV, MVT::v16i16, { 14 } }, // 2*pmulhw sequence + split.
  512. { ISD::SREM, MVT::v16i16, { 18 } }, // 2*pmulhw+mul+sub sequence + split.
  513. { ISD::UDIV, MVT::v16i16, { 14 } }, // 2*pmulhuw sequence + split.
  514. { ISD::UREM, MVT::v16i16, { 18 } }, // 2*pmulhuw+mul+sub sequence + split.
  515. { ISD::SDIV, MVT::v8i32, { 32 } }, // vpmuludq sequence
  516. { ISD::SREM, MVT::v8i32, { 38 } }, // vpmuludq+mul+sub sequence
  517. { ISD::UDIV, MVT::v8i32, { 32 } }, // 2*pmuludq sequence + split.
  518. { ISD::UREM, MVT::v8i32, { 42 } }, // 2*pmuludq+mul+sub sequence + split.
  519. };
  520. if (Op2Info.isConstant() && ST->hasAVX())
  521. if (const auto *Entry = CostTableLookup(AVXConstCostTable, ISD, LT.second))
  522. if (auto KindCost = Entry->Cost[CostKind])
  523. return LT.first * *KindCost;
  524. static const CostKindTblEntry SSE41ConstCostTable[] = {
  525. { ISD::SDIV, MVT::v4i32, { 15 } }, // vpmuludq sequence
  526. { ISD::SREM, MVT::v4i32, { 20 } }, // vpmuludq+mul+sub sequence
  527. };
  528. if (Op2Info.isConstant() && ST->hasSSE41())
  529. if (const auto *Entry =
  530. CostTableLookup(SSE41ConstCostTable, ISD, LT.second))
  531. if (auto KindCost = Entry->Cost[CostKind])
  532. return LT.first * *KindCost;
  533. static const CostKindTblEntry SSE2ConstCostTable[] = {
  534. { ISD::SDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
  535. { ISD::SREM, MVT::v16i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
  536. { ISD::UDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
  537. { ISD::UREM, MVT::v16i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
  538. { ISD::SDIV, MVT::v8i16, { 6 } }, // pmulhw sequence
  539. { ISD::SREM, MVT::v8i16, { 8 } }, // pmulhw+mul+sub sequence
  540. { ISD::UDIV, MVT::v8i16, { 6 } }, // pmulhuw sequence
  541. { ISD::UREM, MVT::v8i16, { 8 } }, // pmulhuw+mul+sub sequence
  542. { ISD::SDIV, MVT::v4i32, { 19 } }, // pmuludq sequence
  543. { ISD::SREM, MVT::v4i32, { 24 } }, // pmuludq+mul+sub sequence
  544. { ISD::UDIV, MVT::v4i32, { 15 } }, // pmuludq sequence
  545. { ISD::UREM, MVT::v4i32, { 20 } }, // pmuludq+mul+sub sequence
  546. };
  547. if (Op2Info.isConstant() && ST->hasSSE2())
  548. if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
  549. if (auto KindCost = Entry->Cost[CostKind])
  550. return LT.first * *KindCost;
  551. static const CostKindTblEntry AVX512BWUniformCostTable[] = {
  552. { ISD::SHL, MVT::v16i8, { 3, 5, 5, 7 } }, // psllw + pand.
  553. { ISD::SRL, MVT::v16i8, { 3,10, 5, 8 } }, // psrlw + pand.
  554. { ISD::SRA, MVT::v16i8, { 4,12, 8,12 } }, // psrlw, pand, pxor, psubb.
  555. { ISD::SHL, MVT::v32i8, { 4, 7, 6, 8 } }, // psllw + pand.
  556. { ISD::SRL, MVT::v32i8, { 4, 8, 7, 9 } }, // psrlw + pand.
  557. { ISD::SRA, MVT::v32i8, { 5,10,10,13 } }, // psrlw, pand, pxor, psubb.
  558. { ISD::SHL, MVT::v64i8, { 4, 7, 6, 8 } }, // psllw + pand.
  559. { ISD::SRL, MVT::v64i8, { 4, 8, 7,10 } }, // psrlw + pand.
  560. { ISD::SRA, MVT::v64i8, { 5,10,10,15 } }, // psrlw, pand, pxor, psubb.
  561. { ISD::SHL, MVT::v32i16, { 2, 4, 2, 3 } }, // psllw
  562. { ISD::SRL, MVT::v32i16, { 2, 4, 2, 3 } }, // psrlw
563. { ISD::SRA, MVT::v32i16, { 2, 4, 2, 3 } }, // psraw
  564. };
  565. if (ST->hasBWI() && Op2Info.isUniform())
  566. if (const auto *Entry =
  567. CostTableLookup(AVX512BWUniformCostTable, ISD, LT.second))
  568. if (auto KindCost = Entry->Cost[CostKind])
  569. return LT.first * *KindCost;
  570. static const CostKindTblEntry AVX512UniformCostTable[] = {
  571. { ISD::SHL, MVT::v32i16, { 5,10, 5, 7 } }, // psllw + split.
  572. { ISD::SRL, MVT::v32i16, { 5,10, 5, 7 } }, // psrlw + split.
  573. { ISD::SRA, MVT::v32i16, { 5,10, 5, 7 } }, // psraw + split.
  574. { ISD::SHL, MVT::v16i32, { 2, 4, 2, 3 } }, // pslld
  575. { ISD::SRL, MVT::v16i32, { 2, 4, 2, 3 } }, // psrld
  576. { ISD::SRA, MVT::v16i32, { 2, 4, 2, 3 } }, // psrad
  577. { ISD::SRA, MVT::v2i64, { 1, 2, 1, 2 } }, // psraq
  578. { ISD::SHL, MVT::v4i64, { 1, 4, 1, 2 } }, // psllq
  579. { ISD::SRL, MVT::v4i64, { 1, 4, 1, 2 } }, // psrlq
  580. { ISD::SRA, MVT::v4i64, { 1, 4, 1, 2 } }, // psraq
  581. { ISD::SHL, MVT::v8i64, { 1, 4, 1, 2 } }, // psllq
  582. { ISD::SRL, MVT::v8i64, { 1, 4, 1, 2 } }, // psrlq
  583. { ISD::SRA, MVT::v8i64, { 1, 4, 1, 2 } }, // psraq
  584. };
  585. if (ST->hasAVX512() && Op2Info.isUniform())
  586. if (const auto *Entry =
  587. CostTableLookup(AVX512UniformCostTable, ISD, LT.second))
  588. if (auto KindCost = Entry->Cost[CostKind])
  589. return LT.first * *KindCost;
  590. static const CostKindTblEntry AVX2UniformCostTable[] = {
  591. // Uniform splats are cheaper for the following instructions.
  592. { ISD::SHL, MVT::v16i8, { 3, 5, 5, 7 } }, // psllw + pand.
  593. { ISD::SRL, MVT::v16i8, { 3, 9, 5, 8 } }, // psrlw + pand.
  594. { ISD::SRA, MVT::v16i8, { 4, 5, 9,13 } }, // psrlw, pand, pxor, psubb.
  595. { ISD::SHL, MVT::v32i8, { 4, 7, 6, 8 } }, // psllw + pand.
  596. { ISD::SRL, MVT::v32i8, { 4, 8, 7, 9 } }, // psrlw + pand.
  597. { ISD::SRA, MVT::v32i8, { 6, 9,11,16 } }, // psrlw, pand, pxor, psubb.
  598. { ISD::SHL, MVT::v8i16, { 1, 2, 1, 2 } }, // psllw.
  599. { ISD::SRL, MVT::v8i16, { 1, 2, 1, 2 } }, // psrlw.
  600. { ISD::SRA, MVT::v8i16, { 1, 2, 1, 2 } }, // psraw.
  601. { ISD::SHL, MVT::v16i16, { 2, 4, 2, 3 } }, // psllw.
  602. { ISD::SRL, MVT::v16i16, { 2, 4, 2, 3 } }, // psrlw.
  603. { ISD::SRA, MVT::v16i16, { 2, 4, 2, 3 } }, // psraw.
  604. { ISD::SHL, MVT::v4i32, { 1, 2, 1, 2 } }, // pslld
  605. { ISD::SRL, MVT::v4i32, { 1, 2, 1, 2 } }, // psrld
  606. { ISD::SRA, MVT::v4i32, { 1, 2, 1, 2 } }, // psrad
  607. { ISD::SHL, MVT::v8i32, { 2, 4, 2, 3 } }, // pslld
  608. { ISD::SRL, MVT::v8i32, { 2, 4, 2, 3 } }, // psrld
  609. { ISD::SRA, MVT::v8i32, { 2, 4, 2, 3 } }, // psrad
  610. { ISD::SHL, MVT::v2i64, { 1, 2, 1, 2 } }, // psllq
  611. { ISD::SRL, MVT::v2i64, { 1, 2, 1, 2 } }, // psrlq
  612. { ISD::SRA, MVT::v2i64, { 2, 4, 5, 7 } }, // 2 x psrad + shuffle.
  613. { ISD::SHL, MVT::v4i64, { 2, 4, 1, 2 } }, // psllq
  614. { ISD::SRL, MVT::v4i64, { 2, 4, 1, 2 } }, // psrlq
  615. { ISD::SRA, MVT::v4i64, { 4, 6, 5, 9 } }, // 2 x psrad + shuffle.
  616. };
  617. if (ST->hasAVX2() && Op2Info.isUniform())
  618. if (const auto *Entry =
  619. CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
  620. if (auto KindCost = Entry->Cost[CostKind])
  621. return LT.first * *KindCost;
  622. static const CostKindTblEntry AVXUniformCostTable[] = {
  623. { ISD::SHL, MVT::v16i8, { 4, 4, 6, 8 } }, // psllw + pand.
  624. { ISD::SRL, MVT::v16i8, { 4, 8, 5, 8 } }, // psrlw + pand.
  625. { ISD::SRA, MVT::v16i8, { 6, 6, 9,13 } }, // psrlw, pand, pxor, psubb.
  626. { ISD::SHL, MVT::v32i8, { 7, 8,11,14 } }, // psllw + pand + split.
  627. { ISD::SRL, MVT::v32i8, { 7, 9,10,14 } }, // psrlw + pand + split.
  628. { ISD::SRA, MVT::v32i8, { 10,11,16,21 } }, // psrlw, pand, pxor, psubb + split.
  629. { ISD::SHL, MVT::v8i16, { 1, 3, 1, 2 } }, // psllw.
  630. { ISD::SRL, MVT::v8i16, { 1, 3, 1, 2 } }, // psrlw.
  631. { ISD::SRA, MVT::v8i16, { 1, 3, 1, 2 } }, // psraw.
  632. { ISD::SHL, MVT::v16i16, { 3, 7, 5, 7 } }, // psllw + split.
  633. { ISD::SRL, MVT::v16i16, { 3, 7, 5, 7 } }, // psrlw + split.
  634. { ISD::SRA, MVT::v16i16, { 3, 7, 5, 7 } }, // psraw + split.
  635. { ISD::SHL, MVT::v4i32, { 1, 3, 1, 2 } }, // pslld.
  636. { ISD::SRL, MVT::v4i32, { 1, 3, 1, 2 } }, // psrld.
  637. { ISD::SRA, MVT::v4i32, { 1, 3, 1, 2 } }, // psrad.
  638. { ISD::SHL, MVT::v8i32, { 3, 7, 5, 7 } }, // pslld + split.
  639. { ISD::SRL, MVT::v8i32, { 3, 7, 5, 7 } }, // psrld + split.
  640. { ISD::SRA, MVT::v8i32, { 3, 7, 5, 7 } }, // psrad + split.
  641. { ISD::SHL, MVT::v2i64, { 1, 3, 1, 2 } }, // psllq.
  642. { ISD::SRL, MVT::v2i64, { 1, 3, 1, 2 } }, // psrlq.
  643. { ISD::SRA, MVT::v2i64, { 3, 4, 5, 7 } }, // 2 x psrad + shuffle.
  644. { ISD::SHL, MVT::v4i64, { 3, 7, 4, 6 } }, // psllq + split.
  645. { ISD::SRL, MVT::v4i64, { 3, 7, 4, 6 } }, // psrlq + split.
  646. { ISD::SRA, MVT::v4i64, { 6, 7,10,13 } }, // 2 x (2 x psrad + shuffle) + split.
  647. };
  648. // XOP has faster vXi8 shifts.
  649. if (ST->hasAVX() && Op2Info.isUniform() &&
  650. (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
  651. if (const auto *Entry =
  652. CostTableLookup(AVXUniformCostTable, ISD, LT.second))
  653. if (auto KindCost = Entry->Cost[CostKind])
  654. return LT.first * *KindCost;
  655. static const CostKindTblEntry SSE2UniformCostTable[] = {
  656. // Uniform splats are cheaper for the following instructions.
  657. { ISD::SHL, MVT::v16i8, { 9, 10, 6, 9 } }, // psllw + pand.
  658. { ISD::SRL, MVT::v16i8, { 9, 13, 5, 9 } }, // psrlw + pand.
  659. { ISD::SRA, MVT::v16i8, { 11, 15, 9,13 } }, // pcmpgtb sequence.
  660. { ISD::SHL, MVT::v8i16, { 2, 2, 1, 2 } }, // psllw.
  661. { ISD::SRL, MVT::v8i16, { 2, 2, 1, 2 } }, // psrlw.
  662. { ISD::SRA, MVT::v8i16, { 2, 2, 1, 2 } }, // psraw.
  663. { ISD::SHL, MVT::v4i32, { 2, 2, 1, 2 } }, // pslld
  664. { ISD::SRL, MVT::v4i32, { 2, 2, 1, 2 } }, // psrld.
  665. { ISD::SRA, MVT::v4i32, { 2, 2, 1, 2 } }, // psrad.
  666. { ISD::SHL, MVT::v2i64, { 2, 2, 1, 2 } }, // psllq.
  667. { ISD::SRL, MVT::v2i64, { 2, 2, 1, 2 } }, // psrlq.
  668. { ISD::SRA, MVT::v2i64, { 5, 9, 5, 7 } }, // 2*psrlq + xor + sub.
  669. };
  670. if (ST->hasSSE2() && Op2Info.isUniform() &&
  671. (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
  672. if (const auto *Entry =
  673. CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
  674. if (auto KindCost = Entry->Cost[CostKind])
  675. return LT.first * *KindCost;
  676. static const CostKindTblEntry AVX512DQCostTable[] = {
  677. { ISD::MUL, MVT::v2i64, { 2, 15, 1, 3 } }, // pmullq
  678. { ISD::MUL, MVT::v4i64, { 2, 15, 1, 3 } }, // pmullq
  679. { ISD::MUL, MVT::v8i64, { 3, 15, 1, 3 } } // pmullq
  680. };
  681. // Look for AVX512DQ lowering tricks for custom cases.
  682. if (ST->hasDQI())
  683. if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
  684. if (auto KindCost = Entry->Cost[CostKind])
  685. return LT.first * *KindCost;
  686. static const CostKindTblEntry AVX512BWCostTable[] = {
  687. { ISD::SHL, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsllvw/pack sequence.
  688. { ISD::SRL, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsrlvw/pack sequence.
  689. { ISD::SRA, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsravw/pack sequence.
  690. { ISD::SHL, MVT::v32i8, { 4, 23,11,16 } }, // extend/vpsllvw/pack sequence.
  691. { ISD::SRL, MVT::v32i8, { 4, 30,12,18 } }, // extend/vpsrlvw/pack sequence.
  692. { ISD::SRA, MVT::v32i8, { 6, 13,24,30 } }, // extend/vpsravw/pack sequence.
  693. { ISD::SHL, MVT::v64i8, { 6, 19,13,15 } }, // extend/vpsllvw/pack sequence.
  694. { ISD::SRL, MVT::v64i8, { 7, 27,15,18 } }, // extend/vpsrlvw/pack sequence.
  695. { ISD::SRA, MVT::v64i8, { 15, 15,30,30 } }, // extend/vpsravw/pack sequence.
  696. { ISD::SHL, MVT::v8i16, { 1, 1, 1, 1 } }, // vpsllvw
  697. { ISD::SRL, MVT::v8i16, { 1, 1, 1, 1 } }, // vpsrlvw
  698. { ISD::SRA, MVT::v8i16, { 1, 1, 1, 1 } }, // vpsravw
  699. { ISD::SHL, MVT::v16i16, { 1, 1, 1, 1 } }, // vpsllvw
  700. { ISD::SRL, MVT::v16i16, { 1, 1, 1, 1 } }, // vpsrlvw
  701. { ISD::SRA, MVT::v16i16, { 1, 1, 1, 1 } }, // vpsravw
  702. { ISD::SHL, MVT::v32i16, { 1, 1, 1, 1 } }, // vpsllvw
  703. { ISD::SRL, MVT::v32i16, { 1, 1, 1, 1 } }, // vpsrlvw
  704. { ISD::SRA, MVT::v32i16, { 1, 1, 1, 1 } }, // vpsravw
  705. { ISD::ADD, MVT::v64i8, { 1, 1, 1, 1 } }, // paddb
  706. { ISD::ADD, MVT::v32i16, { 1, 1, 1, 1 } }, // paddw
  707. { ISD::ADD, MVT::v32i8, { 1, 1, 1, 1 } }, // paddb
  708. { ISD::ADD, MVT::v16i16, { 1, 1, 1, 1 } }, // paddw
  709. { ISD::ADD, MVT::v8i32, { 1, 1, 1, 1 } }, // paddd
  710. { ISD::ADD, MVT::v4i64, { 1, 1, 1, 1 } }, // paddq
  711. { ISD::SUB, MVT::v64i8, { 1, 1, 1, 1 } }, // psubb
  712. { ISD::SUB, MVT::v32i16, { 1, 1, 1, 1 } }, // psubw
  713. { ISD::MUL, MVT::v32i16, { 1, 5, 1, 1 } }, // pmullw
  714. { ISD::SUB, MVT::v32i8, { 1, 1, 1, 1 } }, // psubb
  715. { ISD::SUB, MVT::v16i16, { 1, 1, 1, 1 } }, // psubw
  716. { ISD::SUB, MVT::v8i32, { 1, 1, 1, 1 } }, // psubd
  717. { ISD::SUB, MVT::v4i64, { 1, 1, 1, 1 } }, // psubq
  718. };
  719. // Look for AVX512BW lowering tricks for custom cases.
  720. if (ST->hasBWI())
  721. if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
  722. if (auto KindCost = Entry->Cost[CostKind])
  723. return LT.first * *KindCost;
  724. static const CostKindTblEntry AVX512CostTable[] = {
  725. { ISD::SHL, MVT::v64i8, { 15, 19,27,33 } }, // vpblendv+split sequence.
  726. { ISD::SRL, MVT::v64i8, { 15, 19,30,36 } }, // vpblendv+split sequence.
  727. { ISD::SRA, MVT::v64i8, { 37, 37,51,63 } }, // vpblendv+split sequence.
  728. { ISD::SHL, MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsrlvd/pack sequence.
  729. { ISD::SRL, MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsrlvd/pack sequence.
  730. { ISD::SRA, MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsravd/pack sequence.
  731. { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } },
  732. { ISD::SRL, MVT::v4i32, { 1, 1, 1, 1 } },
  733. { ISD::SRA, MVT::v4i32, { 1, 1, 1, 1 } },
  734. { ISD::SHL, MVT::v8i32, { 1, 1, 1, 1 } },
  735. { ISD::SRL, MVT::v8i32, { 1, 1, 1, 1 } },
  736. { ISD::SRA, MVT::v8i32, { 1, 1, 1, 1 } },
  737. { ISD::SHL, MVT::v16i32, { 1, 1, 1, 1 } },
  738. { ISD::SRL, MVT::v16i32, { 1, 1, 1, 1 } },
  739. { ISD::SRA, MVT::v16i32, { 1, 1, 1, 1 } },
  740. { ISD::SHL, MVT::v2i64, { 1, 1, 1, 1 } },
  741. { ISD::SRL, MVT::v2i64, { 1, 1, 1, 1 } },
  742. { ISD::SRA, MVT::v2i64, { 1, 1, 1, 1 } },
  743. { ISD::SHL, MVT::v4i64, { 1, 1, 1, 1 } },
  744. { ISD::SRL, MVT::v4i64, { 1, 1, 1, 1 } },
  745. { ISD::SRA, MVT::v4i64, { 1, 1, 1, 1 } },
  746. { ISD::SHL, MVT::v8i64, { 1, 1, 1, 1 } },
  747. { ISD::SRL, MVT::v8i64, { 1, 1, 1, 1 } },
  748. { ISD::SRA, MVT::v8i64, { 1, 1, 1, 1 } },
  749. { ISD::ADD, MVT::v64i8, { 3, 7, 5, 5 } }, // 2*paddb + split
  750. { ISD::ADD, MVT::v32i16, { 3, 7, 5, 5 } }, // 2*paddw + split
  751. { ISD::SUB, MVT::v64i8, { 3, 7, 5, 5 } }, // 2*psubb + split
  752. { ISD::SUB, MVT::v32i16, { 3, 7, 5, 5 } }, // 2*psubw + split
  753. { ISD::AND, MVT::v32i8, { 1, 1, 1, 1 } },
  754. { ISD::AND, MVT::v16i16, { 1, 1, 1, 1 } },
  755. { ISD::AND, MVT::v8i32, { 1, 1, 1, 1 } },
  756. { ISD::AND, MVT::v4i64, { 1, 1, 1, 1 } },
  757. { ISD::OR, MVT::v32i8, { 1, 1, 1, 1 } },
  758. { ISD::OR, MVT::v16i16, { 1, 1, 1, 1 } },
  759. { ISD::OR, MVT::v8i32, { 1, 1, 1, 1 } },
  760. { ISD::OR, MVT::v4i64, { 1, 1, 1, 1 } },
  761. { ISD::XOR, MVT::v32i8, { 1, 1, 1, 1 } },
  762. { ISD::XOR, MVT::v16i16, { 1, 1, 1, 1 } },
  763. { ISD::XOR, MVT::v8i32, { 1, 1, 1, 1 } },
  764. { ISD::XOR, MVT::v4i64, { 1, 1, 1, 1 } },
  765. { ISD::MUL, MVT::v16i32, { 1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
  766. { ISD::MUL, MVT::v8i32, { 1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
  767. { ISD::MUL, MVT::v4i32, { 1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
  768. { ISD::MUL, MVT::v8i64, { 6, 9, 8, 8 } }, // 3*pmuludq/3*shift/2*add
  769. { ISD::MUL, MVT::i64, { 1 } }, // Skylake from http://www.agner.org/
  770. { ISD::FNEG, MVT::v8f64, { 1, 1, 1, 2 } }, // Skylake from http://www.agner.org/
  771. { ISD::FADD, MVT::v8f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  772. { ISD::FADD, MVT::v4f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  773. { ISD::FSUB, MVT::v8f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  774. { ISD::FSUB, MVT::v4f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  775. { ISD::FMUL, MVT::v8f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  776. { ISD::FMUL, MVT::v4f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  777. { ISD::FMUL, MVT::v2f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  778. { ISD::FMUL, MVT::f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  779. { ISD::FDIV, MVT::f64, { 4, 14, 1, 1 } }, // Skylake from http://www.agner.org/
  780. { ISD::FDIV, MVT::v2f64, { 4, 14, 1, 1 } }, // Skylake from http://www.agner.org/
  781. { ISD::FDIV, MVT::v4f64, { 8, 14, 1, 1 } }, // Skylake from http://www.agner.org/
  782. { ISD::FDIV, MVT::v8f64, { 16, 23, 1, 3 } }, // Skylake from http://www.agner.org/
  783. { ISD::FNEG, MVT::v16f32, { 1, 1, 1, 2 } }, // Skylake from http://www.agner.org/
  784. { ISD::FADD, MVT::v16f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  785. { ISD::FADD, MVT::v8f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  786. { ISD::FSUB, MVT::v16f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  787. { ISD::FSUB, MVT::v8f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  788. { ISD::FMUL, MVT::v16f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  789. { ISD::FMUL, MVT::v8f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  790. { ISD::FMUL, MVT::v4f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  791. { ISD::FMUL, MVT::f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
  792. { ISD::FDIV, MVT::f32, { 3, 11, 1, 1 } }, // Skylake from http://www.agner.org/
  793. { ISD::FDIV, MVT::v4f32, { 3, 11, 1, 1 } }, // Skylake from http://www.agner.org/
  794. { ISD::FDIV, MVT::v8f32, { 5, 11, 1, 1 } }, // Skylake from http://www.agner.org/
  795. { ISD::FDIV, MVT::v16f32, { 10, 18, 1, 3 } }, // Skylake from http://www.agner.org/
  796. };
  797. if (ST->hasAVX512())
  798. if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
  799. if (auto KindCost = Entry->Cost[CostKind])
  800. return LT.first * *KindCost;
  801. static const CostKindTblEntry AVX2ShiftCostTable[] = {
802. // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them as
803. // custom in order to detect the cases where the shift amount is a scalar.
  804. { ISD::SHL, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsllvd (Haswell from agner.org)
  805. { ISD::SRL, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsrlvd (Haswell from agner.org)
  806. { ISD::SRA, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsravd (Haswell from agner.org)
  807. { ISD::SHL, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsllvd (Haswell from agner.org)
  808. { ISD::SRL, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsrlvd (Haswell from agner.org)
  809. { ISD::SRA, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsravd (Haswell from agner.org)
  810. { ISD::SHL, MVT::v2i64, { 2, 3, 1, 1 } }, // vpsllvq (Haswell from agner.org)
  811. { ISD::SRL, MVT::v2i64, { 2, 3, 1, 1 } }, // vpsrlvq (Haswell from agner.org)
  812. { ISD::SHL, MVT::v4i64, { 4, 4, 1, 2 } }, // vpsllvq (Haswell from agner.org)
  813. { ISD::SRL, MVT::v4i64, { 4, 4, 1, 2 } }, // vpsrlvq (Haswell from agner.org)
  814. };
  815. if (ST->hasAVX512()) {
  816. if (ISD == ISD::SHL && LT.second == MVT::v32i16 && Op2Info.isConstant())
  817. // On AVX512, a packed v32i16 shift left by a constant build_vector
  818. // is lowered into a vector multiply (vpmullw).
  819. return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
  820. Op1Info.getNoProps(), Op2Info.getNoProps());
  821. }
  822. // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  823. if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
  824. if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
  825. Op2Info.isConstant())
  826. // On AVX2, a packed v16i16 shift left by a constant build_vector
  827. // is lowered into a vector multiply (vpmullw).
  828. return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
  829. Op1Info.getNoProps(), Op2Info.getNoProps());
  830. if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
  831. if (auto KindCost = Entry->Cost[CostKind])
  832. return LT.first * *KindCost;
  833. }
  834. static const CostKindTblEntry XOPShiftCostTable[] = {
835. // 128-bit shifts take 1 cycle, but right shifts require negation beforehand.
  836. { ISD::SHL, MVT::v16i8, { 1, 3, 1, 1 } },
  837. { ISD::SRL, MVT::v16i8, { 2, 3, 1, 1 } },
  838. { ISD::SRA, MVT::v16i8, { 2, 3, 1, 1 } },
  839. { ISD::SHL, MVT::v8i16, { 1, 3, 1, 1 } },
  840. { ISD::SRL, MVT::v8i16, { 2, 3, 1, 1 } },
  841. { ISD::SRA, MVT::v8i16, { 2, 3, 1, 1 } },
  842. { ISD::SHL, MVT::v4i32, { 1, 3, 1, 1 } },
  843. { ISD::SRL, MVT::v4i32, { 2, 3, 1, 1 } },
  844. { ISD::SRA, MVT::v4i32, { 2, 3, 1, 1 } },
  845. { ISD::SHL, MVT::v2i64, { 1, 3, 1, 1 } },
  846. { ISD::SRL, MVT::v2i64, { 2, 3, 1, 1 } },
  847. { ISD::SRA, MVT::v2i64, { 2, 3, 1, 1 } },
848. // 256-bit shifts require splitting if AVX2 didn't catch them above.
  849. { ISD::SHL, MVT::v32i8, { 4, 7, 5, 6 } },
  850. { ISD::SRL, MVT::v32i8, { 6, 7, 5, 6 } },
  851. { ISD::SRA, MVT::v32i8, { 6, 7, 5, 6 } },
  852. { ISD::SHL, MVT::v16i16, { 4, 7, 5, 6 } },
  853. { ISD::SRL, MVT::v16i16, { 6, 7, 5, 6 } },
  854. { ISD::SRA, MVT::v16i16, { 6, 7, 5, 6 } },
  855. { ISD::SHL, MVT::v8i32, { 4, 7, 5, 6 } },
  856. { ISD::SRL, MVT::v8i32, { 6, 7, 5, 6 } },
  857. { ISD::SRA, MVT::v8i32, { 6, 7, 5, 6 } },
  858. { ISD::SHL, MVT::v4i64, { 4, 7, 5, 6 } },
  859. { ISD::SRL, MVT::v4i64, { 6, 7, 5, 6 } },
  860. { ISD::SRA, MVT::v4i64, { 6, 7, 5, 6 } },
  861. };
  862. // Look for XOP lowering tricks.
  863. if (ST->hasXOP()) {
  864. // If the right shift is constant then we'll fold the negation so
  865. // it's as cheap as a left shift.
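// (XOP's vpshl*/vpsha* instructions shift left for positive per-element
// counts and right for negative counts, so a variable right shift needs an
// extra negation of the amounts; with a constant amount the negated constant
// folds away, hence the remap to the SHL costs below.)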
  866. int ShiftISD = ISD;
  867. if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) && Op2Info.isConstant())
  868. ShiftISD = ISD::SHL;
  869. if (const auto *Entry =
  870. CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
  871. if (auto KindCost = Entry->Cost[CostKind])
  872. return LT.first * *KindCost;
  873. }
  874. if (ISD == ISD::SHL && !Op2Info.isUniform() && Op2Info.isConstant()) {
  875. MVT VT = LT.second;
876. // A vector shift left by a non-uniform constant can be lowered
877. // into a vector multiply.
  878. if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
  879. ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
  880. ISD = ISD::MUL;
  881. }
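// E.g. a non-uniform constant shift such as (shl <4 x i32> %x, <1, 2, 3, 4>)
// can be costed as (mul %x, <2, 4, 8, 16>), since shifting left by C is the
// same as multiplying by (1 << C).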
  882. static const CostKindTblEntry GLMCostTable[] = {
  883. { ISD::FDIV, MVT::f32, { 18, 19, 1, 1 } }, // divss
  884. { ISD::FDIV, MVT::v4f32, { 35, 36, 1, 1 } }, // divps
  885. { ISD::FDIV, MVT::f64, { 33, 34, 1, 1 } }, // divsd
  886. { ISD::FDIV, MVT::v2f64, { 65, 66, 1, 1 } }, // divpd
  887. };
  888. if (ST->useGLMDivSqrtCosts())
  889. if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
  890. if (auto KindCost = Entry->Cost[CostKind])
  891. return LT.first * *KindCost;
  892. static const CostKindTblEntry SLMCostTable[] = {
  893. { ISD::MUL, MVT::v4i32, { 11, 11, 1, 7 } }, // pmulld
  894. { ISD::MUL, MVT::v8i16, { 2, 5, 1, 1 } }, // pmullw
  895. { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // mulsd
  896. { ISD::FMUL, MVT::f32, { 1, 4, 1, 1 } }, // mulss
  897. { ISD::FMUL, MVT::v2f64, { 4, 7, 1, 1 } }, // mulpd
  898. { ISD::FMUL, MVT::v4f32, { 2, 5, 1, 1 } }, // mulps
  899. { ISD::FDIV, MVT::f32, { 17, 19, 1, 1 } }, // divss
  900. { ISD::FDIV, MVT::v4f32, { 39, 39, 1, 6 } }, // divps
  901. { ISD::FDIV, MVT::f64, { 32, 34, 1, 1 } }, // divsd
  902. { ISD::FDIV, MVT::v2f64, { 69, 69, 1, 6 } }, // divpd
  903. { ISD::FADD, MVT::v2f64, { 2, 4, 1, 1 } }, // addpd
  904. { ISD::FSUB, MVT::v2f64, { 2, 4, 1, 1 } }, // subpd
905. // v2i64/v4i64 mul is custom lowered as a series of long
906. // multiplies(3), shifts(3) and adds(2).
907. // On SLM the pmuludq throughput is 2 and the addq throughput is 4,
908. // thus: 3x2 (pmuludq throughput) + 3x1 (shift throughput) +
909. // 2x4 (addq throughput) = 17
  910. { ISD::MUL, MVT::v2i64, { 17, 22, 9, 9 } },
  911. // slm addq\subq throughput is 4
  912. { ISD::ADD, MVT::v2i64, { 4, 2, 1, 2 } },
  913. { ISD::SUB, MVT::v2i64, { 4, 2, 1, 2 } },
  914. };
  915. if (ST->useSLMArithCosts())
  916. if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second))
  917. if (auto KindCost = Entry->Cost[CostKind])
  918. return LT.first * *KindCost;
  919. static const CostKindTblEntry AVX2CostTable[] = {
  920. { ISD::SHL, MVT::v16i8, { 6, 21,11,16 } }, // vpblendvb sequence.
  921. { ISD::SHL, MVT::v32i8, { 6, 23,11,22 } }, // vpblendvb sequence.
  922. { ISD::SHL, MVT::v8i16, { 5, 18, 5,10 } }, // extend/vpsrlvd/pack sequence.
  923. { ISD::SHL, MVT::v16i16, { 8, 10,10,14 } }, // extend/vpsrlvd/pack sequence.
  924. { ISD::SRL, MVT::v16i8, { 6, 27,12,18 } }, // vpblendvb sequence.
  925. { ISD::SRL, MVT::v32i8, { 8, 30,12,24 } }, // vpblendvb sequence.
  926. { ISD::SRL, MVT::v8i16, { 5, 11, 5,10 } }, // extend/vpsrlvd/pack sequence.
  927. { ISD::SRL, MVT::v16i16, { 8, 10,10,14 } }, // extend/vpsrlvd/pack sequence.
  928. { ISD::SRA, MVT::v16i8, { 17, 17,24,30 } }, // vpblendvb sequence.
  929. { ISD::SRA, MVT::v32i8, { 18, 20,24,43 } }, // vpblendvb sequence.
  930. { ISD::SRA, MVT::v8i16, { 5, 11, 5,10 } }, // extend/vpsravd/pack sequence.
  931. { ISD::SRA, MVT::v16i16, { 8, 10,10,14 } }, // extend/vpsravd/pack sequence.
  932. { ISD::SRA, MVT::v2i64, { 4, 5, 5, 5 } }, // srl/xor/sub sequence.
  933. { ISD::SRA, MVT::v4i64, { 8, 8, 5, 9 } }, // srl/xor/sub sequence.
  934. { ISD::SUB, MVT::v32i8, { 1, 1, 1, 2 } }, // psubb
  935. { ISD::ADD, MVT::v32i8, { 1, 1, 1, 2 } }, // paddb
  936. { ISD::SUB, MVT::v16i16, { 1, 1, 1, 2 } }, // psubw
  937. { ISD::ADD, MVT::v16i16, { 1, 1, 1, 2 } }, // paddw
  938. { ISD::SUB, MVT::v8i32, { 1, 1, 1, 2 } }, // psubd
  939. { ISD::ADD, MVT::v8i32, { 1, 1, 1, 2 } }, // paddd
  940. { ISD::SUB, MVT::v4i64, { 1, 1, 1, 2 } }, // psubq
  941. { ISD::ADD, MVT::v4i64, { 1, 1, 1, 2 } }, // paddq
  942. { ISD::MUL, MVT::v16i16, { 2, 5, 1, 1 } }, // pmullw
  943. { ISD::MUL, MVT::v8i32, { 4, 10, 1, 2 } }, // pmulld
  944. { ISD::MUL, MVT::v4i32, { 2, 10, 1, 2 } }, // pmulld
  945. { ISD::MUL, MVT::v4i64, { 6, 10, 8,13 } }, // 3*pmuludq/3*shift/2*add
  946. { ISD::MUL, MVT::v2i64, { 6, 10, 8, 8 } }, // 3*pmuludq/3*shift/2*add
  947. { ISD::FNEG, MVT::v4f64, { 1, 1, 1, 2 } }, // vxorpd
  948. { ISD::FNEG, MVT::v8f32, { 1, 1, 1, 2 } }, // vxorps
  949. { ISD::FADD, MVT::f64, { 1, 4, 1, 1 } }, // vaddsd
  950. { ISD::FADD, MVT::f32, { 1, 4, 1, 1 } }, // vaddss
  951. { ISD::FADD, MVT::v2f64, { 1, 4, 1, 1 } }, // vaddpd
  952. { ISD::FADD, MVT::v4f32, { 1, 4, 1, 1 } }, // vaddps
  953. { ISD::FADD, MVT::v4f64, { 1, 4, 1, 2 } }, // vaddpd
  954. { ISD::FADD, MVT::v8f32, { 1, 4, 1, 2 } }, // vaddps
  955. { ISD::FSUB, MVT::f64, { 1, 4, 1, 1 } }, // vsubsd
  956. { ISD::FSUB, MVT::f32, { 1, 4, 1, 1 } }, // vsubss
  957. { ISD::FSUB, MVT::v2f64, { 1, 4, 1, 1 } }, // vsubpd
  958. { ISD::FSUB, MVT::v4f32, { 1, 4, 1, 1 } }, // vsubps
  959. { ISD::FSUB, MVT::v4f64, { 1, 4, 1, 2 } }, // vsubpd
  960. { ISD::FSUB, MVT::v8f32, { 1, 4, 1, 2 } }, // vsubps
  961. { ISD::FMUL, MVT::f64, { 1, 5, 1, 1 } }, // vmulsd
  962. { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // vmulss
  963. { ISD::FMUL, MVT::v2f64, { 1, 5, 1, 1 } }, // vmulpd
  964. { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // vmulps
  965. { ISD::FMUL, MVT::v4f64, { 1, 5, 1, 2 } }, // vmulpd
  966. { ISD::FMUL, MVT::v8f32, { 1, 5, 1, 2 } }, // vmulps
  967. { ISD::FDIV, MVT::f32, { 7, 13, 1, 1 } }, // vdivss
  968. { ISD::FDIV, MVT::v4f32, { 7, 13, 1, 1 } }, // vdivps
  969. { ISD::FDIV, MVT::v8f32, { 14, 21, 1, 3 } }, // vdivps
  970. { ISD::FDIV, MVT::f64, { 14, 20, 1, 1 } }, // vdivsd
  971. { ISD::FDIV, MVT::v2f64, { 14, 20, 1, 1 } }, // vdivpd
  972. { ISD::FDIV, MVT::v4f64, { 28, 35, 1, 3 } }, // vdivpd
  973. };
  974. // Look for AVX2 lowering tricks for custom cases.
  975. if (ST->hasAVX2())
  976. if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
  977. if (auto KindCost = Entry->Cost[CostKind])
  978. return LT.first * *KindCost;
  979. static const CostKindTblEntry AVX1CostTable[] = {
  980. // We don't have to scalarize unsupported ops. We can issue two half-sized
  981. // operations and we only need to extract the upper YMM half.
  982. // Two ops + 1 extract + 1 insert = 4.
  983. { ISD::MUL, MVT::v16i16, { 4, 8, 5, 6 } }, // pmullw + split
  984. { ISD::MUL, MVT::v8i32, { 5, 8, 5, 10 } }, // pmulld + split
  985. { ISD::MUL, MVT::v4i32, { 2, 5, 1, 3 } }, // pmulld
  986. { ISD::MUL, MVT::v4i64, { 12, 15, 19, 20 } },
  987. { ISD::AND, MVT::v32i8, { 1, 1, 1, 2 } }, // vandps
  988. { ISD::AND, MVT::v16i16, { 1, 1, 1, 2 } }, // vandps
  989. { ISD::AND, MVT::v8i32, { 1, 1, 1, 2 } }, // vandps
  990. { ISD::AND, MVT::v4i64, { 1, 1, 1, 2 } }, // vandps
  991. { ISD::OR, MVT::v32i8, { 1, 1, 1, 2 } }, // vorps
  992. { ISD::OR, MVT::v16i16, { 1, 1, 1, 2 } }, // vorps
  993. { ISD::OR, MVT::v8i32, { 1, 1, 1, 2 } }, // vorps
  994. { ISD::OR, MVT::v4i64, { 1, 1, 1, 2 } }, // vorps
  995. { ISD::XOR, MVT::v32i8, { 1, 1, 1, 2 } }, // vxorps
  996. { ISD::XOR, MVT::v16i16, { 1, 1, 1, 2 } }, // vxorps
  997. { ISD::XOR, MVT::v8i32, { 1, 1, 1, 2 } }, // vxorps
  998. { ISD::XOR, MVT::v4i64, { 1, 1, 1, 2 } }, // vxorps
  999. { ISD::SUB, MVT::v32i8, { 4, 2, 5, 6 } }, // psubb + split
  1000. { ISD::ADD, MVT::v32i8, { 4, 2, 5, 6 } }, // paddb + split
  1001. { ISD::SUB, MVT::v16i16, { 4, 2, 5, 6 } }, // psubw + split
  1002. { ISD::ADD, MVT::v16i16, { 4, 2, 5, 6 } }, // paddw + split
  1003. { ISD::SUB, MVT::v8i32, { 4, 2, 5, 6 } }, // psubd + split
  1004. { ISD::ADD, MVT::v8i32, { 4, 2, 5, 6 } }, // paddd + split
  1005. { ISD::SUB, MVT::v4i64, { 4, 2, 5, 6 } }, // psubq + split
  1006. { ISD::ADD, MVT::v4i64, { 4, 2, 5, 6 } }, // paddq + split
  1007. { ISD::SUB, MVT::v2i64, { 1, 1, 1, 1 } }, // psubq
  1008. { ISD::ADD, MVT::v2i64, { 1, 1, 1, 1 } }, // paddq
  1009. { ISD::SHL, MVT::v16i8, { 10, 21,11,17 } }, // pblendvb sequence.
  1010. { ISD::SHL, MVT::v32i8, { 22, 22,27,40 } }, // pblendvb sequence + split.
  1011. { ISD::SHL, MVT::v8i16, { 6, 9,11,11 } }, // pblendvb sequence.
  1012. { ISD::SHL, MVT::v16i16, { 13, 16,24,25 } }, // pblendvb sequence + split.
  1013. { ISD::SHL, MVT::v4i32, { 3, 11, 4, 6 } }, // pslld/paddd/cvttps2dq/pmulld
  1014. { ISD::SHL, MVT::v8i32, { 9, 11,12,17 } }, // pslld/paddd/cvttps2dq/pmulld + split
  1015. { ISD::SHL, MVT::v2i64, { 2, 4, 4, 6 } }, // Shift each lane + blend.
  1016. { ISD::SHL, MVT::v4i64, { 6, 7,11,15 } }, // Shift each lane + blend + split.
  1017. { ISD::SRL, MVT::v16i8, { 11, 27,12,18 } }, // pblendvb sequence.
  1018. { ISD::SRL, MVT::v32i8, { 23, 23,30,43 } }, // pblendvb sequence + split.
  1019. { ISD::SRL, MVT::v8i16, { 13, 16,14,22 } }, // pblendvb sequence.
  1020. { ISD::SRL, MVT::v16i16, { 28, 30,31,48 } }, // pblendvb sequence + split.
  1021. { ISD::SRL, MVT::v4i32, { 6, 7,12,16 } }, // Shift each lane + blend.
  1022. { ISD::SRL, MVT::v8i32, { 14, 14,26,34 } }, // Shift each lane + blend + split.
  1023. { ISD::SRL, MVT::v2i64, { 2, 4, 4, 6 } }, // Shift each lane + blend.
  1024. { ISD::SRL, MVT::v4i64, { 6, 7,11,15 } }, // Shift each lane + blend + split.
  1025. { ISD::SRA, MVT::v16i8, { 21, 22,24,36 } }, // pblendvb sequence.
  1026. { ISD::SRA, MVT::v32i8, { 44, 45,51,76 } }, // pblendvb sequence + split.
  1027. { ISD::SRA, MVT::v8i16, { 13, 16,14,22 } }, // pblendvb sequence.
  1028. { ISD::SRA, MVT::v16i16, { 28, 30,31,48 } }, // pblendvb sequence + split.
  1029. { ISD::SRA, MVT::v4i32, { 6, 7,12,16 } }, // Shift each lane + blend.
  1030. { ISD::SRA, MVT::v8i32, { 14, 14,26,34 } }, // Shift each lane + blend + split.
  1031. { ISD::SRA, MVT::v2i64, { 5, 6,10,14 } }, // Shift each lane + blend.
  1032. { ISD::SRA, MVT::v4i64, { 12, 12,22,30 } }, // Shift each lane + blend + split.
  1033. { ISD::FNEG, MVT::v4f64, { 2, 2, 1, 2 } }, // BTVER2 from http://www.agner.org/
  1034. { ISD::FNEG, MVT::v8f32, { 2, 2, 1, 2 } }, // BTVER2 from http://www.agner.org/
  1035. { ISD::FADD, MVT::f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1036. { ISD::FADD, MVT::f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1037. { ISD::FADD, MVT::v2f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1038. { ISD::FADD, MVT::v4f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1039. { ISD::FADD, MVT::v4f64, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
  1040. { ISD::FADD, MVT::v8f32, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
  1041. { ISD::FSUB, MVT::f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1042. { ISD::FSUB, MVT::f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1043. { ISD::FSUB, MVT::v2f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1044. { ISD::FSUB, MVT::v4f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
  1045. { ISD::FSUB, MVT::v4f64, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
  1046. { ISD::FSUB, MVT::v8f32, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
  1047. { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
  1048. { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
  1049. { ISD::FMUL, MVT::v2f64, { 2, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
  1050. { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
  1051. { ISD::FMUL, MVT::v4f64, { 4, 5, 1, 2 } }, // BTVER2 from http://www.agner.org/
  1052. { ISD::FMUL, MVT::v8f32, { 2, 5, 1, 2 } }, // BTVER2 from http://www.agner.org/
  1053. { ISD::FDIV, MVT::f32, { 14, 14, 1, 1 } }, // SNB from http://www.agner.org/
  1054. { ISD::FDIV, MVT::v4f32, { 14, 14, 1, 1 } }, // SNB from http://www.agner.org/
  1055. { ISD::FDIV, MVT::v8f32, { 28, 29, 1, 3 } }, // SNB from http://www.agner.org/
  1056. { ISD::FDIV, MVT::f64, { 22, 22, 1, 1 } }, // SNB from http://www.agner.org/
  1057. { ISD::FDIV, MVT::v2f64, { 22, 22, 1, 1 } }, // SNB from http://www.agner.org/
  1058. { ISD::FDIV, MVT::v4f64, { 44, 45, 1, 3 } }, // SNB from http://www.agner.org/
  1059. };
  1060. if (ST->hasAVX())
  1061. if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
  1062. if (auto KindCost = Entry->Cost[CostKind])
  1063. return LT.first * *KindCost;
  1064. static const CostKindTblEntry SSE42CostTable[] = {
  1065. { ISD::FADD, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1066. { ISD::FADD, MVT::f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1067. { ISD::FADD, MVT::v2f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1068. { ISD::FADD, MVT::v4f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1069. { ISD::FSUB, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1070. { ISD::FSUB, MVT::f32 , { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1071. { ISD::FSUB, MVT::v2f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1072. { ISD::FSUB, MVT::v4f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
  1073. { ISD::FMUL, MVT::f64, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
  1074. { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
  1075. { ISD::FMUL, MVT::v2f64, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
  1076. { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
  1077. { ISD::FDIV, MVT::f32, { 14, 14, 1, 1 } }, // Nehalem from http://www.agner.org/
  1078. { ISD::FDIV, MVT::v4f32, { 14, 14, 1, 1 } }, // Nehalem from http://www.agner.org/
  1079. { ISD::FDIV, MVT::f64, { 22, 22, 1, 1 } }, // Nehalem from http://www.agner.org/
  1080. { ISD::FDIV, MVT::v2f64, { 22, 22, 1, 1 } }, // Nehalem from http://www.agner.org/
  1081. { ISD::MUL, MVT::v2i64, { 6, 10,10,10 } } // 3*pmuludq/3*shift/2*add
  1082. };
  1083. if (ST->hasSSE42())
  1084. if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
  1085. if (auto KindCost = Entry->Cost[CostKind])
  1086. return LT.first * *KindCost;
  1087. static const CostKindTblEntry SSE41CostTable[] = {
  1088. { ISD::SHL, MVT::v16i8, { 15, 24,17,22 } }, // pblendvb sequence.
  1089. { ISD::SHL, MVT::v8i16, { 11, 14,11,11 } }, // pblendvb sequence.
  1090. { ISD::SHL, MVT::v4i32, { 14, 20, 4,10 } }, // pslld/paddd/cvttps2dq/pmulld
  1091. { ISD::SRL, MVT::v16i8, { 16, 27,18,24 } }, // pblendvb sequence.
  1092. { ISD::SRL, MVT::v8i16, { 22, 26,23,27 } }, // pblendvb sequence.
  1093. { ISD::SRL, MVT::v4i32, { 16, 17,15,19 } }, // Shift each lane + blend.
  1094. { ISD::SRL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
  1095. { ISD::SRA, MVT::v16i8, { 38, 41,30,36 } }, // pblendvb sequence.
  1096. { ISD::SRA, MVT::v8i16, { 22, 26,23,27 } }, // pblendvb sequence.
  1097. { ISD::SRA, MVT::v4i32, { 16, 17,15,19 } }, // Shift each lane + blend.
  1098. { ISD::SRA, MVT::v2i64, { 8, 17, 5, 7 } }, // splat+shuffle sequence.
  1099. { ISD::MUL, MVT::v4i32, { 2, 11, 1, 1 } } // pmulld (Nehalem from agner.org)
  1100. };
  1101. if (ST->hasSSE41())
  1102. if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
  1103. if (auto KindCost = Entry->Cost[CostKind])
  1104. return LT.first * *KindCost;
  1105. static const CostKindTblEntry SSE2CostTable[] = {
  1106. // We don't correctly identify costs of casts because they are marked as
  1107. // custom.
  1108. { ISD::SHL, MVT::v16i8, { 13, 21,26,28 } }, // cmpgtb sequence.
  1109. { ISD::SHL, MVT::v8i16, { 24, 27,16,20 } }, // cmpgtw sequence.
  1110. { ISD::SHL, MVT::v4i32, { 17, 19,10,12 } }, // pslld/paddd/cvttps2dq/pmuludq.
  1111. { ISD::SHL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
  1112. { ISD::SRL, MVT::v16i8, { 14, 28,27,30 } }, // cmpgtb sequence.
  1113. { ISD::SRL, MVT::v8i16, { 16, 19,31,31 } }, // cmpgtw sequence.
  1114. { ISD::SRL, MVT::v4i32, { 12, 12,15,19 } }, // Shift each lane + blend.
  1115. { ISD::SRL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
  1116. { ISD::SRA, MVT::v16i8, { 27, 30,54,54 } }, // unpacked cmpgtb sequence.
  1117. { ISD::SRA, MVT::v8i16, { 16, 19,31,31 } }, // cmpgtw sequence.
  1118. { ISD::SRA, MVT::v4i32, { 12, 12,15,19 } }, // Shift each lane + blend.
  1119. { ISD::SRA, MVT::v2i64, { 8, 11,12,16 } }, // srl/xor/sub splat+shuffle sequence.
  1120. { ISD::AND, MVT::v16i8, { 1, 1, 1, 1 } }, // pand
  1121. { ISD::AND, MVT::v8i16, { 1, 1, 1, 1 } }, // pand
  1122. { ISD::AND, MVT::v4i32, { 1, 1, 1, 1 } }, // pand
  1123. { ISD::AND, MVT::v2i64, { 1, 1, 1, 1 } }, // pand
  1124. { ISD::OR, MVT::v16i8, { 1, 1, 1, 1 } }, // por
  1125. { ISD::OR, MVT::v8i16, { 1, 1, 1, 1 } }, // por
  1126. { ISD::OR, MVT::v4i32, { 1, 1, 1, 1 } }, // por
  1127. { ISD::OR, MVT::v2i64, { 1, 1, 1, 1 } }, // por
  1128. { ISD::XOR, MVT::v16i8, { 1, 1, 1, 1 } }, // pxor
  1129. { ISD::XOR, MVT::v8i16, { 1, 1, 1, 1 } }, // pxor
  1130. { ISD::XOR, MVT::v4i32, { 1, 1, 1, 1 } }, // pxor
  1131. { ISD::XOR, MVT::v2i64, { 1, 1, 1, 1 } }, // pxor
  1132. { ISD::ADD, MVT::v2i64, { 1, 2, 1, 2 } }, // paddq
  1133. { ISD::SUB, MVT::v2i64, { 1, 2, 1, 2 } }, // psubq
  1134. { ISD::MUL, MVT::v8i16, { 1, 5, 1, 1 } }, // pmullw
  1135. { ISD::MUL, MVT::v4i32, { 6, 8, 7, 7 } }, // 3*pmuludq/4*shuffle
  1136. { ISD::MUL, MVT::v2i64, { 8, 10, 8, 8 } }, // 3*pmuludq/3*shift/2*add
  1137. { ISD::FDIV, MVT::f32, { 23, 23, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1138. { ISD::FDIV, MVT::v4f32, { 39, 39, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1139. { ISD::FDIV, MVT::f64, { 38, 38, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1140. { ISD::FDIV, MVT::v2f64, { 69, 69, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1141. { ISD::FNEG, MVT::f32, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1142. { ISD::FNEG, MVT::f64, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1143. { ISD::FNEG, MVT::v4f32, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1144. { ISD::FNEG, MVT::v2f64, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1145. { ISD::FADD, MVT::f32, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1146. { ISD::FADD, MVT::f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1147. { ISD::FADD, MVT::v2f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1148. { ISD::FSUB, MVT::f32, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1149. { ISD::FSUB, MVT::f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1150. { ISD::FSUB, MVT::v2f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1151. { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1152. { ISD::FMUL, MVT::v2f64, { 2, 5, 1, 1 } }, // Pentium IV from http://www.agner.org/
  1153. };
  1154. if (ST->hasSSE2())
  1155. if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
  1156. if (auto KindCost = Entry->Cost[CostKind])
  1157. return LT.first * *KindCost;
  1158. static const CostKindTblEntry SSE1CostTable[] = {
  1159. { ISD::FDIV, MVT::f32, { 17, 18, 1, 1 } }, // Pentium III from http://www.agner.org/
  1160. { ISD::FDIV, MVT::v4f32, { 34, 48, 1, 1 } }, // Pentium III from http://www.agner.org/
  1161. { ISD::FNEG, MVT::f32, { 2, 2, 1, 2 } }, // Pentium III from http://www.agner.org/
  1162. { ISD::FNEG, MVT::v4f32, { 2, 2, 1, 2 } }, // Pentium III from http://www.agner.org/
  1163. { ISD::FADD, MVT::f32, { 1, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
  1164. { ISD::FADD, MVT::v4f32, { 2, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
  1165. { ISD::FSUB, MVT::f32, { 1, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
  1166. { ISD::FSUB, MVT::v4f32, { 2, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
  1167. { ISD::FMUL, MVT::f32, { 2, 5, 1, 1 } }, // Pentium III from http://www.agner.org/
  1168. { ISD::FMUL, MVT::v4f32, { 2, 5, 1, 1 } }, // Pentium III from http://www.agner.org/
  1169. };
  1170. if (ST->hasSSE1())
  1171. if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
  1172. if (auto KindCost = Entry->Cost[CostKind])
  1173. return LT.first * *KindCost;
  1174. static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
  1175. { ISD::ADD, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
  1176. { ISD::SUB, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
  1177. { ISD::MUL, MVT::i64, { 2 } }, // Nehalem from http://www.agner.org/
  1178. };
  1179. if (ST->is64Bit())
  1180. if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
  1181. if (auto KindCost = Entry->Cost[CostKind])
  1182. return LT.first * *KindCost;
  1183. static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
  1184. { ISD::ADD, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
  1185. { ISD::ADD, MVT::i16, { 1 } }, // Pentium III from http://www.agner.org/
  1186. { ISD::ADD, MVT::i32, { 1 } }, // Pentium III from http://www.agner.org/
  1187. { ISD::SUB, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
  1188. { ISD::SUB, MVT::i16, { 1 } }, // Pentium III from http://www.agner.org/
  1189. { ISD::SUB, MVT::i32, { 1 } }, // Pentium III from http://www.agner.org/
  1190. { ISD::FNEG, MVT::f64, { 2, 2, 1, 3 } }, // (x87)
  1191. { ISD::FADD, MVT::f64, { 2, 3, 1, 1 } }, // (x87)
  1192. { ISD::FSUB, MVT::f64, { 2, 3, 1, 1 } }, // (x87)
  1193. { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // (x87)
  1194. { ISD::FDIV, MVT::f64, { 38, 38, 1, 1 } }, // (x87)
  1195. };
  1196. if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
  1197. if (auto KindCost = Entry->Cost[CostKind])
  1198. return LT.first * *KindCost;
  1199. // It is not a good idea to vectorize division. We have to scalarize it and
1200. // in the process we will often end up having to spill regular
1201. // registers. The overhead of division is going to dominate most kernels
1202. // anyway, so try hard to prevent vectorization of division - it is
  1203. // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  1204. // to hide "20 cycles" for each lane.
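// E.g. an sdiv <4 x i32> (LT.first == 1, 4 lanes) with scalar cost S is
// reported as 20 * 1 * 4 * S below, strongly discouraging vectorization.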
  1205. if (CostKind == TTI::TCK_RecipThroughput && LT.second.isVector() &&
  1206. (ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
  1207. ISD == ISD::UREM)) {
  1208. InstructionCost ScalarCost =
  1209. getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind,
  1210. Op1Info.getNoProps(), Op2Info.getNoProps());
  1211. return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  1212. }
  1213. // Handle some basic single instruction code size cases.
  1214. if (CostKind == TTI::TCK_CodeSize) {
  1215. switch (ISD) {
  1216. case ISD::FADD:
  1217. case ISD::FSUB:
  1218. case ISD::FMUL:
  1219. case ISD::FDIV:
  1220. case ISD::FNEG:
  1221. case ISD::AND:
  1222. case ISD::OR:
  1223. case ISD::XOR:
  1224. return LT.first;
  1225. break;
  1226. }
  1227. }
  1228. // Fallback to the default implementation.
  1229. return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
  1230. Args, CxtI);
  1231. }
  1232. InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
  1233. VectorType *BaseTp,
  1234. ArrayRef<int> Mask,
  1235. TTI::TargetCostKind CostKind,
  1236. int Index, VectorType *SubTp,
  1237. ArrayRef<const Value *> Args) {
  1238. // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  1239. // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  1240. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(BaseTp);
  1241. Kind = improveShuffleKindFromMask(Kind, Mask);
  1242. // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  1243. if (Kind == TTI::SK_Transpose)
  1244. Kind = TTI::SK_PermuteTwoSrc;
  1245. // For Broadcasts we are splatting the first element from the first input
  1246. // register, so only need to reference that input and all the output
  1247. // registers are the same.
  1248. if (Kind == TTI::SK_Broadcast)
  1249. LT.first = 1;
  1250. // Subvector extractions are free if they start at the beginning of a
  1251. // vector and cheap if the subvectors are aligned.
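// E.g. extracting the low <4 x float> of a legal <8 x float> is modeled as
// free, while an aligned upper-half extract costs a single subvector extract
// (e.g. vextractf128 on AVX).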
  1252. if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
  1253. int NumElts = LT.second.getVectorNumElements();
  1254. if ((Index % NumElts) == 0)
  1255. return 0;
  1256. std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
  1257. if (SubLT.second.isVector()) {
  1258. int NumSubElts = SubLT.second.getVectorNumElements();
  1259. if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
  1260. return SubLT.first;
  1261. // Handle some cases for widening legalization. For now we only handle
  1262. // cases where the original subvector was naturally aligned and evenly
  1263. // fit in its legalized subvector type.
  1264. // FIXME: Remove some of the alignment restrictions.
  1265. // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
  1266. // vectors.
  1267. int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
  1268. if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
  1269. (NumSubElts % OrigSubElts) == 0 &&
  1270. LT.second.getVectorElementType() ==
  1271. SubLT.second.getVectorElementType() &&
  1272. LT.second.getVectorElementType().getSizeInBits() ==
  1273. BaseTp->getElementType()->getPrimitiveSizeInBits()) {
  1274. assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
  1275. "Unexpected number of elements!");
  1276. auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
  1277. LT.second.getVectorNumElements());
  1278. auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
  1279. SubLT.second.getVectorNumElements());
  1280. int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
  1281. InstructionCost ExtractCost =
  1282. getShuffleCost(TTI::SK_ExtractSubvector, VecTy, std::nullopt,
  1283. CostKind, ExtractIndex, SubTy);
  1284. // If the original size is 32-bits or more, we can use pshufd. Otherwise
  1285. // if we have SSSE3 we can use pshufb.
  1286. if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
  1287. return ExtractCost + 1; // pshufd or pshufb
  1288. assert(SubTp->getPrimitiveSizeInBits() == 16 &&
  1289. "Unexpected vector size");
  1290. return ExtractCost + 2; // worst case pshufhw + pshufd
  1291. }
  1292. }
  1293. }
  1294. // Subvector insertions are cheap if the subvectors are aligned.
  1295. // Note that in general, the insertion starting at the beginning of a vector
  1296. // isn't free, because we need to preserve the rest of the wide vector.
  1297. if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
  1298. int NumElts = LT.second.getVectorNumElements();
  1299. std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
  1300. if (SubLT.second.isVector()) {
  1301. int NumSubElts = SubLT.second.getVectorNumElements();
  1302. if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
  1303. return SubLT.first;
  1304. }
  1305. // If the insertion isn't aligned, treat it like a 2-op shuffle.
  1306. Kind = TTI::SK_PermuteTwoSrc;
  1307. }
  1308. // Handle some common (illegal) sub-vector types as they are often very cheap
  1309. // to shuffle even on targets without PSHUFB.
  1310. EVT VT = TLI->getValueType(DL, BaseTp);
  1311. if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
  1312. !ST->hasSSSE3()) {
  1313. static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
  1314. {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
  1315. {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
  1316. {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
  1317. {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
  1318. {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
  1319. {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
  1320. {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
  1321. {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
  1322. {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
  1323. {TTI::SK_Splice, MVT::v4i16, 2}, // punpck+psrldq
  1324. {TTI::SK_Splice, MVT::v2i16, 2}, // punpck+psrldq
  1325. {TTI::SK_Splice, MVT::v4i8, 2}, // punpck+psrldq
  1326. {TTI::SK_Splice, MVT::v2i8, 2}, // punpck+psrldq
  1327. {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
  1328. {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
  1329. {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
  1330. {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
  1331. {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
  1332. {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
  1333. {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
  1334. {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
  1335. {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
  1336. {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
  1337. };
  1338. if (ST->hasSSE2())
  1339. if (const auto *Entry =
  1340. CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
  1341. return Entry->Cost;
  1342. }
  1343. // We are going to permute multiple sources and the result will be in multiple
1344. // destinations. We provide an accurate cost only for splits where the element
  1345. // type remains the same.
  1346. if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
  1347. MVT LegalVT = LT.second;
  1348. if (LegalVT.isVector() &&
  1349. LegalVT.getVectorElementType().getSizeInBits() ==
  1350. BaseTp->getElementType()->getPrimitiveSizeInBits() &&
  1351. LegalVT.getVectorNumElements() <
  1352. cast<FixedVectorType>(BaseTp)->getNumElements()) {
  1353. unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
  1354. unsigned LegalVTSize = LegalVT.getStoreSize();
  1355. // Number of source vectors after legalization:
  1356. unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
  1357. // Number of destination vectors after legalization:
  1358. InstructionCost NumOfDests = LT.first;
  1359. auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
  1360. LegalVT.getVectorNumElements());
  1361. if (!Mask.empty() && NumOfDests.isValid()) {
  1362. // Try to perform better estimation of the permutation.
  1363. // 1. Split the source/destination vectors into real registers.
  1364. // 2. Do the mask analysis to identify which real registers are
1365. // permuted. If more than one source register is used to build a
1366. // destination register, the cost for this destination register
1367. // is (Number_of_source_registers - 1) * Cost_PermuteTwoSrc. If only one
1368. // source register is used, build the mask and calculate the cost as the
1369. // cost of PermuteSingleSrc.
1370. // Also, for a single-register permute we try to identify whether the
1371. // destination register is just a copy of the source register or a
1372. // copy of the previous destination register (the cost is
1373. // TTI::TCC_Basic). If the source register is simply reused, the cost for
1374. // this operation is 0.
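// E.g. (illustrative) a single-source shuffle of an illegal <16 x i32> that
// legalizes to two <8 x i32> registers: a destination register whose mask
// reads only one source register is charged a PermuteSingleSrc (or
// TTI::TCC_Basic / nothing if it is just a copy), while a destination mixing
// both source registers is charged a PermuteTwoSrc per extra source register.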
  1375. unsigned E = *NumOfDests.getValue();
  1376. unsigned NormalizedVF =
  1377. LegalVT.getVectorNumElements() * std::max(NumOfSrcs, E);
  1378. unsigned NumOfSrcRegs = NormalizedVF / LegalVT.getVectorNumElements();
  1379. unsigned NumOfDestRegs = NormalizedVF / LegalVT.getVectorNumElements();
  1380. SmallVector<int> NormalizedMask(NormalizedVF, UndefMaskElem);
  1381. copy(Mask, NormalizedMask.begin());
  1382. unsigned PrevSrcReg = 0;
  1383. ArrayRef<int> PrevRegMask;
  1384. InstructionCost Cost = 0;
  1385. processShuffleMasks(
  1386. NormalizedMask, NumOfSrcRegs, NumOfDestRegs, NumOfDestRegs, []() {},
  1387. [this, SingleOpTy, CostKind, &PrevSrcReg, &PrevRegMask,
  1388. &Cost](ArrayRef<int> RegMask, unsigned SrcReg, unsigned DestReg) {
  1389. if (!ShuffleVectorInst::isIdentityMask(RegMask)) {
  1390. // Check if the previous register can be just copied to the next
  1391. // one.
  1392. if (PrevRegMask.empty() || PrevSrcReg != SrcReg ||
  1393. PrevRegMask != RegMask)
  1394. Cost += getShuffleCost(TTI::SK_PermuteSingleSrc, SingleOpTy,
  1395. RegMask, CostKind, 0, nullptr);
  1396. else
  1397. // Just a copy of previous destination register.
  1398. Cost += TTI::TCC_Basic;
  1399. return;
  1400. }
  1401. if (SrcReg != DestReg &&
  1402. any_of(RegMask, [](int I) { return I != UndefMaskElem; })) {
  1403. // Just a copy of the source register.
  1404. Cost += TTI::TCC_Basic;
  1405. }
  1406. PrevSrcReg = SrcReg;
  1407. PrevRegMask = RegMask;
  1408. },
  1409. [this, SingleOpTy, CostKind, &Cost](ArrayRef<int> RegMask,
  1410. unsigned /*Unused*/,
  1411. unsigned /*Unused*/) {
  1412. Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, RegMask,
  1413. CostKind, 0, nullptr);
  1414. });
  1415. return Cost;
  1416. }
  1417. InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
  1418. return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
  1419. std::nullopt, CostKind, 0, nullptr);
  1420. }
  1421. return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp);
  1422. }
  1423. // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  1424. if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
  1425. // We assume that source and destination have the same vector type.
  1426. InstructionCost NumOfDests = LT.first;
  1427. InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
  1428. LT.first = NumOfDests * NumOfShufflesPerDest;
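// E.g. with LT.first == 2 this charges 2 destinations * (2 * 2 - 1) = 6
// per-register shuffles before the per-ISA shuffle tables below are applied.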
  1429. }
  1430. static const CostTblEntry AVX512VBMIShuffleTbl[] = {
  1431. {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
  1432. {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
  1433. {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
  1434. {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
  1435. {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
  1436. {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
  1437. {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
  1438. };
  1439. if (ST->hasVBMI())
  1440. if (const auto *Entry =
  1441. CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
  1442. return LT.first * Entry->Cost;
  1443. static const CostTblEntry AVX512BWShuffleTbl[] = {
  1444. {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
  1445. {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
  1446. {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
  1447. {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
  1448. {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
  1449. {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
  1450. {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
  1451. {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
  1452. {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
  1453. {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
  1454. {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
  1455. {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
  1456. {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
  1457. {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
  1458. {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
  1459. {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
  1460. {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
  1461. {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
  1462. {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
  1463. {TTI::SK_Splice, MVT::v32i16, 2}, // vshufi64x2 + palignr
  1464. {TTI::SK_Splice, MVT::v32f16, 2}, // vshufi64x2 + palignr
  1465. {TTI::SK_Splice, MVT::v64i8, 2}, // vshufi64x2 + palignr
  1466. };
  1467. if (ST->hasBWI())
  1468. if (const auto *Entry =
  1469. CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
  1470. return LT.first * Entry->Cost;
  1471. static const CostKindTblEntry AVX512ShuffleTbl[] = {
  1472. {TTI::SK_Broadcast, MVT::v8f64, { 1, 1, 1, 1 } }, // vbroadcastsd
  1473. {TTI::SK_Broadcast, MVT::v16f32, { 1, 1, 1, 1 } }, // vbroadcastss
  1474. {TTI::SK_Broadcast, MVT::v8i64, { 1, 1, 1, 1 } }, // vpbroadcastq
  1475. {TTI::SK_Broadcast, MVT::v16i32, { 1, 1, 1, 1 } }, // vpbroadcastd
  1476. {TTI::SK_Broadcast, MVT::v32i16, { 1, 1, 1, 1 } }, // vpbroadcastw
  1477. {TTI::SK_Broadcast, MVT::v32f16, { 1, 1, 1, 1 } }, // vpbroadcastw
  1478. {TTI::SK_Broadcast, MVT::v64i8, { 1, 1, 1, 1 } }, // vpbroadcastb
  1479. {TTI::SK_Reverse, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermpd
  1480. {TTI::SK_Reverse, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermps
  1481. {TTI::SK_Reverse, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermq
  1482. {TTI::SK_Reverse, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermd
  1483. {TTI::SK_Reverse, MVT::v32i16, { 7, 7, 7, 7 } }, // per mca
  1484. {TTI::SK_Reverse, MVT::v32f16, { 7, 7, 7, 7 } }, // per mca
  1485. {TTI::SK_Reverse, MVT::v64i8, { 7, 7, 7, 7 } }, // per mca
  1486. {TTI::SK_Splice, MVT::v8f64, { 1, 1, 1, 1 } }, // vpalignd
  1487. {TTI::SK_Splice, MVT::v4f64, { 1, 1, 1, 1 } }, // vpalignd
  1488. {TTI::SK_Splice, MVT::v16f32, { 1, 1, 1, 1 } }, // vpalignd
  1489. {TTI::SK_Splice, MVT::v8f32, { 1, 1, 1, 1 } }, // vpalignd
  1490. {TTI::SK_Splice, MVT::v8i64, { 1, 1, 1, 1 } }, // vpalignd
  1491. {TTI::SK_Splice, MVT::v4i64, { 1, 1, 1, 1 } }, // vpalignd
  1492. {TTI::SK_Splice, MVT::v16i32, { 1, 1, 1, 1 } }, // vpalignd
  1493. {TTI::SK_Splice, MVT::v8i32, { 1, 1, 1, 1 } }, // vpalignd
  1494. {TTI::SK_Splice, MVT::v32i16, { 4, 4, 4, 4 } }, // split + palignr
  1495. {TTI::SK_Splice, MVT::v32f16, { 4, 4, 4, 4 } }, // split + palignr
  1496. {TTI::SK_Splice, MVT::v64i8, { 4, 4, 4, 4 } }, // split + palignr
  1497. {TTI::SK_PermuteSingleSrc, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermpd
  1498. {TTI::SK_PermuteSingleSrc, MVT::v4f64, { 1, 3, 1, 1 } }, // vpermpd
  1499. {TTI::SK_PermuteSingleSrc, MVT::v2f64, { 1, 3, 1, 1 } }, // vpermpd
  1500. {TTI::SK_PermuteSingleSrc, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermps
  1501. {TTI::SK_PermuteSingleSrc, MVT::v8f32, { 1, 3, 1, 1 } }, // vpermps
  1502. {TTI::SK_PermuteSingleSrc, MVT::v4f32, { 1, 3, 1, 1 } }, // vpermps
  1503. {TTI::SK_PermuteSingleSrc, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermq
  1504. {TTI::SK_PermuteSingleSrc, MVT::v4i64, { 1, 3, 1, 1 } }, // vpermq
  1505. {TTI::SK_PermuteSingleSrc, MVT::v2i64, { 1, 3, 1, 1 } }, // vpermq
  1506. {TTI::SK_PermuteSingleSrc, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermd
  1507. {TTI::SK_PermuteSingleSrc, MVT::v8i32, { 1, 3, 1, 1 } }, // vpermd
  1508. {TTI::SK_PermuteSingleSrc, MVT::v4i32, { 1, 3, 1, 1 } }, // vpermd
  1509. {TTI::SK_PermuteSingleSrc, MVT::v16i8, { 1, 3, 1, 1 } }, // pshufb
  1510. {TTI::SK_PermuteTwoSrc, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermt2pd
  1511. {TTI::SK_PermuteTwoSrc, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermt2ps
  1512. {TTI::SK_PermuteTwoSrc, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermt2q
  1513. {TTI::SK_PermuteTwoSrc, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermt2d
  1514. {TTI::SK_PermuteTwoSrc, MVT::v4f64, { 1, 3, 1, 1 } }, // vpermt2pd
  1515. {TTI::SK_PermuteTwoSrc, MVT::v8f32, { 1, 3, 1, 1 } }, // vpermt2ps
  1516. {TTI::SK_PermuteTwoSrc, MVT::v4i64, { 1, 3, 1, 1 } }, // vpermt2q
  1517. {TTI::SK_PermuteTwoSrc, MVT::v8i32, { 1, 3, 1, 1 } }, // vpermt2d
  1518. {TTI::SK_PermuteTwoSrc, MVT::v2f64, { 1, 3, 1, 1 } }, // vpermt2pd
  1519. {TTI::SK_PermuteTwoSrc, MVT::v4f32, { 1, 3, 1, 1 } }, // vpermt2ps
  1520. {TTI::SK_PermuteTwoSrc, MVT::v2i64, { 1, 3, 1, 1 } }, // vpermt2q
  1521. {TTI::SK_PermuteTwoSrc, MVT::v4i32, { 1, 3, 1, 1 } }, // vpermt2d
  1522. // FIXME: This just applies the type legalization cost rules above
  1523. // assuming these completely split.
  1524. {TTI::SK_PermuteSingleSrc, MVT::v32i16, { 14, 14, 14, 14 } },
  1525. {TTI::SK_PermuteSingleSrc, MVT::v32f16, { 14, 14, 14, 14 } },
  1526. {TTI::SK_PermuteSingleSrc, MVT::v64i8, { 14, 14, 14, 14 } },
  1527. {TTI::SK_PermuteTwoSrc, MVT::v32i16, { 42, 42, 42, 42 } },
  1528. {TTI::SK_PermuteTwoSrc, MVT::v32f16, { 42, 42, 42, 42 } },
  1529. {TTI::SK_PermuteTwoSrc, MVT::v64i8, { 42, 42, 42, 42 } },
  1530. {TTI::SK_Select, MVT::v32i16, { 1, 1, 1, 1 } }, // vpternlogq
  1531. {TTI::SK_Select, MVT::v32f16, { 1, 1, 1, 1 } }, // vpternlogq
  1532. {TTI::SK_Select, MVT::v64i8, { 1, 1, 1, 1 } }, // vpternlogq
  1533. {TTI::SK_Select, MVT::v8f64, { 1, 1, 1, 1 } }, // vblendmpd
  1534. {TTI::SK_Select, MVT::v16f32, { 1, 1, 1, 1 } }, // vblendmps
  1535. {TTI::SK_Select, MVT::v8i64, { 1, 1, 1, 1 } }, // vblendmq
  1536. {TTI::SK_Select, MVT::v16i32, { 1, 1, 1, 1 } }, // vblendmd
  1537. };
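// Note: each entry above carries one cost per TTI::TargetCostKind, in
// enum order (recip-throughput, latency, code-size, size-and-latency).
// The lookup below only uses an entry when a value is populated for the
// requested cost kind, and scales it by LT.first, the number of legalized
// registers the vector type splits into.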
  1538. if (ST->hasAVX512())
  1539. if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
  1540. if (auto KindCost = Entry->Cost[CostKind])
  1541. return LT.first * *KindCost;
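// The pre-AVX512 shuffle tables below predate the per-cost-kind scheme and
// keep a single scalar cost per entry, which is effectively a reciprocal
// throughput estimate applied to every cost kind.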
  1542. static const CostTblEntry AVX2ShuffleTbl[] = {
  1543. {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
  1544. {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
  1545. {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
  1546. {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
  1547. {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
  1548. {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
  1549. {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
  1550. {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
  1551. {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
  1552. {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
  1553. {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
  1554. {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
  1555. {TTI::SK_Reverse, MVT::v16f16, 2}, // vperm2i128 + pshufb
  1556. {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
  1557. {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
  1558. {TTI::SK_Select, MVT::v16f16, 1}, // vpblendvb
  1559. {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
  1560. {TTI::SK_Splice, MVT::v8i32, 2}, // vperm2i128 + vpalignr
  1561. {TTI::SK_Splice, MVT::v8f32, 2}, // vperm2i128 + vpalignr
  1562. {TTI::SK_Splice, MVT::v16i16, 2}, // vperm2i128 + vpalignr
  1563. {TTI::SK_Splice, MVT::v16f16, 2}, // vperm2i128 + vpalignr
  1564. {TTI::SK_Splice, MVT::v32i8, 2}, // vperm2i128 + vpalignr
  1565. {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
  1566. {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
  1567. {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
  1568. {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
  1569. {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
  1570. // + vpblendvb
  1571. {TTI::SK_PermuteSingleSrc, MVT::v16f16, 4}, // vperm2i128 + 2*vpshufb
  1572. // + vpblendvb
  1573. {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
  1574. // + vpblendvb
  1575. {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
  1576. {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
  1577. {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
  1578. {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
  1579. {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
  1580. // + vpblendvb
  1581. {TTI::SK_PermuteTwoSrc, MVT::v16f16, 7}, // 2*vperm2i128 + 4*vpshufb
  1582. // + vpblendvb
  1583. {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
  1584. // + vpblendvb
  1585. };
  1586. if (ST->hasAVX2())
  1587. if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
  1588. return LT.first * Entry->Cost;
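// XOP's vpperm selects bytes from two 128-bit sources in one instruction,
// so 128-bit two-source shuffles cost 1; the 256-bit cases are modelled as
// extract + per-half vpperm + insert sequences (see the entry comments).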
  1589. static const CostTblEntry XOPShuffleTbl[] = {
  1590. {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
  1591. {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
  1592. {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
  1593. {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
  1594. {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
  1595. // + vinsertf128
  1596. {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
  1597. // + vinsertf128
  1598. {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
  1599. // + vinsertf128
  1600. {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
  1601. {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
  1602. // + vinsertf128
  1603. {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
  1604. };
  1605. if (ST->hasXOP())
  1606. if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
  1607. return LT.first * Entry->Cost;
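// AVX1 has no cross-lane integer shuffles beyond vperm2f128, so the
// 256-bit entries below are modelled as vperm2f128/vextractf128/vinsertf128
// plus in-lane shuffles, as spelled out in the per-entry comments.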
  1608. static const CostTblEntry AVX1ShuffleTbl[] = {
  1609. {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
  1610. {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
  1611. {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
  1612. {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
  1613. {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
  1614. {TTI::SK_Broadcast, MVT::v16f16, 3}, // vpshuflw + vpshufd + vinsertf128
  1615. {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
  1616. {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
  1617. {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
  1618. {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
  1619. {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
  1620. {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
  1621. // + vinsertf128
  1622. {TTI::SK_Reverse, MVT::v16f16, 4}, // vextractf128 + 2*pshufb
  1623. // + vinsertf128
  1624. {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
  1625. // + vinsertf128
  1626. {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
  1627. {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
  1628. {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
  1629. {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
  1630. {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
  1631. {TTI::SK_Select, MVT::v16f16, 3}, // vpand + vpandn + vpor
  1632. {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
  1633. {TTI::SK_Splice, MVT::v4i64, 2}, // vperm2f128 + shufpd
  1634. {TTI::SK_Splice, MVT::v4f64, 2}, // vperm2f128 + shufpd
  1635. {TTI::SK_Splice, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
  1636. {TTI::SK_Splice, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
  1637. {TTI::SK_Splice, MVT::v16i16, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
  1638. {TTI::SK_Splice, MVT::v16f16, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
  1639. {TTI::SK_Splice, MVT::v32i8, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
  1640. {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
  1641. {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
  1642. {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
  1643. {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
  1644. {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
  1645. // + 2*por + vinsertf128
  1646. {TTI::SK_PermuteSingleSrc, MVT::v16f16, 8}, // vextractf128 + 4*pshufb
  1647. // + 2*por + vinsertf128
  1648. {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
  1649. // + 2*por + vinsertf128
  1650. {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
  1651. {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
  1652. {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
  1653. {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
  1654. {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
  1655. // + 4*por + vinsertf128
  1656. {TTI::SK_PermuteTwoSrc, MVT::v16f16, 15}, // 2*vextractf128 + 8*pshufb
  1657. // + 4*por + vinsertf128
  1658. {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
  1659. // + 4*por + vinsertf128
  1660. };
  1661. if (ST->hasAVX())
  1662. if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
  1663. return LT.first * Entry->Cost;
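// The SSE4.1 table only overrides selects: pblendw/blendps/pblendvb make
// SK_Select a single instruction for every 128-bit element type.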
  1664. static const CostTblEntry SSE41ShuffleTbl[] = {
  1665. {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
  1666. {TTI::SK_Select, MVT::v2f64, 1}, // movsd
  1667. {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
  1668. {TTI::SK_Select, MVT::v4f32, 1}, // blendps
  1669. {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
  1670. {TTI::SK_Select, MVT::v8f16, 1}, // pblendw
  1671. {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
  1672. };
  1673. if (ST->hasSSE41())
  1674. if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
  1675. return LT.first * Entry->Cost;
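// SSSE3 adds pshufb (arbitrary in-register byte shuffle) and palignr, so
// most single-source byte/word shuffles drop to one instruction and the
// two-source variants are modelled as 2*pshufb + por.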
  1676. static const CostTblEntry SSSE3ShuffleTbl[] = {
  1677. {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
  1678. {TTI::SK_Broadcast, MVT::v8f16, 1}, // pshufb
  1679. {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
  1680. {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
  1681. {TTI::SK_Reverse, MVT::v8f16, 1}, // pshufb
  1682. {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
  1683. {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
  1684. {TTI::SK_Select, MVT::v8f16, 3}, // 2*pshufb + por
  1685. {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
  1686. {TTI::SK_Splice, MVT::v4i32, 1}, // palignr
  1687. {TTI::SK_Splice, MVT::v4f32, 1}, // palignr
  1688. {TTI::SK_Splice, MVT::v8i16, 1}, // palignr
  1689. {TTI::SK_Splice, MVT::v8f16, 1}, // palignr
  1690. {TTI::SK_Splice, MVT::v16i8, 1}, // palignr
  1691. {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
  1692. {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1}, // pshufb
  1693. {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
  1694. {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
  1695. {TTI::SK_PermuteTwoSrc, MVT::v8f16, 3}, // 2*pshufb + por
  1696. {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
  1697. };
  1698. if (ST->hasSSSE3())
  1699. if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
  1700. return LT.first * Entry->Cost;
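// Plain SSE2 has no byte-granularity shuffle, so the i8/i16 entries below
// expand into pshuflw/pshufhw/pshufd/unpck sequences with correspondingly
// higher costs.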
  1701. static const CostTblEntry SSE2ShuffleTbl[] = {
  1702. {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
  1703. {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
  1704. {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
  1705. {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
  1706. {TTI::SK_Broadcast, MVT::v8f16, 2}, // pshuflw + pshufd
  1707. {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
  1708. {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
  1709. {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
  1710. {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
  1711. {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
  1712. {TTI::SK_Reverse, MVT::v8f16, 3}, // pshuflw + pshufhw + pshufd
  1713. {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
  1714. // + 2*pshufd + 2*unpck + packus
  1715. {TTI::SK_Select, MVT::v2i64, 1}, // movsd
  1716. {TTI::SK_Select, MVT::v2f64, 1}, // movsd
  1717. {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
  1718. {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
  1719. {TTI::SK_Select, MVT::v8f16, 3}, // pand + pandn + por
  1720. {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
  1721. {TTI::SK_Splice, MVT::v2i64, 1}, // shufpd
  1722. {TTI::SK_Splice, MVT::v2f64, 1}, // shufpd
  1723. {TTI::SK_Splice, MVT::v4i32, 2}, // 2*{unpck,movsd,pshufd}
  1724. {TTI::SK_Splice, MVT::v8i16, 3}, // psrldq + psrlldq + por
  1725. {TTI::SK_Splice, MVT::v8f16, 3}, // psrldq + psrlldq + por
  1726. {TTI::SK_Splice, MVT::v16i8, 3}, // psrldq + psrlldq + por
  1727. {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
  1728. {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
  1729. {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
  1730. {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
  1731. // + pshufd/unpck
  1732. {TTI::SK_PermuteSingleSrc, MVT::v8f16, 5}, // 2*pshuflw + 2*pshufhw
  1733. // + pshufd/unpck
  1734. { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
  1735. // + 2*pshufd + 2*unpck + 2*packus
  1736. { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
  1737. { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
  1738. { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
  1739. { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
  1740. { TTI::SK_PermuteTwoSrc, MVT::v8f16, 8 }, // blend+permute
  1741. { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
  1742. };
  1743. static const CostTblEntry SSE3BroadcastLoadTbl[] = {
  1744. {TTI::SK_Broadcast, MVT::v2f64, 0}, // broadcast handled by movddup
  1745. };
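// A broadcast whose source is a load can fold into movddup on SSE3, so it
// is costed as free; the lookup below is only taken when one of the
// shuffle's operands is actually a LoadInst.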
  1746. if (ST->hasSSE2()) {
  1747. bool IsLoad =
  1748. llvm::any_of(Args, [](const auto &V) { return isa<LoadInst>(V); });
  1749. if (ST->hasSSE3() && IsLoad)
  1750. if (const auto *Entry =
  1751. CostTableLookup(SSE3BroadcastLoadTbl, Kind, LT.second)) {
  1752. assert(isLegalBroadcastLoad(BaseTp->getElementType(),
  1753. LT.second.getVectorElementCount()) &&
  1754. "Table entry missing from isLegalBroadcastLoad()");
  1755. return LT.first * Entry->Cost;
  1756. }
  1757. if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
  1758. return LT.first * Entry->Cost;
  1759. }
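// SSE1 only shuffles v4f32, and everything is built from one or two shufps.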
  1760. static const CostTblEntry SSE1ShuffleTbl[] = {
  1761. { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
  1762. { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
  1763. { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
  1764. { TTI::SK_Splice, MVT::v4f32, 2 }, // 2*shufps
  1765. { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
  1766. { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
  1767. };
  1768. if (ST->hasSSE1())
  1769. if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
  1770. return LT.first * Entry->Cost;
  1771. return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp);
  1772. }
  1773. InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
  1774. Type *Src,
  1775. TTI::CastContextHint CCH,
  1776. TTI::TargetCostKind CostKind,
  1777. const Instruction *I) {
  1778. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  1779. assert(ISD && "Invalid opcode");
  1780. // TODO: Allow non-throughput costs that aren't binary.
  1781. auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
  1782. if (CostKind != TTI::TCK_RecipThroughput)
  1783. return Cost == 0 ? 0 : 1;
  1784. return Cost;
  1785. };
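// i.e. whenever AdjustCost is applied, any non-zero table cost collapses
// to 1 for the non-throughput cost kinds (see the TODO above).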
1786. // The cost tables include both specific, custom (non-legal) src/dst type
1787. // conversions and generic, legalized types. We test for the custom entries
1788. // first, before falling back to legalization.
1789. // FIXME: Need a better design of the cost table to handle non-simple types of
1790. // potentially massive combinations (elem_num x src_type x dst_type).
  1791. static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
  1792. { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
  1793. { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
  1794. // Mask sign extend has an instruction.
  1795. { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
  1796. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 },
  1797. { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
  1798. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 },
  1799. { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
  1800. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 },
  1801. { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
  1802. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 },
  1803. { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
  1804. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 },
  1805. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
  1806. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
  1807. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
  1808. { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
  1809. { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
  1810. { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
  1811. { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1, 1 },
  1812. // Mask zero extend is a sext + shift.
  1813. { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
  1814. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 },
  1815. { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
  1816. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 },
  1817. { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
  1818. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 },
  1819. { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
  1820. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 },
  1821. { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
  1822. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 },
  1823. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
  1824. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
  1825. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
  1826. { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
  1827. { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
  1828. { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
  1829. { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1, 2 },
  1830. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },
  1831. { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 },
  1832. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },
  1833. { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 },
  1834. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },
  1835. { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 },
  1836. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },
  1837. { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 },
  1838. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },
  1839. { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 },
  1840. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },
  1841. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },
  1842. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 },
  1843. { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },
  1844. { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
  1845. { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
  1846. { ISD::TRUNCATE, MVT::v64i1, MVT::v32i16, 2 },
  1847. { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
  1848. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
  1849. { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb
  1850. { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb
  1851. { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb
  1852. };
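// AVX512DQ provides mask<->dword/qword moves (vpmovm2d/q, vpmovd2m/q2m)
// and native i64 <-> fp conversions (vcvt[u]qq2ps/pd, vcvttp[sd]2[u]qq),
// which is what the single-instruction entries below reflect.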
  1853. static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
  1854. // Mask sign extend has an instruction.
  1855. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },
  1856. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 },
  1857. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },
  1858. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },
  1859. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },
  1860. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i1, 1 },
  1861. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 },
  1862. { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 },
  1863. // Mask zero extend is a sext + shift.
  1864. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },
  1865. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 },
  1866. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },
  1867. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },
  1868. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },
  1869. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i1, 2 },
  1870. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 },
  1871. { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
  1872. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },
  1873. { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 },
  1874. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },
  1875. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },
  1876. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
  1877. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 },
  1878. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 },
  1879. { ISD::TRUNCATE, MVT::v16i1, MVT::v8i64, 2 },
  1880. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
  1881. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
  1882. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
  1883. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
  1884. { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
  1885. { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
  1886. { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
  1887. { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  1888. };
  1889. // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  1890. // 256-bit wide vectors.
  1891. static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
  1892. { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
  1893. { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
  1894. { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
  1895. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
  1896. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
  1897. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
  1898. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
  1899. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
  1900. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
  1901. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
  1902. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
  1903. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
  1904. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
  1905. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
  1906. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
  1907. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
  1908. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
  1909. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
  1910. { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb
  1911. { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb
  1912. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb
  1913. { ISD::TRUNCATE, MVT::v32i8, MVT::v16i32, 2 }, // vpmovdb
  1914. { ISD::TRUNCATE, MVT::v64i8, MVT::v16i32, 2 }, // vpmovdb
  1915. { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw
  1916. { ISD::TRUNCATE, MVT::v32i16, MVT::v16i32, 2 }, // vpmovdw
  1917. { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb
  1918. { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb
  1919. { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb
  1920. { ISD::TRUNCATE, MVT::v16i8, MVT::v8i64, 2 }, // vpmovqb
  1921. { ISD::TRUNCATE, MVT::v32i8, MVT::v8i64, 2 }, // vpmovqb
  1922. { ISD::TRUNCATE, MVT::v64i8, MVT::v8i64, 2 }, // vpmovqb
  1923. { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw
  1924. { ISD::TRUNCATE, MVT::v16i16, MVT::v8i64, 2 }, // vpmovqw
  1925. { ISD::TRUNCATE, MVT::v32i16, MVT::v8i64, 2 }, // vpmovqw
  1926. { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd
  1927. { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
  1928. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
  1929. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
  1930. { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
  1931. { ISD::TRUNCATE, MVT::v64i8, MVT::v32i16, 8 },
  1932. // Sign extend is zmm vpternlogd+vptruncdb.
  1933. // Zero extend is zmm broadcast load+vptruncdw.
  1934. { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
  1935. { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
  1936. { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
  1937. { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
  1938. { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
  1939. { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
  1940. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
  1941. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
  1942. // Sign extend is zmm vpternlogd+vptruncdw.
  1943. // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
  1944. { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
  1945. { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
  1946. { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
  1947. { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
  1948. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
  1949. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
  1950. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
  1951. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
  1952. { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
  1953. { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
  1954. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
  1955. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
  1956. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
  1957. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
  1958. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
  1959. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
  1960. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
  1961. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
  1962. { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
  1963. { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
  1964. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
  1965. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
  1966. { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
  1967. { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
  1968. { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
  1969. { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
  1970. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
  1971. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
  1972. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
  1973. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
  1974. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
  1975. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
  1976. { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
  1977. { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
  1978. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
  1979. { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
  1980. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
  1981. { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
  1982. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
  1983. { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
  1984. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
  1985. { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
  1986. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
  1987. { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
  1988. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
  1989. { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
  1990. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
  1991. { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
  1992. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
  1993. { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
  1994. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
  1995. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
  1996. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
  1997. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
1998. { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64, 15 },
1999. { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32, 11 },
2000. { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64, 31 },
  2001. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
  2002. { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 },
  2003. { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 },
2004. { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64, 15 },
  2005. { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
  2006. { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },
  2007. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
  2008. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
  2009. { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
  2010. { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
  2011. { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
  2012. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
  2013. };
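// The *VL tables that follow cover 128/256-bit vectors. Unlike the 512-bit
// tables above, they are consulted even when 512-bit registers are not in
// use (note the useAVX512Regs() guard in the lookup code further down).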
  2014. static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
  2015. // Mask sign extend has an instruction.
  2016. { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
  2017. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 },
  2018. { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
  2019. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 },
  2020. { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
  2021. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 },
  2022. { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
  2023. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 },
  2024. { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
  2025. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 },
  2026. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
  2027. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
  2028. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
  2029. { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
  2030. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v32i1, 1 },
  2031. { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v64i1, 1 },
  2032. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v64i1, 1 },
  2033. // Mask zero extend is a sext + shift.
  2034. { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
  2035. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 },
  2036. { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
  2037. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 },
  2038. { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
  2039. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 },
  2040. { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
  2041. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 },
  2042. { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
  2043. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 },
  2044. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
  2045. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
  2046. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
  2047. { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
  2048. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v32i1, 2 },
  2049. { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v64i1, 2 },
  2050. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v64i1, 2 },
  2051. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },
  2052. { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 },
  2053. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },
  2054. { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 },
  2055. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },
  2056. { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 },
  2057. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },
  2058. { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 },
  2059. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },
  2060. { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 },
  2061. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },
  2062. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },
  2063. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 },
  2064. { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },
  2065. { ISD::TRUNCATE, MVT::v32i1, MVT::v16i16, 2 },
  2066. { ISD::TRUNCATE, MVT::v64i1, MVT::v32i8, 2 },
  2067. { ISD::TRUNCATE, MVT::v64i1, MVT::v16i16, 2 },
  2068. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
  2069. };
  2070. static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
  2071. // Mask sign extend has an instruction.
  2072. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },
  2073. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 },
  2074. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },
  2075. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i1, 1 },
  2076. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },
  2077. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i1, 1 },
  2078. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 },
  2079. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },
  2080. // Mask zero extend is a sext + shift.
  2081. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },
  2082. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 },
  2083. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },
  2084. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i1, 2 },
  2085. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },
  2086. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i1, 2 },
  2087. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 },
  2088. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },
  2089. { ISD::TRUNCATE, MVT::v16i1, MVT::v4i64, 2 },
  2090. { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 },
  2091. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },
  2092. { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 },
  2093. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },
  2094. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },
  2095. { ISD::TRUNCATE, MVT::v8i1, MVT::v4i64, 2 },
  2096. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
  2097. { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
  2098. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
  2099. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
  2100. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
  2101. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
  2102. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
  2103. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
  2104. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
  2105. { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
  2106. { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
  2107. { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
  2108. { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
  2109. { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
  2110. { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
  2111. { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
  2112. { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
  2113. };
  2114. static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
  2115. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
  2116. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
  2117. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
  2118. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
  2119. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
  2120. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
  2121. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
  2122. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
  2123. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
  2124. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
  2125. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
  2126. { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 }, // vpslld+vptestmd
  2127. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
  2128. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
  2129. { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
  2130. { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb
  2131. { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw
  2132. { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb
  2133. // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
  2134. // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
  2135. { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
  2136. { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
  2137. { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
  2138. { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
  2139. { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
  2140. { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
  2141. { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
  2142. { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
  2143. // sign extend is vpcmpeq+maskedmove+vpmovdw
  2144. // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
  2145. { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
  2146. { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
  2147. { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
  2148. { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
  2149. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
  2150. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
  2151. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
  2152. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
  2153. { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
  2154. { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
  2155. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
  2156. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
  2157. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
  2158. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
  2159. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 }, // vpternlogd
  2160. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 }, // vpternlogd+psrld
  2161. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
  2162. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
  2163. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
  2164. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
  2165. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
  2166. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
  2167. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
  2168. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
  2169. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
  2170. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
  2171. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
  2172. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
  2173. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
  2174. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
  2175. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
  2176. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
  2177. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
  2178. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
  2179. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
  2180. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
  2181. { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
  2182. { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
  2183. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
  2184. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
  2185. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
  2186. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
  2187. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
  2188. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
  2189. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
  2190. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
  2191. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
  2192. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
  2193. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
  2194. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
  2195. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
  2196. { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },
  2197. { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
  2198. { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
  2199. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
  2200. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
  2201. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
  2202. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
  2203. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
  2204. };
  2205. static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
  2206. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
  2207. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
  2208. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
  2209. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
  2210. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
  2211. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
  2212. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
  2213. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
  2214. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
  2215. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
  2216. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
  2217. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
  2218. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
  2219. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
  2220. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
  2221. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
  2222. { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
  2223. { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
  2224. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
  2225. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
  2226. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
  2227. { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 4 },
  2228. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4 },
  2229. { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
  2230. { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
  2231. { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
  2232. { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
  2233. { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
  2234. { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
  2235. { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
  2236. { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
  2237. { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
  2238. { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
  2239. { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
  2240. { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
  2241. { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
  2242. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
  2243. { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
  2244. { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },
  2245. { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
  2246. { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
  2247. { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
  2248. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
  2249. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
  2250. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
  2251. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
  2252. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },
  2253. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
  2254. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
  2255. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
  2256. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
  2257. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
  2258. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
  2259. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },
  2260. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
  2261. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
  2262. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
  2263. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
  2264. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
  2265. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
  2266. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
  2267. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
  2268. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
  2269. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
  2270. };
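// AVX1 has no 256-bit integer instructions, so the integer extend/truncate
// entries below are modelled as operating on 128-bit halves plus cross-lane
// fixups, hence the higher costs compared with the AVX2 table above.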
  2271. static const TypeConversionCostTblEntry AVXConversionTbl[] = {
  2272. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
  2273. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
  2274. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
  2275. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
  2276. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
  2277. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
  2278. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
  2279. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
  2280. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
  2281. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
  2282. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
  2283. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
  2284. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
  2285. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
  2286. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
  2287. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
  2288. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
  2289. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
  2290. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
  2291. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
  2292. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
  2293. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
  2294. { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
  2295. { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
  2296. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
  2297. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
  2298. { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
  2299. { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
  2300. { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
  2301. { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw
  2302. { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
  2303. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
  2304. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
  2305. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
  2306. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
  2307. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
  2308. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
  2309. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
  2310. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
  2311. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
  2312. { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
  2313. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
  2314. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },
  2315. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
  2316. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
  2317. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
  2318. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
  2319. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
  2320. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
  2321. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
  2322. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
  2323. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
  2324. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
  2325. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
  2326. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
  2327. { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
  2328. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
  2329. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
  2330. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
  2331. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },
  2332. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
  2333. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
  2334. { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
  2335. { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
  2336. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
  2337. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
  2338. { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
  2339. { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
  2340. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
  2341. { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
  2342. { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },
  2343. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
  2344. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
  2345. { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
  2346. { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
  2347. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
  2348. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
  2349. { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
  2350. { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
  2351. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
  2352. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
  2353. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
  2354. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
  2355. { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },
  2356. { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
  2357. { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  2358. };
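// SSE4.1 pmovsx*/pmovzx* extend the low elements of a 128-bit source in a
// single instruction, which is what the 1-cost extend entries below model.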
  2359. static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
  2360. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
  2361. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
  2362. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
  2363. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
  2364. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
  2365. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
  2366. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
  2367. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
  2368. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
  2369. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
  2370. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
  2371. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
  2372. // These truncates end up widening elements.
  2373. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ
  2374. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ
  2375. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD
  2376. { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
  2377. { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
  2378. { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },
  2379. { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
  2380. { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
  2381. { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
  2382. { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
  2383. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
  2384. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
  2385. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
  2386. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
  2387. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
  2388. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
  2389. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
  2390. { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
  2391. { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
  2392. { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
  2393. { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
  2394. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
  2395. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
  2396. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
  2397. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
  2398. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
  2399. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
  2400. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
  2401. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
  2402. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
  2403. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },
  2404. { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
  2405. { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
  2406. { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
  2407. { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
  2408. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
  2409. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
  2410. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
  2411. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
  2412. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
  2413. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },
  2414. { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
  2415. { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
  2416. { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
  2417. { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
  2418. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
  2419. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
  2420. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
  2421. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
  2422. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
  2423. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
  2424. };
  2425. static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
  2426. // These are somewhat magic numbers justified by comparing the
  2427. // output of llvm-mca for our various supported scheduler models
2428. // and basing them on the worst-case scenario.
  2429. { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
  2430. { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
  2431. { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
  2432. { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
  2433. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
  2434. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
  2435. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
  2436. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
  2437. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
  2438. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
  2439. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
  2440. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },
  2441. { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
  2442. { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
  2443. { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
  2444. { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
  2445. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
  2446. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
  2447. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
  2448. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
  2449. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
  2450. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
  2451. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
  2452. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
  2453. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },
  2454. { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
  2455. { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
  2456. { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
  2457. { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
  2458. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
  2459. { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
  2460. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
  2461. { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
  2462. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
  2463. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },
  2464. { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
  2465. { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
  2466. { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
  2467. { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
  2468. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
  2469. { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
  2470. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
  2471. { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
  2472. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
  2473. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },
  2474. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
  2475. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
  2476. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
  2477. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
  2478. { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
  2479. { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
  2480. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
  2481. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
  2482. { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
  2483. { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
  2484. { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
  2485. { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },
  2486. // These truncates are really widening elements.
  2487. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
  2488. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
  2489. { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
  2490. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
  2491. { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
  2492. { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
  2493. { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
  2494. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
  2495. { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB
  2496. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
  2497. { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
  2498. { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
  2499. { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2500. { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
  2501. { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
  2502. { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
  2503. { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD
  2504. };
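// Lookup order below is most- to least-specific feature set, first against
// the exact (simple) source/destination types and then, further down,
// against the legalized types.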
  2505. // Attempt to map directly to (simple) MVT types to let us match custom entries.
  2506. EVT SrcTy = TLI->getValueType(DL, Src);
  2507. EVT DstTy = TLI->getValueType(DL, Dst);
  2508. // The function getSimpleVT only handles simple value types.
  2509. if (SrcTy.isSimple() && DstTy.isSimple()) {
  2510. MVT SimpleSrcTy = SrcTy.getSimpleVT();
  2511. MVT SimpleDstTy = DstTy.getSimpleVT();
    if (ST->useAVX512Regs()) {
      if (ST->hasBWI())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);

      if (ST->hasDQI())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);

      if (ST->hasAVX512())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);
    }

    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX2()) {
      if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasAVX()) {
      if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasSSE41()) {
      if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasSSE2()) {
      if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }
  }
  // Fall back to legalized types.
  std::pair<InstructionCost, MVT> LTSrc = getTypeLegalizationCost(Src);
  std::pair<InstructionCost, MVT> LTDest = getTypeLegalizationCost(Dst);

  // If we're truncating to the same legalized type - just assume it's free.
  if (ISD == ISD::TRUNCATE && LTSrc.second == LTDest.second)
    return TTI::TCC_Free;
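
  // Below, table costs are scaled by the larger of the source/destination
  // legalization factors, since the conversion has to be repeated for each
  // legalized chunk.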
  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  // Fallback: for i8/i16 sitofp/uitofp cases we need to extend the source to
  // i32 first.
  if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
      1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
    Type *ExtSrc = Src->getWithNewBitWidth(32);
    unsigned ExtOpc =
        (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;

    // For scalar loads the extend would be free.
    InstructionCost ExtCost = 0;
    if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
      ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);

    return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
                                      TTI::CastContextHint::None, CostKind);
  }
  // Fallback: for i8/i16 fptosi/fptoui cases we convert to i32 first and then
  // truncate the result.
  if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
      1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
    Type *TruncDst = Dst->getWithNewBitWidth(32);
    return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
           getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
                            TTI::CastContextHint::None, CostKind);
  }

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  // Early out if this type isn't scalar/vector integer/float.
  if (!(ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy()))
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  InstructionCost ExtraCost = 0;
  if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
    // Some vector comparison predicates cost extra instructions.
    // TODO: Should we invert this and assume worst case cmp costs
    // and reduce for particular predicates?
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      // Fall back to I if a specific predicate wasn't specified.
      CmpInst::Predicate Pred = VecPred;
      if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
                Pred == CmpInst::BAD_FCMP_PREDICATE))
        Pred = cast<CmpInst>(I)->getPredicate();

      switch (Pred) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 1;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      case CmpInst::Predicate::FCMP_ONE:
      case CmpInst::Predicate::FCMP_UEQ:
        // Without AVX we need to expand FCMP_ONE/FCMP_UEQ cases.
        // Use FCMP_UEQ expansion - FCMP_ONE should be the same.
        if (CondTy && !ST->hasAVX())
          return getCmpSelInstrCost(Opcode, ValTy, CondTy,
                                    CmpInst::Predicate::FCMP_UNO, CostKind) +
                 getCmpSelInstrCost(Opcode, ValTy, CondTy,
                                    CmpInst::Predicate::FCMP_OEQ, CostKind) +
                 getArithmeticInstrCost(Instruction::Or, CondTy, CostKind);
        break;
      case CmpInst::Predicate::BAD_ICMP_PREDICATE:
      case CmpInst::Predicate::BAD_FCMP_PREDICATE:
        // Assume worst case scenario and add the maximum extra cost.
        ExtraCost = 3;
        break;
      default:
        break;
      }
    }
  }
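
  // Each cost table entry below lists costs for the four TTI cost kinds:
  // { reciprocal throughput, latency, code size, size-and-latency }.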
  static const CostKindTblEntry SLMCostTbl[] = {
    // slm pcmpeq/pcmpgt throughput is 2
    { ISD::SETCC, MVT::v2i64, { 2, 5, 1, 2 } },
    // slm pblendvb/blendvpd/blendvps throughput is 4
    { ISD::SELECT, MVT::v2f64, { 4, 4, 1, 3 } }, // vblendvpd
    { ISD::SELECT, MVT::v4f32, { 4, 4, 1, 3 } }, // vblendvps
    { ISD::SELECT, MVT::v2i64, { 4, 4, 1, 3 } }, // pblendvb
    { ISD::SELECT, MVT::v8i32, { 4, 4, 1, 3 } }, // pblendvb
    { ISD::SELECT, MVT::v8i16, { 4, 4, 1, 3 } }, // pblendvb
    { ISD::SELECT, MVT::v16i8, { 4, 4, 1, 3 } }, // pblendvb
  };
  static const CostKindTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v32i8, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v64i8, { 1, 1, 1, 1 } },
  };

  static const CostKindTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8f64, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::v4f64, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::v16f32, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::v8f32, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v32i16, { 3, 7, 5, 5 } },
    { ISD::SETCC, MVT::v64i8, { 3, 7, 5, 5 } },
    { ISD::SELECT, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v8f64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v4f64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v2f64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::f64, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v16f32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v8f32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v4f32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::f32, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v32i16, { 2, 2, 4, 4 } },
    { ISD::SELECT, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v64i8, { 2, 2, 4, 4 } },
    { ISD::SELECT, MVT::v32i8, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v16i8, { 1, 1, 1, 1 } },
  };
  static const CostKindTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, { 1, 4, 1, 2 } },
    { ISD::SETCC, MVT::v2f64, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::f64, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::v8f32, { 1, 4, 1, 2 } },
    { ISD::SETCC, MVT::v4f32, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::f32, { 1, 4, 1, 1 } },
    { ISD::SETCC, MVT::v4i64, { 1, 1, 1, 2 } },
    { ISD::SETCC, MVT::v8i32, { 1, 1, 1, 2 } },
    { ISD::SETCC, MVT::v16i16, { 1, 1, 1, 2 } },
    { ISD::SETCC, MVT::v32i8, { 1, 1, 1, 2 } },
    { ISD::SELECT, MVT::v4f64, { 2, 2, 1, 2 } },  // vblendvpd
    { ISD::SELECT, MVT::v8f32, { 2, 2, 1, 2 } },  // vblendvps
    { ISD::SELECT, MVT::v4i64, { 2, 2, 1, 2 } },  // pblendvb
    { ISD::SELECT, MVT::v8i32, { 2, 2, 1, 2 } },  // pblendvb
    { ISD::SELECT, MVT::v16i16, { 2, 2, 1, 2 } }, // pblendvb
    { ISD::SELECT, MVT::v32i8, { 2, 2, 1, 2 } },  // pblendvb
  };

  static const CostKindTblEntry XOPCostTbl[] = {
    { ISD::SETCC, MVT::v4i64, { 4, 2, 5, 6 } },
    { ISD::SETCC, MVT::v2i64, { 1, 1, 1, 1 } },
  };

  static const CostKindTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, { 2, 3, 1, 2 } },
    { ISD::SETCC, MVT::v2f64, { 1, 3, 1, 1 } },
    { ISD::SETCC, MVT::f64, { 1, 3, 1, 1 } },
    { ISD::SETCC, MVT::v8f32, { 2, 3, 1, 2 } },
    { ISD::SETCC, MVT::v4f32, { 1, 3, 1, 1 } },
    { ISD::SETCC, MVT::f32, { 1, 3, 1, 1 } },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, { 4, 2, 5, 6 } },
    { ISD::SETCC, MVT::v8i32, { 4, 2, 5, 6 } },
    { ISD::SETCC, MVT::v16i16, { 4, 2, 5, 6 } },
    { ISD::SETCC, MVT::v32i8, { 4, 2, 5, 6 } },
    { ISD::SELECT, MVT::v4f64, { 3, 3, 1, 2 } },  // vblendvpd
    { ISD::SELECT, MVT::v8f32, { 3, 3, 1, 2 } },  // vblendvps
    { ISD::SELECT, MVT::v4i64, { 3, 3, 1, 2 } },  // vblendvpd
    { ISD::SELECT, MVT::v8i32, { 3, 3, 1, 2 } },  // vblendvps
    { ISD::SELECT, MVT::v16i16, { 3, 3, 3, 3 } }, // vandps + vandnps + vorps
    { ISD::SELECT, MVT::v32i8, { 3, 3, 3, 3 } },  // vandps + vandnps + vorps
  };

  static const CostKindTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2i64, { 1, 2, 1, 2 } },
  };

  static const CostKindTblEntry SSE41CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, { 1, 5, 1, 1 } },
    { ISD::SETCC, MVT::v4f32, { 1, 5, 1, 1 } },
    { ISD::SELECT, MVT::v2f64, { 2, 2, 1, 2 } }, // blendvpd
    { ISD::SELECT, MVT::f64, { 2, 2, 1, 2 } },   // blendvpd
    { ISD::SELECT, MVT::v4f32, { 2, 2, 1, 2 } }, // blendvps
    { ISD::SELECT, MVT::f32, { 2, 2, 1, 2 } },   // blendvps
    { ISD::SELECT, MVT::v2i64, { 2, 2, 1, 2 } }, // pblendvb
    { ISD::SELECT, MVT::v4i32, { 2, 2, 1, 2 } }, // pblendvb
    { ISD::SELECT, MVT::v8i16, { 2, 2, 1, 2 } }, // pblendvb
    { ISD::SELECT, MVT::v16i8, { 2, 2, 1, 2 } }, // pblendvb
  };

  static const CostKindTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, { 2, 5, 1, 1 } },
    { ISD::SETCC, MVT::f64, { 1, 5, 1, 1 } },
    { ISD::SETCC, MVT::v2i64, { 5, 4, 5, 5 } },  // pcmpeqd/pcmpgtd expansion
    { ISD::SETCC, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::SETCC, MVT::v16i8, { 1, 1, 1, 1 } },
    { ISD::SELECT, MVT::v2f64, { 2, 2, 3, 3 } }, // andpd + andnpd + orpd
    { ISD::SELECT, MVT::f64, { 2, 2, 3, 3 } },   // andpd + andnpd + orpd
    { ISD::SELECT, MVT::v2i64, { 2, 2, 3, 3 } }, // pand + pandn + por
    { ISD::SELECT, MVT::v4i32, { 2, 2, 3, 3 } }, // pand + pandn + por
    { ISD::SELECT, MVT::v8i16, { 2, 2, 3, 3 } }, // pand + pandn + por
    { ISD::SELECT, MVT::v16i8, { 2, 2, 3, 3 } }, // pand + pandn + por
  };

  static const CostKindTblEntry SSE1CostTbl[] = {
    { ISD::SETCC, MVT::v4f32, { 2, 5, 1, 1 } },
    { ISD::SETCC, MVT::f32, { 1, 5, 1, 1 } },
    { ISD::SELECT, MVT::v4f32, { 2, 2, 3, 3 } }, // andps + andnps + orps
    { ISD::SELECT, MVT::f32, { 2, 2, 3, 3 } },   // andps + andnps + orps
  };
  if (ST->useSLMArithCosts())
    if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * (ExtraCost + *KindCost);

  // Assume a 3cy latency for fp select ops.
  if (CostKind == TTI::TCK_Latency && Opcode == Instruction::Select)
    if (ValTy->getScalarType()->isFloatingPointTy())
      return 3;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}
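
// Maximum element size, in bytes, supported by the element-wise atomic memory
// intrinsics on x86.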
unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }

InstructionCost
X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll

  // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
  //       specialized in these tables yet.
  static const CostKindTblEntry AVX512VBMI2CostTbl[] = {
    { ISD::FSHL, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::FSHL, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v8i16, { 1, 1, 1, 1 } },
  };

  static const CostKindTblEntry AVX512BITALGCostTbl[] = {
    { ISD::CTPOP, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v32i8, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v16i8, { 1, 1, 1, 1 } },
  };

  static const CostKindTblEntry AVX512VPOPCNTDQCostTbl[] = {
    { ISD::CTPOP, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::CTPOP, MVT::v4i32, { 1, 1, 1, 1 } },
  };

  static const CostKindTblEntry AVX512CDCostTbl[] = {
    { ISD::CTLZ, MVT::v8i64, { 1, 5, 1, 1 } },
    { ISD::CTLZ, MVT::v16i32, { 1, 5, 1, 1 } },
    { ISD::CTLZ, MVT::v32i16, { 18, 27, 23, 27 } },
    { ISD::CTLZ, MVT::v64i8, { 3, 16, 9, 11 } },
    { ISD::CTLZ, MVT::v4i64, { 1, 5, 1, 1 } },
    { ISD::CTLZ, MVT::v8i32, { 1, 5, 1, 1 } },
    { ISD::CTLZ, MVT::v16i16, { 8, 19, 11, 13 } },
    { ISD::CTLZ, MVT::v32i8, { 2, 11, 9, 10 } },
    { ISD::CTLZ, MVT::v2i64, { 1, 5, 1, 1 } },
    { ISD::CTLZ, MVT::v4i32, { 1, 5, 1, 1 } },
    { ISD::CTLZ, MVT::v8i16, { 3, 15, 4, 6 } },
    { ISD::CTLZ, MVT::v16i8, { 2, 10, 9, 10 } },
    { ISD::CTTZ, MVT::v8i64, { 2, 8, 6, 7 } },
    { ISD::CTTZ, MVT::v16i32, { 2, 8, 6, 7 } },
    { ISD::CTTZ, MVT::v4i64, { 1, 8, 6, 6 } },
    { ISD::CTTZ, MVT::v8i32, { 1, 8, 6, 6 } },
    { ISD::CTTZ, MVT::v2i64, { 1, 8, 6, 6 } },
    { ISD::CTTZ, MVT::v4i32, { 1, 8, 6, 6 } },
  };
  static const CostKindTblEntry AVX512BWCostTbl[] = {
    { ISD::ABS, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::BITREVERSE, MVT::v8i64, { 3 } },
    { ISD::BITREVERSE, MVT::v16i32, { 3 } },
    { ISD::BITREVERSE, MVT::v32i16, { 3 } },
    { ISD::BITREVERSE, MVT::v64i8, { 2 } },
    { ISD::BSWAP, MVT::v8i64, { 1 } },
    { ISD::BSWAP, MVT::v16i32, { 1 } },
    { ISD::BSWAP, MVT::v32i16, { 1 } },
    { ISD::CTLZ, MVT::v8i64, { 8, 22, 23, 23 } },
    { ISD::CTLZ, MVT::v16i32, { 8, 23, 25, 25 } },
    { ISD::CTLZ, MVT::v32i16, { 4, 15, 15, 16 } },
    { ISD::CTLZ, MVT::v64i8, { 3, 12, 10, 9 } },
    { ISD::CTPOP, MVT::v2i64, { 3, 7, 10, 10 } },
    { ISD::CTPOP, MVT::v4i64, { 3, 7, 10, 10 } },
    { ISD::CTPOP, MVT::v8i64, { 3, 8, 10, 12 } },
    { ISD::CTPOP, MVT::v4i32, { 7, 11, 14, 14 } },
    { ISD::CTPOP, MVT::v8i32, { 7, 11, 14, 14 } },
    { ISD::CTPOP, MVT::v16i32, { 7, 12, 14, 16 } },
    { ISD::CTPOP, MVT::v8i16, { 2, 7, 11, 11 } },
    { ISD::CTPOP, MVT::v16i16, { 2, 7, 11, 11 } },
    { ISD::CTPOP, MVT::v32i16, { 3, 7, 11, 13 } },
    { ISD::CTPOP, MVT::v16i8, { 2, 4, 8, 8 } },
    { ISD::CTPOP, MVT::v32i8, { 2, 4, 8, 8 } },
    { ISD::CTPOP, MVT::v64i8, { 2, 5, 8, 10 } },
    { ISD::CTTZ, MVT::v8i16, { 3, 9, 14, 14 } },
    { ISD::CTTZ, MVT::v16i16, { 3, 9, 14, 14 } },
    { ISD::CTTZ, MVT::v32i16, { 3, 10, 14, 16 } },
    { ISD::CTTZ, MVT::v16i8, { 2, 6, 11, 11 } },
    { ISD::CTTZ, MVT::v32i8, { 2, 6, 11, 11 } },
    { ISD::CTTZ, MVT::v64i8, { 3, 7, 11, 13 } },
    { ISD::ROTL, MVT::v32i16, { 2, 8, 6, 8 } },
    { ISD::ROTL, MVT::v16i16, { 2, 8, 6, 7 } },
    { ISD::ROTL, MVT::v8i16, { 2, 7, 6, 7 } },
    { ISD::ROTL, MVT::v64i8, { 5, 6, 11, 12 } },
    { ISD::ROTL, MVT::v32i8, { 5, 15, 7, 10 } },
    { ISD::ROTL, MVT::v16i8, { 5, 15, 7, 10 } },
    { ISD::ROTR, MVT::v32i16, { 2, 8, 6, 8 } },
    { ISD::ROTR, MVT::v16i16, { 2, 8, 6, 7 } },
    { ISD::ROTR, MVT::v8i16, { 2, 7, 6, 7 } },
    { ISD::ROTR, MVT::v64i8, { 5, 6, 12, 14 } },
    { ISD::ROTR, MVT::v32i8, { 5, 14, 6, 9 } },
    { ISD::ROTR, MVT::v16i8, { 5, 14, 6, 9 } },
    { ISD::SADDSAT, MVT::v32i16, { 1 } },
    { ISD::SADDSAT, MVT::v64i8, { 1 } },
    { ISD::SMAX, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::SMAX, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::SMIN, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::SMIN, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::SSUBSAT, MVT::v32i16, { 1 } },
    { ISD::SSUBSAT, MVT::v64i8, { 1 } },
    { ISD::UADDSAT, MVT::v32i16, { 1 } },
    { ISD::UADDSAT, MVT::v64i8, { 1 } },
    { ISD::UMAX, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::UMAX, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::UMIN, MVT::v32i16, { 1, 1, 1, 1 } },
    { ISD::UMIN, MVT::v64i8, { 1, 1, 1, 1 } },
    { ISD::USUBSAT, MVT::v32i16, { 1 } },
    { ISD::USUBSAT, MVT::v64i8, { 1 } },
  };
  static const CostKindTblEntry AVX512CostTbl[] = {
    { ISD::ABS, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v32i16, { 2, 7, 4, 4 } },
    { ISD::ABS, MVT::v16i16, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v64i8, { 2, 7, 4, 4 } },
    { ISD::ABS, MVT::v32i8, { 1, 1, 1, 1 } },
    { ISD::BITREVERSE, MVT::v8i64, { 36 } },
    { ISD::BITREVERSE, MVT::v16i32, { 24 } },
    { ISD::BITREVERSE, MVT::v32i16, { 10 } },
    { ISD::BITREVERSE, MVT::v64i8, { 10 } },
    { ISD::BSWAP, MVT::v8i64, { 4 } },
    { ISD::BSWAP, MVT::v16i32, { 4 } },
    { ISD::BSWAP, MVT::v32i16, { 4 } },
    { ISD::CTLZ, MVT::v8i64, { 10, 28, 32, 32 } },
    { ISD::CTLZ, MVT::v16i32, { 12, 30, 38, 38 } },
    { ISD::CTLZ, MVT::v32i16, { 8, 15, 29, 29 } },
    { ISD::CTLZ, MVT::v64i8, { 6, 11, 19, 19 } },
    { ISD::CTPOP, MVT::v8i64, { 16, 16, 19, 19 } },
    { ISD::CTPOP, MVT::v16i32, { 24, 19, 27, 27 } },
    { ISD::CTPOP, MVT::v32i16, { 18, 15, 22, 22 } },
    { ISD::CTPOP, MVT::v64i8, { 12, 11, 16, 16 } },
    { ISD::CTTZ, MVT::v8i64, { 2, 8, 6, 7 } },
    { ISD::CTTZ, MVT::v16i32, { 2, 8, 6, 7 } },
    { ISD::CTTZ, MVT::v32i16, { 7, 17, 27, 27 } },
    { ISD::CTTZ, MVT::v64i8, { 6, 13, 21, 21 } },
    { ISD::ROTL, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::ROTL, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v8i64, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v4i64, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v2i64, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v8i32, { 1, 1, 1, 1 } },
    { ISD::ROTR, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::SMAX, MVT::v8i64, { 1, 3, 1, 1 } },
    { ISD::SMAX, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::SMAX, MVT::v32i16, { 3, 7, 5, 5 } },
    { ISD::SMAX, MVT::v64i8, { 3, 7, 5, 5 } },
    { ISD::SMAX, MVT::v4i64, { 1, 3, 1, 1 } },
    { ISD::SMAX, MVT::v2i64, { 1, 3, 1, 1 } },
    { ISD::SMIN, MVT::v8i64, { 1, 3, 1, 1 } },
    { ISD::SMIN, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::SMIN, MVT::v32i16, { 3, 7, 5, 5 } },
    { ISD::SMIN, MVT::v64i8, { 3, 7, 5, 5 } },
    { ISD::SMIN, MVT::v4i64, { 1, 3, 1, 1 } },
    { ISD::SMIN, MVT::v2i64, { 1, 3, 1, 1 } },
    { ISD::UMAX, MVT::v8i64, { 1, 3, 1, 1 } },
    { ISD::UMAX, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::UMAX, MVT::v32i16, { 3, 7, 5, 5 } },
    { ISD::UMAX, MVT::v64i8, { 3, 7, 5, 5 } },
    { ISD::UMAX, MVT::v4i64, { 1, 3, 1, 1 } },
    { ISD::UMAX, MVT::v2i64, { 1, 3, 1, 1 } },
    { ISD::UMIN, MVT::v8i64, { 1, 3, 1, 1 } },
    { ISD::UMIN, MVT::v16i32, { 1, 1, 1, 1 } },
    { ISD::UMIN, MVT::v32i16, { 3, 7, 5, 5 } },
    { ISD::UMIN, MVT::v64i8, { 3, 7, 5, 5 } },
    { ISD::UMIN, MVT::v4i64, { 1, 3, 1, 1 } },
    { ISD::UMIN, MVT::v2i64, { 1, 3, 1, 1 } },
    { ISD::USUBSAT, MVT::v16i32, { 2 } }, // pmaxud + psubd
    { ISD::USUBSAT, MVT::v2i64, { 2 } },  // pmaxuq + psubq
    { ISD::USUBSAT, MVT::v4i64, { 2 } },  // pmaxuq + psubq
    { ISD::USUBSAT, MVT::v8i64, { 2 } },  // pmaxuq + psubq
    { ISD::UADDSAT, MVT::v16i32, { 3 } }, // not + pminud + paddd
    { ISD::UADDSAT, MVT::v2i64, { 3 } },  // not + pminuq + paddq
    { ISD::UADDSAT, MVT::v4i64, { 3 } },  // not + pminuq + paddq
    { ISD::UADDSAT, MVT::v8i64, { 3 } },  // not + pminuq + paddq
    { ISD::SADDSAT, MVT::v32i16, { 2 } },
    { ISD::SADDSAT, MVT::v64i8, { 2 } },
    { ISD::SSUBSAT, MVT::v32i16, { 2 } },
    { ISD::SSUBSAT, MVT::v64i8, { 2 } },
    { ISD::UADDSAT, MVT::v32i16, { 2 } },
    { ISD::UADDSAT, MVT::v64i8, { 2 } },
    { ISD::USUBSAT, MVT::v32i16, { 2 } },
    { ISD::USUBSAT, MVT::v64i8, { 2 } },
    { ISD::FMAXNUM, MVT::f32, { 2 } },
    { ISD::FMAXNUM, MVT::v4f32, { 2 } },
    { ISD::FMAXNUM, MVT::v8f32, { 2 } },
    { ISD::FMAXNUM, MVT::v16f32, { 2 } },
    { ISD::FMAXNUM, MVT::f64, { 2 } },
    { ISD::FMAXNUM, MVT::v2f64, { 2 } },
    { ISD::FMAXNUM, MVT::v4f64, { 2 } },
    { ISD::FMAXNUM, MVT::v8f64, { 2 } },
    { ISD::FSQRT, MVT::f32, { 3, 12, 1, 1 } },    // Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, { 3, 12, 1, 1 } },  // Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, { 6, 12, 1, 1 } },  // Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::v16f32, { 12, 20, 1, 3 } },// Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, { 6, 18, 1, 1 } },    // Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, { 6, 18, 1, 1 } },  // Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, { 12, 18, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f64, { 24, 32, 1, 3 } }, // Skylake from http://www.agner.org/
  };
  static const CostKindTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, { 4 } },
    { ISD::BITREVERSE, MVT::v8i32, { 4 } },
    { ISD::BITREVERSE, MVT::v16i16, { 4 } },
    { ISD::BITREVERSE, MVT::v32i8, { 4 } },
    { ISD::BITREVERSE, MVT::v2i64, { 1 } },
    { ISD::BITREVERSE, MVT::v4i32, { 1 } },
    { ISD::BITREVERSE, MVT::v8i16, { 1 } },
    { ISD::BITREVERSE, MVT::v16i8, { 1 } },
    { ISD::BITREVERSE, MVT::i64, { 3 } },
    { ISD::BITREVERSE, MVT::i32, { 3 } },
    { ISD::BITREVERSE, MVT::i16, { 3 } },
    { ISD::BITREVERSE, MVT::i8, { 3 } },
    // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
    { ISD::ROTL, MVT::v4i64, { 4, 7, 5, 6 } },
    { ISD::ROTL, MVT::v8i32, { 4, 7, 5, 6 } },
    { ISD::ROTL, MVT::v16i16, { 4, 7, 5, 6 } },
    { ISD::ROTL, MVT::v32i8, { 4, 7, 5, 6 } },
    { ISD::ROTL, MVT::v2i64, { 1, 3, 1, 1 } },
    { ISD::ROTL, MVT::v4i32, { 1, 3, 1, 1 } },
    { ISD::ROTL, MVT::v8i16, { 1, 3, 1, 1 } },
    { ISD::ROTL, MVT::v16i8, { 1, 3, 1, 1 } },
    { ISD::ROTR, MVT::v4i64, { 4, 7, 8, 9 } },
    { ISD::ROTR, MVT::v8i32, { 4, 7, 8, 9 } },
    { ISD::ROTR, MVT::v16i16, { 4, 7, 8, 9 } },
    { ISD::ROTR, MVT::v32i8, { 4, 7, 8, 9 } },
    { ISD::ROTR, MVT::v2i64, { 1, 3, 3, 3 } },
    { ISD::ROTR, MVT::v4i32, { 1, 3, 3, 3 } },
    { ISD::ROTR, MVT::v8i16, { 1, 3, 3, 3 } },
    { ISD::ROTR, MVT::v16i8, { 1, 3, 3, 3 } }
  };
  static const CostKindTblEntry AVX2CostTbl[] = {
    { ISD::ABS, MVT::v2i64, { 2, 4, 3, 5 } },  // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS, MVT::v4i64, { 2, 4, 3, 5 } },  // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v8i32, { 1, 1, 1, 2 } },
    { ISD::ABS, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v16i16, { 1, 1, 1, 2 } },
    { ISD::ABS, MVT::v16i8, { 1, 1, 1, 1 } },
    { ISD::ABS, MVT::v32i8, { 1, 1, 1, 2 } },
    { ISD::BITREVERSE, MVT::v2i64, { 3 } },
    { ISD::BITREVERSE, MVT::v4i64, { 3 } },
    { ISD::BITREVERSE, MVT::v4i32, { 3 } },
    { ISD::BITREVERSE, MVT::v8i32, { 3 } },
    { ISD::BITREVERSE, MVT::v8i16, { 3 } },
    { ISD::BITREVERSE, MVT::v16i16, { 3 } },
    { ISD::BITREVERSE, MVT::v16i8, { 3 } },
    { ISD::BITREVERSE, MVT::v32i8, { 3 } },
    { ISD::BSWAP, MVT::v4i64, { 1 } },
    { ISD::BSWAP, MVT::v8i32, { 1 } },
    { ISD::BSWAP, MVT::v16i16, { 1 } },
    { ISD::CTLZ, MVT::v2i64, { 7, 18, 24, 25 } },
    { ISD::CTLZ, MVT::v4i64, { 14, 18, 24, 44 } },
    { ISD::CTLZ, MVT::v4i32, { 5, 16, 19, 20 } },
    { ISD::CTLZ, MVT::v8i32, { 10, 16, 19, 34 } },
    { ISD::CTLZ, MVT::v8i16, { 4, 13, 14, 15 } },
    { ISD::CTLZ, MVT::v16i16, { 6, 14, 14, 24 } },
    { ISD::CTLZ, MVT::v16i8, { 3, 12, 9, 10 } },
    { ISD::CTLZ, MVT::v32i8, { 4, 12, 9, 14 } },
    { ISD::CTPOP, MVT::v2i64, { 3, 9, 10, 10 } },
    { ISD::CTPOP, MVT::v4i64, { 4, 9, 10, 14 } },
    { ISD::CTPOP, MVT::v4i32, { 7, 12, 14, 14 } },
    { ISD::CTPOP, MVT::v8i32, { 7, 12, 14, 18 } },
    { ISD::CTPOP, MVT::v8i16, { 3, 7, 11, 11 } },
    { ISD::CTPOP, MVT::v16i16, { 6, 8, 11, 18 } },
    { ISD::CTPOP, MVT::v16i8, { 2, 5, 8, 8 } },
    { ISD::CTPOP, MVT::v32i8, { 3, 5, 8, 12 } },
    { ISD::CTTZ, MVT::v2i64, { 4, 11, 13, 13 } },
    { ISD::CTTZ, MVT::v4i64, { 5, 11, 13, 20 } },
    { ISD::CTTZ, MVT::v4i32, { 7, 14, 17, 17 } },
    { ISD::CTTZ, MVT::v8i32, { 7, 15, 17, 24 } },
    { ISD::CTTZ, MVT::v8i16, { 4, 9, 14, 14 } },
    { ISD::CTTZ, MVT::v16i16, { 6, 9, 14, 24 } },
    { ISD::CTTZ, MVT::v16i8, { 3, 7, 11, 11 } },
    { ISD::CTTZ, MVT::v32i8, { 5, 7, 11, 18 } },
    { ISD::SADDSAT, MVT::v16i16, { 1 } },
    { ISD::SADDSAT, MVT::v32i8, { 1 } },
    { ISD::SMAX, MVT::v2i64, { 2, 7, 2, 3 } },
    { ISD::SMAX, MVT::v4i64, { 2, 7, 2, 3 } },
    { ISD::SMAX, MVT::v8i32, { 1, 1, 1, 2 } },
    { ISD::SMAX, MVT::v16i16, { 1, 1, 1, 2 } },
    { ISD::SMAX, MVT::v32i8, { 1, 1, 1, 2 } },
    { ISD::SMIN, MVT::v2i64, { 2, 7, 2, 3 } },
    { ISD::SMIN, MVT::v4i64, { 2, 7, 2, 3 } },
    { ISD::SMIN, MVT::v8i32, { 1, 1, 1, 2 } },
    { ISD::SMIN, MVT::v16i16, { 1, 1, 1, 2 } },
    { ISD::SMIN, MVT::v32i8, { 1, 1, 1, 2 } },
    { ISD::SSUBSAT, MVT::v16i16, { 1 } },
    { ISD::SSUBSAT, MVT::v32i8, { 1 } },
    { ISD::UADDSAT, MVT::v16i16, { 1 } },
    { ISD::UADDSAT, MVT::v32i8, { 1 } },
    { ISD::UADDSAT, MVT::v8i32, { 3 } }, // not + pminud + paddd
    { ISD::UMAX, MVT::v2i64, { 2, 8, 5, 6 } },
    { ISD::UMAX, MVT::v4i64, { 2, 8, 5, 8 } },
    { ISD::UMAX, MVT::v8i32, { 1, 1, 1, 2 } },
    { ISD::UMAX, MVT::v16i16, { 1, 1, 1, 2 } },
    { ISD::UMAX, MVT::v32i8, { 1, 1, 1, 2 } },
    { ISD::UMIN, MVT::v2i64, { 2, 8, 5, 6 } },
    { ISD::UMIN, MVT::v4i64, { 2, 8, 5, 8 } },
    { ISD::UMIN, MVT::v8i32, { 1, 1, 1, 2 } },
    { ISD::UMIN, MVT::v16i16, { 1, 1, 1, 2 } },
    { ISD::UMIN, MVT::v32i8, { 1, 1, 1, 2 } },
    { ISD::USUBSAT, MVT::v16i16, { 1 } },
    { ISD::USUBSAT, MVT::v32i8, { 1 } },
    { ISD::USUBSAT, MVT::v8i32, { 2 } }, // pmaxud + psubd
    { ISD::FMAXNUM, MVT::v8f32, { 3 } }, // MAXPS + CMPUNORDPS + BLENDVPS
    { ISD::FMAXNUM, MVT::v4f64, { 3 } }, // MAXPD + CMPUNORDPD + BLENDVPD
    { ISD::FSQRT, MVT::f32, { 7, 15, 1, 1 } },    // vsqrtss
    { ISD::FSQRT, MVT::v4f32, { 7, 15, 1, 1 } },  // vsqrtps
    { ISD::FSQRT, MVT::v8f32, { 14, 21, 1, 3 } }, // vsqrtps
    { ISD::FSQRT, MVT::f64, { 14, 21, 1, 1 } },   // vsqrtsd
    { ISD::FSQRT, MVT::v2f64, { 14, 21, 1, 1 } }, // vsqrtpd
    { ISD::FSQRT, MVT::v4f64, { 28, 35, 1, 3 } }, // vsqrtpd
  };
  static const CostKindTblEntry AVX1CostTbl[] = {
    { ISD::ABS, MVT::v4i64, { 6, 8, 6, 12 } },  // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS, MVT::v8i32, { 3, 6, 4, 5 } },
    { ISD::ABS, MVT::v16i16, { 3, 6, 4, 5 } },
    { ISD::ABS, MVT::v32i8, { 3, 6, 4, 5 } },
    { ISD::BITREVERSE, MVT::v4i64, { 12 } },  // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v8i32, { 12 } },  // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v16i16, { 12 } }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v32i8, { 12 } },  // 2 x 128-bit Op + extract/insert
    { ISD::BSWAP, MVT::v4i64, { 4 } },
    { ISD::BSWAP, MVT::v8i32, { 4 } },
    { ISD::BSWAP, MVT::v16i16, { 4 } },
    { ISD::CTLZ, MVT::v4i64, { 29, 33, 49, 58 } },  // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v2i64, { 14, 24, 24, 28 } },
    { ISD::CTLZ, MVT::v8i32, { 24, 28, 39, 48 } },  // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v4i32, { 12, 20, 19, 23 } },
    { ISD::CTLZ, MVT::v16i16, { 19, 22, 29, 38 } }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v8i16, { 9, 16, 14, 18 } },
    { ISD::CTLZ, MVT::v32i8, { 14, 15, 19, 28 } },  // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v16i8, { 7, 12, 9, 13 } },
    { ISD::CTPOP, MVT::v4i64, { 14, 18, 19, 28 } }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v2i64, { 7, 14, 10, 14 } },
    { ISD::CTPOP, MVT::v8i32, { 18, 24, 27, 36 } }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v4i32, { 9, 20, 14, 18 } },
    { ISD::CTPOP, MVT::v16i16, { 16, 21, 22, 31 } },// 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v8i16, { 8, 18, 11, 15 } },
    { ISD::CTPOP, MVT::v32i8, { 13, 15, 16, 25 } }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v16i8, { 6, 12, 8, 12 } },
    { ISD::CTTZ, MVT::v4i64, { 17, 22, 24, 33 } },  // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v2i64, { 9, 19, 13, 17 } },
    { ISD::CTTZ, MVT::v8i32, { 21, 27, 32, 41 } },  // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v4i32, { 11, 24, 17, 21 } },
    { ISD::CTTZ, MVT::v16i16, { 18, 24, 27, 36 } }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v8i16, { 9, 21, 14, 18 } },
    { ISD::CTTZ, MVT::v32i8, { 15, 18, 21, 30 } },  // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v16i8, { 8, 16, 11, 15 } },
    { ISD::SADDSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT, MVT::v32i8, { 4 } },  // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v4i64, { 6, 9, 6, 12 } }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v2i64, { 3, 7, 2, 4 } },
    { ISD::SMAX, MVT::v8i32, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX, MVT::v32i8, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v4i64, { 6, 9, 6, 12 } }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v2i64, { 3, 7, 2, 3 } },
    { ISD::SMIN, MVT::v8i32, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN, MVT::v32i8, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT, MVT::v32i8, { 4 } },  // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT, MVT::v32i8, { 4 } },  // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT, MVT::v8i32, { 8 } },  // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v4i64, { 9, 10, 11, 17 } }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v2i64, { 4, 8, 5, 7 } },
    { ISD::UMAX, MVT::v8i32, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX, MVT::v32i8, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v4i64, { 9, 10, 11, 17 } }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v2i64, { 4, 8, 5, 7 } },
    { ISD::UMIN, MVT::v8i32, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN, MVT::v32i8, { 4, 6, 5, 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT, MVT::v32i8, { 4 } },  // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT, MVT::v8i32, { 6 } },  // 2 x 128-bit Op + extract/insert
    { ISD::FMAXNUM, MVT::f32, { 3 } },   // MAXSS + CMPUNORDSS + BLENDVPS
    { ISD::FMAXNUM, MVT::v4f32, { 3 } }, // MAXPS + CMPUNORDPS + BLENDVPS
    { ISD::FMAXNUM, MVT::v8f32, { 5 } }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
    { ISD::FMAXNUM, MVT::f64, { 3 } },   // MAXSD + CMPUNORDSD + BLENDVPD
    { ISD::FMAXNUM, MVT::v2f64, { 3 } }, // MAXPD + CMPUNORDPD + BLENDVPD
    { ISD::FMAXNUM, MVT::v4f64, { 5 } }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
    { ISD::FSQRT, MVT::f32, { 21, 21, 1, 1 } },   // vsqrtss
    { ISD::FSQRT, MVT::v4f32, { 21, 21, 1, 1 } }, // vsqrtps
    { ISD::FSQRT, MVT::v8f32, { 42, 42, 1, 3 } }, // vsqrtps
    { ISD::FSQRT, MVT::f64, { 27, 27, 1, 1 } },   // vsqrtsd
    { ISD::FSQRT, MVT::v2f64, { 27, 27, 1, 1 } }, // vsqrtpd
    { ISD::FSQRT, MVT::v4f64, { 54, 54, 1, 3 } }, // vsqrtpd
  };
  static const CostKindTblEntry GLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32, { 19, 20, 1, 1 } },   // sqrtss
    { ISD::FSQRT, MVT::v4f32, { 37, 41, 1, 5 } }, // sqrtps
    { ISD::FSQRT, MVT::f64, { 34, 35, 1, 1 } },   // sqrtsd
    { ISD::FSQRT, MVT::v2f64, { 67, 71, 1, 5 } }, // sqrtpd
  };

  static const CostKindTblEntry SLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32, { 20, 20, 1, 1 } },   // sqrtss
    { ISD::FSQRT, MVT::v4f32, { 40, 41, 1, 5 } }, // sqrtps
    { ISD::FSQRT, MVT::f64, { 35, 35, 1, 1 } },   // sqrtsd
    { ISD::FSQRT, MVT::v2f64, { 70, 71, 1, 5 } }, // sqrtpd
  };

  static const CostKindTblEntry SSE42CostTbl[] = {
    { ISD::USUBSAT, MVT::v4i32, { 2 } }, // pmaxud + psubd
    { ISD::UADDSAT, MVT::v4i32, { 3 } }, // not + pminud + paddd
    { ISD::FSQRT, MVT::f32, { 18, 18, 1, 1 } },   // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, { 18, 18, 1, 1 } }, // Nehalem from http://www.agner.org/
  };

  static const CostKindTblEntry SSE41CostTbl[] = {
    { ISD::ABS, MVT::v2i64, { 3, 4, 3, 5 } }, // BLENDVPD(X,PSUBQ(0,X),X)
    { ISD::SMAX, MVT::v2i64, { 3, 7, 2, 3 } },
    { ISD::SMAX, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::SMAX, MVT::v16i8, { 1, 1, 1, 1 } },
    { ISD::SMIN, MVT::v2i64, { 3, 7, 2, 3 } },
    { ISD::SMIN, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::SMIN, MVT::v16i8, { 1, 1, 1, 1 } },
    { ISD::UMAX, MVT::v2i64, { 2, 11, 6, 7 } },
    { ISD::UMAX, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::UMAX, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::UMIN, MVT::v2i64, { 2, 11, 6, 7 } },
    { ISD::UMIN, MVT::v4i32, { 1, 1, 1, 1 } },
    { ISD::UMIN, MVT::v8i16, { 1, 1, 1, 1 } },
  };
  static const CostKindTblEntry SSSE3CostTbl[] = {
    { ISD::ABS, MVT::v4i32, { 1, 2, 1, 1 } },
    { ISD::ABS, MVT::v8i16, { 1, 2, 1, 1 } },
    { ISD::ABS, MVT::v16i8, { 1, 2, 1, 1 } },
    { ISD::BITREVERSE, MVT::v2i64, { 5 } },
    { ISD::BITREVERSE, MVT::v4i32, { 5 } },
    { ISD::BITREVERSE, MVT::v8i16, { 5 } },
    { ISD::BITREVERSE, MVT::v16i8, { 5 } },
    { ISD::BSWAP, MVT::v2i64, { 1 } },
    { ISD::BSWAP, MVT::v4i32, { 1 } },
    { ISD::BSWAP, MVT::v8i16, { 1 } },
    { ISD::CTLZ, MVT::v2i64, { 18, 28, 28, 35 } },
    { ISD::CTLZ, MVT::v4i32, { 15, 20, 22, 28 } },
    { ISD::CTLZ, MVT::v8i16, { 13, 17, 16, 22 } },
    { ISD::CTLZ, MVT::v16i8, { 11, 15, 10, 16 } },
    { ISD::CTPOP, MVT::v2i64, { 13, 19, 12, 18 } },
    { ISD::CTPOP, MVT::v4i32, { 18, 24, 16, 22 } },
    { ISD::CTPOP, MVT::v8i16, { 13, 18, 14, 20 } },
    { ISD::CTPOP, MVT::v16i8, { 11, 12, 10, 16 } },
    { ISD::CTTZ, MVT::v2i64, { 13, 25, 15, 22 } },
    { ISD::CTTZ, MVT::v4i32, { 18, 26, 19, 25 } },
    { ISD::CTTZ, MVT::v8i16, { 13, 20, 17, 23 } },
    { ISD::CTTZ, MVT::v16i8, { 11, 16, 13, 19 } }
  };

  static const CostKindTblEntry SSE2CostTbl[] = {
    { ISD::ABS, MVT::v2i64, { 3, 6, 5, 5 } },
    { ISD::ABS, MVT::v4i32, { 1, 4, 4, 4 } },
    { ISD::ABS, MVT::v8i16, { 1, 2, 3, 3 } },
    { ISD::ABS, MVT::v16i8, { 1, 2, 3, 3 } },
    { ISD::BITREVERSE, MVT::v2i64, { 29 } },
    { ISD::BITREVERSE, MVT::v4i32, { 27 } },
    { ISD::BITREVERSE, MVT::v8i16, { 27 } },
    { ISD::BITREVERSE, MVT::v16i8, { 20 } },
    { ISD::BSWAP, MVT::v2i64, { 7 } },
    { ISD::BSWAP, MVT::v4i32, { 7 } },
    { ISD::BSWAP, MVT::v8i16, { 7 } },
    { ISD::CTLZ, MVT::v2i64, { 10, 45, 36, 38 } },
    { ISD::CTLZ, MVT::v4i32, { 10, 45, 38, 40 } },
    { ISD::CTLZ, MVT::v8i16, { 9, 38, 32, 34 } },
    { ISD::CTLZ, MVT::v16i8, { 8, 39, 29, 32 } },
    { ISD::CTPOP, MVT::v2i64, { 12, 26, 16, 18 } },
    { ISD::CTPOP, MVT::v4i32, { 15, 29, 21, 23 } },
    { ISD::CTPOP, MVT::v8i16, { 13, 25, 18, 20 } },
    { ISD::CTPOP, MVT::v16i8, { 10, 21, 14, 16 } },
    { ISD::CTTZ, MVT::v2i64, { 14, 28, 19, 21 } },
    { ISD::CTTZ, MVT::v4i32, { 18, 31, 24, 26 } },
    { ISD::CTTZ, MVT::v8i16, { 16, 27, 21, 23 } },
    { ISD::CTTZ, MVT::v16i8, { 13, 23, 17, 19 } },
    { ISD::SADDSAT, MVT::v8i16, { 1 } },
    { ISD::SADDSAT, MVT::v16i8, { 1 } },
    { ISD::SMAX, MVT::v2i64, { 4, 8, 15, 15 } },
    { ISD::SMAX, MVT::v4i32, { 2, 4, 5, 5 } },
    { ISD::SMAX, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::SMAX, MVT::v16i8, { 2, 4, 5, 5 } },
    { ISD::SMIN, MVT::v2i64, { 4, 8, 15, 15 } },
    { ISD::SMIN, MVT::v4i32, { 2, 4, 5, 5 } },
    { ISD::SMIN, MVT::v8i16, { 1, 1, 1, 1 } },
    { ISD::SMIN, MVT::v16i8, { 2, 4, 5, 5 } },
    { ISD::SSUBSAT, MVT::v8i16, { 1 } },
    { ISD::SSUBSAT, MVT::v16i8, { 1 } },
    { ISD::UADDSAT, MVT::v8i16, { 1 } },
    { ISD::UADDSAT, MVT::v16i8, { 1 } },
    { ISD::UMAX, MVT::v2i64, { 4, 8, 15, 15 } },
    { ISD::UMAX, MVT::v4i32, { 2, 5, 8, 8 } },
    { ISD::UMAX, MVT::v8i16, { 1, 3, 3, 3 } },
    { ISD::UMAX, MVT::v16i8, { 1, 1, 1, 1 } },
    { ISD::UMIN, MVT::v2i64, { 4, 8, 15, 15 } },
    { ISD::UMIN, MVT::v4i32, { 2, 5, 8, 8 } },
    { ISD::UMIN, MVT::v8i16, { 1, 3, 3, 3 } },
    { ISD::UMIN, MVT::v16i8, { 1, 1, 1, 1 } },
    { ISD::USUBSAT, MVT::v8i16, { 1 } },
    { ISD::USUBSAT, MVT::v16i8, { 1 } },
    { ISD::FMAXNUM, MVT::f64, { 4 } },
    { ISD::FMAXNUM, MVT::v2f64, { 4 } },
    { ISD::FSQRT, MVT::f64, { 32, 32, 1, 1 } },   // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, { 32, 32, 1, 1 } }, // Nehalem from http://www.agner.org/
  };
  static const CostKindTblEntry SSE1CostTbl[] = {
    { ISD::FMAXNUM, MVT::f32, { 4 } },
    { ISD::FMAXNUM, MVT::v4f32, { 4 } },
    { ISD::FSQRT, MVT::f32, { 28, 30, 1, 2 } },   // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, { 56, 56, 1, 2 } }, // Pentium III from http://www.agner.org/
  };

  static const CostKindTblEntry BMI64CostTbl[] = { // 64-bit targets
    { ISD::CTTZ, MVT::i64, { 1 } },
  };
  static const CostKindTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTTZ, MVT::i32, { 1 } },
    { ISD::CTTZ, MVT::i16, { 1 } },
    { ISD::CTTZ, MVT::i8, { 1 } },
  };
  static const CostKindTblEntry LZCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTLZ, MVT::i64, { 1 } },
  };
  static const CostKindTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTLZ, MVT::i32, { 1 } },
    { ISD::CTLZ, MVT::i16, { 2 } },
    { ISD::CTLZ, MVT::i8, { 2 } },
  };
  static const CostKindTblEntry POPCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTPOP, MVT::i64, { 1, 1, 1, 1 } }, // popcnt
  };
  static const CostKindTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTPOP, MVT::i32, { 1, 1, 1, 1 } }, // popcnt
    { ISD::CTPOP, MVT::i16, { 1, 1, 2, 2 } }, // popcnt(zext())
    { ISD::CTPOP, MVT::i8, { 1, 1, 2, 2 } },  // popcnt(zext())
  };
  static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ABS, MVT::i64, { 1, 2, 3, 4 } },  // SUB+CMOV
    { ISD::BITREVERSE, MVT::i64, { 14 } },
    { ISD::BSWAP, MVT::i64, { 1 } },
    { ISD::CTLZ, MVT::i64, { 4 } },  // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ_ZERO_UNDEF, MVT::i64, { 1, 1, 1, 1 } }, // BSR+XOR
    { ISD::CTTZ, MVT::i64, { 3 } },  // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ_ZERO_UNDEF, MVT::i64, { 1, 1, 1, 1 } }, // BSF
    { ISD::CTPOP, MVT::i64, { 10, 6, 19, 19 } },
    { ISD::ROTL, MVT::i64, { 2, 3, 1, 3 } },
    { ISD::ROTR, MVT::i64, { 2, 3, 1, 3 } },
    { ISD::FSHL, MVT::i64, { 4, 4, 1, 4 } },
    { ISD::SMAX, MVT::i64, { 1, 3, 2, 3 } },
    { ISD::SMIN, MVT::i64, { 1, 3, 2, 3 } },
    { ISD::UMAX, MVT::i64, { 1, 3, 2, 3 } },
    { ISD::UMIN, MVT::i64, { 1, 3, 2, 3 } },
    { ISD::SADDO, MVT::i64, { 1 } },
    { ISD::UADDO, MVT::i64, { 1 } },
    { ISD::UMULO, MVT::i64, { 2 } }, // mulq + seto
  };

  static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ABS, MVT::i32, { 1, 2, 3, 4 } }, // SUB+XOR+SRA or SUB+CMOV
    { ISD::ABS, MVT::i16, { 2, 2, 3, 4 } }, // SUB+XOR+SRA or SUB+CMOV
    { ISD::ABS, MVT::i8, { 2, 4, 4, 4 } },  // SUB+XOR+SRA
    { ISD::BITREVERSE, MVT::i32, { 14 } },
    { ISD::BITREVERSE, MVT::i16, { 14 } },
    { ISD::BITREVERSE, MVT::i8, { 11 } },
    { ISD::BSWAP, MVT::i32, { 1 } },
    { ISD::BSWAP, MVT::i16, { 1 } }, // ROL
    { ISD::CTLZ, MVT::i32, { 4 } },  // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ, MVT::i16, { 4 } },  // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ, MVT::i8, { 4 } },   // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ_ZERO_UNDEF, MVT::i32, { 1, 1, 1, 1 } }, // BSR+XOR
    { ISD::CTLZ_ZERO_UNDEF, MVT::i16, { 2, 2, 3, 3 } }, // BSR+XOR
    { ISD::CTLZ_ZERO_UNDEF, MVT::i8, { 2, 2, 3, 3 } },  // BSR+XOR
    { ISD::CTTZ, MVT::i32, { 3 } },  // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ, MVT::i16, { 3 } },  // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ, MVT::i8, { 3 } },   // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ_ZERO_UNDEF, MVT::i32, { 1, 1, 1, 1 } }, // BSF
    { ISD::CTTZ_ZERO_UNDEF, MVT::i16, { 2, 2, 1, 1 } }, // BSF
    { ISD::CTTZ_ZERO_UNDEF, MVT::i8, { 2, 2, 1, 1 } },  // BSF
    { ISD::CTPOP, MVT::i32, { 8, 7, 15, 15 } },
    { ISD::CTPOP, MVT::i16, { 9, 8, 17, 17 } },
    { ISD::CTPOP, MVT::i8, { 7, 6, 13, 13 } },
    { ISD::ROTL, MVT::i32, { 2, 3, 1, 3 } },
    { ISD::ROTL, MVT::i16, { 2, 3, 1, 3 } },
    { ISD::ROTL, MVT::i8, { 2, 3, 1, 3 } },
    { ISD::ROTR, MVT::i32, { 2, 3, 1, 3 } },
    { ISD::ROTR, MVT::i16, { 2, 3, 1, 3 } },
    { ISD::ROTR, MVT::i8, { 2, 3, 1, 3 } },
    { ISD::FSHL, MVT::i32, { 4, 4, 1, 4 } },
    { ISD::FSHL, MVT::i16, { 4, 4, 2, 5 } },
    { ISD::FSHL, MVT::i8, { 4, 4, 2, 5 } },
    { ISD::SMAX, MVT::i32, { 1, 2, 2, 3 } },
    { ISD::SMAX, MVT::i16, { 1, 4, 2, 4 } },
    { ISD::SMAX, MVT::i8, { 1, 4, 2, 4 } },
    { ISD::SMIN, MVT::i32, { 1, 2, 2, 3 } },
    { ISD::SMIN, MVT::i16, { 1, 4, 2, 4 } },
    { ISD::SMIN, MVT::i8, { 1, 4, 2, 4 } },
    { ISD::UMAX, MVT::i32, { 1, 2, 2, 3 } },
    { ISD::UMAX, MVT::i16, { 1, 4, 2, 4 } },
    { ISD::UMAX, MVT::i8, { 1, 4, 2, 4 } },
    { ISD::UMIN, MVT::i32, { 1, 2, 2, 3 } },
    { ISD::UMIN, MVT::i16, { 1, 4, 2, 4 } },
    { ISD::UMIN, MVT::i8, { 1, 4, 2, 4 } },
    { ISD::SADDO, MVT::i32, { 1 } },
    { ISD::SADDO, MVT::i16, { 1 } },
    { ISD::SADDO, MVT::i8, { 1 } },
    { ISD::UADDO, MVT::i32, { 1 } },
    { ISD::UADDO, MVT::i16, { 1 } },
    { ISD::UADDO, MVT::i8, { 1 } },
    { ISD::UMULO, MVT::i32, { 2 } }, // mul + seto
    { ISD::UMULO, MVT::i16, { 2 } },
    { ISD::UMULO, MVT::i8, { 2 } },
  };
  Type *RetTy = ICA.getReturnType();
  Type *OpTy = RetTy;
  Intrinsic::ID IID = ICA.getID();
  unsigned ISD = ISD::DELETED_NODE;
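
  // Map the intrinsic ID onto the ISD opcode used as the key of the cost
  // tables above.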
  switch (IID) {
  default:
    break;
  case Intrinsic::abs:
    ISD = ISD::ABS;
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::fshl:
    ISD = ISD::FSHL;
    if (!ICA.isTypeBasedOnly()) {
      const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
      if (Args[0] == Args[1])
        ISD = ISD::ROTL;
    }
    break;
  case Intrinsic::fshr:
    // FSHR has the same costs so don't duplicate.
    ISD = ISD::FSHL;
    if (!ICA.isTypeBasedOnly()) {
      const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
      if (Args[0] == Args[1])
        ISD = ISD::ROTR;
    }
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
    // FMINNUM has the same costs so don't duplicate.
    ISD = ISD::FMAXNUM;
    break;
  case Intrinsic::sadd_sat:
    ISD = ISD::SADDSAT;
    break;
  case Intrinsic::smax:
    ISD = ISD::SMAX;
    break;
  case Intrinsic::smin:
    ISD = ISD::SMIN;
    break;
  case Intrinsic::ssub_sat:
    ISD = ISD::SSUBSAT;
    break;
  case Intrinsic::uadd_sat:
    ISD = ISD::UADDSAT;
    break;
  case Intrinsic::umax:
    ISD = ISD::UMAX;
    break;
  case Intrinsic::umin:
    ISD = ISD::UMIN;
    break;
  case Intrinsic::usub_sat:
    ISD = ISD::USUBSAT;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // SSUBO has the same costs so don't duplicate.
    ISD = ISD::SADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow:
    // USUBO has the same costs so don't duplicate.
    ISD = ISD::UADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // SMULO has the same costs so don't duplicate.
    ISD = ISD::UMULO;
    OpTy = RetTy->getContainedType(0);
    break;
  }
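
  // Only intrinsics that were mapped to an ISD opcode above can use the cost
  // tables.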
  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(OpTy);
    MVT MTy = LT.second;

    // Attempt to look up the cost.
    if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
        MTy.isVector()) {
      // With GFNI the code is very similar for all types: vXi8 only needs a
      // GF2P8AFFINEQB, while other element types also need a PSHUFB to reverse
      // the bytes within each element.
      unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
      // If the vector type isn't natively supported at this width we have to
      // split it, doubling the GF2P8AFFINEQB/PSHUFB count, plus an extract and
      // an insert.
      if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
            (ST->hasBWI() && MTy.is512BitVector())))
        Cost = Cost * 2 + 2;

      return LT.first * Cost;
    }

    // Without BMI/LZCNT see if we're only looking for a *_ZERO_UNDEF cost.
    if (((ISD == ISD::CTTZ && !ST->hasBMI()) ||
         (ISD == ISD::CTLZ && !ST->hasLZCNT())) &&
        !MTy.isVector() && !ICA.isTypeBasedOnly()) {
      const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
      if (auto *Cst = dyn_cast<ConstantInt>(Args[1]))
        if (Cst->isAllOnesValue())
          ISD = ISD == ISD::CTTZ ? ISD::CTTZ_ZERO_UNDEF : ISD::CTLZ_ZERO_UNDEF;
    }

    // FSQRT is a single instruction.
    if (ISD == ISD::FSQRT && CostKind == TTI::TCK_CodeSize)
      return LT.first;

    auto adjustTableCost = [](int ISD, unsigned Cost,
                              InstructionCost LegalizationCost,
                              FastMathFlags FMF) {
      // If there are no NaNs to deal with, then these are reduced to a
      // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
      // assume is used in the non-fast case.
      if (ISD == ISD::FMAXNUM || ISD == ISD::FMINNUM) {
        if (FMF.noNaNs())
          return LegalizationCost * 1;
      }
      return LegalizationCost * (int)Cost;
    };
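
    // Walk the cost tables from the most specific subtarget feature set down
    // to SSE1; the first entry matching this ISD opcode, type and cost kind
    // wins.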
    if (ST->useGLMDivSqrtCosts())
      if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->useSLMArithCosts())
      if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasVBMI2())
      if (const auto *Entry = CostTableLookup(AVX512VBMI2CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasBITALG())
      if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasVPOPCNTDQ())
      if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasCDI())
      if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
        if (auto KindCost = Entry->Cost[CostKind])
          return adjustTableCost(Entry->ISD, *KindCost, LT.first,
                                 ICA.getFlags());

    if (ST->hasBMI()) {
  3732. if (ST->is64Bit())
  3733. if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
  3734. if (auto KindCost = Entry->Cost[CostKind])
  3735. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3736. ICA.getFlags());
  3737. if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
  3738. if (auto KindCost = Entry->Cost[CostKind])
  3739. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3740. ICA.getFlags());
  3741. }
  3742. if (ST->hasLZCNT()) {
  3743. if (ST->is64Bit())
  3744. if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
  3745. if (auto KindCost = Entry->Cost[CostKind])
  3746. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3747. ICA.getFlags());
  3748. if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
  3749. if (auto KindCost = Entry->Cost[CostKind])
  3750. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3751. ICA.getFlags());
  3752. }
  3753. if (ST->hasPOPCNT()) {
  3754. if (ST->is64Bit())
  3755. if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
  3756. if (auto KindCost = Entry->Cost[CostKind])
  3757. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3758. ICA.getFlags());
  3759. if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
  3760. if (auto KindCost = Entry->Cost[CostKind])
  3761. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3762. ICA.getFlags());
  3763. }
  3764. if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
  3765. if (const Instruction *II = ICA.getInst()) {
  3766. if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
  3767. return TTI::TCC_Free;
  3768. if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
  3769. if (LI->hasOneUse())
  3770. return TTI::TCC_Free;
  3771. }
  3772. }
  3773. }
  3774. if (ST->is64Bit())
  3775. if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
  3776. if (auto KindCost = Entry->Cost[CostKind])
  3777. return adjustTableCost(Entry->ISD, *KindCost, LT.first,
  3778. ICA.getFlags());
  3779. if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
  3780. if (auto KindCost = Entry->Cost[CostKind])
  3781. return adjustTableCost(Entry->ISD, *KindCost, LT.first, ICA.getFlags());
  3782. }
  3783. return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  3784. }
  3785. InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
  3786. TTI::TargetCostKind CostKind,
  3787. unsigned Index, Value *Op0,
  3788. Value *Op1) {
  3789. static const CostTblEntry SLMCostTbl[] = {
  3790. { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
  3791. { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
  3792. { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
  3793. { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
  3794. };
  3795. assert(Val->isVectorTy() && "This must be a vector type");
  3796. Type *ScalarType = Val->getScalarType();
  3797. InstructionCost RegisterFileMoveCost = 0;
  3798. // Non-immediate extraction/insertion can be handled as a sequence of
  3799. // aliased loads+stores via the stack.
  3800. if (Index == -1U && (Opcode == Instruction::ExtractElement ||
  3801. Opcode == Instruction::InsertElement)) {
  3802. // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
  3803. // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
  3804. // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
  3805. assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
  3806. Align VecAlign = DL.getPrefTypeAlign(Val);
  3807. Align SclAlign = DL.getPrefTypeAlign(ScalarType);
  3808. // Extract - store vector to stack, load scalar.
  3809. if (Opcode == Instruction::ExtractElement) {
  3810. return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, CostKind) +
  3811. getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
  3812. CostKind);
  3813. }
  3814. // Insert - store vector to stack, store scalar, load vector.
  3815. if (Opcode == Instruction::InsertElement) {
  3816. return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, CostKind) +
  3817. getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
  3818. CostKind) +
  3819. getMemoryOpCost(Instruction::Load, Val, VecAlign, 0, CostKind);
  3820. }
  3821. }
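// For example, a variable-index extract from a <4 x float> is costed as a
// 128-bit vector store to the stack plus an f32 scalar load, while a
// variable-index insert is a vector store, a scalar store and a vector
// reload.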
  3822. if (Index != -1U && (Opcode == Instruction::ExtractElement ||
  3823. Opcode == Instruction::InsertElement)) {
// Extraction of vXi1 elements is now efficiently handled by MOVMSK.
  3825. if (Opcode == Instruction::ExtractElement &&
  3826. ScalarType->getScalarSizeInBits() == 1 &&
  3827. cast<FixedVectorType>(Val)->getNumElements() > 1)
  3828. return 1;
  3829. // Legalize the type.
  3830. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
  3831. // This type is legalized to a scalar type.
  3832. if (!LT.second.isVector())
  3833. return 0;
  3834. // The type may be split. Normalize the index to the new type.
  3835. unsigned SizeInBits = LT.second.getSizeInBits();
  3836. unsigned NumElts = LT.second.getVectorNumElements();
  3837. unsigned SubNumElts = NumElts;
  3838. Index = Index % NumElts;
  3839. // For >128-bit vectors, we need to extract higher 128-bit subvectors.
  3840. // For inserts, we also need to insert the subvector back.
  3841. if (SizeInBits > 128) {
  3842. assert((SizeInBits % 128) == 0 && "Illegal vector");
  3843. unsigned NumSubVecs = SizeInBits / 128;
  3844. SubNumElts = NumElts / NumSubVecs;
  3845. if (SubNumElts <= Index) {
  3846. RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
  3847. Index %= SubNumElts;
  3848. }
  3849. }
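// For example, extracting element 5 from a type legalized to v8i32: the
// 256-bit value is split into two 128-bit halves (SubNumElts = 4), so one
// subvector move is added to RegisterFileMoveCost (two for an insert) and
// the index becomes 5 % 4 == 1 within the upper half.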
  3850. MVT MScalarTy = LT.second.getScalarType();
  3851. auto IsCheapPInsrPExtrInsertPS = [&]() {
  3852. // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
  3853. // Also, assume insertps is relatively cheap on all >= SSE41 targets.
  3854. return (MScalarTy == MVT::i16 && ST->hasSSE2()) ||
  3855. (MScalarTy.isInteger() && ST->hasSSE41()) ||
  3856. (MScalarTy == MVT::f32 && ST->hasSSE41() &&
  3857. Opcode == Instruction::InsertElement);
  3858. };
  3859. if (Index == 0) {
  3860. // Floating point scalars are already located in index #0.
// Many insertions to #0 can fold away for scalar fp-ops, so assume this is
// true for all of them.
  3863. if (ScalarType->isFloatingPointTy())
  3864. return RegisterFileMoveCost;
  3865. if (Opcode == Instruction::InsertElement &&
  3866. isa_and_nonnull<UndefValue>(Op0)) {
  3867. // Consider the gather cost to be cheap.
  3868. if (isa_and_nonnull<LoadInst>(Op1))
  3869. return RegisterFileMoveCost;
  3870. if (!IsCheapPInsrPExtrInsertPS()) {
  3871. // mov constant-to-GPR + movd/movq GPR -> XMM.
  3872. if (isa_and_nonnull<Constant>(Op1) && Op1->getType()->isIntegerTy())
  3873. return 2 + RegisterFileMoveCost;
  3874. // Assume movd/movq GPR -> XMM is relatively cheap on all targets.
  3875. return 1 + RegisterFileMoveCost;
  3876. }
  3877. }
  3878. // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
  3879. if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
  3880. return 1 + RegisterFileMoveCost;
  3881. }
  3882. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  3883. assert(ISD && "Unexpected vector opcode");
  3884. if (ST->useSLMArithCosts())
  3885. if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
  3886. return Entry->Cost + RegisterFileMoveCost;
  3887. // Consider cheap cases.
  3888. if (IsCheapPInsrPExtrInsertPS())
  3889. return 1 + RegisterFileMoveCost;
  3890. // For extractions we just need to shuffle the element to index 0, which
  3891. // should be very cheap (assume cost = 1). For insertions we need to shuffle
// the element to its destination. In both cases we must handle the
  3893. // subvector move(s).
  3894. // If the vector type is already less than 128-bits then don't reduce it.
  3895. // TODO: Under what circumstances should we shuffle using the full width?
  3896. InstructionCost ShuffleCost = 1;
  3897. if (Opcode == Instruction::InsertElement) {
  3898. auto *SubTy = cast<VectorType>(Val);
  3899. EVT VT = TLI->getValueType(DL, Val);
  3900. if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
  3901. SubTy = FixedVectorType::get(ScalarType, SubNumElts);
  3902. ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, std::nullopt,
  3903. CostKind, 0, SubTy);
  3904. }
  3905. int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
  3906. return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
  3907. }
  3908. // Add to the base cost if we know that the extracted element of a vector is
  3909. // destined to be moved to and used in the integer register file.
  3910. if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
  3911. RegisterFileMoveCost += 1;
  3912. return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1) +
  3913. RegisterFileMoveCost;
  3914. }
  3915. InstructionCost
  3916. X86TTIImpl::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
  3917. bool Insert, bool Extract,
  3918. TTI::TargetCostKind CostKind) {
  3919. assert(DemandedElts.getBitWidth() ==
  3920. cast<FixedVectorType>(Ty)->getNumElements() &&
  3921. "Vector size mismatch");
  3922. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  3923. MVT MScalarTy = LT.second.getScalarType();
  3924. unsigned LegalVectorBitWidth = LT.second.getSizeInBits();
  3925. InstructionCost Cost = 0;
  3926. constexpr unsigned LaneBitWidth = 128;
  3927. assert((LegalVectorBitWidth < LaneBitWidth ||
  3928. (LegalVectorBitWidth % LaneBitWidth) == 0) &&
  3929. "Illegal vector");
  3930. const int NumLegalVectors = *LT.first.getValue();
  3931. assert(NumLegalVectors >= 0 && "Negative cost!");
// For insertions, an ISD::BUILD_VECTOR style vector initialization can be much
// cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
  3934. if (Insert) {
  3935. if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
  3936. (MScalarTy.isInteger() && ST->hasSSE41()) ||
  3937. (MScalarTy == MVT::f32 && ST->hasSSE41())) {
// For types we can insert directly, insertion into 128-bit subvectors is
  3939. // cheap, followed by a cheap chain of concatenations.
  3940. if (LegalVectorBitWidth <= LaneBitWidth) {
  3941. Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert,
  3942. /*Extract*/ false, CostKind);
  3943. } else {
// In each 128-bit lane, if at least one index is demanded but not all
// indices are demanded and this 128-bit lane is not the first 128-bit lane of
// the legalized vector, then this 128-bit lane needs an extracti128; if in
// each 128-bit lane there is at least one demanded index, this 128-bit lane
// needs an inserti128.
// The following cases will help you build a better understanding:
// Assume we insert several elements into a v8i32 vector with AVX2:
// Case#1: inserting into index 1 needs vpinsrd + inserti128.
// Case#2: inserting into index 5 needs extracti128 + vpinsrd +
// inserti128.
// Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
  3955. assert((LegalVectorBitWidth % LaneBitWidth) == 0 && "Illegal vector");
  3956. unsigned NumLegalLanes = LegalVectorBitWidth / LaneBitWidth;
  3957. unsigned NumLanesTotal = NumLegalLanes * NumLegalVectors;
  3958. unsigned NumLegalElts =
  3959. LT.second.getVectorNumElements() * NumLegalVectors;
  3960. assert(NumLegalElts >= DemandedElts.getBitWidth() &&
  3961. "Vector has been legalized to smaller element count");
  3962. assert((NumLegalElts % NumLanesTotal) == 0 &&
  3963. "Unexpected elts per lane");
  3964. unsigned NumEltsPerLane = NumLegalElts / NumLanesTotal;
  3965. APInt WidenedDemandedElts = DemandedElts.zext(NumLegalElts);
  3966. auto *LaneTy =
  3967. FixedVectorType::get(Ty->getElementType(), NumEltsPerLane);
  3968. for (unsigned I = 0; I != NumLanesTotal; ++I) {
  3969. APInt LaneEltMask = WidenedDemandedElts.extractBits(
  3970. NumEltsPerLane, NumEltsPerLane * I);
  3971. if (LaneEltMask.isNullValue())
  3972. continue;
  3973. // FIXME: we don't need to extract if all non-demanded elements
  3974. // are legalization-inserted padding.
  3975. if (!LaneEltMask.isAllOnes())
  3976. Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
  3977. CostKind, I * NumEltsPerLane, LaneTy);
  3978. Cost += BaseT::getScalarizationOverhead(LaneTy, LaneEltMask, Insert,
  3979. /*Extract*/ false, CostKind);
  3980. }
  3981. APInt AffectedLanes =
  3982. APIntOps::ScaleBitMask(WidenedDemandedElts, NumLanesTotal);
  3983. APInt FullyAffectedLegalVectors = APIntOps::ScaleBitMask(
  3984. AffectedLanes, NumLegalVectors, /*MatchAllBits=*/true);
  3985. for (int LegalVec = 0; LegalVec != NumLegalVectors; ++LegalVec) {
  3986. for (unsigned Lane = 0; Lane != NumLegalLanes; ++Lane) {
  3987. unsigned I = NumLegalLanes * LegalVec + Lane;
  3988. // No need to insert unaffected lane; or lane 0 of each legal vector
  3989. // iff ALL lanes of that vector were affected and will be inserted.
  3990. if (!AffectedLanes[I] ||
  3991. (Lane == 0 && FullyAffectedLegalVectors[LegalVec]))
  3992. continue;
  3993. Cost += getShuffleCost(TTI::SK_InsertSubvector, Ty, std::nullopt,
  3994. CostKind, I * NumEltsPerLane, LaneTy);
  3995. }
  3996. }
  3997. }
  3998. } else if (LT.second.isVector()) {
  3999. // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
  4000. // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
  4001. // series of UNPCK followed by CONCAT_VECTORS - all of these can be
  4002. // considered cheap.
  4003. if (Ty->isIntOrIntVectorTy())
  4004. Cost += DemandedElts.countPopulation();
  4005. // Get the smaller of the legalized or original pow2-extended number of
  4006. // vector elements, which represents the number of unpacks we'll end up
  4007. // performing.
  4008. unsigned NumElts = LT.second.getVectorNumElements();
  4009. unsigned Pow2Elts =
  4010. PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
  4011. Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
  4012. }
  4013. }
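// E.g. Case#2 above (inserting into index 5 of a v8i32 with AVX2) ends up
// costed as one SK_ExtractSubvector for the upper lane, the scalarization of
// that lane's single demanded element, and one SK_InsertSubvector to put the
// lane back - mirroring the extracti128 + vpinsrd + inserti128 sequence.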
  4014. if (Extract) {
  4015. // vXi1 can be efficiently extracted with MOVMSK.
  4016. // TODO: AVX512 predicate mask handling.
  4017. // NOTE: This doesn't work well for roundtrip scalarization.
  4018. if (!Insert && Ty->getScalarSizeInBits() == 1 && !ST->hasAVX512()) {
  4019. unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
  4020. unsigned MaxElts = ST->hasAVX2() ? 32 : 16;
  4021. unsigned MOVMSKCost = (NumElts + MaxElts - 1) / MaxElts;
  4022. return MOVMSKCost;
  4023. }
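// E.g. extracting all elements of a v64i1 needs ceil(64 / 32) = 2 MOVMSK ops
// with AVX2, or ceil(64 / 16) = 4 with only SSE2.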
  4024. if (LT.second.isVector()) {
  4025. unsigned NumLegalElts =
  4026. LT.second.getVectorNumElements() * NumLegalVectors;
  4027. assert(NumLegalElts >= DemandedElts.getBitWidth() &&
  4028. "Vector has been legalized to smaller element count");
  4029. // If we're extracting elements from a 128-bit subvector lane,
  4030. // we only need to extract each lane once, not for every element.
  4031. if (LegalVectorBitWidth > LaneBitWidth) {
  4032. unsigned NumLegalLanes = LegalVectorBitWidth / LaneBitWidth;
  4033. unsigned NumLanesTotal = NumLegalLanes * NumLegalVectors;
  4034. assert((NumLegalElts % NumLanesTotal) == 0 &&
  4035. "Unexpected elts per lane");
  4036. unsigned NumEltsPerLane = NumLegalElts / NumLanesTotal;
  4037. // Add cost for each demanded 128-bit subvector extraction.
  4038. // Luckily this is a lot easier than for insertion.
  4039. APInt WidenedDemandedElts = DemandedElts.zext(NumLegalElts);
  4040. auto *LaneTy =
  4041. FixedVectorType::get(Ty->getElementType(), NumEltsPerLane);
  4042. for (unsigned I = 0; I != NumLanesTotal; ++I) {
  4043. APInt LaneEltMask = WidenedDemandedElts.extractBits(
  4044. NumEltsPerLane, I * NumEltsPerLane);
  4045. if (LaneEltMask.isNullValue())
  4046. continue;
  4047. Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
  4048. CostKind, I * NumEltsPerLane, LaneTy);
  4049. Cost += BaseT::getScalarizationOverhead(
  4050. LaneTy, LaneEltMask, /*Insert*/ false, Extract, CostKind);
  4051. }
  4052. return Cost;
  4053. }
  4054. }
  4055. // Fallback to default extraction.
  4056. Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ false,
  4057. Extract, CostKind);
  4058. }
  4059. return Cost;
  4060. }
  4061. InstructionCost
  4062. X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
  4063. int VF, const APInt &DemandedDstElts,
  4064. TTI::TargetCostKind CostKind) {
  4065. const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy);
  4066. // We don't differentiate element types here, only element bit width.
  4067. EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits);
  4068. auto bailout = [&]() {
  4069. return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
  4070. DemandedDstElts, CostKind);
  4071. };
  4072. // For now, only deal with AVX512 cases.
  4073. if (!ST->hasAVX512())
  4074. return bailout();
  4075. // Do we have a native shuffle for this element type, or should we promote?
  4076. unsigned PromEltTyBits = EltTyBits;
  4077. switch (EltTyBits) {
  4078. case 32:
  4079. case 64:
  4080. break; // AVX512F.
  4081. case 16:
  4082. if (!ST->hasBWI())
  4083. PromEltTyBits = 32; // promote to i32, AVX512F.
  4084. break; // AVX512BW
  4085. case 8:
  4086. if (!ST->hasVBMI())
  4087. PromEltTyBits = 32; // promote to i32, AVX512F.
  4088. break; // AVX512VBMI
  4089. case 1:
  4090. // There is no support for shuffling i1 elements. We *must* promote.
  4091. if (ST->hasBWI()) {
  4092. if (ST->hasVBMI())
  4093. PromEltTyBits = 8; // promote to i8, AVX512VBMI.
  4094. else
  4095. PromEltTyBits = 16; // promote to i16, AVX512BW.
  4096. break;
  4097. }
  4098. PromEltTyBits = 32; // promote to i32, AVX512F.
  4099. break;
  4100. default:
  4101. return bailout();
  4102. }
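// For example, replicating i1 elements is done via i8 shuffles on
// AVX512VBMI targets, via i16 shuffles with AVX512BW, and via i32 shuffles
// with plain AVX512F; i8 elements likewise need promotion to i32 unless
// AVX512VBMI is available.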
  4103. auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits);
  4104. auto *SrcVecTy = FixedVectorType::get(EltTy, VF);
  4105. auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF);
  4106. int NumDstElements = VF * ReplicationFactor;
  4107. auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements);
  4108. auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements);
  4109. // Legalize the types.
  4110. MVT LegalSrcVecTy = getTypeLegalizationCost(SrcVecTy).second;
  4111. MVT LegalPromSrcVecTy = getTypeLegalizationCost(PromSrcVecTy).second;
  4112. MVT LegalPromDstVecTy = getTypeLegalizationCost(PromDstVecTy).second;
  4113. MVT LegalDstVecTy = getTypeLegalizationCost(DstVecTy).second;
  4114. // They should have legalized into vector types.
  4115. if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
  4116. !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
  4117. return bailout();
  4118. if (PromEltTyBits != EltTyBits) {
// If we have to perform the shuffle with a wider element type than our data
// type, then we will first need to anyext (we don't care about the new bits)
// the source elements, and then truncate the Dst elements.
  4122. InstructionCost PromotionCost;
  4123. PromotionCost += getCastInstrCost(
  4124. Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
  4125. TargetTransformInfo::CastContextHint::None, CostKind);
  4126. PromotionCost +=
  4127. getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
  4128. /*Src=*/PromDstVecTy,
  4129. TargetTransformInfo::CastContextHint::None, CostKind);
  4130. return PromotionCost + getReplicationShuffleCost(PromEltTy,
  4131. ReplicationFactor, VF,
  4132. DemandedDstElts, CostKind);
  4133. }
  4134. assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
  4135. LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
  4136. "We expect that the legalization doesn't affect the element width, "
  4137. "doesn't coalesce/split elements.");
  4138. unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
  4139. unsigned NumDstVectors =
  4140. divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
  4141. auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
  4142. // Not all the produced Dst elements may be demanded. In our case,
  4143. // given that a single Dst vector is formed by a single shuffle,
// if none of the elements that would form a single Dst vector are demanded,
// then we won't need to do that shuffle, so adjust the cost accordingly.
  4146. APInt DemandedDstVectors = APIntOps::ScaleBitMask(
  4147. DemandedDstElts.zext(NumDstVectors * NumEltsPerDstVec), NumDstVectors);
  4148. unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
  4149. InstructionCost SingleShuffleCost = getShuffleCost(
  4150. TTI::SK_PermuteSingleSrc, SingleDstVecTy, /*Mask=*/std::nullopt, CostKind,
  4151. /*Index=*/0, /*SubTp=*/nullptr);
  4152. return NumDstVectorsDemanded * SingleShuffleCost;
  4153. }
  4154. InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
  4155. MaybeAlign Alignment,
  4156. unsigned AddressSpace,
  4157. TTI::TargetCostKind CostKind,
  4158. TTI::OperandValueInfo OpInfo,
  4159. const Instruction *I) {
  4160. // TODO: Handle other cost kinds.
  4161. if (CostKind != TTI::TCK_RecipThroughput) {
  4162. if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
  4163. // Store instruction with index and scale costs 2 Uops.
  4164. // Check the preceding GEP to identify non-const indices.
  4165. if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
  4166. if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
  4167. return TTI::TCC_Basic * 2;
  4168. }
  4169. }
  4170. return TTI::TCC_Basic;
  4171. }
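// E.g. for TCK_CodeSize or TCK_Latency a plain load/store is TCC_Basic, but
// a store whose address GEP has any non-constant index is modeled as
// TCC_Basic * 2.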
  4172. assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
  4173. "Invalid Opcode");
  4174. // Type legalization can't handle structs
  4175. if (TLI->getValueType(DL, Src, true) == MVT::Other)
  4176. return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
  4177. CostKind);
  4178. // Legalize the type.
  4179. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
  4180. auto *VTy = dyn_cast<FixedVectorType>(Src);
  4181. InstructionCost Cost = 0;
  4182. // Add a cost for constant load to vector.
  4183. if (Opcode == Instruction::Store && OpInfo.isConstant())
  4184. Cost += getMemoryOpCost(Instruction::Load, Src, DL.getABITypeAlign(Src),
  4185. /*AddressSpace=*/0, CostKind);
  4186. // Handle the simple case of non-vectors.
  4187. // NOTE: this assumes that legalization never creates vector from scalars!
  4188. if (!VTy || !LT.second.isVector()) {
  4189. // Each load/store unit costs 1.
  4190. return (LT.second.isFloatingPoint() ? Cost : 0) + LT.first * 1;
  4191. }
  4192. bool IsLoad = Opcode == Instruction::Load;
  4193. Type *EltTy = VTy->getElementType();
  4194. const int EltTyBits = DL.getTypeSizeInBits(EltTy);
  4195. // Source of truth: how many elements were there in the original IR vector?
  4196. const unsigned SrcNumElt = VTy->getNumElements();
  4197. // How far have we gotten?
  4198. int NumEltRemaining = SrcNumElt;
  4199. // Note that we intentionally capture by-reference, NumEltRemaining changes.
  4200. auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
  4201. const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
  4202. // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
  4203. const unsigned XMMBits = 128;
  4204. if (XMMBits % EltTyBits != 0)
  4205. // Vector size must be a multiple of the element size. I.e. no padding.
  4206. return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
  4207. CostKind);
  4208. const int NumEltPerXMM = XMMBits / EltTyBits;
  4209. auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
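// The loop below greedily covers the vector with the widest op that still
// fits, halving the op width when the remainder no longer fills it. E.g. a
// <3 x i32> store is modeled as one 8-byte op for elements 0-1 plus one
// 4-byte op for element 2 (the latter also paying to extract that element
// from the XMM register).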
  4210. for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
  4211. NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
  4212. // How many elements would a single op deal with at once?
  4213. if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
  4214. // Vector size must be a multiple of the element size. I.e. no padding.
  4215. return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
  4216. CostKind);
  4217. int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
  4218. assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
  4219. assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
  4220. (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
  4221. "Unless we haven't halved the op size yet, "
  4222. "we have less than two op's sized units of work left.");
  4223. auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
  4224. ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
  4225. : XMMVecTy;
  4226. assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
  4227. "After halving sizes, the vector elt count is no longer a multiple "
  4228. "of number of elements per operation?");
  4229. auto *CoalescedVecTy =
  4230. CurrNumEltPerOp == 1
  4231. ? CurrVecTy
  4232. : FixedVectorType::get(
  4233. IntegerType::get(Src->getContext(),
  4234. EltTyBits * CurrNumEltPerOp),
  4235. CurrVecTy->getNumElements() / CurrNumEltPerOp);
  4236. assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
  4237. DL.getTypeSizeInBits(CurrVecTy) &&
  4238. "coalesciing elements doesn't change vector width.");
  4239. while (NumEltRemaining > 0) {
assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
  4241. // Can we use this vector size, as per the remaining element count?
  4242. // Iff the vector is naturally aligned, we can do a wide load regardless.
  4243. if (NumEltRemaining < CurrNumEltPerOp &&
  4244. (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
  4245. CurrOpSizeBytes != 1)
break; // Try a smaller vector size.
  4247. bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
  4248. // If we have fully processed the previous reg, we need to replenish it.
  4249. if (SubVecEltsLeft == 0) {
  4250. SubVecEltsLeft += CurrVecTy->getNumElements();
  4251. // And that's free only for the 0'th subvector of a legalized vector.
  4252. if (!Is0thSubVec)
  4253. Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
  4254. : TTI::ShuffleKind::SK_ExtractSubvector,
  4255. VTy, std::nullopt, CostKind, NumEltDone(),
  4256. CurrVecTy);
  4257. }
  4258. // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
  4259. // for smaller widths (32/16/8) we have to insert/extract them separately.
// Again, it's free for the 0'th subreg (if the op is 32/64 bit wide;
// let's pretend that this also holds for 16/8 bit wide ops...)
  4262. if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
  4263. int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
  4264. assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
  4265. int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
  4266. APInt DemandedElts =
  4267. APInt::getBitsSet(CoalescedVecTy->getNumElements(),
  4268. CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
  4269. assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
  4270. Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
  4271. !IsLoad, CostKind);
  4272. }
  4273. // This isn't exactly right. We're using slow unaligned 32-byte accesses
  4274. // as a proxy for a double-pumped AVX memory interface such as on
  4275. // Sandybridge.
  4276. if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
  4277. Cost += 2;
  4278. else
  4279. Cost += 1;
  4280. SubVecEltsLeft -= CurrNumEltPerOp;
  4281. NumEltRemaining -= CurrNumEltPerOp;
  4282. Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
  4283. }
  4284. }
  4285. assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
  4286. return Cost;
  4287. }
  4288. InstructionCost
  4289. X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
  4290. unsigned AddressSpace,
  4291. TTI::TargetCostKind CostKind) {
  4292. bool IsLoad = (Instruction::Load == Opcode);
  4293. bool IsStore = (Instruction::Store == Opcode);
  4294. auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
  4295. if (!SrcVTy)
// To calculate the scalar cost, take the regular memory op cost without the mask.
  4297. return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
  4298. unsigned NumElem = SrcVTy->getNumElements();
  4299. auto *MaskTy =
  4300. FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  4301. if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
  4302. (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
  4303. // Scalarization
  4304. APInt DemandedElts = APInt::getAllOnes(NumElem);
  4305. InstructionCost MaskSplitCost = getScalarizationOverhead(
  4306. MaskTy, DemandedElts, /*Insert*/ false, /*Extract*/ true, CostKind);
  4307. InstructionCost ScalarCompareCost = getCmpSelInstrCost(
  4308. Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
  4309. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  4310. InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
  4311. InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
  4312. InstructionCost ValueSplitCost = getScalarizationOverhead(
  4313. SrcVTy, DemandedElts, IsLoad, IsStore, CostKind);
  4314. InstructionCost MemopCost =
  4315. NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
  4316. Alignment, AddressSpace, CostKind);
  4317. return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  4318. }
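// E.g. an illegal masked store of v16i32 (no AVX) is fully scalarized under
// this model: the i8 mask vector is unpacked, each of the 16 lanes pays a
// scalar compare plus a branch, every stored element is extracted, and 16
// scalar stores are issued.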
  4319. // Legalize the type.
  4320. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcVTy);
  4321. auto VT = TLI->getValueType(DL, SrcVTy);
  4322. InstructionCost Cost = 0;
  4323. if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
  4324. LT.second.getVectorNumElements() == NumElem)
  4325. // Promotion requires extend/truncate for data and a shuffle for mask.
  4326. Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, std::nullopt,
  4327. CostKind, 0, nullptr) +
  4328. getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, std::nullopt,
  4329. CostKind, 0, nullptr);
  4330. else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
  4331. auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
  4332. LT.second.getVectorNumElements());
// Expanding requires filling the mask with zeroes.
  4334. Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, std::nullopt,
  4335. CostKind, 0, MaskTy);
  4336. }
// Pre-AVX512 - each maskmov load costs 2, each maskmov store costs ~8.
  4338. if (!ST->hasAVX512())
  4339. return Cost + LT.first * (IsLoad ? 2 : 8);
  4340. // AVX-512 masked load/store is cheaper
  4341. return Cost + LT.first;
  4342. }
  4343. InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
  4344. ScalarEvolution *SE,
  4345. const SCEV *Ptr) {
  4346. // Address computations in vectorized code with non-consecutive addresses will
  4347. // likely result in more instructions compared to scalar code where the
  4348. // computation can more often be merged into the index mode. The resulting
  4349. // extra micro-ops can significantly decrease throughput.
  4350. const unsigned NumVectorInstToHideOverhead = 10;
// Cost modeling of strided access computation is hidden by the indexing
// modes of X86 regardless of the stride value. We don't believe that there
// is a difference between constant strided access in general and a constant
// stride value which is less than or equal to 64.
// Even in the case of a (loop invariant) stride whose value is not known at
// compile time, the address computation will not incur more than one extra
// ADD instruction.
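// E.g. on pre-AVX2 targets a non-strided (gather-like) vector address
// computation returns NumVectorInstToHideOverhead (10), a loop-invariant but
// compile-time-unknown stride returns 1, and a constant stride falls through
// to the base implementation.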
  4358. if (Ty->isVectorTy() && SE && !ST->hasAVX2()) {
  4359. // TODO: AVX2 is the current cut-off because we don't have correct
  4360. // interleaving costs for prior ISA's.
  4361. if (!BaseT::isStridedAccess(Ptr))
  4362. return NumVectorInstToHideOverhead;
  4363. if (!BaseT::getConstantStrideStep(SE, Ptr))
  4364. return 1;
  4365. }
  4366. return BaseT::getAddressComputationCost(Ty, SE, Ptr);
  4367. }
  4368. InstructionCost
  4369. X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
  4370. std::optional<FastMathFlags> FMF,
  4371. TTI::TargetCostKind CostKind) {
  4372. if (TTI::requiresOrderedReduction(FMF))
  4373. return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
// We use the Intel Architecture Code Analyzer (IACA) to measure the
// throughput and use that as the cost.
  4376. static const CostTblEntry SLMCostTblNoPairWise[] = {
  4377. { ISD::FADD, MVT::v2f64, 3 },
  4378. { ISD::ADD, MVT::v2i64, 5 },
  4379. };
  4380. static const CostTblEntry SSE2CostTblNoPairWise[] = {
  4381. { ISD::FADD, MVT::v2f64, 2 },
  4382. { ISD::FADD, MVT::v2f32, 2 },
  4383. { ISD::FADD, MVT::v4f32, 4 },
  4384. { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
  4385. { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
  4386. { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
  4387. { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
  4388. { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
  4389. { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  4390. { ISD::ADD, MVT::v2i8, 2 },
  4391. { ISD::ADD, MVT::v4i8, 2 },
  4392. { ISD::ADD, MVT::v8i8, 2 },
  4393. { ISD::ADD, MVT::v16i8, 3 },
  4394. };
  4395. static const CostTblEntry AVX1CostTblNoPairWise[] = {
  4396. { ISD::FADD, MVT::v4f64, 3 },
  4397. { ISD::FADD, MVT::v4f32, 3 },
  4398. { ISD::FADD, MVT::v8f32, 4 },
  4399. { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
  4400. { ISD::ADD, MVT::v4i64, 3 },
  4401. { ISD::ADD, MVT::v8i32, 5 },
  4402. { ISD::ADD, MVT::v16i16, 5 },
  4403. { ISD::ADD, MVT::v32i8, 4 },
  4404. };
  4405. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  4406. assert(ISD && "Invalid opcode");
  4407. // Before legalizing the type, give a chance to look up illegal narrow types
  4408. // in the table.
  4409. // FIXME: Is there a better way to do this?
  4410. EVT VT = TLI->getValueType(DL, ValTy);
  4411. if (VT.isSimple()) {
  4412. MVT MTy = VT.getSimpleVT();
  4413. if (ST->useSLMArithCosts())
  4414. if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
  4415. return Entry->Cost;
  4416. if (ST->hasAVX())
  4417. if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
  4418. return Entry->Cost;
  4419. if (ST->hasSSE2())
  4420. if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
  4421. return Entry->Cost;
  4422. }
  4423. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  4424. MVT MTy = LT.second;
  4425. auto *ValVTy = cast<FixedVectorType>(ValTy);
  4426. // Special case: vXi8 mul reductions are performed as vXi16.
  4427. if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
  4428. auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
  4429. auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
  4430. return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
  4431. TargetTransformInfo::CastContextHint::None,
  4432. CostKind) +
  4433. getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
  4434. }
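// E.g. a v16i8 mul reduction is costed as a zext to v16i16 plus the cost of
// the v16i16 mul reduction.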
  4435. InstructionCost ArithmeticCost = 0;
  4436. if (LT.first != 1 && MTy.isVector() &&
  4437. MTy.getVectorNumElements() < ValVTy->getNumElements()) {
  4438. // Type needs to be split. We need LT.first - 1 arithmetic ops.
  4439. auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
  4440. MTy.getVectorNumElements());
  4441. ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
  4442. ArithmeticCost *= LT.first - 1;
  4443. }
  4444. if (ST->useSLMArithCosts())
  4445. if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
  4446. return ArithmeticCost + Entry->Cost;
  4447. if (ST->hasAVX())
  4448. if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
  4449. return ArithmeticCost + Entry->Cost;
  4450. if (ST->hasSSE2())
  4451. if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
  4452. return ArithmeticCost + Entry->Cost;
  4453. // FIXME: These assume a naive kshift+binop lowering, which is probably
  4454. // conservative in most cases.
  4455. static const CostTblEntry AVX512BoolReduction[] = {
  4456. { ISD::AND, MVT::v2i1, 3 },
  4457. { ISD::AND, MVT::v4i1, 5 },
  4458. { ISD::AND, MVT::v8i1, 7 },
  4459. { ISD::AND, MVT::v16i1, 9 },
  4460. { ISD::AND, MVT::v32i1, 11 },
  4461. { ISD::AND, MVT::v64i1, 13 },
  4462. { ISD::OR, MVT::v2i1, 3 },
  4463. { ISD::OR, MVT::v4i1, 5 },
  4464. { ISD::OR, MVT::v8i1, 7 },
  4465. { ISD::OR, MVT::v16i1, 9 },
  4466. { ISD::OR, MVT::v32i1, 11 },
  4467. { ISD::OR, MVT::v64i1, 13 },
  4468. };
  4469. static const CostTblEntry AVX2BoolReduction[] = {
  4470. { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
  4471. { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
  4472. { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
  4473. { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
  4474. };
  4475. static const CostTblEntry AVX1BoolReduction[] = {
  4476. { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
  4477. { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
  4478. { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
  4479. { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
  4480. { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
  4481. { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
  4482. { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
  4483. { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
  4484. };
  4485. static const CostTblEntry SSE2BoolReduction[] = {
  4486. { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
  4487. { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
  4488. { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
  4489. { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
  4490. { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
  4491. { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
  4492. { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
  4493. { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
  4494. };
  4495. // Handle bool allof/anyof patterns.
  4496. if (ValVTy->getElementType()->isIntegerTy(1)) {
  4497. InstructionCost ArithmeticCost = 0;
  4498. if (LT.first != 1 && MTy.isVector() &&
  4499. MTy.getVectorNumElements() < ValVTy->getNumElements()) {
  4500. // Type needs to be split. We need LT.first - 1 arithmetic ops.
  4501. auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
  4502. MTy.getVectorNumElements());
  4503. ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
  4504. ArithmeticCost *= LT.first - 1;
  4505. }
  4506. if (ST->hasAVX512())
  4507. if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
  4508. return ArithmeticCost + Entry->Cost;
  4509. if (ST->hasAVX2())
  4510. if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
  4511. return ArithmeticCost + Entry->Cost;
  4512. if (ST->hasAVX())
  4513. if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
  4514. return ArithmeticCost + Entry->Cost;
  4515. if (ST->hasSSE2())
  4516. if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
  4517. return ArithmeticCost + Entry->Cost;
  4518. return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
  4519. }
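// E.g. an any-of (OR) reduction of v32i8 costs 2 with AVX2 (vpmovmskb + cmp)
// but 4 with only AVX1 (vextractf128 + vpor + vpmovmskb + cmp), per the
// tables above.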
  4520. unsigned NumVecElts = ValVTy->getNumElements();
  4521. unsigned ScalarSize = ValVTy->getScalarSizeInBits();
  4522. // Special case power of 2 reductions where the scalar type isn't changed
  4523. // by type legalization.
  4524. if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
  4525. return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
  4526. InstructionCost ReductionCost = 0;
  4527. auto *Ty = ValVTy;
  4528. if (LT.first != 1 && MTy.isVector() &&
  4529. MTy.getVectorNumElements() < ValVTy->getNumElements()) {
  4530. // Type needs to be split. We need LT.first - 1 arithmetic ops.
  4531. Ty = FixedVectorType::get(ValVTy->getElementType(),
  4532. MTy.getVectorNumElements());
  4533. ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
  4534. ReductionCost *= LT.first - 1;
  4535. NumVecElts = MTy.getVectorNumElements();
  4536. }
  4537. // Now handle reduction with the legal type, taking into account size changes
  4538. // at each level.
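// E.g. a v8i32 add reduction on AVX2 is costed as a 256->128 subvector
// extract plus an add, a 128-bit shuffle plus an add, a 64-bit shuffle plus
// an add, and a final extract of element 0.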
  4539. while (NumVecElts > 1) {
  4540. // Determine the size of the remaining vector we need to reduce.
  4541. unsigned Size = NumVecElts * ScalarSize;
  4542. NumVecElts /= 2;
  4543. // If we're reducing from 256/512 bits, use an extract_subvector.
  4544. if (Size > 128) {
  4545. auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
  4546. ReductionCost +=
  4547. getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, CostKind,
  4548. NumVecElts, SubTy);
  4549. Ty = SubTy;
  4550. } else if (Size == 128) {
  4551. // Reducing from 128 bits is a permute of v2f64/v2i64.
  4552. FixedVectorType *ShufTy;
  4553. if (ValVTy->isFloatingPointTy())
  4554. ShufTy =
  4555. FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
  4556. else
  4557. ShufTy =
  4558. FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
  4559. ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
  4560. std::nullopt, CostKind, 0, nullptr);
  4561. } else if (Size == 64) {
  4562. // Reducing from 64 bits is a shuffle of v4f32/v4i32.
  4563. FixedVectorType *ShufTy;
  4564. if (ValVTy->isFloatingPointTy())
  4565. ShufTy =
  4566. FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
  4567. else
  4568. ShufTy =
  4569. FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
  4570. ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
  4571. std::nullopt, CostKind, 0, nullptr);
  4572. } else {
  4573. // Reducing from smaller size is a shift by immediate.
  4574. auto *ShiftTy = FixedVectorType::get(
  4575. Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
  4576. ReductionCost += getArithmeticInstrCost(
  4577. Instruction::LShr, ShiftTy, CostKind,
  4578. {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  4579. {TargetTransformInfo::OK_UniformConstantValue, TargetTransformInfo::OP_None});
  4580. }
  4581. // Add the arithmetic op for this level.
  4582. ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
  4583. }
  4584. // Add the final extract element to the cost.
  4585. return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty,
  4586. CostKind, 0, nullptr, nullptr);
  4587. }
  4588. InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
  4589. bool IsUnsigned) {
  4590. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  4591. MVT MTy = LT.second;
  4592. int ISD;
  4593. if (Ty->isIntOrIntVectorTy()) {
  4594. ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  4595. } else {
  4596. assert(Ty->isFPOrFPVectorTy() &&
  4597. "Expected float point or integer vector type.");
  4598. ISD = ISD::FMINNUM;
  4599. }
  4600. static const CostTblEntry SSE1CostTbl[] = {
  4601. {ISD::FMINNUM, MVT::v4f32, 1},
  4602. };
  4603. static const CostTblEntry SSE2CostTbl[] = {
  4604. {ISD::FMINNUM, MVT::v2f64, 1},
  4605. {ISD::SMIN, MVT::v8i16, 1},
  4606. {ISD::UMIN, MVT::v16i8, 1},
  4607. };
  4608. static const CostTblEntry SSE41CostTbl[] = {
  4609. {ISD::SMIN, MVT::v4i32, 1},
  4610. {ISD::UMIN, MVT::v4i32, 1},
  4611. {ISD::UMIN, MVT::v8i16, 1},
  4612. {ISD::SMIN, MVT::v16i8, 1},
  4613. };
  4614. static const CostTblEntry SSE42CostTbl[] = {
  4615. {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
  4616. };
  4617. static const CostTblEntry AVX1CostTbl[] = {
  4618. {ISD::FMINNUM, MVT::v8f32, 1},
  4619. {ISD::FMINNUM, MVT::v4f64, 1},
  4620. {ISD::SMIN, MVT::v8i32, 3},
  4621. {ISD::UMIN, MVT::v8i32, 3},
  4622. {ISD::SMIN, MVT::v16i16, 3},
  4623. {ISD::UMIN, MVT::v16i16, 3},
  4624. {ISD::SMIN, MVT::v32i8, 3},
  4625. {ISD::UMIN, MVT::v32i8, 3},
  4626. };
  4627. static const CostTblEntry AVX2CostTbl[] = {
  4628. {ISD::SMIN, MVT::v8i32, 1},
  4629. {ISD::UMIN, MVT::v8i32, 1},
  4630. {ISD::SMIN, MVT::v16i16, 1},
  4631. {ISD::UMIN, MVT::v16i16, 1},
  4632. {ISD::SMIN, MVT::v32i8, 1},
  4633. {ISD::UMIN, MVT::v32i8, 1},
  4634. };
  4635. static const CostTblEntry AVX512CostTbl[] = {
  4636. {ISD::FMINNUM, MVT::v16f32, 1},
  4637. {ISD::FMINNUM, MVT::v8f64, 1},
  4638. {ISD::SMIN, MVT::v2i64, 1},
  4639. {ISD::UMIN, MVT::v2i64, 1},
  4640. {ISD::SMIN, MVT::v4i64, 1},
  4641. {ISD::UMIN, MVT::v4i64, 1},
  4642. {ISD::SMIN, MVT::v8i64, 1},
  4643. {ISD::UMIN, MVT::v8i64, 1},
  4644. {ISD::SMIN, MVT::v16i32, 1},
  4645. {ISD::UMIN, MVT::v16i32, 1},
  4646. };
  4647. static const CostTblEntry AVX512BWCostTbl[] = {
  4648. {ISD::SMIN, MVT::v32i16, 1},
  4649. {ISD::UMIN, MVT::v32i16, 1},
  4650. {ISD::SMIN, MVT::v64i8, 1},
  4651. {ISD::UMIN, MVT::v64i8, 1},
  4652. };
  4653. // If we have a native MIN/MAX instruction for this type, use it.
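// E.g. a v8i32 smin is a single instruction with AVX2, but costs 3 with only
// AVX1 where there is no 256-bit integer min.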
  4654. if (ST->hasBWI())
  4655. if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
  4656. return LT.first * Entry->Cost;
  4657. if (ST->hasAVX512())
  4658. if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
  4659. return LT.first * Entry->Cost;
  4660. if (ST->hasAVX2())
  4661. if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
  4662. return LT.first * Entry->Cost;
  4663. if (ST->hasAVX())
  4664. if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
  4665. return LT.first * Entry->Cost;
  4666. if (ST->hasSSE42())
  4667. if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
  4668. return LT.first * Entry->Cost;
  4669. if (ST->hasSSE41())
  4670. if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
  4671. return LT.first * Entry->Cost;
  4672. if (ST->hasSSE2())
  4673. if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
  4674. return LT.first * Entry->Cost;
  4675. if (ST->hasSSE1())
  4676. if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
  4677. return LT.first * Entry->Cost;
  4678. unsigned CmpOpcode;
  4679. if (Ty->isFPOrFPVectorTy()) {
  4680. CmpOpcode = Instruction::FCmp;
  4681. } else {
  4682. assert(Ty->isIntOrIntVectorTy() &&
  4683. "expecting floating point or integer type for min/max reduction");
  4684. CmpOpcode = Instruction::ICmp;
  4685. }
  4686. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  4687. // Otherwise fall back to cmp+select.
  4688. InstructionCost Result =
  4689. getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
  4690. CostKind) +
  4691. getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
  4692. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  4693. return Result;
  4694. }
  4695. InstructionCost
  4696. X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
  4697. bool IsUnsigned,
  4698. TTI::TargetCostKind CostKind) {
  4699. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  4700. MVT MTy = LT.second;
  4701. int ISD;
  4702. if (ValTy->isIntOrIntVectorTy()) {
  4703. ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  4704. } else {
  4705. assert(ValTy->isFPOrFPVectorTy() &&
  4706. "Expected float point or integer vector type.");
  4707. ISD = ISD::FMINNUM;
  4708. }
// We use the Intel Architecture Code Analyzer (IACA) to measure the
// throughput and use that as the cost.
  4711. static const CostTblEntry SSE2CostTblNoPairWise[] = {
  4712. {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
  4713. {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
  4714. {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
  4715. };
  4716. static const CostTblEntry SSE41CostTblNoPairWise[] = {
  4717. {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
  4718. {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
  4719. {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
  4720. {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
  4721. {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
  4722. {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
  4723. {ISD::SMIN, MVT::v2i8, 3}, // pminsb
  4724. {ISD::SMIN, MVT::v4i8, 5}, // pminsb
  4725. {ISD::SMIN, MVT::v8i8, 7}, // pminsb
  4726. {ISD::SMIN, MVT::v16i8, 6},
  4727. {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
  4728. {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
  4729. {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
  4730. {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
  4731. };
  4732. static const CostTblEntry AVX1CostTblNoPairWise[] = {
  4733. {ISD::SMIN, MVT::v16i16, 6},
  4734. {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
  4735. {ISD::SMIN, MVT::v32i8, 8},
  4736. {ISD::UMIN, MVT::v32i8, 8},
  4737. };
  4738. static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
  4739. {ISD::SMIN, MVT::v32i16, 8},
  4740. {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
  4741. {ISD::SMIN, MVT::v64i8, 10},
  4742. {ISD::UMIN, MVT::v64i8, 10},
  4743. };
  4744. // Before legalizing the type, give a chance to look up illegal narrow types
  4745. // in the table.
  4746. // FIXME: Is there a better way to do this?
  4747. EVT VT = TLI->getValueType(DL, ValTy);
  4748. if (VT.isSimple()) {
  4749. MVT MTy = VT.getSimpleVT();
  4750. if (ST->hasBWI())
  4751. if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
  4752. return Entry->Cost;
  4753. if (ST->hasAVX())
  4754. if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
  4755. return Entry->Cost;
  4756. if (ST->hasSSE41())
  4757. if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
  4758. return Entry->Cost;
  4759. if (ST->hasSSE2())
  4760. if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
  4761. return Entry->Cost;
  4762. }
  4763. auto *ValVTy = cast<FixedVectorType>(ValTy);
  4764. unsigned NumVecElts = ValVTy->getNumElements();
  4765. auto *Ty = ValVTy;
  4766. InstructionCost MinMaxCost = 0;
  4767. if (LT.first != 1 && MTy.isVector() &&
  4768. MTy.getVectorNumElements() < ValVTy->getNumElements()) {
// Type needs to be split. We need LT.first - 1 operations.
  4770. Ty = FixedVectorType::get(ValVTy->getElementType(),
  4771. MTy.getVectorNumElements());
  4772. auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
  4773. MTy.getVectorNumElements());
  4774. MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  4775. MinMaxCost *= LT.first - 1;
  4776. NumVecElts = MTy.getVectorNumElements();
  4777. }
  4778. if (ST->hasBWI())
  4779. if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
  4780. return MinMaxCost + Entry->Cost;
  4781. if (ST->hasAVX())
  4782. if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
  4783. return MinMaxCost + Entry->Cost;
  4784. if (ST->hasSSE41())
  4785. if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
  4786. return MinMaxCost + Entry->Cost;
  4787. if (ST->hasSSE2())
  4788. if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
  4789. return MinMaxCost + Entry->Cost;
  4790. unsigned ScalarSize = ValTy->getScalarSizeInBits();
  4791. // Special case power of 2 reductions where the scalar type isn't changed
  4792. // by type legalization.
  4793. if (!isPowerOf2_32(ValVTy->getNumElements()) ||
  4794. ScalarSize != MTy.getScalarSizeInBits())
  4795. return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
  4796. // Now handle reduction with the legal type, taking into account size changes
  4797. // at each level.
  4798. while (NumVecElts > 1) {
  4799. // Determine the size of the remaining vector we need to reduce.
  4800. unsigned Size = NumVecElts * ScalarSize;
  4801. NumVecElts /= 2;
  4802. // If we're reducing from 256/512 bits, use an extract_subvector.
  4803. if (Size > 128) {
  4804. auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
  4805. MinMaxCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
  4806. CostKind, NumVecElts, SubTy);
  4807. Ty = SubTy;
  4808. } else if (Size == 128) {
  4809. // Reducing from 128 bits is a permute of v2f64/v2i64.
  4810. VectorType *ShufTy;
  4811. if (ValTy->isFloatingPointTy())
  4812. ShufTy =
  4813. FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
  4814. else
  4815. ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
  4816. MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
  4817. std::nullopt, CostKind, 0, nullptr);
  4818. } else if (Size == 64) {
  4819. // Reducing from 64 bits is a shuffle of v4f32/v4i32.
  4820. FixedVectorType *ShufTy;
  4821. if (ValTy->isFloatingPointTy())
  4822. ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
  4823. else
  4824. ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
  4825. MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
  4826. std::nullopt, CostKind, 0, nullptr);
  4827. } else {
  4828. // Reducing from smaller size is a shift by immediate.
  4829. auto *ShiftTy = FixedVectorType::get(
  4830. Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
  4831. MinMaxCost += getArithmeticInstrCost(
  4832. Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
  4833. {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  4834. {TargetTransformInfo::OK_UniformConstantValue, TargetTransformInfo::OP_None});
  4835. }
4836. // Add the min/max op for this level.
  4837. auto *SubCondTy =
  4838. FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
  4839. MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  4840. }
  4841. // Add the final extract element to the cost.
  4842. return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty,
  4843. CostKind, 0, nullptr, nullptr);
  4844. }
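// Illustrative walk-through of the loop above (assuming no cost-table entry
// matched earlier): for a umin reduction of v16i32 that legalizes to a single
// 512-bit vector, the levels are:
//   512 -> 256 bits: extract a v8i32 subvector + min on v8i32
//   256 -> 128 bits: extract a v4i32 subvector + min on v4i32
//   128 bits:        v2i64-style permute + min on v4i32
//   64 bits:         v4i32 shuffle + min on v4i32
// followed by one final extractelement of lane 0.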
  4845. /// Calculate the cost of materializing a 64-bit value. This helper
  4846. /// method might only calculate a fraction of a larger immediate. Therefore it
  4847. /// is valid to return a cost of ZERO.
  4848. InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
  4849. if (Val == 0)
  4850. return TTI::TCC_Free;
  4851. if (isInt<32>(Val))
  4852. return TTI::TCC_Basic;
  4853. return 2 * TTI::TCC_Basic;
  4854. }
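// For example (illustrative): getIntImmCost(0) is TCC_Free, getIntImmCost(42)
// is TCC_Basic (fits a 32-bit immediate), and getIntImmCost(0x100000000) is
// 2 * TCC_Basic since it needs a full 64-bit materialization (e.g. movabsq).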
  4855. InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
  4856. TTI::TargetCostKind CostKind) {
  4857. assert(Ty->isIntegerTy());
  4858. unsigned BitSize = Ty->getPrimitiveSizeInBits();
  4859. if (BitSize == 0)
  4860. return ~0U;
  4861. // Never hoist constants larger than 128bit, because this might lead to
  4862. // incorrect code generation or assertions in codegen.
4863. // FIXME: Create a cost model for types larger than i128 once the codegen
  4864. // issues have been fixed.
  4865. if (BitSize > 128)
  4866. return TTI::TCC_Free;
  4867. if (Imm == 0)
  4868. return TTI::TCC_Free;
  4869. // Sign-extend all constants to a multiple of 64-bit.
  4870. APInt ImmVal = Imm;
  4871. if (BitSize % 64 != 0)
  4872. ImmVal = Imm.sext(alignTo(BitSize, 64));
  4873. // Split the constant into 64-bit chunks and calculate the cost for each
  4874. // chunk.
  4875. InstructionCost Cost = 0;
  4876. for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
  4877. APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
  4878. int64_t Val = Tmp.getSExtValue();
  4879. Cost += getIntImmCost(Val);
  4880. }
  4881. // We need at least one instruction to materialize the constant.
  4882. return std::max<InstructionCost>(1, Cost);
  4883. }
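// Worked example (illustrative): an i128 constant whose low 64-bit chunk is 0
// and whose high chunk fits in 32 bits is costed by the loop above as
// 0 + TCC_Basic, and the result is then clamped to at least one instruction.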
  4884. InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
  4885. const APInt &Imm, Type *Ty,
  4886. TTI::TargetCostKind CostKind,
  4887. Instruction *Inst) {
  4888. assert(Ty->isIntegerTy());
  4889. unsigned BitSize = Ty->getPrimitiveSizeInBits();
  4890. // There is no cost model for constants with a bit size of 0. Return TCC_Free
  4891. // here, so that constant hoisting will ignore this constant.
  4892. if (BitSize == 0)
  4893. return TTI::TCC_Free;
  4894. unsigned ImmIdx = ~0U;
  4895. switch (Opcode) {
  4896. default:
  4897. return TTI::TCC_Free;
  4898. case Instruction::GetElementPtr:
  4899. // Always hoist the base address of a GetElementPtr. This prevents the
  4900. // creation of new constants for every base constant that gets constant
  4901. // folded with the offset.
  4902. if (Idx == 0)
  4903. return 2 * TTI::TCC_Basic;
  4904. return TTI::TCC_Free;
  4905. case Instruction::Store:
  4906. ImmIdx = 0;
  4907. break;
  4908. case Instruction::ICmp:
  4909. // This is an imperfect hack to prevent constant hoisting of
  4910. // compares that might be trying to check if a 64-bit value fits in
  4911. // 32-bits. The backend can optimize these cases using a right shift by 32.
4912. // Ideally we would check the compare predicate here. There are also other
  4913. // similar immediates the backend can use shifts for.
  4914. if (Idx == 1 && Imm.getBitWidth() == 64) {
  4915. uint64_t ImmVal = Imm.getZExtValue();
  4916. if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
  4917. return TTI::TCC_Free;
  4918. }
  4919. ImmIdx = 1;
  4920. break;
  4921. case Instruction::And:
  4922. // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
  4923. // by using a 32-bit operation with implicit zero extension. Detect such
  4924. // immediates here as the normal path expects bit 31 to be sign extended.
  4925. if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.isIntN(32))
  4926. return TTI::TCC_Free;
  4927. ImmIdx = 1;
  4928. break;
  4929. case Instruction::Add:
  4930. case Instruction::Sub:
  4931. // For add/sub, we can use the opposite instruction for INT32_MIN.
  4932. if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
  4933. return TTI::TCC_Free;
  4934. ImmIdx = 1;
  4935. break;
  4936. case Instruction::UDiv:
  4937. case Instruction::SDiv:
  4938. case Instruction::URem:
  4939. case Instruction::SRem:
  4940. // Division by constant is typically expanded later into a different
  4941. // instruction sequence. This completely changes the constants.
  4942. // Report them as "free" to stop ConstantHoist from marking them as opaque.
  4943. return TTI::TCC_Free;
  4944. case Instruction::Mul:
  4945. case Instruction::Or:
  4946. case Instruction::Xor:
  4947. ImmIdx = 1;
  4948. break;
  4949. // Always return TCC_Free for the shift value of a shift instruction.
  4950. case Instruction::Shl:
  4951. case Instruction::LShr:
  4952. case Instruction::AShr:
  4953. if (Idx == 1)
  4954. return TTI::TCC_Free;
  4955. break;
  4956. case Instruction::Trunc:
  4957. case Instruction::ZExt:
  4958. case Instruction::SExt:
  4959. case Instruction::IntToPtr:
  4960. case Instruction::PtrToInt:
  4961. case Instruction::BitCast:
  4962. case Instruction::PHI:
  4963. case Instruction::Call:
  4964. case Instruction::Select:
  4965. case Instruction::Ret:
  4966. case Instruction::Load:
  4967. break;
  4968. }
  4969. if (Idx == ImmIdx) {
  4970. int NumConstants = divideCeil(BitSize, 64);
  4971. InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  4972. return (Cost <= NumConstants * TTI::TCC_Basic)
  4973. ? static_cast<int>(TTI::TCC_Free)
  4974. : Cost;
  4975. }
  4976. return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  4977. }
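// For example (illustrative): 'and i64 %x, 0xffffffff' with the immediate at
// operand index 1 is reported as TCC_Free because it can be encoded as a
// 32-bit AND with implicit zero extension. In the generic Idx == ImmIdx path,
// an immediate whose materialization cost does not exceed ceil(BitSize / 64)
// basic ops is also reported free, i.e. it is expected to fold into the
// instruction rather than be hoisted.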
  4978. InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
  4979. const APInt &Imm, Type *Ty,
  4980. TTI::TargetCostKind CostKind) {
  4981. assert(Ty->isIntegerTy());
  4982. unsigned BitSize = Ty->getPrimitiveSizeInBits();
  4983. // There is no cost model for constants with a bit size of 0. Return TCC_Free
  4984. // here, so that constant hoisting will ignore this constant.
  4985. if (BitSize == 0)
  4986. return TTI::TCC_Free;
  4987. switch (IID) {
  4988. default:
  4989. return TTI::TCC_Free;
  4990. case Intrinsic::sadd_with_overflow:
  4991. case Intrinsic::uadd_with_overflow:
  4992. case Intrinsic::ssub_with_overflow:
  4993. case Intrinsic::usub_with_overflow:
  4994. case Intrinsic::smul_with_overflow:
  4995. case Intrinsic::umul_with_overflow:
  4996. if ((Idx == 1) && Imm.getBitWidth() <= 64 && Imm.isSignedIntN(32))
  4997. return TTI::TCC_Free;
  4998. break;
  4999. case Intrinsic::experimental_stackmap:
  5000. if ((Idx < 2) || (Imm.getBitWidth() <= 64 && Imm.isSignedIntN(64)))
  5001. return TTI::TCC_Free;
  5002. break;
  5003. case Intrinsic::experimental_patchpoint_void:
  5004. case Intrinsic::experimental_patchpoint_i64:
  5005. if ((Idx < 4) || (Imm.getBitWidth() <= 64 && Imm.isSignedIntN(64)))
  5006. return TTI::TCC_Free;
  5007. break;
  5008. }
  5009. return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  5010. }
  5011. InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
  5012. TTI::TargetCostKind CostKind,
  5013. const Instruction *I) {
  5014. if (CostKind != TTI::TCK_RecipThroughput)
  5015. return Opcode == Instruction::PHI ? 0 : 1;
  5016. // Branches are assumed to be predicted.
  5017. return 0;
  5018. }
  5019. int X86TTIImpl::getGatherOverhead() const {
  5020. // Some CPUs have more overhead for gather. The specified overhead is relative
  5021. // to the Load operation. "2" is the number provided by Intel architects. This
  5022. // parameter is used for cost estimation of Gather Op and comparison with
  5023. // other alternatives.
5024. // TODO: Remove the explicit hasAVX512()? That would mean we would only
  5025. // enable gather with a -march.
  5026. if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
  5027. return 2;
  5028. return 1024;
  5029. }
  5030. int X86TTIImpl::getScatterOverhead() const {
  5031. if (ST->hasAVX512())
  5032. return 2;
  5033. return 1024;
  5034. }
  5035. // Return an average cost of Gather / Scatter instruction, maybe improved later.
  5036. // FIXME: Add TargetCostKind support.
  5037. InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
  5038. const Value *Ptr, Align Alignment,
  5039. unsigned AddressSpace) {
  5040. assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  5041. unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  5042. // Try to reduce index size from 64 bit (default for GEP)
5043. // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
5044. // operation will use 16 x 64 indices, which do not fit in a zmm and need
5045. // to be split. Also check that the base pointer is the same for all lanes,
  5046. // and that there's at most one variable index.
  5047. auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
  5048. unsigned IndexSize = DL.getPointerSizeInBits();
  5049. const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  5050. if (IndexSize < 64 || !GEP)
  5051. return IndexSize;
  5052. unsigned NumOfVarIndices = 0;
  5053. const Value *Ptrs = GEP->getPointerOperand();
  5054. if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
  5055. return IndexSize;
  5056. for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
  5057. if (isa<Constant>(GEP->getOperand(i)))
  5058. continue;
  5059. Type *IndxTy = GEP->getOperand(i)->getType();
  5060. if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
  5061. IndxTy = IndexVTy->getElementType();
  5062. if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
  5063. !isa<SExtInst>(GEP->getOperand(i))) ||
  5064. ++NumOfVarIndices > 1)
  5065. return IndexSize; // 64
  5066. }
  5067. return (unsigned)32;
  5068. };
5069. // Try to reduce IndexSize to 32 bits for 16-element vectors.
  5070. // By default the IndexSize is equal to pointer size.
  5071. unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
  5072. ? getIndexSizeInBits(Ptr, DL)
  5073. : DL.getPointerSizeInBits();
  5074. auto *IndexVTy = FixedVectorType::get(
  5075. IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  5076. std::pair<InstructionCost, MVT> IdxsLT = getTypeLegalizationCost(IndexVTy);
  5077. std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcVTy);
  5078. InstructionCost::CostType SplitFactor =
  5079. *std::max(IdxsLT.first, SrcLT.first).getValue();
  5080. if (SplitFactor > 1) {
  5081. // Handle splitting of vector of pointers
  5082. auto *SplitSrcTy =
  5083. FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
  5084. return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
  5085. AddressSpace);
  5086. }
  5087. // The gather / scatter cost is given by Intel architects. It is a rough
5088. // number since we are looking at one instruction at a time.
  5089. const int GSOverhead = (Opcode == Instruction::Load)
  5090. ? getGatherOverhead()
  5091. : getScatterOverhead();
  5092. return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
  5093. MaybeAlign(Alignment), AddressSpace,
  5094. TTI::TCK_RecipThroughput);
  5095. }
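// Illustrative example: a v16f32 gather on AVX-512 whose GEP has a uniform
// base and a single variable index that is (a sign-extension of) a 32-bit
// value gets IndexSize = 32, so the v16i32 index vector fits in one zmm and
// no splitting is needed; the returned cost is then roughly
// getGatherOverhead() + 16 * <scalar f32 load cost>.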
  5096. /// Return the cost of full scalarization of gather / scatter operation.
  5097. ///
  5098. /// Opcode - Load or Store instruction.
  5099. /// SrcVTy - The type of the data vector that should be gathered or scattered.
  5100. /// VariableMask - The mask is non-constant at compile time.
  5101. /// Alignment - Alignment for one element.
  5102. /// AddressSpace - pointer[s] address space.
  5103. ///
  5104. /// FIXME: Add TargetCostKind support.
  5105. InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
  5106. bool VariableMask, Align Alignment,
  5107. unsigned AddressSpace) {
  5108. Type *ScalarTy = SrcVTy->getScalarType();
  5109. unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  5110. APInt DemandedElts = APInt::getAllOnes(VF);
  5111. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  5112. InstructionCost MaskUnpackCost = 0;
  5113. if (VariableMask) {
  5114. auto *MaskTy =
  5115. FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
  5116. MaskUnpackCost = getScalarizationOverhead(
  5117. MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true, CostKind);
  5118. InstructionCost ScalarCompareCost = getCmpSelInstrCost(
  5119. Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
  5120. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  5121. InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
  5122. MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  5123. }
  5124. InstructionCost AddressUnpackCost = getScalarizationOverhead(
  5125. FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts,
  5126. /*Insert=*/false, /*Extract=*/true, CostKind);
  5127. // The cost of the scalar loads/stores.
  5128. InstructionCost MemoryOpCost =
  5129. VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment),
  5130. AddressSpace, CostKind);
  5131. // The cost of forming the vector from loaded scalars/
  5132. // scalarizing the vector to perform scalar stores.
  5133. InstructionCost InsertExtractCost = getScalarizationOverhead(
  5134. cast<FixedVectorType>(SrcVTy), DemandedElts,
  5135. /*Insert=*/Opcode == Instruction::Load,
  5136. /*Extract=*/Opcode == Instruction::Store, CostKind);
  5137. return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
  5138. }
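// Illustrative breakdown: for a variable-mask gather of v8f32 this adds up
// the extraction of the 8 mask bits plus 8 * (compare + branch), the
// extraction of the 8 addresses, 8 scalar loads, and the inserts needed to
// rebuild the result vector.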
  5139. /// Calculate the cost of Gather / Scatter operation
  5140. InstructionCost X86TTIImpl::getGatherScatterOpCost(
  5141. unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
  5142. Align Alignment, TTI::TargetCostKind CostKind,
  5143. const Instruction *I = nullptr) {
  5144. if (CostKind != TTI::TCK_RecipThroughput) {
  5145. if ((Opcode == Instruction::Load &&
  5146. isLegalMaskedGather(SrcVTy, Align(Alignment)) &&
  5147. !forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
  5148. Align(Alignment))) ||
  5149. (Opcode == Instruction::Store &&
  5150. isLegalMaskedScatter(SrcVTy, Align(Alignment)) &&
  5151. !forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
  5152. Align(Alignment))))
  5153. return 1;
  5154. return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
  5155. Alignment, CostKind, I);
  5156. }
  5157. assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  5158. PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  5159. if (!PtrTy && Ptr->getType()->isVectorTy())
  5160. PtrTy = dyn_cast<PointerType>(
  5161. cast<VectorType>(Ptr->getType())->getElementType());
  5162. assert(PtrTy && "Unexpected type for Ptr argument");
  5163. unsigned AddressSpace = PtrTy->getAddressSpace();
  5164. if ((Opcode == Instruction::Load &&
  5165. (!isLegalMaskedGather(SrcVTy, Align(Alignment)) ||
  5166. forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
  5167. Align(Alignment)))) ||
  5168. (Opcode == Instruction::Store &&
  5169. (!isLegalMaskedScatter(SrcVTy, Align(Alignment)) ||
  5170. forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
  5171. Align(Alignment)))))
  5172. return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
  5173. AddressSpace);
  5174. return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
  5175. }
  5176. bool X86TTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
  5177. const TargetTransformInfo::LSRCost &C2) {
5178. // X86-specific here: instruction count has first priority.
  5179. return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
  5180. C1.NumIVMuls, C1.NumBaseAdds,
  5181. C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
  5182. std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
  5183. C2.NumIVMuls, C2.NumBaseAdds,
  5184. C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  5185. }
  5186. bool X86TTIImpl::canMacroFuseCmp() {
  5187. return ST->hasMacroFusion() || ST->hasBranchFusion();
  5188. }
  5189. bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  5190. if (!ST->hasAVX())
  5191. return false;
  5192. // The backend can't handle a single element vector.
  5193. if (isa<VectorType>(DataTy) &&
  5194. cast<FixedVectorType>(DataTy)->getNumElements() == 1)
  5195. return false;
  5196. Type *ScalarTy = DataTy->getScalarType();
  5197. if (ScalarTy->isPointerTy())
  5198. return true;
  5199. if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
  5200. return true;
  5201. if (ScalarTy->isHalfTy() && ST->hasBWI())
  5202. return true;
  5203. if (!ScalarTy->isIntegerTy())
  5204. return false;
  5205. unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  5206. return IntWidth == 32 || IntWidth == 64 ||
  5207. ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
  5208. }
  5209. bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
  5210. return isLegalMaskedLoad(DataType, Alignment);
  5211. }
  5212. bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  5213. unsigned DataSize = DL.getTypeStoreSize(DataType);
  5214. // The only supported nontemporal loads are for aligned vectors of 16 or 32
  5215. // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  5216. // (the equivalent stores only require AVX).
  5217. if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
  5218. return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
  5219. return false;
  5220. }
  5221. bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  5222. unsigned DataSize = DL.getTypeStoreSize(DataType);
  5223. // SSE4A supports nontemporal stores of float and double at arbitrary
  5224. // alignment.
  5225. if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
  5226. return true;
  5227. // Besides the SSE4A subtarget exception above, only aligned stores are
5228. // available nontemporally on any other subtarget. And only stores with a size
  5229. // of 4..32 bytes (powers of 2, only) are permitted.
  5230. if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
  5231. !isPowerOf2_32(DataSize))
  5232. return false;
  5233. // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  5234. // loads require AVX2).
  5235. if (DataSize == 32)
  5236. return ST->hasAVX();
  5237. if (DataSize == 16)
  5238. return ST->hasSSE1();
  5239. return true;
  5240. }
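// For example (illustrative): a 32-byte <8 x float> store aligned to 32 bytes
// requires AVX, a 16-byte <4 x i32> store aligned to 16 bytes requires SSE1,
// and an 8-byte aligned i64/double store is accepted on any subtarget. With
// SSE4A, scalar float/double nontemporal stores are accepted at any alignment.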
  5241. bool X86TTIImpl::isLegalBroadcastLoad(Type *ElementTy,
  5242. ElementCount NumElements) const {
  5243. // movddup
  5244. return ST->hasSSE3() && !NumElements.isScalable() &&
  5245. NumElements.getFixedValue() == 2 &&
  5246. ElementTy == Type::getDoubleTy(ElementTy->getContext());
  5247. }
  5248. bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  5249. if (!isa<VectorType>(DataTy))
  5250. return false;
  5251. if (!ST->hasAVX512())
  5252. return false;
  5253. // The backend can't handle a single element vector.
  5254. if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
  5255. return false;
  5256. Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
  5257. if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
  5258. return true;
  5259. if (!ScalarTy->isIntegerTy())
  5260. return false;
  5261. unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  5262. return IntWidth == 32 || IntWidth == 64 ||
  5263. ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
  5264. }
  5265. bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  5266. return isLegalMaskedExpandLoad(DataTy);
  5267. }
  5268. bool X86TTIImpl::supportsGather() const {
  5269. // Some CPUs have better gather performance than others.
5270. // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  5271. // enable gather with a -march.
  5272. return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
  5273. }
  5274. bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
5275. // Gather / scatter on 2-element vectors is not profitable on KNL / SKX.
5276. // A 4-element gather/scatter instruction does not exist on KNL. We could widen
5277. // it to 8 elements, but zeroing the upper bits of the mask vector will add more
5278. // instructions. Right now we give the scalar cost for 4-element vectors on KNL.
5279. // TODO: Check whether the gather/scatter instruction is better in the
5280. // VariableMask case.
  5281. unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
  5282. return NumElts == 1 ||
  5283. (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())));
  5284. }
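// For example (illustrative): on KNL (AVX-512 without VLX) both 2- and
// 4-element gathers are forced to scalarization, on SKX (with VLX) only the
// 2-element case is, and single-element "vectors" are always scalarized.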
  5285. bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
  5286. if (!supportsGather())
  5287. return false;
  5288. Type *ScalarTy = DataTy->getScalarType();
  5289. if (ScalarTy->isPointerTy())
  5290. return true;
  5291. if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
  5292. return true;
  5293. if (!ScalarTy->isIntegerTy())
  5294. return false;
  5295. unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  5296. return IntWidth == 32 || IntWidth == 64;
  5297. }
  5298. bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
  5299. unsigned Opcode1,
  5300. const SmallBitVector &OpcodeMask) const {
  5301. // ADDSUBPS 4xf32 SSE3
  5302. // VADDSUBPS 4xf32 AVX
  5303. // VADDSUBPS 8xf32 AVX2
  5304. // ADDSUBPD 2xf64 SSE3
  5305. // VADDSUBPD 2xf64 AVX
  5306. // VADDSUBPD 4xf64 AVX2
  5307. unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
  5308. assert(OpcodeMask.size() == NumElements && "Mask and VecTy are incompatible");
  5309. if (!isPowerOf2_32(NumElements))
  5310. return false;
  5311. // Check the opcode pattern. We apply the mask on the opcode arguments and
  5312. // then check if it is what we expect.
  5313. for (int Lane : seq<int>(0, NumElements)) {
  5314. unsigned Opc = OpcodeMask.test(Lane) ? Opcode1 : Opcode0;
  5315. // We expect FSub for even lanes and FAdd for odd lanes.
  5316. if (Lane % 2 == 0 && Opc != Instruction::FSub)
  5317. return false;
  5318. if (Lane % 2 == 1 && Opc != Instruction::FAdd)
  5319. return false;
  5320. }
  5321. // Now check that the pattern is supported by the target ISA.
  5322. Type *ElemTy = cast<VectorType>(VecTy)->getElementType();
  5323. if (ElemTy->isFloatTy())
  5324. return ST->hasSSE3() && NumElements % 4 == 0;
  5325. if (ElemTy->isDoubleTy())
  5326. return ST->hasSSE3() && NumElements % 2 == 0;
  5327. return false;
  5328. }
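// For example (illustrative): a <4 x float> alternating pattern with FSub in
// lanes 0 and 2 and FAdd in lanes 1 and 3 passes the checks above on SSE3 and
// corresponds to the ADDSUBPS pattern listed in the table.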
  5329. bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
  5330. // AVX2 doesn't support scatter
  5331. if (!ST->hasAVX512())
  5332. return false;
  5333. return isLegalMaskedGather(DataType, Alignment);
  5334. }
  5335. bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  5336. EVT VT = TLI->getValueType(DL, DataType);
  5337. return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
  5338. }
  5339. bool X86TTIImpl::isExpensiveToSpeculativelyExecute(const Instruction* I) {
  5340. // FDIV is always expensive, even if it has a very low uop count.
  5341. // TODO: Still necessary for recent CPUs with low latency/throughput fdiv?
  5342. if (I->getOpcode() == Instruction::FDiv)
  5343. return true;
  5344. return BaseT::isExpensiveToSpeculativelyExecute(I);
  5345. }
  5346. bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  5347. return false;
  5348. }
  5349. bool X86TTIImpl::areInlineCompatible(const Function *Caller,
  5350. const Function *Callee) const {
  5351. const TargetMachine &TM = getTLI()->getTargetMachine();
5352. // Treat this as a subset check on the subtarget feature bits.
  5353. const FeatureBitset &CallerBits =
  5354. TM.getSubtargetImpl(*Caller)->getFeatureBits();
  5355. const FeatureBitset &CalleeBits =
  5356. TM.getSubtargetImpl(*Callee)->getFeatureBits();
  5357. // Check whether features are the same (apart from the ignore list).
  5358. FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  5359. FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  5360. if (RealCallerBits == RealCalleeBits)
  5361. return true;
  5362. // If the features are a subset, we need to additionally check for calls
  5363. // that may become ABI-incompatible as a result of inlining.
  5364. if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
  5365. return false;
  5366. for (const Instruction &I : instructions(Callee)) {
  5367. if (const auto *CB = dyn_cast<CallBase>(&I)) {
  5368. SmallVector<Type *, 8> Types;
  5369. for (Value *Arg : CB->args())
  5370. Types.push_back(Arg->getType());
  5371. if (!CB->getType()->isVoidTy())
  5372. Types.push_back(CB->getType());
  5373. // Simple types are always ABI compatible.
  5374. auto IsSimpleTy = [](Type *Ty) {
  5375. return !Ty->isVectorTy() && !Ty->isAggregateType();
  5376. };
  5377. if (all_of(Types, IsSimpleTy))
  5378. continue;
  5379. if (Function *NestedCallee = CB->getCalledFunction()) {
  5380. // Assume that intrinsics are always ABI compatible.
  5381. if (NestedCallee->isIntrinsic())
  5382. continue;
  5383. // Do a precise compatibility check.
  5384. if (!areTypesABICompatible(Caller, NestedCallee, Types))
  5385. return false;
  5386. } else {
  5387. // We don't know the target features of the callee,
  5388. // assume it is incompatible.
  5389. return false;
  5390. }
  5391. }
  5392. }
  5393. return true;
  5394. }
  5395. bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
  5396. const Function *Callee,
  5397. const ArrayRef<Type *> &Types) const {
  5398. if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
  5399. return false;
  5400. // If we get here, we know the target features match. If one function
  5401. // considers 512-bit vectors legal and the other does not, consider them
  5402. // incompatible.
  5403. const TargetMachine &TM = getTLI()->getTargetMachine();
  5404. if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
  5405. TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
  5406. return true;
  5407. // Consider the arguments compatible if they aren't vectors or aggregates.
  5408. // FIXME: Look at the size of vectors.
  5409. // FIXME: Look at the element types of aggregates to see if there are vectors.
  5410. return llvm::none_of(Types,
  5411. [](Type *T) { return T->isVectorTy() || T->isAggregateType(); });
  5412. }
  5413. X86TTIImpl::TTI::MemCmpExpansionOptions
  5414. X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  5415. TTI::MemCmpExpansionOptions Options;
  5416. Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  5417. Options.NumLoadsPerBlock = 2;
  5418. // All GPR and vector loads can be unaligned.
  5419. Options.AllowOverlappingLoads = true;
  5420. if (IsZeroCmp) {
  5421. // Only enable vector loads for equality comparison. Right now the vector
5422. // version is not as fast for a three-way compare (see #33329).
  5423. const unsigned PreferredWidth = ST->getPreferVectorWidth();
  5424. if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
  5425. if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
  5426. if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
  5427. }
  5428. if (ST->is64Bit()) {
  5429. Options.LoadSizes.push_back(8);
  5430. }
  5431. Options.LoadSizes.push_back(4);
  5432. Options.LoadSizes.push_back(2);
  5433. Options.LoadSizes.push_back(1);
  5434. return Options;
  5435. }
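// For example (illustrative): on a 64-bit AVX2 target with a preferred vector
// width of at least 256 bits, an equality-only memcmp expansion gets
// LoadSizes = {32, 16, 8, 4, 2, 1} with up to two overlapping loads per block.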
  5436. bool X86TTIImpl::prefersVectorizedAddressing() const {
  5437. return supportsGather();
  5438. }
  5439. bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
  5440. return false;
  5441. }
  5442. bool X86TTIImpl::enableInterleavedAccessVectorization() {
  5443. // TODO: We expect this to be beneficial regardless of arch,
  5444. // but there are currently some unexplained performance artifacts on Atom.
  5445. // As a temporary solution, disable on Atom.
  5446. return !(ST->isAtom());
  5447. }
5448. // Get an estimate for interleaved load/store operations and strided loads.
5449. // \p Indices contains the indices for a strided load.
5450. // \p Factor - the factor of interleaving.
5451. // AVX-512 provides 3-src shuffles that significantly reduce the cost.
  5452. InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
  5453. unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
  5454. ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
  5455. TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
  5456. // VecTy for interleave memop is <VF*Factor x Elt>.
  5457. // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  5458. // VecTy = <12 x i32>.
  5459. // Calculate the number of memory operations (NumOfMemOps), required
  5460. // for load/store the VecTy.
  5461. MVT LegalVT = getTypeLegalizationCost(VecTy).second;
  5462. unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  5463. unsigned LegalVTSize = LegalVT.getStoreSize();
  5464. unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
  5465. // Get the cost of one memory operation.
  5466. auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
  5467. LegalVT.getVectorNumElements());
  5468. InstructionCost MemOpCost;
  5469. bool UseMaskedMemOp = UseMaskForCond || UseMaskForGaps;
  5470. if (UseMaskedMemOp)
  5471. MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
  5472. AddressSpace, CostKind);
  5473. else
  5474. MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
  5475. AddressSpace, CostKind);
  5476. unsigned VF = VecTy->getNumElements() / Factor;
  5477. MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
  5478. InstructionCost MaskCost;
  5479. if (UseMaskedMemOp) {
  5480. APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
  5481. for (unsigned Index : Indices) {
  5482. assert(Index < Factor && "Invalid index for interleaved memory op");
  5483. for (unsigned Elm = 0; Elm < VF; Elm++)
  5484. DemandedLoadStoreElts.setBit(Index + Elm * Factor);
  5485. }
  5486. Type *I1Type = Type::getInt1Ty(VecTy->getContext());
  5487. MaskCost = getReplicationShuffleCost(
  5488. I1Type, Factor, VF,
  5489. UseMaskForGaps ? DemandedLoadStoreElts
  5490. : APInt::getAllOnes(VecTy->getNumElements()),
  5491. CostKind);
  5492. // The Gaps mask is invariant and created outside the loop, therefore the
  5493. // cost of creating it is not accounted for here. However if we have both
  5494. // a MaskForGaps and some other mask that guards the execution of the
  5495. // memory access, we need to account for the cost of And-ing the two masks
  5496. // inside the loop.
  5497. if (UseMaskForGaps) {
  5498. auto *MaskVT = FixedVectorType::get(I1Type, VecTy->getNumElements());
  5499. MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
  5500. }
  5501. }
  5502. if (Opcode == Instruction::Load) {
  5503. // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
  5504. // contain the cost of the optimized shuffle sequence that the
  5505. // X86InterleavedAccess pass will generate.
5506. // The cost of the loads and stores is computed separately from the table.
5507. // X86InterleavedAccess supports only the following interleaved-access groups.
  5508. static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5509. {3, MVT::v16i8, 12}, // (load 48i8 and) deinterleave into 3 x 16i8
5510. {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
5511. {3, MVT::v64i8, 22}, // (load 192i8 and) deinterleave into 3 x 64i8
  5512. };
  5513. if (const auto *Entry =
  5514. CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
  5515. return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5516. // If an entry does not exist, fall back to the default implementation.
5517. // The kind of shuffle depends on the number of loaded values.
  5518. // If we load the entire data in one register, we can use a 1-src shuffle.
  5519. // Otherwise, we'll merge 2 sources in each operation.
  5520. TTI::ShuffleKind ShuffleKind =
  5521. (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
  5522. InstructionCost ShuffleCost = getShuffleCost(
  5523. ShuffleKind, SingleMemOpTy, std::nullopt, CostKind, 0, nullptr);
  5524. unsigned NumOfLoadsInInterleaveGrp =
  5525. Indices.size() ? Indices.size() : Factor;
  5526. auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
  5527. VecTy->getNumElements() / Factor);
  5528. InstructionCost NumOfResults =
  5529. getTypeLegalizationCost(ResultTy).first * NumOfLoadsInInterleaveGrp;
5530. // About half of the loads may be folded into shuffles when we have only
  5531. // one result. If we have more than one result, or the loads are masked,
  5532. // we do not fold loads at all.
  5533. unsigned NumOfUnfoldedLoads =
  5534. UseMaskedMemOp || NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
  5535. // Get a number of shuffle operations per result.
  5536. unsigned NumOfShufflesPerResult =
  5537. std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5538. // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
  5539. // When we have more than one destination, we need additional instructions
  5540. // to keep sources.
  5541. InstructionCost NumOfMoves = 0;
  5542. if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
  5543. NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
  5544. InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
  5545. MaskCost + NumOfUnfoldedLoads * MemOpCost +
  5546. NumOfMoves;
  5547. return Cost;
  5548. }
  5549. // Store.
  5550. assert(Opcode == Instruction::Store &&
  5551. "Expected Store Instruction at this point");
5552. // X86InterleavedAccess supports only the following interleaved-access groups.
  5553. static const CostTblEntry AVX512InterleavedStoreTbl[] = {
  5554. {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
  5555. {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
  5556. {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
  5557. {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
  5558. {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
  5559. {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5560. {4, MVT::v64i8, 24} // interleave 4 x 64i8 into 256i8 (and store)
  5561. };
  5562. if (const auto *Entry =
  5563. CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
  5564. return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5565. // If an entry does not exist, fall back to the default implementation.
5566. // There are no strided stores for now, and a store can't be folded into a
5567. // shuffle.
  5568. unsigned NumOfSources = Factor; // The number of values to be merged.
  5569. InstructionCost ShuffleCost = getShuffleCost(
  5570. TTI::SK_PermuteTwoSrc, SingleMemOpTy, std::nullopt, CostKind, 0, nullptr);
  5571. unsigned NumOfShufflesPerStore = NumOfSources - 1;
5572. // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
  5573. // We need additional instructions to keep sources.
  5574. unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  5575. InstructionCost Cost =
  5576. MaskCost +
  5577. NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
  5578. NumOfMoves;
  5579. return Cost;
  5580. }
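// Illustrative example: a stride-3 interleaved load of <48 x i8> (VF = 16)
// hits the {3, MVT::v16i8, 12} entry above, so the returned cost is
// MaskCost (zero when unmasked) + NumOfMemOps * MemOpCost + 12.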
  5581. InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
  5582. unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
  5583. Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
  5584. bool UseMaskForCond, bool UseMaskForGaps) {
  5585. auto *VecTy = cast<FixedVectorType>(BaseTy);
  5586. auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
  5587. Type *EltTy = cast<VectorType>(VecTy)->getElementType();
  5588. if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
  5589. EltTy->isIntegerTy(32) || EltTy->isPointerTy())
  5590. return true;
  5591. if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) || EltTy->isHalfTy())
  5592. return HasBW;
  5593. return false;
  5594. };
  5595. if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
  5596. return getInterleavedMemoryOpCostAVX512(
  5597. Opcode, VecTy, Factor, Indices, Alignment,
  5598. AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
  5599. if (UseMaskForCond || UseMaskForGaps)
  5600. return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
  5601. Alignment, AddressSpace, CostKind,
  5602. UseMaskForCond, UseMaskForGaps);
5603. // Get an estimate for interleaved load/store operations for SSE-AVX2.
  5604. // As opposed to AVX-512, SSE-AVX2 do not have generic shuffles that allow
  5605. // computing the cost using a generic formula as a function of generic
  5606. // shuffles. We therefore use a lookup table instead, filled according to
  5607. // the instruction sequences that codegen currently generates.
  5608. // VecTy for interleave memop is <VF*Factor x Elt>.
  5609. // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  5610. // VecTy = <12 x i32>.
  5611. MVT LegalVT = getTypeLegalizationCost(VecTy).second;
  5612. // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  5613. // the VF=2, while v2i128 is an unsupported MVT vector type
  5614. // (see MachineValueType.h::getVectorVT()).
  5615. if (!LegalVT.isVector())
  5616. return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
  5617. Alignment, AddressSpace, CostKind);
  5618. unsigned VF = VecTy->getNumElements() / Factor;
  5619. Type *ScalarTy = VecTy->getElementType();
  5620. // Deduplicate entries, model floats/pointers as appropriately-sized integers.
  5621. if (!ScalarTy->isIntegerTy())
  5622. ScalarTy =
  5623. Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
  5624. // Get the cost of all the memory operations.
  5625. // FIXME: discount dead loads.
  5626. InstructionCost MemOpCosts = getMemoryOpCost(
  5627. Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
  5628. auto *VT = FixedVectorType::get(ScalarTy, VF);
  5629. EVT ETy = TLI->getValueType(DL, VT);
  5630. if (!ETy.isSimple())
  5631. return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
  5632. Alignment, AddressSpace, CostKind);
  5633. // TODO: Complete for other data-types and strides.
  5634. // Each combination of Stride, element bit width and VF results in a different
5635. // sequence; the cost tables are therefore accessed with:
  5636. // Factor (stride) and VectorType=VFxiN.
  5637. // The Cost accounts only for the shuffle sequence;
  5638. // The cost of the loads/stores is accounted for separately.
  5639. //
  5640. static const CostTblEntry AVX2InterleavedLoadTbl[] = {
  5641. {2, MVT::v2i8, 2}, // (load 4i8 and) deinterleave into 2 x 2i8
  5642. {2, MVT::v4i8, 2}, // (load 8i8 and) deinterleave into 2 x 4i8
  5643. {2, MVT::v8i8, 2}, // (load 16i8 and) deinterleave into 2 x 8i8
  5644. {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
  5645. {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8
  5646. {2, MVT::v8i16, 6}, // (load 16i16 and) deinterleave into 2 x 8i16
  5647. {2, MVT::v16i16, 9}, // (load 32i16 and) deinterleave into 2 x 16i16
  5648. {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16
  5649. {2, MVT::v8i32, 4}, // (load 16i32 and) deinterleave into 2 x 8i32
  5650. {2, MVT::v16i32, 8}, // (load 32i32 and) deinterleave into 2 x 16i32
  5651. {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32
  5652. {2, MVT::v4i64, 4}, // (load 8i64 and) deinterleave into 2 x 4i64
  5653. {2, MVT::v8i64, 8}, // (load 16i64 and) deinterleave into 2 x 8i64
  5654. {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
  5655. {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64
  5656. {3, MVT::v2i8, 3}, // (load 6i8 and) deinterleave into 3 x 2i8
  5657. {3, MVT::v4i8, 3}, // (load 12i8 and) deinterleave into 3 x 4i8
  5658. {3, MVT::v8i8, 6}, // (load 24i8 and) deinterleave into 3 x 8i8
  5659. {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
  5660. {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
  5661. {3, MVT::v2i16, 5}, // (load 6i16 and) deinterleave into 3 x 2i16
  5662. {3, MVT::v4i16, 7}, // (load 12i16 and) deinterleave into 3 x 4i16
  5663. {3, MVT::v8i16, 9}, // (load 24i16 and) deinterleave into 3 x 8i16
  5664. {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
  5665. {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16
  5666. {3, MVT::v2i32, 3}, // (load 6i32 and) deinterleave into 3 x 2i32
  5667. {3, MVT::v4i32, 3}, // (load 12i32 and) deinterleave into 3 x 4i32
  5668. {3, MVT::v8i32, 7}, // (load 24i32 and) deinterleave into 3 x 8i32
  5669. {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
  5670. {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32
  5671. {3, MVT::v2i64, 1}, // (load 6i64 and) deinterleave into 3 x 2i64
  5672. {3, MVT::v4i64, 5}, // (load 12i64 and) deinterleave into 3 x 4i64
  5673. {3, MVT::v8i64, 10}, // (load 24i64 and) deinterleave into 3 x 8i64
  5674. {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64
  5675. {4, MVT::v2i8, 4}, // (load 8i8 and) deinterleave into 4 x 2i8
  5676. {4, MVT::v4i8, 4}, // (load 16i8 and) deinterleave into 4 x 4i8
  5677. {4, MVT::v8i8, 12}, // (load 32i8 and) deinterleave into 4 x 8i8
  5678. {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
  5679. {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8
  5680. {4, MVT::v2i16, 6}, // (load 8i16 and) deinterleave into 4 x 2i16
  5681. {4, MVT::v4i16, 17}, // (load 16i16 and) deinterleave into 4 x 4i16
  5682. {4, MVT::v8i16, 33}, // (load 32i16 and) deinterleave into 4 x 8i16
  5683. {4, MVT::v16i16, 75}, // (load 64i16 and) deinterleave into 4 x 16i16
  5684. {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16
  5685. {4, MVT::v2i32, 4}, // (load 8i32 and) deinterleave into 4 x 2i32
  5686. {4, MVT::v4i32, 8}, // (load 16i32 and) deinterleave into 4 x 4i32
  5687. {4, MVT::v8i32, 16}, // (load 32i32 and) deinterleave into 4 x 8i32
  5688. {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
  5689. {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32
  5690. {4, MVT::v2i64, 6}, // (load 8i64 and) deinterleave into 4 x 2i64
  5691. {4, MVT::v4i64, 8}, // (load 16i64 and) deinterleave into 4 x 4i64
  5692. {4, MVT::v8i64, 20}, // (load 32i64 and) deinterleave into 4 x 8i64
  5693. {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64
  5694. {6, MVT::v2i8, 6}, // (load 12i8 and) deinterleave into 6 x 2i8
  5695. {6, MVT::v4i8, 14}, // (load 24i8 and) deinterleave into 6 x 4i8
  5696. {6, MVT::v8i8, 18}, // (load 48i8 and) deinterleave into 6 x 8i8
  5697. {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
  5698. {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8
  5699. {6, MVT::v2i16, 13}, // (load 12i16 and) deinterleave into 6 x 2i16
  5700. {6, MVT::v4i16, 9}, // (load 24i16 and) deinterleave into 6 x 4i16
  5701. {6, MVT::v8i16, 39}, // (load 48i16 and) deinterleave into 6 x 8i16
  5702. {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
  5703. {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16
  5704. {6, MVT::v2i32, 6}, // (load 12i32 and) deinterleave into 6 x 2i32
  5705. {6, MVT::v4i32, 15}, // (load 24i32 and) deinterleave into 6 x 4i32
  5706. {6, MVT::v8i32, 31}, // (load 48i32 and) deinterleave into 6 x 8i32
  5707. {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32
  5708. {6, MVT::v2i64, 6}, // (load 12i64 and) deinterleave into 6 x 2i64
  5709. {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
  5710. {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64
  5711. {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
  5712. };
  5713. static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
  5714. {2, MVT::v4i16, 2}, // (load 8i16 and) deinterleave into 2 x 4i16
  5715. };
  5716. static const CostTblEntry SSE2InterleavedLoadTbl[] = {
  5717. {2, MVT::v2i16, 2}, // (load 4i16 and) deinterleave into 2 x 2i16
  5718. {2, MVT::v4i16, 7}, // (load 8i16 and) deinterleave into 2 x 4i16
  5719. {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32
  5720. {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32
  5721. {2, MVT::v2i64, 2}, // (load 4i64 and) deinterleave into 2 x 2i64
  5722. };
  5723. static const CostTblEntry AVX2InterleavedStoreTbl[] = {
  5724. {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
  5725. {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)
  5726. {2, MVT::v8i16, 3}, // interleave 2 x 8i16 into 16i16 (and store)
  5727. {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
  5728. {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)
  5729. {2, MVT::v4i32, 2}, // interleave 2 x 4i32 into 8i32 (and store)
  5730. {2, MVT::v8i32, 4}, // interleave 2 x 8i32 into 16i32 (and store)
  5731. {2, MVT::v16i32, 8}, // interleave 2 x 16i32 into 32i32 (and store)
  5732. {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)
  5733. {2, MVT::v2i64, 2}, // interleave 2 x 2i64 into 4i64 (and store)
  5734. {2, MVT::v4i64, 4}, // interleave 2 x 4i64 into 8i64 (and store)
  5735. {2, MVT::v8i64, 8}, // interleave 2 x 8i64 into 16i64 (and store)
  5736. {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
  5737. {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)
  5738. {3, MVT::v2i8, 4}, // interleave 3 x 2i8 into 6i8 (and store)
  5739. {3, MVT::v4i8, 4}, // interleave 3 x 4i8 into 12i8 (and store)
  5740. {3, MVT::v8i8, 6}, // interleave 3 x 8i8 into 24i8 (and store)
  5741. {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
  5742. {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)
  5743. {3, MVT::v2i16, 4}, // interleave 3 x 2i16 into 6i16 (and store)
  5744. {3, MVT::v4i16, 6}, // interleave 3 x 4i16 into 12i16 (and store)
  5745. {3, MVT::v8i16, 12}, // interleave 3 x 8i16 into 24i16 (and store)
  5746. {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
  5747. {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)
  5748. {3, MVT::v2i32, 4}, // interleave 3 x 2i32 into 6i32 (and store)
  5749. {3, MVT::v4i32, 5}, // interleave 3 x 4i32 into 12i32 (and store)
  5750. {3, MVT::v8i32, 11}, // interleave 3 x 8i32 into 24i32 (and store)
  5751. {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
  5752. {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)
  5753. {3, MVT::v2i64, 4}, // interleave 3 x 2i64 into 6i64 (and store)
  5754. {3, MVT::v4i64, 6}, // interleave 3 x 4i64 into 12i64 (and store)
  5755. {3, MVT::v8i64, 12}, // interleave 3 x 8i64 into 24i64 (and store)
  5756. {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)
  5757. {4, MVT::v2i8, 4}, // interleave 4 x 2i8 into 8i8 (and store)
  5758. {4, MVT::v4i8, 4}, // interleave 4 x 4i8 into 16i8 (and store)
  5759. {4, MVT::v8i8, 4}, // interleave 4 x 8i8 into 32i8 (and store)
  5760. {4, MVT::v16i8, 8}, // interleave 4 x 16i8 into 64i8 (and store)
  5761. {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)
  5762. {4, MVT::v2i16, 2}, // interleave 4 x 2i16 into 8i16 (and store)
  5763. {4, MVT::v4i16, 6}, // interleave 4 x 4i16 into 16i16 (and store)
  5764. {4, MVT::v8i16, 10}, // interleave 4 x 8i16 into 32i16 (and store)
  5765. {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
  5766. {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)
  5767. {4, MVT::v2i32, 5}, // interleave 4 x 2i32 into 8i32 (and store)
  5768. {4, MVT::v4i32, 6}, // interleave 4 x 4i32 into 16i32 (and store)
  5769. {4, MVT::v8i32, 16}, // interleave 4 x 8i32 into 32i32 (and store)
  5770. {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
  5771. {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)
  5772. {4, MVT::v2i64, 6}, // interleave 4 x 2i64 into 8i64 (and store)
  5773. {4, MVT::v4i64, 8}, // interleave 4 x 4i64 into 16i64 (and store)
  5774. {4, MVT::v8i64, 20}, // interleave 4 x 8i64 into 32i64 (and store)
  5775. {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)
  5776. {6, MVT::v2i8, 7}, // interleave 6 x 2i8 into 12i8 (and store)
  5777. {6, MVT::v4i8, 9}, // interleave 6 x 4i8 into 24i8 (and store)
  5778. {6, MVT::v8i8, 16}, // interleave 6 x 8i8 into 48i8 (and store)
  5779. {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
  5780. {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)
  5781. {6, MVT::v2i16, 10}, // interleave 6 x 2i16 into 12i16 (and store)
  5782. {6, MVT::v4i16, 15}, // interleave 6 x 4i16 into 24i16 (and store)
  5783. {6, MVT::v8i16, 21}, // interleave 6 x 8i16 into 48i16 (and store)
  5784. {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
  5785. {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)
  5786. {6, MVT::v2i32, 9}, // interleave 6 x 2i32 into 12i32 (and store)
  5787. {6, MVT::v4i32, 12}, // interleave 6 x 4i32 into 24i32 (and store)
  5788. {6, MVT::v8i32, 33}, // interleave 6 x 8i32 into 48i32 (and store)
  5789. {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)
  5790. {6, MVT::v2i64, 8}, // interleave 6 x 2i64 into 12i64 (and store)
  5791. {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
  5792. {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
  5793. };
  5794. static const CostTblEntry SSE2InterleavedStoreTbl[] = {
  5795. {2, MVT::v2i8, 1}, // interleave 2 x 2i8 into 4i8 (and store)
  5796. {2, MVT::v4i8, 1}, // interleave 2 x 4i8 into 8i8 (and store)
  5797. {2, MVT::v8i8, 1}, // interleave 2 x 8i8 into 16i8 (and store)
  5798. {2, MVT::v2i16, 1}, // interleave 2 x 2i16 into 4i16 (and store)
  5799. {2, MVT::v4i16, 1}, // interleave 2 x 4i16 into 8i16 (and store)
  5800. {2, MVT::v2i32, 1}, // interleave 2 x 2i32 into 4i32 (and store)
  5801. };
  5802. if (Opcode == Instruction::Load) {
  5803. auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
  5804. MemOpCosts](const CostTblEntry *Entry) {
  5805. // NOTE: this is just an approximation!
5806. // It can over- or under-estimate the cost!
  5807. return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
  5808. };
  5809. if (ST->hasAVX2())
  5810. if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
  5811. ETy.getSimpleVT()))
  5812. return GetDiscountedCost(Entry);
  5813. if (ST->hasSSSE3())
  5814. if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
  5815. ETy.getSimpleVT()))
  5816. return GetDiscountedCost(Entry);
  5817. if (ST->hasSSE2())
  5818. if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
  5819. ETy.getSimpleVT()))
  5820. return GetDiscountedCost(Entry);
  5821. } else {
  5822. assert(Opcode == Instruction::Store &&
  5823. "Expected Store Instruction at this point");
  5824. assert((!Indices.size() || Indices.size() == Factor) &&
  5825. "Interleaved store only supports fully-interleaved groups.");
  5826. if (ST->hasAVX2())
  5827. if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
  5828. ETy.getSimpleVT()))
  5829. return MemOpCosts + Entry->Cost;
  5830. if (ST->hasSSE2())
  5831. if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
  5832. ETy.getSimpleVT()))
  5833. return MemOpCosts + Entry->Cost;
  5834. }
  5835. return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
  5836. Alignment, AddressSpace, CostKind,
  5837. UseMaskForCond, UseMaskForGaps);
  5838. }
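// Illustrative example: a stride-2 interleaved load of <16 x i32> on AVX2
// (VF = 8) matches the {2, MVT::v8i32, 4} entry; when both members of the
// group are used, GetDiscountedCost returns MemOpCosts + ceil(2 * 4 / 2) =
// MemOpCosts + 4.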
  5839. InstructionCost X86TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
  5840. int64_t BaseOffset,
  5841. bool HasBaseReg, int64_t Scale,
  5842. unsigned AddrSpace) const {
  5843. // Scaling factors are not free at all.
  5844. // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  5845. // will take 2 allocations in the out of order engine instead of 1
  5846. // for plain addressing mode, i.e. inst (reg1).
  5847. // E.g.,
  5848. // vaddps (%rsi,%rdx), %ymm0, %ymm1
  5849. // Requires two allocations (one for the load, one for the computation)
  5850. // whereas:
  5851. // vaddps (%rsi), %ymm0, %ymm1
  5852. // Requires just 1 allocation, i.e., freeing allocations for other operations
5853. // and having fewer micro-operations to execute.
  5854. //
  5855. // For some X86 architectures, this is even worse because for instance for
  5856. // stores, the complex addressing mode forces the instruction to use the
  5857. // "load" ports instead of the dedicated "store" port.
  5858. // E.g., on Haswell:
  5859. // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  5860. // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  5861. TargetLoweringBase::AddrMode AM;
  5862. AM.BaseGV = BaseGV;
  5863. AM.BaseOffs = BaseOffset;
  5864. AM.HasBaseReg = HasBaseReg;
  5865. AM.Scale = Scale;
  5866. if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
  5867. // Scale represents reg2 * scale, thus account for 1
  5868. // as soon as we use a second register.
  5869. return AM.Scale != 0;
  5870. return -1;
  5871. }
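// For example (illustrative): a legal mode such as base + index * 2 returns 1
// (the scaled index occupies a second register), a plain base-register mode
// returns 0, and an addressing mode that is not legal for the type returns -1
// to signal that the mode is not supported.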