//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
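// As an illustrative sketch (not code from this pass), a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten, for a vectorization factor of 4, into one 'wide'
// iteration per four scalar iterations:
//
//   for (int i = 0; i + 3 < n; i += 4)
//     // a[i..i+3] = b[i..i+3] + c[i..i+3] using <4 x i32> operations
//
// with any remaining iterations handled by a scalar epilogue loop or by
// predication (tail folding).
//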
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
  55. #include "llvm/Transforms/Vectorize/LoopVectorize.h"
  56. #include "LoopVectorizationPlanner.h"
  57. #include "VPRecipeBuilder.h"
  58. #include "VPlan.h"
  59. #include "VPlanHCFGBuilder.h"
  60. #include "VPlanTransforms.h"
  61. #include "llvm/ADT/APInt.h"
  62. #include "llvm/ADT/ArrayRef.h"
  63. #include "llvm/ADT/DenseMap.h"
  64. #include "llvm/ADT/DenseMapInfo.h"
  65. #include "llvm/ADT/Hashing.h"
  66. #include "llvm/ADT/MapVector.h"
  67. #include "llvm/ADT/STLExtras.h"
  68. #include "llvm/ADT/SmallPtrSet.h"
  69. #include "llvm/ADT/SmallSet.h"
  70. #include "llvm/ADT/SmallVector.h"
  71. #include "llvm/ADT/Statistic.h"
  72. #include "llvm/ADT/StringRef.h"
  73. #include "llvm/ADT/Twine.h"
  74. #include "llvm/ADT/iterator_range.h"
  75. #include "llvm/Analysis/AssumptionCache.h"
  76. #include "llvm/Analysis/BasicAliasAnalysis.h"
  77. #include "llvm/Analysis/BlockFrequencyInfo.h"
  78. #include "llvm/Analysis/CFG.h"
  79. #include "llvm/Analysis/CodeMetrics.h"
  80. #include "llvm/Analysis/DemandedBits.h"
  81. #include "llvm/Analysis/GlobalsModRef.h"
  82. #include "llvm/Analysis/LoopAccessAnalysis.h"
  83. #include "llvm/Analysis/LoopAnalysisManager.h"
  84. #include "llvm/Analysis/LoopInfo.h"
  85. #include "llvm/Analysis/LoopIterator.h"
  86. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  87. #include "llvm/Analysis/ProfileSummaryInfo.h"
  88. #include "llvm/Analysis/ScalarEvolution.h"
  89. #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  90. #include "llvm/Analysis/TargetLibraryInfo.h"
  91. #include "llvm/Analysis/TargetTransformInfo.h"
  92. #include "llvm/Analysis/ValueTracking.h"
  93. #include "llvm/Analysis/VectorUtils.h"
  94. #include "llvm/IR/Attributes.h"
  95. #include "llvm/IR/BasicBlock.h"
  96. #include "llvm/IR/CFG.h"
  97. #include "llvm/IR/Constant.h"
  98. #include "llvm/IR/Constants.h"
  99. #include "llvm/IR/DataLayout.h"
  100. #include "llvm/IR/DebugInfoMetadata.h"
  101. #include "llvm/IR/DebugLoc.h"
  102. #include "llvm/IR/DerivedTypes.h"
  103. #include "llvm/IR/DiagnosticInfo.h"
  104. #include "llvm/IR/Dominators.h"
  105. #include "llvm/IR/Function.h"
  106. #include "llvm/IR/IRBuilder.h"
  107. #include "llvm/IR/InstrTypes.h"
  108. #include "llvm/IR/Instruction.h"
  109. #include "llvm/IR/Instructions.h"
  110. #include "llvm/IR/IntrinsicInst.h"
  111. #include "llvm/IR/Intrinsics.h"
  112. #include "llvm/IR/Metadata.h"
  113. #include "llvm/IR/Module.h"
  114. #include "llvm/IR/Operator.h"
  115. #include "llvm/IR/PatternMatch.h"
  116. #include "llvm/IR/Type.h"
  117. #include "llvm/IR/Use.h"
  118. #include "llvm/IR/User.h"
  119. #include "llvm/IR/Value.h"
  120. #include "llvm/IR/ValueHandle.h"
  121. #include "llvm/IR/Verifier.h"
  122. #include "llvm/InitializePasses.h"
  123. #include "llvm/Pass.h"
  124. #include "llvm/Support/Casting.h"
  125. #include "llvm/Support/CommandLine.h"
  126. #include "llvm/Support/Compiler.h"
  127. #include "llvm/Support/Debug.h"
  128. #include "llvm/Support/ErrorHandling.h"
  129. #include "llvm/Support/InstructionCost.h"
  130. #include "llvm/Support/MathExtras.h"
  131. #include "llvm/Support/raw_ostream.h"
  132. #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  133. #include "llvm/Transforms/Utils/InjectTLIMappings.h"
  134. #include "llvm/Transforms/Utils/LoopSimplify.h"
  135. #include "llvm/Transforms/Utils/LoopUtils.h"
  136. #include "llvm/Transforms/Utils/LoopVersioning.h"
  137. #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
  138. #include "llvm/Transforms/Utils/SizeOpts.h"
  139. #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
  140. #include <algorithm>
  141. #include <cassert>
  142. #include <cmath>
  143. #include <cstdint>
  144. #include <functional>
  145. #include <iterator>
  146. #include <limits>
  147. #include <map>
  148. #include <memory>
  149. #include <string>
  150. #include <tuple>
  151. #include <utility>
  152. using namespace llvm;
#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}
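
// For example (an illustrative sketch, not generated by this file), a loop's
// !llvm.loop metadata could use the followup attributes above to request that
// the vectorized loop produced by this pass not be unrolled afterwards; see
// llvm/docs/TransformMetadata.rst for the authoritative format:
//
//   br i1 %exitcond, label %exit, label %header, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.vectorize.enable", i1 true}
//   !2 = !{!"llvm.loop.vectorize.followup_vectorized", !3}
//   !3 = !{!"llvm.loop.unroll.disable"}
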
STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
    "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks"));
// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired
// and that predication is preferred; the enum below lists the available
// strategies. I.e., the vectorizer will try to fold the tail loop (epilogue)
// into the vector body and predicate the instructions accordingly. If
// tail-folding fails, the fallback behaviour depends on these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));
static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));
  286. static cl::opt<bool> EnableCondStoresVectorization(
  287. "enable-cond-stores-vec", cl::init(true), cl::Hidden,
  288. cl::desc("Enable if predication of stores during vectorization."));
  289. static cl::opt<unsigned> MaxNestedScalarReductionIC(
  290. "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
  291. cl::desc("The maximum interleave count to use when interleaving a scalar "
  292. "reduction in a nested loop."));
  293. static cl::opt<bool>
  294. PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
  295. cl::Hidden,
  296. cl::desc("Prefer in-loop vector reductions, "
  297. "overriding the targets preference."));
  298. static cl::opt<bool> ForceOrderedReductions(
  299. "force-ordered-reductions", cl::init(false), cl::Hidden,
  300. cl::desc("Enable the vectorisation of loops with in-order (strict) "
  301. "FP reductions"));
  302. static cl::opt<bool> PreferPredicatedReductionSelect(
  303. "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
  304. cl::desc(
  305. "Prefer predicating a reduction operation over an after loop select."));
  306. cl::opt<bool> EnableVPlanNativePath(
  307. "enable-vplan-native-path", cl::init(false), cl::Hidden,
  308. cl::desc("Enable VPlan-native vectorization path with "
  309. "support for outer loop vectorization."));
  310. // This flag enables the stress testing of the VPlan H-CFG construction in the
311. // VPlan-native vectorization path. It must be used in conjunction with
  312. // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
  313. // verification of the H-CFGs built.
  314. static cl::opt<bool> VPlanBuildStressTest(
  315. "vplan-build-stress-test", cl::init(false), cl::Hidden,
  316. cl::desc(
  317. "Build VPlan for every supported loop nest in the function and bail "
  318. "out right after the build (stress test the VPlan H-CFG construction "
  319. "in the VPlan-native vectorization path)."));
  320. cl::opt<bool> llvm::EnableLoopInterleaving(
  321. "interleave-loops", cl::init(true), cl::Hidden,
  322. cl::desc("Enable loop interleaving in Loop vectorization passes"));
  323. cl::opt<bool> llvm::EnableLoopVectorization(
  324. "vectorize-loops", cl::init(true), cl::Hidden,
  325. cl::desc("Run the Loop vectorization passes"));
  326. static cl::opt<bool> PrintVPlansInDotFormat(
  327. "vplan-print-in-dot-format", cl::Hidden,
  328. cl::desc("Use dot format instead of plain text when dumping VPlans"));
  329. static cl::opt<cl::boolOrDefault> ForceSafeDivisor(
  330. "force-widen-divrem-via-safe-divisor", cl::Hidden,
  331. cl::desc(
  332. "Override cost based safe divisor widening for div/rem instructions"));
  333. /// A helper function that returns true if the given type is irregular. The
  334. /// type is irregular if its allocated size doesn't equal the store size of an
  335. /// element of the corresponding vector type.
  336. static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  337. // Determine if an array of N elements of type Ty is "bitcast compatible"
  338. // with a <N x Ty> vector.
  339. // This is only true if there is no padding between the array elements.
  340. return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
  341. }
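// For example (illustrative, assuming a typical x86-64 data layout): x86_fp80
// has a type size of 80 bits but an alloc size of 128 bits, so it is
// irregular; i32, with 32 bits for both, is not.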
  342. /// A helper function that returns the reciprocal of the block probability of
  343. /// predicated blocks. If we return X, we are assuming the predicated block
  344. /// will execute once for every X iterations of the loop header.
  345. ///
  346. /// TODO: We should use actual block probability here, if available. Currently,
  347. /// we always assume predicated blocks have a 50% chance of executing.
  348. static unsigned getReciprocalPredBlockProb() { return 2; }
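// Roughly speaking (illustrative consequence of the 50% assumption): when the
// cost model discounts the cost of a predicated block or instruction, it
// divides that cost by the value returned here, currently 2.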
  349. /// A helper function that returns an integer or floating-point constant with
  350. /// value C.
  351. static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  352. return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
  353. : ConstantFP::get(Ty, C);
  354. }
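// For example (illustrative): with an i32 type and C = -1 this returns the
// ConstantInt i32 -1; with a float type it returns the ConstantFP -1.0.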
  355. /// Returns "best known" trip count for the specified loop \p L as defined by
  356. /// the following procedure:
  357. /// 1) Returns exact trip count if it is known.
  358. /// 2) Returns expected trip count according to profile data if any.
  359. /// 3) Returns upper bound estimate if it is known.
  360. /// 4) Returns std::nullopt if all of the above failed.
  361. static std::optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE,
  362. Loop *L) {
  363. // Check if exact trip count is known.
  364. if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
  365. return ExpectedTC;
  366. // Check if there is an expected trip count available from profile data.
  367. if (LoopVectorizeWithBlockFrequency)
  368. if (auto EstimatedTC = getLoopEstimatedTripCount(L))
  369. return *EstimatedTC;
  370. // Check if upper bound estimate is known.
  371. if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
  372. return ExpectedTC;
  373. return std::nullopt;
  374. }
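// A minimal usage sketch (hypothetical caller; PSE and L named as elsewhere in
// this file):
//   if (std::optional<unsigned> TC = getSmallBestKnownTC(*PSE.getSE(), L))
//     LLVM_DEBUG(dbgs() << "LV: best known trip count is " << *TC << ".\n");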
  375. namespace {
  376. // Forward declare GeneratedRTChecks.
  377. class GeneratedRTChecks;
  378. } // namespace
  379. namespace llvm {
  380. AnalysisKey ShouldRunExtraVectorPasses::Key;
  381. /// InnerLoopVectorizer vectorizes loops which contain only one basic
  382. /// block to a specified vectorization factor (VF).
  383. /// This class performs the widening of scalars into vectors, or multiple
  384. /// scalars. This class also implements the following features:
  385. /// * It inserts an epilogue loop for handling loops that don't have iteration
  386. /// counts that are known to be a multiple of the vectorization factor.
  387. /// * It handles the code generation for reduction variables.
  388. /// * Scalarization (implementation using scalars) of un-vectorizable
  389. /// instructions.
  390. /// InnerLoopVectorizer does not perform any vectorization-legality
  391. /// checks, and relies on the caller to check for the different legality
  392. /// aspects. The InnerLoopVectorizer relies on the
  393. /// LoopVectorizationLegality class to provide information about the induction
394. /// and reduction variables that were found for a given vectorization factor.
  395. class InnerLoopVectorizer {
  396. public:
  397. InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
  398. LoopInfo *LI, DominatorTree *DT,
  399. const TargetLibraryInfo *TLI,
  400. const TargetTransformInfo *TTI, AssumptionCache *AC,
  401. OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
  402. ElementCount MinProfitableTripCount,
  403. unsigned UnrollFactor, LoopVectorizationLegality *LVL,
  404. LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
  405. ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
  406. : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
  407. AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
  408. Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
  409. PSI(PSI), RTChecks(RTChecks) {
  410. // Query this against the original loop and save it here because the profile
  411. // of the original loop header may change as the transformation happens.
  412. OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
  413. OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  414. if (MinProfitableTripCount.isZero())
  415. this->MinProfitableTripCount = VecWidth;
  416. else
  417. this->MinProfitableTripCount = MinProfitableTripCount;
  418. }
  419. virtual ~InnerLoopVectorizer() = default;
  420. /// Create a new empty loop that will contain vectorized instructions later
  421. /// on, while the old loop will be used as the scalar remainder. Control flow
  422. /// is generated around the vectorized (and scalar epilogue) loops consisting
  423. /// of various checks and bypasses. Return the pre-header block of the new
  424. /// loop and the start value for the canonical induction, if it is != 0. The
  425. /// latter is the case when vectorizing the epilogue loop. In the case of
426. /// epilogue vectorization, this function is overridden to handle the more
  427. /// complex control flow around the loops.
  428. virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
429. /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  430. void fixVectorizedLoop(VPTransformState &State, VPlan &Plan);
  431. // Return true if any runtime check is added.
  432. bool areSafetyChecksAdded() { return AddedSafetyChecks; }
  433. /// A type for vectorized values in the new loop. Each value from the
  434. /// original loop, when vectorized, is represented by UF vector values in the
  435. /// new unrolled loop, where UF is the unroll factor.
  436. using VectorParts = SmallVector<Value *, 2>;
  437. /// A helper function to scalarize a single Instruction in the innermost loop.
  438. /// Generates a sequence of scalar instances for each lane between \p MinLane
  439. /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  440. /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  441. /// Instr's operands.
  442. void scalarizeInstruction(const Instruction *Instr,
  443. VPReplicateRecipe *RepRecipe,
  444. const VPIteration &Instance, bool IfPredicateInstr,
  445. VPTransformState &State);
  446. /// Construct the vector value of a scalarized value \p V one lane at a time.
  447. void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
  448. VPTransformState &State);
  449. /// Try to vectorize interleaved access group \p Group with the base address
  450. /// given in \p Addr, optionally masking the vector operations if \p
  451. /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  452. /// values in the vectorized loop.
  453. void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
  454. ArrayRef<VPValue *> VPDefs,
  455. VPTransformState &State, VPValue *Addr,
  456. ArrayRef<VPValue *> StoredValues,
  457. VPValue *BlockInMask = nullptr);
  458. /// Fix the non-induction PHIs in \p Plan.
  459. void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State);
  460. /// Returns true if the reordering of FP operations is not allowed, but we are
  461. /// able to vectorize with strict in-order reductions for the given RdxDesc.
  462. bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
  463. /// Create a broadcast instruction. This method generates a broadcast
  464. /// instruction (shuffle) for loop invariant values and for the induction
  465. /// value. If this is the induction variable then we extend it to N, N+1, ...
466. /// This is needed because each iteration in the loop corresponds to a SIMD
  467. /// element.
  468. virtual Value *getBroadcastInstrs(Value *V);
  469. // Returns the resume value (bc.merge.rdx) for a reduction as
  470. // generated by fixReduction.
  471. PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);
  472. /// Create a new phi node for the induction variable \p OrigPhi to resume
  473. /// iteration count in the scalar epilogue, from where the vectorized loop
474. /// left off. In cases where the loop skeleton is more complicated (e.g.
  475. /// epilogue vectorization) and the resume values can come from an additional
  476. /// bypass block, the \p AdditionalBypass pair provides information about the
  477. /// bypass block and the end value on the edge from bypass to this loop.
  478. PHINode *createInductionResumeValue(
  479. PHINode *OrigPhi, const InductionDescriptor &ID,
  480. ArrayRef<BasicBlock *> BypassBlocks,
  481. std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
  482. protected:
  483. friend class LoopVectorizationPlanner;
  484. /// A small list of PHINodes.
  485. using PhiVector = SmallVector<PHINode *, 4>;
  486. /// A type for scalarized values in the new loop. Each value from the
  487. /// original loop, when scalarized, is represented by UF x VF scalar values
  488. /// in the new unrolled loop, where UF is the unroll factor and VF is the
  489. /// vectorization factor.
  490. using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
  491. /// Set up the values of the IVs correctly when exiting the vector loop.
  492. void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
  493. Value *VectorTripCount, Value *EndValue,
  494. BasicBlock *MiddleBlock, BasicBlock *VectorHeader,
  495. VPlan &Plan);
  496. /// Handle all cross-iteration phis in the header.
  497. void fixCrossIterationPHIs(VPTransformState &State);
  498. /// Create the exit value of first order recurrences in the middle block and
  499. /// update their users.
  500. void fixFixedOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
  501. VPTransformState &State);
  502. /// Create code for the loop exit value of the reduction.
  503. void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
  504. /// Clear NSW/NUW flags from reduction instructions if necessary.
  505. void clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
  506. VPTransformState &State);
  507. /// Iteratively sink the scalarized operands of a predicated instruction into
  508. /// the block that was created for it.
  509. void sinkScalarOperands(Instruction *PredInst);
  510. /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  511. /// represented as.
  512. void truncateToMinimalBitwidths(VPTransformState &State);
  513. /// Returns (and creates if needed) the original loop trip count.
  514. Value *getOrCreateTripCount(BasicBlock *InsertBlock);
  515. /// Returns (and creates if needed) the trip count of the widened loop.
  516. Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);
  517. /// Returns a bitcasted value to the requested vector type.
  518. /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  519. Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
  520. const DataLayout &DL);
  521. /// Emit a bypass check to see if the vector trip count is zero, including if
  522. /// it overflows.
  523. void emitIterationCountCheck(BasicBlock *Bypass);
  524. /// Emit a bypass check to see if all of the SCEV assumptions we've
  525. /// had to make are correct. Returns the block containing the checks or
  526. /// nullptr if no checks have been added.
  527. BasicBlock *emitSCEVChecks(BasicBlock *Bypass);
  528. /// Emit bypass checks to check any memory assumptions we may have made.
  529. /// Returns the block containing the checks or nullptr if no checks have been
  530. /// added.
  531. BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);
  532. /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  533. /// vector loop preheader, middle block and scalar preheader.
  534. void createVectorLoopSkeleton(StringRef Prefix);
  535. /// Create new phi nodes for the induction variables to resume iteration count
  536. /// in the scalar epilogue, from where the vectorized loop left off.
537. /// In cases where the loop skeleton is more complicated (e.g. epilogue
  538. /// vectorization) and the resume values can come from an additional bypass
  539. /// block, the \p AdditionalBypass pair provides information about the bypass
  540. /// block and the end value on the edge from bypass to this loop.
  541. void createInductionResumeValues(
  542. std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
  543. /// Complete the loop skeleton by adding debug MDs, creating appropriate
  544. /// conditional branches in the middle block, preparing the builder and
  545. /// running the verifier. Return the preheader of the completed vector loop.
  546. BasicBlock *completeLoopSkeleton();
  547. /// Collect poison-generating recipes that may generate a poison value that is
  548. /// used after vectorization, even when their operands are not poison. Those
  549. /// recipes meet the following conditions:
  550. /// * Contribute to the address computation of a recipe generating a widen
  551. /// memory load/store (VPWidenMemoryInstructionRecipe or
  552. /// VPInterleaveRecipe).
  553. /// * Such a widen memory load/store has at least one underlying Instruction
  554. /// that is in a basic block that needs predication and after vectorization
  555. /// the generated instruction won't be predicated.
  556. void collectPoisonGeneratingRecipes(VPTransformState &State);
  557. /// Allow subclasses to override and print debug traces before/after vplan
  558. /// execution, when trace information is requested.
559. virtual void printDebugTracesAtStart() {}
560. virtual void printDebugTracesAtEnd() {}
  561. /// The original loop.
  562. Loop *OrigLoop;
  563. /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  564. /// dynamic knowledge to simplify SCEV expressions and converts them to a
  565. /// more usable form.
  566. PredicatedScalarEvolution &PSE;
  567. /// Loop Info.
  568. LoopInfo *LI;
  569. /// Dominator Tree.
  570. DominatorTree *DT;
  571. /// Target Library Info.
  572. const TargetLibraryInfo *TLI;
  573. /// Target Transform Info.
  574. const TargetTransformInfo *TTI;
  575. /// Assumption Cache.
  576. AssumptionCache *AC;
  577. /// Interface to emit optimization remarks.
  578. OptimizationRemarkEmitter *ORE;
  579. /// The vectorization SIMD factor to use. Each vector will have this many
  580. /// vector elements.
  581. ElementCount VF;
  582. ElementCount MinProfitableTripCount;
  583. /// The vectorization unroll factor to use. Each scalar is vectorized to this
  584. /// many different vector instructions.
  585. unsigned UF;
586. /// The builder that we use.
  587. IRBuilder<> Builder;
  588. // --- Vectorization state ---
  589. /// The vector-loop preheader.
  590. BasicBlock *LoopVectorPreHeader;
  591. /// The scalar-loop preheader.
  592. BasicBlock *LoopScalarPreHeader;
  593. /// Middle Block between the vector and the scalar.
  594. BasicBlock *LoopMiddleBlock;
  595. /// The unique ExitBlock of the scalar loop if one exists. Note that
  596. /// there can be multiple exiting edges reaching this block.
  597. BasicBlock *LoopExitBlock;
  598. /// The scalar loop body.
  599. BasicBlock *LoopScalarBody;
  600. /// A list of all bypass blocks. The first block is the entry of the loop.
  601. SmallVector<BasicBlock *, 4> LoopBypassBlocks;
  602. /// Store instructions that were predicated.
  603. SmallVector<Instruction *, 4> PredicatedInstructions;
  604. /// Trip count of the original loop.
  605. Value *TripCount = nullptr;
  606. /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  607. Value *VectorTripCount = nullptr;
  608. /// The legality analysis.
  609. LoopVectorizationLegality *Legal;
610. /// The profitability analysis.
  611. LoopVectorizationCostModel *Cost;
  612. // Record whether runtime checks are added.
  613. bool AddedSafetyChecks = false;
  614. // Holds the end values for each induction variable. We save the end values
  615. // so we can later fix-up the external users of the induction variables.
  616. DenseMap<PHINode *, Value *> IVEndValues;
  617. /// BFI and PSI are used to check for profile guided size optimizations.
  618. BlockFrequencyInfo *BFI;
  619. ProfileSummaryInfo *PSI;
  620. // Whether this loop should be optimized for size based on profile guided size
621. // optimizations.
  622. bool OptForSizeBasedOnProfile;
  623. /// Structure to hold information about generated runtime checks, responsible
624. /// for cleaning up the checks if vectorization turns out to be unprofitable.
  625. GeneratedRTChecks &RTChecks;
  626. // Holds the resume values for reductions in the loops, used to set the
  627. // correct start value of reduction PHIs when vectorizing the epilogue.
  628. SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
  629. ReductionResumeValues;
  630. };
  631. class InnerLoopUnroller : public InnerLoopVectorizer {
  632. public:
  633. InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
  634. LoopInfo *LI, DominatorTree *DT,
  635. const TargetLibraryInfo *TLI,
  636. const TargetTransformInfo *TTI, AssumptionCache *AC,
  637. OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
  638. LoopVectorizationLegality *LVL,
  639. LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
  640. ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
  641. : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
  642. ElementCount::getFixed(1),
  643. ElementCount::getFixed(1), UnrollFactor, LVL, CM,
  644. BFI, PSI, Check) {}
  645. private:
  646. Value *getBroadcastInstrs(Value *V) override;
  647. };
  648. /// Encapsulate information regarding vectorization of a loop and its epilogue.
  649. /// This information is meant to be updated and used across two stages of
  650. /// epilogue vectorization.
  651. struct EpilogueLoopVectorizationInfo {
  652. ElementCount MainLoopVF = ElementCount::getFixed(0);
  653. unsigned MainLoopUF = 0;
  654. ElementCount EpilogueVF = ElementCount::getFixed(0);
  655. unsigned EpilogueUF = 0;
  656. BasicBlock *MainLoopIterationCountCheck = nullptr;
  657. BasicBlock *EpilogueIterationCountCheck = nullptr;
  658. BasicBlock *SCEVSafetyCheck = nullptr;
  659. BasicBlock *MemSafetyCheck = nullptr;
  660. Value *TripCount = nullptr;
  661. Value *VectorTripCount = nullptr;
  662. EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
  663. ElementCount EVF, unsigned EUF)
  664. : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
  665. assert(EUF == 1 &&
  666. "A high UF for the epilogue loop is likely not beneficial.");
  667. }
  668. };
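// A construction sketch with hypothetical factors: vectorize the main loop at
// VF 8 with UF 2 and the epilogue at VF 4 with the required UF of 1:
//   EpilogueLoopVectorizationInfo EPI(ElementCount::getFixed(8), 2,
//                                     ElementCount::getFixed(4), 1);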
  669. /// An extension of the inner loop vectorizer that creates a skeleton for a
  670. /// vectorized loop that has its epilogue (residual) also vectorized.
671. /// The idea is to run the vplan on a given loop twice, first to set up the
672. /// skeleton and vectorize the main loop, and second to complete the skeleton
  673. /// from the first step and vectorize the epilogue. This is achieved by
  674. /// deriving two concrete strategy classes from this base class and invoking
  675. /// them in succession from the loop vectorizer planner.
  676. class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
  677. public:
  678. InnerLoopAndEpilogueVectorizer(
  679. Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
  680. DominatorTree *DT, const TargetLibraryInfo *TLI,
  681. const TargetTransformInfo *TTI, AssumptionCache *AC,
  682. OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
  683. LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
  684. BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
  685. GeneratedRTChecks &Checks)
  686. : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
  687. EPI.MainLoopVF, EPI.MainLoopVF, EPI.MainLoopUF, LVL,
  688. CM, BFI, PSI, Checks),
  689. EPI(EPI) {}
  690. // Override this function to handle the more complex control flow around the
  691. // three loops.
  692. std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton() final {
  693. return createEpilogueVectorizedLoopSkeleton();
  694. }
  695. /// The interface for creating a vectorized skeleton using one of two
  696. /// different strategies, each corresponding to one execution of the vplan
  697. /// as described above.
  698. virtual std::pair<BasicBlock *, Value *>
  699. createEpilogueVectorizedLoopSkeleton() = 0;
  700. /// Holds and updates state information required to vectorize the main loop
  701. /// and its epilogue in two separate passes. This setup helps us avoid
  702. /// regenerating and recomputing runtime safety checks. It also helps us to
  703. /// shorten the iteration-count-check path length for the cases where the
  704. /// iteration count of the loop is so small that the main vector loop is
  705. /// completely skipped.
  706. EpilogueLoopVectorizationInfo &EPI;
  707. };
  708. /// A specialized derived class of inner loop vectorizer that performs
  709. /// vectorization of *main* loops in the process of vectorizing loops and their
  710. /// epilogues.
  711. class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
  712. public:
  713. EpilogueVectorizerMainLoop(
  714. Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
  715. DominatorTree *DT, const TargetLibraryInfo *TLI,
  716. const TargetTransformInfo *TTI, AssumptionCache *AC,
  717. OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
  718. LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
  719. BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
  720. GeneratedRTChecks &Check)
  721. : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
  722. EPI, LVL, CM, BFI, PSI, Check) {}
  723. /// Implements the interface for creating a vectorized skeleton using the
724. /// *main loop* strategy (i.e. the first pass of vplan execution).
  725. std::pair<BasicBlock *, Value *> createEpilogueVectorizedLoopSkeleton() final;
  726. protected:
  727. /// Emits an iteration count bypass check once for the main loop (when \p
  728. /// ForEpilogue is false) and once for the epilogue loop (when \p
  729. /// ForEpilogue is true).
  730. BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue);
  731. void printDebugTracesAtStart() override;
  732. void printDebugTracesAtEnd() override;
  733. };
  734. // A specialized derived class of inner loop vectorizer that performs
  735. // vectorization of *epilogue* loops in the process of vectorizing loops and
  736. // their epilogues.
  737. class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
  738. public:
  739. EpilogueVectorizerEpilogueLoop(
  740. Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
  741. DominatorTree *DT, const TargetLibraryInfo *TLI,
  742. const TargetTransformInfo *TTI, AssumptionCache *AC,
  743. OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
  744. LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
  745. BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
  746. GeneratedRTChecks &Checks)
  747. : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
  748. EPI, LVL, CM, BFI, PSI, Checks) {
  749. TripCount = EPI.TripCount;
  750. }
  751. /// Implements the interface for creating a vectorized skeleton using the
752. /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  753. std::pair<BasicBlock *, Value *> createEpilogueVectorizedLoopSkeleton() final;
  754. protected:
  755. /// Emits an iteration count bypass check after the main vector loop has
  756. /// finished to see if there are any iterations left to execute by either
  757. /// the vector epilogue or the scalar epilogue.
  758. BasicBlock *emitMinimumVectorEpilogueIterCountCheck(
  759. BasicBlock *Bypass,
  760. BasicBlock *Insert);
  761. void printDebugTracesAtStart() override;
  762. void printDebugTracesAtEnd() override;
  763. };
  764. } // end namespace llvm
765. /// Look for a meaningful debug location on the instruction or its
  766. /// operands.
  767. static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  768. if (!I)
  769. return I;
  770. DebugLoc Empty;
  771. if (I->getDebugLoc() != Empty)
  772. return I;
  773. for (Use &Op : I->operands()) {
  774. if (Instruction *OpInst = dyn_cast<Instruction>(Op))
  775. if (OpInst->getDebugLoc() != Empty)
  776. return OpInst;
  777. }
  778. return I;
  779. }
  780. /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
  781. /// is passed, the message relates to that particular instruction.
  782. #ifndef NDEBUG
  783. static void debugVectorizationMessage(const StringRef Prefix,
  784. const StringRef DebugMsg,
  785. Instruction *I) {
  786. dbgs() << "LV: " << Prefix << DebugMsg;
  787. if (I != nullptr)
  788. dbgs() << " " << *I;
  789. else
  790. dbgs() << '.';
  791. dbgs() << '\n';
  792. }
  793. #endif
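// For example, a call such as (hypothetical arguments):
//   debugVectorizationMessage("Not vectorizing: ", "value is not a reduction", I);
// prints "LV: Not vectorizing: value is not a reduction" followed by the
// instruction when I is non-null, or by '.' otherwise.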
  794. /// Create an analysis remark that explains why vectorization failed
  795. ///
  796. /// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
  797. /// RemarkName is the identifier for the remark. If \p I is passed it is an
  798. /// instruction that prevents vectorization. Otherwise \p TheLoop is used for
  799. /// the location of the remark. \return the remark object that can be
  800. /// streamed to.
  801. static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
  802. StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  803. Value *CodeRegion = TheLoop->getHeader();
  804. DebugLoc DL = TheLoop->getStartLoc();
  805. if (I) {
  806. CodeRegion = I->getParent();
807. // If there is no debug location attached to the instruction, fall back to
  808. // using the loop's.
  809. if (I->getDebugLoc())
  810. DL = I->getDebugLoc();
  811. }
  812. return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
  813. }
  814. namespace llvm {
  815. /// Return a value for Step multiplied by VF.
  816. Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
  817. int64_t Step) {
  818. assert(Ty->isIntegerTy() && "Expected an integer step");
  819. Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  820. return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
  821. }
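// For example (illustrative values): with Ty = i64 and Step = 2, a fixed VF of
// 4 yields the constant i64 8, while a scalable VF with a known minimum of 4
// yields the runtime value vscale * 8.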
  822. /// Return the runtime value for VF.
  823. Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  824. Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  825. return VF.isScalable() ? B.CreateVScale(EC) : EC;
  826. }
  827. const SCEV *createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE) {
  828. const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  829. assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && "Invalid loop count");
  830. ScalarEvolution &SE = *PSE.getSE();
831. // The exit count might have the type of i64 while the phi is i32. This can
832. // happen if we have an induction variable that is sign-extended before the
833. // compare. The only way we get a backedge-taken count in that case is if the
834. // induction variable was signed and therefore will not overflow, so
835. // truncating the count is legal.
  836. if (SE.getTypeSizeInBits(BackedgeTakenCount->getType()) >
  837. IdxTy->getPrimitiveSizeInBits())
  838. BackedgeTakenCount = SE.getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  839. BackedgeTakenCount = SE.getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
  840. // Get the total trip count from the count by adding 1.
  841. return SE.getAddExpr(BackedgeTakenCount,
  842. SE.getOne(BackedgeTakenCount->getType()));
  843. }
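// Conceptually (illustrative): for a loop whose backedge is taken n - 1 times,
// PSE reports a backedge-taken count of n - 1 and this returns the SCEV for
// (n - 1) + 1, i.e. the trip count n, truncated or extended to IdxTy as needed.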
  844. static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
  845. ElementCount VF) {
  846. assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  847. Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  848. Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  849. return B.CreateUIToFP(RuntimeVF, FTy);
  850. }
  851. void reportVectorizationFailure(const StringRef DebugMsg,
  852. const StringRef OREMsg, const StringRef ORETag,
  853. OptimizationRemarkEmitter *ORE, Loop *TheLoop,
  854. Instruction *I) {
  855. LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  856. LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  857. ORE->emit(
  858. createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
  859. << "loop not vectorized: " << OREMsg);
  860. }
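// The resulting analysis remark is what users see with, for example,
// -Rpass-analysis=loop-vectorize, printed as (schematically):
//   remark: <location>: loop not vectorized: <OREMsg>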
  861. void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
  862. OptimizationRemarkEmitter *ORE, Loop *TheLoop,
  863. Instruction *I) {
  864. LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  865. LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  866. ORE->emit(
  867. createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
  868. << Msg);
  869. }
  870. } // end namespace llvm
  871. #ifndef NDEBUG
  872. /// \return string containing a file name and a line # for the given loop.
  873. static std::string getDebugLocString(const Loop *L) {
  874. std::string Result;
  875. if (L) {
  876. raw_string_ostream OS(Result);
  877. if (const DebugLoc LoopDbgLoc = L->getStartLoc())
  878. LoopDbgLoc.print(OS);
  879. else
  880. // Just print the module name.
  881. OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
  882. OS.flush();
  883. }
  884. return Result;
  885. }
  886. #endif
  887. void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
  888. VPTransformState &State) {
  889. // Collect recipes in the backward slice of `Root` that may generate a poison
  890. // value that is used after vectorization.
  891. SmallPtrSet<VPRecipeBase *, 16> Visited;
  892. auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
  893. SmallVector<VPRecipeBase *, 16> Worklist;
  894. Worklist.push_back(Root);
  895. // Traverse the backward slice of Root through its use-def chain.
  896. while (!Worklist.empty()) {
  897. VPRecipeBase *CurRec = Worklist.back();
  898. Worklist.pop_back();
  899. if (!Visited.insert(CurRec).second)
  900. continue;
  901. // Prune search if we find another recipe generating a widen memory
  902. // instruction. Widen memory instructions involved in address computation
  903. // will lead to gather/scatter instructions, which don't need to be
  904. // handled.
  905. if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
  906. isa<VPInterleaveRecipe>(CurRec) ||
  907. isa<VPScalarIVStepsRecipe>(CurRec) ||
  908. isa<VPCanonicalIVPHIRecipe>(CurRec) ||
  909. isa<VPActiveLaneMaskPHIRecipe>(CurRec))
  910. continue;
  911. // This recipe contributes to the address computation of a widen
  912. // load/store. Collect recipe if its underlying instruction has
  913. // poison-generating flags.
  914. Instruction *Instr = CurRec->getUnderlyingInstr();
  915. if (Instr && Instr->hasPoisonGeneratingFlags())
  916. State.MayGeneratePoisonRecipes.insert(CurRec);
  917. // Add new definitions to the worklist.
  918. for (VPValue *operand : CurRec->operands())
  919. if (VPRecipeBase *OpDef = operand->getDefiningRecipe())
  920. Worklist.push_back(OpDef);
  921. }
  922. });
  923. // Traverse all the recipes in the VPlan and collect the poison-generating
  924. // recipes in the backward slice starting at the address of a VPWidenRecipe or
  925. // VPInterleaveRecipe.
  926. auto Iter = vp_depth_first_deep(State.Plan->getEntry());
  927. for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
  928. for (VPRecipeBase &Recipe : *VPBB) {
  929. if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
  930. Instruction &UnderlyingInstr = WidenRec->getIngredient();
  931. VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
  932. if (AddrDef && WidenRec->isConsecutive() &&
  933. Legal->blockNeedsPredication(UnderlyingInstr.getParent()))
  934. collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
  935. } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
  936. VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
  937. if (AddrDef) {
  938. // Check if any member of the interleave group needs predication.
  939. const InterleaveGroup<Instruction> *InterGroup =
  940. InterleaveRec->getInterleaveGroup();
  941. bool NeedPredication = false;
  942. for (int I = 0, NumMembers = InterGroup->getNumMembers();
  943. I < NumMembers; ++I) {
  944. Instruction *Member = InterGroup->getMember(I);
  945. if (Member)
  946. NeedPredication |=
  947. Legal->blockNeedsPredication(Member->getParent());
  948. }
  949. if (NeedPredication)
  950. collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
  951. }
  952. }
  953. }
  954. }
  955. }
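// Illustrative scenario (assumed source shape): for a predicated, consecutive
// access such as
//   if (c[i]) { x = a[i + 1]; }
// where the address add carries nuw/nsw, the add's recipe is collected here;
// if the load is widened without predication, the address is computed for
// lanes where the condition is false, so the poison-generating flags must be
// dropped.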
  956. PHINode *InnerLoopVectorizer::getReductionResumeValue(
  957. const RecurrenceDescriptor &RdxDesc) {
  958. auto It = ReductionResumeValues.find(&RdxDesc);
  959. assert(It != ReductionResumeValues.end() &&
  960. "Expected to find a resume value for the reduction.");
  961. return It->second;
  962. }
  963. namespace llvm {
  964. // Loop vectorization cost-model hints how the scalar epilogue loop should be
  965. // lowered.
  966. enum ScalarEpilogueLowering {
  967. // The default: allowing scalar epilogues.
  968. CM_ScalarEpilogueAllowed,
  969. // Vectorization with OptForSize: don't allow epilogues.
  970. CM_ScalarEpilogueNotAllowedOptSize,
971. // A special case of vectorization with OptForSize: loops with a very small
  972. // trip count are considered for vectorization under OptForSize, thereby
  973. // making sure the cost of their loop body is dominant, free of runtime
  974. // guards and scalar iteration overheads.
  975. CM_ScalarEpilogueNotAllowedLowTripLoop,
  976. // Loop hint predicate indicating an epilogue is undesired.
  977. CM_ScalarEpilogueNotNeededUsePredicate,
978. // Directive indicating we must either tail-fold or not vectorize.
  979. CM_ScalarEpilogueNotAllowedUsePredicate
  980. };
  981. /// ElementCountComparator creates a total ordering for ElementCount
  982. /// for the purposes of using it in a set structure.
  983. struct ElementCountComparator {
  984. bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
  985. return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
  986. std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  987. }
  988. };
  989. using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
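// For example (illustrative): the comparator orders the pair (isScalable,
// known minimum value) lexicographically, so ElementCount::getFixed(8)
// compares less than ElementCount::getScalable(1); every fixed count orders
// before every scalable one.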
  990. /// LoopVectorizationCostModel - estimates the expected speedups due to
  991. /// vectorization.
  992. /// In many cases vectorization is not profitable. This can happen because of
  993. /// a number of reasons. In this class we mainly attempt to predict the
  994. /// expected speedup/slowdowns due to the supported instruction set. We use the
  995. /// TargetTransformInfo to query the different backends for the cost of
  996. /// different operations.
  997. class LoopVectorizationCostModel {
  998. public:
  999. LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
  1000. PredicatedScalarEvolution &PSE, LoopInfo *LI,
  1001. LoopVectorizationLegality *Legal,
  1002. const TargetTransformInfo &TTI,
  1003. const TargetLibraryInfo *TLI, DemandedBits *DB,
  1004. AssumptionCache *AC,
  1005. OptimizationRemarkEmitter *ORE, const Function *F,
  1006. const LoopVectorizeHints *Hints,
  1007. InterleavedAccessInfo &IAI)
  1008. : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
  1009. TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
  1010. Hints(Hints), InterleaveInfo(IAI) {}
  1011. /// \return An upper bound for the vectorization factors (both fixed and
  1012. /// scalable). If the factors are 0, vectorization and interleaving should be
  1013. /// avoided up front.
  1014. FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
  1015. /// \return True if runtime checks are required for vectorization, and false
  1016. /// otherwise.
  1017. bool runtimeChecksRequired();
  1018. /// \return The most profitable vectorization factor and the cost of that VF.
  1019. /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  1020. /// then this vectorization factor will be selected if vectorization is
  1021. /// possible.
  1022. VectorizationFactor
  1023. selectVectorizationFactor(const ElementCountSet &CandidateVFs);
  1024. VectorizationFactor
  1025. selectEpilogueVectorizationFactor(const ElementCount MaxVF,
  1026. const LoopVectorizationPlanner &LVP);
  1027. /// Setup cost-based decisions for user vectorization factor.
  1028. /// \return true if the UserVF is a feasible VF to be chosen.
  1029. bool selectUserVectorizationFactor(ElementCount UserVF) {
  1030. collectUniformsAndScalars(UserVF);
  1031. collectInstsToScalarize(UserVF);
  1032. return expectedCost(UserVF).first.isValid();
  1033. }
  1034. /// \return The size (in bits) of the smallest and widest types in the code
  1035. /// that needs to be vectorized. We ignore values that remain scalar such as
  1036. /// 64 bit loop indices.
  1037. std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
  1038. /// \return The desired interleave count.
  1039. /// If interleave count has been specified by metadata it will be returned.
  1040. /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  1041. /// are the selected vectorization factor and the cost of the selected VF.
  1042. unsigned selectInterleaveCount(ElementCount VF, InstructionCost LoopCost);
1043. /// A memory access instruction may be vectorized in more than one way;
1044. /// its form after vectorization depends on cost.
1045. /// This function makes cost-based decisions for Load/Store instructions
1046. /// and collects them in a map. This decision map is used for building
1047. /// the lists of loop-uniform and loop-scalar instructions.
1048. /// The calculated cost is saved with the widening decision in order to
1049. /// avoid redundant calculations.
  1050. void setCostBasedWideningDecision(ElementCount VF);
  1051. /// A struct that represents some properties of the register usage
  1052. /// of a loop.
  1053. struct RegisterUsage {
  1054. /// Holds the number of loop invariant values that are used in the loop.
  1055. /// The key is ClassID of target-provided register class.
  1056. SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
  1057. /// Holds the maximum number of concurrent live intervals in the loop.
  1058. /// The key is ClassID of target-provided register class.
  1059. SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  1060. };
  1061. /// \return Returns information about the register usages of the loop for the
  1062. /// given vectorization factors.
  1063. SmallVector<RegisterUsage, 8>
  1064. calculateRegisterUsage(ArrayRef<ElementCount> VFs);
  1065. /// Collect values we want to ignore in the cost model.
  1066. void collectValuesToIgnore();
  1067. /// Collect all element types in the loop for which widening is needed.
  1068. void collectElementTypesForWidening();
  1069. /// Split reductions into those that happen in the loop, and those that happen
1070. /// outside. In-loop reductions are collected into InLoopReductionChains.
  1071. void collectInLoopReductions();
  1072. /// Returns true if we should use strict in-order reductions for the given
  1073. /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
  1074. /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
  1075. /// of FP operations.
  1076. bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
  1077. return !Hints->allowReordering() && RdxDesc.isOrdered();
  1078. }
  1079. /// \returns The smallest bitwidth each instruction can be represented with.
  1080. /// The vector equivalents of these instructions should be truncated to this
  1081. /// type.
  1082. const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
  1083. return MinBWs;
  1084. }
  1085. /// \returns True if it is more profitable to scalarize instruction \p I for
  1086. /// vectorization factor \p VF.
  1087. bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
  1088. assert(VF.isVector() &&
  1089. "Profitable to scalarize relevant only for VF > 1.");
  1090. // Cost model is not run in the VPlan-native path - return conservative
  1091. // result until this changes.
  1092. if (EnableVPlanNativePath)
  1093. return false;
  1094. auto Scalars = InstsToScalarize.find(VF);
  1095. assert(Scalars != InstsToScalarize.end() &&
  1096. "VF not yet analyzed for scalarization profitability");
  1097. return Scalars->second.find(I) != Scalars->second.end();
  1098. }
  1099. /// Returns true if \p I is known to be uniform after vectorization.
  1100. bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
  1101. if (VF.isScalar())
  1102. return true;
  1103. // Cost model is not run in the VPlan-native path - return conservative
  1104. // result until this changes.
  1105. if (EnableVPlanNativePath)
  1106. return false;
  1107. auto UniformsPerVF = Uniforms.find(VF);
  1108. assert(UniformsPerVF != Uniforms.end() &&
  1109. "VF not yet analyzed for uniformity");
  1110. return UniformsPerVF->second.count(I);
  1111. }
  1112. /// Returns true if \p I is known to be scalar after vectorization.
  1113. bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
  1114. if (VF.isScalar())
  1115. return true;
  1116. // Cost model is not run in the VPlan-native path - return conservative
  1117. // result until this changes.
  1118. if (EnableVPlanNativePath)
  1119. return false;
  1120. auto ScalarsPerVF = Scalars.find(VF);
  1121. assert(ScalarsPerVF != Scalars.end() &&
  1122. "Scalar values are not calculated for VF");
  1123. return ScalarsPerVF->second.count(I);
  1124. }
  1125. /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  1126. /// for vectorization factor \p VF.
  1127. bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
  1128. return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
  1129. !isProfitableToScalarize(I, VF) &&
  1130. !isScalarAfterVectorization(I, VF);
  1131. }
  1132. /// Decision that was taken during cost calculation for memory instruction.
  1133. enum InstWidening {
  1134. CM_Unknown,
  1135. CM_Widen, // For consecutive accesses with stride +1.
  1136. CM_Widen_Reverse, // For consecutive accesses with stride -1.
  1137. CM_Interleave,
  1138. CM_GatherScatter,
  1139. CM_Scalarize
  1140. };
  1141. /// Save vectorization decision \p W and \p Cost taken by the cost model for
  1142. /// instruction \p I and vector width \p VF.
  1143. void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
  1144. InstructionCost Cost) {
  1145. assert(VF.isVector() && "Expected VF >=2");
  1146. WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  1147. }
  1148. /// Save vectorization decision \p W and \p Cost taken by the cost model for
  1149. /// interleaving group \p Grp and vector width \p VF.
  1150. void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
  1151. ElementCount VF, InstWidening W,
  1152. InstructionCost Cost) {
  1153. assert(VF.isVector() && "Expected VF >=2");
1154. /// Broadcast this decision to all instructions inside the group.
  1155. /// But the cost will be assigned to one instruction only.
  1156. for (unsigned i = 0; i < Grp->getFactor(); ++i) {
  1157. if (auto *I = Grp->getMember(i)) {
  1158. if (Grp->getInsertPos() == I)
  1159. WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  1160. else
  1161. WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
  1162. }
  1163. }
  1164. }
  1165. /// Return the cost model decision for the given instruction \p I and vector
  1166. /// width \p VF. Return CM_Unknown if this instruction did not pass
  1167. /// through the cost modeling.
  1168. InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
  1169. assert(VF.isVector() && "Expected VF to be a vector VF");
  1170. // Cost model is not run in the VPlan-native path - return conservative
  1171. // result until this changes.
  1172. if (EnableVPlanNativePath)
  1173. return CM_GatherScatter;
  1174. std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
  1175. auto Itr = WideningDecisions.find(InstOnVF);
  1176. if (Itr == WideningDecisions.end())
  1177. return CM_Unknown;
  1178. return Itr->second.first;
  1179. }
  1180. /// Return the vectorization cost for the given instruction \p I and vector
  1181. /// width \p VF.
  1182. InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
  1183. assert(VF.isVector() && "Expected VF >=2");
  1184. std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
  1185. assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
  1186. "The cost is not calculated");
  1187. return WideningDecisions[InstOnVF].second;
  1188. }
  1189. /// Return True if instruction \p I is an optimizable truncate whose operand
  1190. /// is an induction variable. Such a truncate will be removed by adding a new
  1191. /// induction variable with the destination type.
  1192. bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
  1193. // If the instruction is not a truncate, return false.
  1194. auto *Trunc = dyn_cast<TruncInst>(I);
  1195. if (!Trunc)
  1196. return false;
  1197. // Get the source and destination types of the truncate.
  1198. Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
  1199. Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
  1200. // If the truncate is free for the given types, return false. Replacing a
  1201. // free truncate with an induction variable would add an induction variable
  1202. // update instruction to each iteration of the loop. We exclude from this
  1203. // check the primary induction variable since it will need an update
  1204. // instruction regardless.
  1205. Value *Op = Trunc->getOperand(0);
  1206. if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
  1207. return false;
  1208. // If the truncated value is not an induction variable, return false.
  1209. return Legal->isInductionPhi(Op);
  1210. }
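// Illustrative case (assumed IR): for
//   %t = trunc i64 %iv to i32
// where %iv is a non-primary induction phi and the i64-to-i32 truncate is not
// free for the widened types, this returns true and the vectorizer can create
// a new i32 induction instead, removing the truncate.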
  1211. /// Collects the instructions to scalarize for each predicated instruction in
  1212. /// the loop.
  1213. void collectInstsToScalarize(ElementCount VF);
  1214. /// Collect Uniform and Scalar values for the given \p VF.
  1215. /// The sets depend on CM decision for Load/Store instructions
  1216. /// that may be vectorized as interleave, gather-scatter or scalarized.
  1217. void collectUniformsAndScalars(ElementCount VF) {
  1218. // Do the analysis once.
  1219. if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
  1220. return;
  1221. setCostBasedWideningDecision(VF);
  1222. collectLoopUniforms(VF);
  1223. collectLoopScalars(VF);
  1224. }
  1225. /// Returns true if the target machine supports masked store operation
  1226. /// for the given \p DataType and kind of access to \p Ptr.
  1227. bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
  1228. return Legal->isConsecutivePtr(DataType, Ptr) &&
  1229. TTI.isLegalMaskedStore(DataType, Alignment);
  1230. }
  1231. /// Returns true if the target machine supports masked load operation
  1232. /// for the given \p DataType and kind of access to \p Ptr.
  1233. bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
  1234. return Legal->isConsecutivePtr(DataType, Ptr) &&
  1235. TTI.isLegalMaskedLoad(DataType, Alignment);
  1236. }
  1237. /// Returns true if the target machine can represent \p V as a masked gather
  1238. /// or scatter operation.
  1239. bool isLegalGatherOrScatter(Value *V,
  1240. ElementCount VF = ElementCount::getFixed(1)) {
  1241. bool LI = isa<LoadInst>(V);
  1242. bool SI = isa<StoreInst>(V);
  1243. if (!LI && !SI)
  1244. return false;
  1245. auto *Ty = getLoadStoreType(V);
  1246. Align Align = getLoadStoreAlignment(V);
  1247. if (VF.isVector())
  1248. Ty = VectorType::get(Ty, VF);
  1249. return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
  1250. (SI && TTI.isLegalMaskedScatter(Ty, Align));
  1251. }
  1252. /// Returns true if the target machine supports all of the reduction
  1253. /// variables found for the given VF.
  1254. bool canVectorizeReductions(ElementCount VF) const {
  1255. return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
  1256. const RecurrenceDescriptor &RdxDesc = Reduction.second;
  1257. return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
  1258. }));
  1259. }
  1260. /// Given costs for both strategies, return true if the scalar predication
  1261. /// lowering should be used for div/rem. This incorporates an override
  1262. /// option so it is not simply a cost comparison.
  1263. bool isDivRemScalarWithPredication(InstructionCost ScalarCost,
  1264. InstructionCost SafeDivisorCost) const {
  1265. switch (ForceSafeDivisor) {
  1266. case cl::BOU_UNSET:
  1267. return ScalarCost < SafeDivisorCost;
  1268. case cl::BOU_TRUE:
  1269. return false;
  1270. case cl::BOU_FALSE:
  1271. return true;
  1272. };
  1273. llvm_unreachable("impossible case value");
  1274. }
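// Behavior sketch of the override: leaving -force-widen-divrem-via-safe-divisor
// unset picks whichever strategy is cheaper; setting it to true forces the
// safe-divisor lowering, and setting it to false forces scalarized predication.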
  1275. /// Returns true if \p I is an instruction which requires predication and
  1276. /// for which our chosen predication strategy is scalarization (i.e. we
  1277. /// don't have an alternate strategy such as masking available).
  1278. /// \p VF is the vectorization factor that will be used to vectorize \p I.
  1279. bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
  1280. /// Returns true if \p I is an instruction that needs to be predicated
  1281. /// at runtime. The result is independent of the predication mechanism.
  1282. /// Superset of instructions that return true for isScalarWithPredication.
  1283. bool isPredicatedInst(Instruction *I) const;
  1284. /// Return the costs for our two available strategies for lowering a
  1285. /// div/rem operation which requires speculating at least one lane.
  1286. /// First result is for scalarization (will be invalid for scalable
  1287. /// vectors); second is for the safe-divisor strategy.
  1288. std::pair<InstructionCost, InstructionCost>
  1289. getDivRemSpeculationCost(Instruction *I,
  1290. ElementCount VF) const;
  1291. /// Returns true if \p I is a memory instruction with consecutive memory
  1292. /// access that can be widened.
  1293. bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
  1294. /// Returns true if \p I is a memory instruction in an interleaved-group
  1295. /// of memory accesses that can be vectorized with wide vector loads/stores
  1296. /// and shuffles.
  1297. bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF);
  1298. /// Check if \p Instr belongs to any interleaved access group.
  1299. bool isAccessInterleaved(Instruction *Instr) {
  1300. return InterleaveInfo.isInterleaved(Instr);
  1301. }
  1302. /// Get the interleaved access group that \p Instr belongs to.
  1303. const InterleaveGroup<Instruction> *
  1304. getInterleavedAccessGroup(Instruction *Instr) {
  1305. return InterleaveInfo.getInterleaveGroup(Instr);
  1306. }
  1307. /// Returns true if we're required to use a scalar epilogue for at least
  1308. /// the final iteration of the original loop.
  1309. bool requiresScalarEpilogue(ElementCount VF) const {
  1310. if (!isScalarEpilogueAllowed())
  1311. return false;
  1312. // If we might exit from anywhere but the latch, must run the exiting
  1313. // iteration in scalar form.
  1314. if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
  1315. return true;
  1316. return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
  1317. }
  1318. /// Returns true if a scalar epilogue is not allowed due to optsize or a
  1319. /// loop hint annotation.
  1320. bool isScalarEpilogueAllowed() const {
  1321. return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  1322. }
  1323. /// Returns true if all loop blocks should be masked to fold tail loop.
  1324. bool foldTailByMasking() const { return FoldTailByMasking; }
1325. /// Returns true if we're tail-folding and want to use the active lane mask
  1326. /// for vector loop control flow.
  1327. bool useActiveLaneMaskForControlFlow() const {
  1328. return FoldTailByMasking &&
  1329. TTI.emitGetActiveLaneMask() == PredicationStyle::DataAndControlFlow;
  1330. }
1331. /// Returns true if the instructions in this block require predication
  1332. /// for any reason, e.g. because tail folding now requires a predicate
  1333. /// or because the block in the original loop was predicated.
  1334. bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
  1335. return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  1336. }
  1337. /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  1338. /// nodes to the chain of instructions representing the reductions. Uses a
  1339. /// MapVector to ensure deterministic iteration order.
  1340. using ReductionChainMap =
  1341. SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
  1342. /// Return the chain of instructions representing an inloop reduction.
  1343. const ReductionChainMap &getInLoopReductionChains() const {
  1344. return InLoopReductionChains;
  1345. }
  1346. /// Returns true if the Phi is part of an inloop reduction.
  1347. bool isInLoopReduction(PHINode *Phi) const {
  1348. return InLoopReductionChains.count(Phi);
  1349. }
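// For illustration (hypothetical example): for an in-loop sum reduction
//   for (i = 0; i < n; ++i) s += a[i];
// the map associates the phi of 's' with the chain containing the add that feeds
// it, and, roughly speaking, that add is emitted inside the loop as a scalar
// accumulation of an llvm.vector.reduce.add of the loaded vector, rather than as a
// vector accumulator that is reduced only after the loop.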
  1350. /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  1351. /// with factor VF. Return the cost of the instruction, including
  1352. /// scalarization overhead if it's needed.
  1353. InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
  1354. /// Estimate cost of a call instruction CI if it were vectorized with factor
  1355. /// VF. Return the cost of the instruction, including scalarization overhead
  1356. /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  1357. /// scalarized -
1358. /// i.e. either a vector version isn't available or it is too expensive.
  1359. InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
  1360. bool &NeedToScalarize) const;
  1361. /// Returns true if the per-lane cost of VectorizationFactor A is lower than
  1362. /// that of B.
  1363. bool isMoreProfitable(const VectorizationFactor &A,
  1364. const VectorizationFactor &B) const;
  1365. /// Invalidates decisions already taken by the cost model.
  1366. void invalidateCostModelingDecisions() {
  1367. WideningDecisions.clear();
  1368. Uniforms.clear();
  1369. Scalars.clear();
  1370. }
1371. /// Convenience function that returns the value of vscale_range if
1372. /// vscale_range.min == vscale_range.max, or otherwise returns the value
1373. /// returned by the corresponding TTI method.
  1374. std::optional<unsigned> getVScaleForTuning() const;
  1375. private:
  1376. unsigned NumPredStores = 0;
  1377. /// \return An upper bound for the vectorization factors for both
  1378. /// fixed and scalable vectorization, where the minimum-known number of
  1379. /// elements is a power-of-2 larger than zero. If scalable vectorization is
  1380. /// disabled or unsupported, then the scalable part will be equal to
  1381. /// ElementCount::getScalable(0).
  1382. FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
  1383. ElementCount UserVF,
  1384. bool FoldTailByMasking);
1385. /// \return the maximized element count based on the target's vector
  1386. /// registers and the loop trip-count, but limited to a maximum safe VF.
  1387. /// This is a helper function of computeFeasibleMaxVF.
  1388. ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
  1389. unsigned SmallestType,
  1390. unsigned WidestType,
  1391. ElementCount MaxSafeVF,
  1392. bool FoldTailByMasking);
  1393. /// \return the maximum legal scalable VF, based on the safe max number
  1394. /// of elements.
  1395. ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
  1396. /// The vectorization cost is a combination of the cost itself and a boolean
  1397. /// indicating whether any of the contributing operations will actually
  1398. /// operate on vector values after type legalization in the backend. If this
  1399. /// latter value is false, then all operations will be scalarized (i.e. no
  1400. /// vectorization has actually taken place).
  1401. using VectorizationCostTy = std::pair<InstructionCost, bool>;
  1402. /// Returns the expected execution cost. The unit of the cost does
  1403. /// not matter because we use the 'cost' units to compare different
  1404. /// vector widths. The cost that is returned is *not* normalized by
  1405. /// the factor width. If \p Invalid is not nullptr, this function
  1406. /// will add a pair(Instruction*, ElementCount) to \p Invalid for
  1407. /// each instruction that has an Invalid cost for the given VF.
  1408. using InstructionVFPair = std::pair<Instruction *, ElementCount>;
  1409. VectorizationCostTy
  1410. expectedCost(ElementCount VF,
  1411. SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
  1412. /// Returns the execution time cost of an instruction for a given vector
  1413. /// width. Vector width of one means scalar.
  1414. VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
  1415. /// The cost-computation logic from getInstructionCost which provides
  1416. /// the vector type as an output parameter.
  1417. InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
  1418. Type *&VectorTy);
  1419. /// Return the cost of instructions in an inloop reduction pattern, if I is
  1420. /// part of that pattern.
  1421. std::optional<InstructionCost>
  1422. getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
  1423. TTI::TargetCostKind CostKind);
  1424. /// Calculate vectorization cost of memory instruction \p I.
  1425. InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
  1426. /// The cost computation for scalarized memory instruction.
  1427. InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
  1428. /// The cost computation for interleaving group of memory instructions.
  1429. InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
  1430. /// The cost computation for Gather/Scatter instruction.
  1431. InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
  1432. /// The cost computation for widening instruction \p I with consecutive
  1433. /// memory access.
  1434. InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
  1435. /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  1436. /// Load: scalar load + broadcast.
  1437. /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  1438. /// element)
  1439. InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
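// For illustration (hypothetical example, assuming VF = 4 and i32 elements):
// 'x = *p' for a loop-invariant pointer p is costed as one scalar load of *p plus
// one splat of the loaded value into a <4 x i32> vector, while '*p = v' where v
// varies per lane is costed as one extractelement of the last lane plus one scalar
// store.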
  1440. /// Estimate the overhead of scalarizing an instruction. This is a
  1441. /// convenience wrapper for the type-based getScalarizationOverhead API.
  1442. InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF,
  1443. TTI::TargetCostKind CostKind) const;
  1444. /// Returns true if an artificially high cost for emulated masked memrefs
  1445. /// should be used.
  1446. bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
  1447. /// Map of scalar integer values to the smallest bitwidth they can be legally
  1448. /// represented as. The vector equivalents of these values should be truncated
  1449. /// to this type.
  1450. MapVector<Instruction *, uint64_t> MinBWs;
  1451. /// A type representing the costs for instructions if they were to be
  1452. /// scalarized rather than vectorized. The entries are Instruction-Cost
  1453. /// pairs.
  1454. using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1455. /// A set containing all BasicBlocks that are known to be present after
1456. /// vectorization as predicated blocks.
  1457. DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
  1458. PredicatedBBsAfterVectorization;
  1459. /// Records whether it is allowed to have the original scalar loop execute at
  1460. /// least once. This may be needed as a fallback loop in case runtime
  1461. /// aliasing/dependence checks fail, or to handle the tail/remainder
1462. /// iterations when the trip count is unknown or is not a multiple of the VF,
  1463. /// or as a peel-loop to handle gaps in interleave-groups.
  1464. /// Under optsize and when the trip count is very small we don't allow any
  1465. /// iterations to execute in the scalar loop.
  1466. ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
  1467. /// All blocks of loop are to be masked to fold tail of scalar iterations.
  1468. bool FoldTailByMasking = false;
  1469. /// A map holding scalar costs for different vectorization factors. The
  1470. /// presence of a cost for an instruction in the mapping indicates that the
  1471. /// instruction will be scalarized when vectorizing with the associated
  1472. /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  1473. DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
  1474. /// Holds the instructions known to be uniform after vectorization.
  1475. /// The data is collected per VF.
  1476. DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
  1477. /// Holds the instructions known to be scalar after vectorization.
  1478. /// The data is collected per VF.
  1479. DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
  1480. /// Holds the instructions (address computations) that are forced to be
  1481. /// scalarized.
  1482. DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
  1483. /// PHINodes of the reductions that should be expanded in-loop along with
  1484. /// their associated chains of reduction operations, in program order from top
1485. /// (PHI) to bottom.
  1486. ReductionChainMap InLoopReductionChains;
  1487. /// A Map of inloop reduction operations and their immediate chain operand.
  1488. /// FIXME: This can be removed once reductions can be costed correctly in
  1489. /// vplan. This was added to allow quick lookup to the inloop operations,
  1490. /// without having to loop through InLoopReductionChains.
  1491. DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
  1492. /// Returns the expected difference in cost from scalarizing the expression
  1493. /// feeding a predicated instruction \p PredInst. The instructions to
  1494. /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  1495. /// non-negative return value implies the expression will be scalarized.
  1496. /// Currently, only single-use chains are considered for scalarization.
  1497. InstructionCost computePredInstDiscount(Instruction *PredInst,
  1498. ScalarCostsTy &ScalarCosts,
  1499. ElementCount VF);
  1500. /// Collect the instructions that are uniform after vectorization. An
  1501. /// instruction is uniform if we represent it with a single scalar value in
  1502. /// the vectorized loop corresponding to each vector iteration. Examples of
  1503. /// uniform instructions include pointer operands of consecutive or
  1504. /// interleaved memory accesses. Note that although uniformity implies an
  1505. /// instruction will be scalar, the reverse is not true. In general, a
  1506. /// scalarized instruction will be represented by VF scalar values in the
  1507. /// vectorized loop, each corresponding to an iteration of the original
  1508. /// scalar loop.
  1509. void collectLoopUniforms(ElementCount VF);
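// For illustration (hypothetical example): in
//   for (i = 0; i < n; ++i) a[i] = b[i] + c;
// the address computations feeding the consecutive load and store are uniform:
// one scalar address per vector iteration suffices. By contrast, an instruction
// that is merely scalarized is replicated VF times, once per original iteration.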
  1510. /// Collect the instructions that are scalar after vectorization. An
  1511. /// instruction is scalar if it is known to be uniform or will be scalarized
  1512. /// during vectorization. collectLoopScalars should only add non-uniform nodes
  1513. /// to the list if they are used by a load/store instruction that is marked as
  1514. /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
  1515. /// VF values in the vectorized loop, each corresponding to an iteration of
  1516. /// the original scalar loop.
  1517. void collectLoopScalars(ElementCount VF);
  1518. /// Keeps cost model vectorization decision and cost for instructions.
  1519. /// Right now it is used for memory instructions only.
  1520. using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
  1521. std::pair<InstWidening, InstructionCost>>;
  1522. DecisionList WideningDecisions;
  1523. /// Returns true if \p V is expected to be vectorized and it needs to be
  1524. /// extracted.
  1525. bool needsExtract(Value *V, ElementCount VF) const {
  1526. Instruction *I = dyn_cast<Instruction>(V);
  1527. if (VF.isScalar() || !I || !TheLoop->contains(I) ||
  1528. TheLoop->isLoopInvariant(I))
  1529. return false;
  1530. // Assume we can vectorize V (and hence we need extraction) if the
  1531. // scalars are not computed yet. This can happen, because it is called
  1532. // via getScalarizationOverhead from setCostBasedWideningDecision, before
  1533. // the scalars are collected. That should be a safe assumption in most
  1534. // cases, because we check if the operands have vectorizable types
  1535. // beforehand in LoopVectorizationLegality.
  1536. return Scalars.find(VF) == Scalars.end() ||
  1537. !isScalarAfterVectorization(I, VF);
  1538. };
  1539. /// Returns a range containing only operands needing to be extracted.
  1540. SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
  1541. ElementCount VF) const {
  1542. return SmallVector<Value *, 4>(make_filter_range(
  1543. Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  1544. }
  1545. /// Determines if we have the infrastructure to vectorize loop \p L and its
  1546. /// epilogue, assuming the main loop is vectorized by \p VF.
  1547. bool isCandidateForEpilogueVectorization(const Loop &L,
  1548. const ElementCount VF) const;
  1549. /// Returns true if epilogue vectorization is considered profitable, and
  1550. /// false otherwise.
  1551. /// \p VF is the vectorization factor chosen for the original loop.
  1552. bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
  1553. public:
  1554. /// The loop that we evaluate.
  1555. Loop *TheLoop;
  1556. /// Predicated scalar evolution analysis.
  1557. PredicatedScalarEvolution &PSE;
  1558. /// Loop Info analysis.
  1559. LoopInfo *LI;
  1560. /// Vectorization legality.
  1561. LoopVectorizationLegality *Legal;
  1562. /// Vector target information.
  1563. const TargetTransformInfo &TTI;
  1564. /// Target Library Info.
  1565. const TargetLibraryInfo *TLI;
  1566. /// Demanded bits analysis.
  1567. DemandedBits *DB;
  1568. /// Assumption cache.
  1569. AssumptionCache *AC;
  1570. /// Interface to emit optimization remarks.
  1571. OptimizationRemarkEmitter *ORE;
  1572. const Function *TheFunction;
  1573. /// Loop Vectorize Hint.
  1574. const LoopVectorizeHints *Hints;
  1575. /// The interleave access information contains groups of interleaved accesses
1576. /// with the same stride that are close to each other.
  1577. InterleavedAccessInfo &InterleaveInfo;
  1578. /// Values to ignore in the cost model.
  1579. SmallPtrSet<const Value *, 16> ValuesToIgnore;
  1580. /// Values to ignore in the cost model when VF > 1.
  1581. SmallPtrSet<const Value *, 16> VecValuesToIgnore;
  1582. /// All element types found in the loop.
  1583. SmallPtrSet<Type *, 16> ElementTypesInLoop;
  1584. /// Profitable vector factors.
  1585. SmallVector<VectorizationFactor, 8> ProfitableVFs;
  1586. };
  1587. } // end namespace llvm
  1588. namespace {
  1589. /// Helper struct to manage generating runtime checks for vectorization.
  1590. ///
1591. /// The runtime checks are created up-front in temporary blocks to allow better
1592. /// cost estimation, and are un-linked from the existing IR. After deciding to
1593. /// vectorize, the checks are moved back. If we decide not to vectorize, the
  1594. /// temporary blocks are completely removed.
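/// For illustration (simplified, hypothetical layout): if both kinds of checks are
/// kept, the final CFG is roughly
///   preheader -> vector.scevcheck -> vector.memcheck -> vector preheader -> ...
/// where each check block also branches to the scalar loop (the bypass block) when
/// its condition indicates that vector execution would be unsafe.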
  1595. class GeneratedRTChecks {
  1596. /// Basic block which contains the generated SCEV checks, if any.
  1597. BasicBlock *SCEVCheckBlock = nullptr;
  1598. /// The value representing the result of the generated SCEV checks. If it is
  1599. /// nullptr, either no SCEV checks have been generated or they have been used.
  1600. Value *SCEVCheckCond = nullptr;
  1601. /// Basic block which contains the generated memory runtime checks, if any.
  1602. BasicBlock *MemCheckBlock = nullptr;
  1603. /// The value representing the result of the generated memory runtime checks.
  1604. /// If it is nullptr, either no memory runtime checks have been generated or
  1605. /// they have been used.
  1606. Value *MemRuntimeCheckCond = nullptr;
  1607. DominatorTree *DT;
  1608. LoopInfo *LI;
  1609. TargetTransformInfo *TTI;
  1610. SCEVExpander SCEVExp;
  1611. SCEVExpander MemCheckExp;
  1612. bool CostTooHigh = false;
  1613. public:
  1614. GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
  1615. TargetTransformInfo *TTI, const DataLayout &DL)
  1616. : DT(DT), LI(LI), TTI(TTI), SCEVExp(SE, DL, "scev.check"),
  1617. MemCheckExp(SE, DL, "scev.check") {}
  1618. /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  1619. /// accurately estimate the cost of the runtime checks. The blocks are
1620. /// un-linked from the IR and are added back during vector code generation. If
  1621. /// there is no vector code generation, the check blocks are removed
  1622. /// completely.
  1623. void Create(Loop *L, const LoopAccessInfo &LAI,
  1624. const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
  1625. // Hard cutoff to limit compile-time increase in case a very large number of
  1626. // runtime checks needs to be generated.
  1627. // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
  1628. // profile info.
  1629. CostTooHigh =
  1630. LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
  1631. if (CostTooHigh)
  1632. return;
  1633. BasicBlock *LoopHeader = L->getHeader();
  1634. BasicBlock *Preheader = L->getLoopPreheader();
  1635. // Use SplitBlock to create blocks for SCEV & memory runtime checks to
  1636. // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
  1637. // may be used by SCEVExpander. The blocks will be un-linked from their
  1638. // predecessors and removed from LI & DT at the end of the function.
  1639. if (!UnionPred.isAlwaysTrue()) {
  1640. SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
  1641. nullptr, "vector.scevcheck");
  1642. SCEVCheckCond = SCEVExp.expandCodeForPredicate(
  1643. &UnionPred, SCEVCheckBlock->getTerminator());
  1644. }
  1645. const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
  1646. if (RtPtrChecking.Need) {
  1647. auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
  1648. MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
  1649. "vector.memcheck");
  1650. auto DiffChecks = RtPtrChecking.getDiffChecks();
  1651. if (DiffChecks) {
  1652. Value *RuntimeVF = nullptr;
  1653. MemRuntimeCheckCond = addDiffRuntimeChecks(
  1654. MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
  1655. [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
  1656. if (!RuntimeVF)
  1657. RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
  1658. return RuntimeVF;
  1659. },
  1660. IC);
  1661. } else {
  1662. MemRuntimeCheckCond =
  1663. addRuntimeChecks(MemCheckBlock->getTerminator(), L,
  1664. RtPtrChecking.getChecks(), MemCheckExp);
  1665. }
  1666. assert(MemRuntimeCheckCond &&
  1667. "no RT checks generated although RtPtrChecking "
  1668. "claimed checks are required");
  1669. }
  1670. if (!MemCheckBlock && !SCEVCheckBlock)
  1671. return;
  1672. // Unhook the temporary block with the checks, update various places
  1673. // accordingly.
  1674. if (SCEVCheckBlock)
  1675. SCEVCheckBlock->replaceAllUsesWith(Preheader);
  1676. if (MemCheckBlock)
  1677. MemCheckBlock->replaceAllUsesWith(Preheader);
  1678. if (SCEVCheckBlock) {
  1679. SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
  1680. new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
  1681. Preheader->getTerminator()->eraseFromParent();
  1682. }
  1683. if (MemCheckBlock) {
  1684. MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
  1685. new UnreachableInst(Preheader->getContext(), MemCheckBlock);
  1686. Preheader->getTerminator()->eraseFromParent();
  1687. }
  1688. DT->changeImmediateDominator(LoopHeader, Preheader);
  1689. if (MemCheckBlock) {
  1690. DT->eraseNode(MemCheckBlock);
  1691. LI->removeBlock(MemCheckBlock);
  1692. }
  1693. if (SCEVCheckBlock) {
  1694. DT->eraseNode(SCEVCheckBlock);
  1695. LI->removeBlock(SCEVCheckBlock);
  1696. }
  1697. }
  1698. InstructionCost getCost() {
  1699. if (SCEVCheckBlock || MemCheckBlock)
  1700. LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
  1701. if (CostTooHigh) {
  1702. InstructionCost Cost;
  1703. Cost.setInvalid();
  1704. LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
  1705. return Cost;
  1706. }
  1707. InstructionCost RTCheckCost = 0;
  1708. if (SCEVCheckBlock)
  1709. for (Instruction &I : *SCEVCheckBlock) {
  1710. if (SCEVCheckBlock->getTerminator() == &I)
  1711. continue;
  1712. InstructionCost C =
  1713. TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput);
  1714. LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
  1715. RTCheckCost += C;
  1716. }
  1717. if (MemCheckBlock)
  1718. for (Instruction &I : *MemCheckBlock) {
  1719. if (MemCheckBlock->getTerminator() == &I)
  1720. continue;
  1721. InstructionCost C =
  1722. TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput);
  1723. LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
  1724. RTCheckCost += C;
  1725. }
  1726. if (SCEVCheckBlock || MemCheckBlock)
  1727. LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
  1728. << "\n");
  1729. return RTCheckCost;
  1730. }
  1731. /// Remove the created SCEV & memory runtime check blocks & instructions, if
  1732. /// unused.
  1733. ~GeneratedRTChecks() {
  1734. SCEVExpanderCleaner SCEVCleaner(SCEVExp);
  1735. SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
  1736. if (!SCEVCheckCond)
  1737. SCEVCleaner.markResultUsed();
  1738. if (!MemRuntimeCheckCond)
  1739. MemCheckCleaner.markResultUsed();
  1740. if (MemRuntimeCheckCond) {
  1741. auto &SE = *MemCheckExp.getSE();
  1742. // Memory runtime check generation creates compares that use expanded
  1743. // values. Remove them before running the SCEVExpanderCleaners.
  1744. for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
  1745. if (MemCheckExp.isInsertedInstruction(&I))
  1746. continue;
  1747. SE.forgetValue(&I);
  1748. I.eraseFromParent();
  1749. }
  1750. }
  1751. MemCheckCleaner.cleanup();
  1752. SCEVCleaner.cleanup();
  1753. if (SCEVCheckCond)
  1754. SCEVCheckBlock->eraseFromParent();
  1755. if (MemRuntimeCheckCond)
  1756. MemCheckBlock->eraseFromParent();
  1757. }
  1758. /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
  1759. /// adjusts the branches to branch to the vector preheader or \p Bypass,
  1760. /// depending on the generated condition.
  1761. BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
  1762. BasicBlock *LoopVectorPreHeader,
  1763. BasicBlock *LoopExitBlock) {
  1764. if (!SCEVCheckCond)
  1765. return nullptr;
  1766. Value *Cond = SCEVCheckCond;
  1767. // Mark the check as used, to prevent it from being removed during cleanup.
  1768. SCEVCheckCond = nullptr;
  1769. if (auto *C = dyn_cast<ConstantInt>(Cond))
  1770. if (C->isZero())
  1771. return nullptr;
  1772. auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
  1773. BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
  1774. // Create new preheader for vector loop.
  1775. if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
  1776. PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
  1777. SCEVCheckBlock->getTerminator()->eraseFromParent();
  1778. SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
  1779. Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
  1780. SCEVCheckBlock);
  1781. DT->addNewBlock(SCEVCheckBlock, Pred);
  1782. DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
  1783. ReplaceInstWithInst(SCEVCheckBlock->getTerminator(),
  1784. BranchInst::Create(Bypass, LoopVectorPreHeader, Cond));
  1785. return SCEVCheckBlock;
  1786. }
  1787. /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
  1788. /// the branches to branch to the vector preheader or \p Bypass, depending on
  1789. /// the generated condition.
  1790. BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
  1791. BasicBlock *LoopVectorPreHeader) {
  1792. // Check if we generated code that checks in runtime if arrays overlap.
  1793. if (!MemRuntimeCheckCond)
  1794. return nullptr;
  1795. auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
  1796. Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
  1797. MemCheckBlock);
  1798. DT->addNewBlock(MemCheckBlock, Pred);
  1799. DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
  1800. MemCheckBlock->moveBefore(LoopVectorPreHeader);
  1801. if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
  1802. PL->addBasicBlockToLoop(MemCheckBlock, *LI);
  1803. ReplaceInstWithInst(
  1804. MemCheckBlock->getTerminator(),
  1805. BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
  1806. MemCheckBlock->getTerminator()->setDebugLoc(
  1807. Pred->getTerminator()->getDebugLoc());
  1808. // Mark the check as used, to prevent it from being removed during cleanup.
  1809. MemRuntimeCheckCond = nullptr;
  1810. return MemCheckBlock;
  1811. }
  1812. };
  1813. } // namespace
  1814. // Return true if \p OuterLp is an outer loop annotated with hints for explicit
  1815. // vectorization. The loop needs to be annotated with #pragma omp simd
1816. // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
  1817. // vector length information is not provided, vectorization is not considered
  1818. // explicit. Interleave hints are not allowed either. These limitations will be
  1819. // relaxed in the future.
1820. // Please note that we are currently forced to abuse the pragma 'clang
  1821. // vectorize' semantics. This pragma provides *auto-vectorization hints*
  1822. // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
  1823. // provides *explicit vectorization hints* (LV can bypass legal checks and
  1824. // assume that vectorization is legal). However, both hints are implemented
  1825. // using the same metadata (llvm.loop.vectorize, processed by
  1826. // LoopVectorizeHints). This will be fixed in the future when the native IR
  1827. // representation for pragma 'omp simd' is introduced.
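// For illustration (hypothetical source), an outer loop that is currently accepted
// as explicitly vectorized could look like:
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < n; ++i)      // outer loop to vectorize
//     for (int j = 0; j < m; ++j)    // inner loop
//       a[i] += b[i][j];
// or the same loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)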
  1828. static bool isExplicitVecOuterLoop(Loop *OuterLp,
  1829. OptimizationRemarkEmitter *ORE) {
  1830. assert(!OuterLp->isInnermost() && "This is not an outer loop");
  1831. LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
  1832. // Only outer loops with an explicit vectorization hint are supported.
  1833. // Unannotated outer loops are ignored.
  1834. if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
  1835. return false;
  1836. Function *Fn = OuterLp->getHeader()->getParent();
  1837. if (!Hints.allowVectorization(Fn, OuterLp,
  1838. true /*VectorizeOnlyWhenForced*/)) {
  1839. LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
  1840. return false;
  1841. }
  1842. if (Hints.getInterleave() > 1) {
  1843. // TODO: Interleave support is future work.
  1844. LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
  1845. "outer loops.\n");
  1846. Hints.emitRemarkWithHints();
  1847. return false;
  1848. }
  1849. return true;
  1850. }
  1851. static void collectSupportedLoops(Loop &L, LoopInfo *LI,
  1852. OptimizationRemarkEmitter *ORE,
  1853. SmallVectorImpl<Loop *> &V) {
  1854. // Collect inner loops and outer loops without irreducible control flow. For
  1855. // now, only collect outer loops that have explicit vectorization hints. If we
  1856. // are stress testing the VPlan H-CFG construction, we collect the outermost
  1857. // loop of every loop nest.
  1858. if (L.isInnermost() || VPlanBuildStressTest ||
  1859. (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
  1860. LoopBlocksRPO RPOT(&L);
  1861. RPOT.perform(LI);
  1862. if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
  1863. V.push_back(&L);
  1864. // TODO: Collect inner loops inside marked outer loops in case
  1865. // vectorization fails for the outer loop. Do not invoke
  1866. // 'containsIrreducibleCFG' again for inner loops when the outer loop is
  1867. // already known to be reducible. We can use an inherited attribute for
  1868. // that.
  1869. return;
  1870. }
  1871. }
  1872. for (Loop *InnerL : L)
  1873. collectSupportedLoops(*InnerL, LI, ORE, V);
  1874. }
  1875. namespace {
  1876. /// The LoopVectorize Pass.
  1877. struct LoopVectorize : public FunctionPass {
  1878. /// Pass identification, replacement for typeid
  1879. static char ID;
  1880. LoopVectorizePass Impl;
  1881. explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
  1882. bool VectorizeOnlyWhenForced = false)
  1883. : FunctionPass(ID),
  1884. Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
  1885. initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  1886. }
  1887. bool runOnFunction(Function &F) override {
  1888. if (skipFunction(F))
  1889. return false;
  1890. auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  1891. auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  1892. auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  1893. auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  1894. auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
  1895. auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  1896. auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
  1897. auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  1898. auto &LAIs = getAnalysis<LoopAccessLegacyAnalysis>().getLAIs();
  1899. auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
  1900. auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
  1901. auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  1902. return Impl
  1903. .runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AC, LAIs, *ORE, PSI)
  1904. .MadeAnyChange;
  1905. }
  1906. void getAnalysisUsage(AnalysisUsage &AU) const override {
  1907. AU.addRequired<AssumptionCacheTracker>();
  1908. AU.addRequired<BlockFrequencyInfoWrapperPass>();
  1909. AU.addRequired<DominatorTreeWrapperPass>();
  1910. AU.addRequired<LoopInfoWrapperPass>();
  1911. AU.addRequired<ScalarEvolutionWrapperPass>();
  1912. AU.addRequired<TargetTransformInfoWrapperPass>();
  1913. AU.addRequired<LoopAccessLegacyAnalysis>();
  1914. AU.addRequired<DemandedBitsWrapperPass>();
  1915. AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  1916. AU.addRequired<InjectTLIMappingsLegacy>();
  1917. // We currently do not preserve loopinfo/dominator analyses with outer loop
  1918. // vectorization. Until this is addressed, mark these analyses as preserved
  1919. // only for non-VPlan-native path.
  1920. // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  1921. if (!EnableVPlanNativePath) {
  1922. AU.addPreserved<LoopInfoWrapperPass>();
  1923. AU.addPreserved<DominatorTreeWrapperPass>();
  1924. }
  1925. AU.addPreserved<BasicAAWrapperPass>();
  1926. AU.addPreserved<GlobalsAAWrapperPass>();
  1927. AU.addRequired<ProfileSummaryInfoWrapperPass>();
  1928. }
  1929. };
  1930. } // end anonymous namespace
  1931. //===----------------------------------------------------------------------===//
  1932. // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
  1933. // LoopVectorizationCostModel and LoopVectorizationPlanner.
  1934. //===----------------------------------------------------------------------===//
  1935. Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  1936. // We need to place the broadcast of invariant variables outside the loop,
1937. // but only if it's proven safe to do so. Otherwise, the broadcast will be
1938. // inside the vector loop body.
  1939. Instruction *Instr = dyn_cast<Instruction>(V);
  1940. bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
  1941. (!Instr ||
  1942. DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  1943. // Place the code for broadcasting invariant variables in the new preheader.
  1944. IRBuilder<>::InsertPointGuard Guard(Builder);
  1945. if (SafeToHoist)
  1946. Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  1947. // Broadcast the scalar into all locations in the vector.
  1948. Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
  1949. return Shuf;
  1950. }
  1951. /// This function adds
  1952. /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
1953. /// to each vector element of Val. The sequence starts at StartIdx.
1954. /// \p BinOp is relevant for FP induction variables.
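/// For illustration (hypothetical values): with Val = <10, 10, 10, 10>,
/// StartIdx = 0 and Step = 2, the result is <10, 12, 14, 16>.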
  1955. static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
  1956. Instruction::BinaryOps BinOp, ElementCount VF,
  1957. IRBuilderBase &Builder) {
  1958. assert(VF.isVector() && "only vector VFs are supported");
  1959. // Create and check the types.
  1960. auto *ValVTy = cast<VectorType>(Val->getType());
  1961. ElementCount VLen = ValVTy->getElementCount();
  1962. Type *STy = Val->getType()->getScalarType();
  1963. assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
  1964. "Induction Step must be an integer or FP");
  1965. assert(Step->getType() == STy && "Step has wrong type");
  1966. SmallVector<Constant *, 8> Indices;
  1967. // Create a vector of consecutive numbers from zero to VF.
  1968. VectorType *InitVecValVTy = ValVTy;
  1969. if (STy->isFloatingPointTy()) {
  1970. Type *InitVecValSTy =
  1971. IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
  1972. InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  1973. }
  1974. Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
  1975. // Splat the StartIdx
  1976. Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
  1977. if (STy->isIntegerTy()) {
  1978. InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
  1979. Step = Builder.CreateVectorSplat(VLen, Step);
  1980. assert(Step->getType() == Val->getType() && "Invalid step vec");
  1981. // FIXME: The newly created binary instructions should contain nsw/nuw
  1982. // flags, which can be found from the original scalar operations.
  1983. Step = Builder.CreateMul(InitVec, Step);
  1984. return Builder.CreateAdd(Val, Step, "induction");
  1985. }
  1986. // Floating point induction.
  1987. assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
  1988. "Binary Opcode should be specified for FP induction");
  1989. InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  1990. InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
  1991. Step = Builder.CreateVectorSplat(VLen, Step);
  1992. Value *MulOp = Builder.CreateFMul(InitVec, Step);
  1993. return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  1994. }
  1995. /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  1996. /// variable on which to base the steps, \p Step is the size of the step.
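/// For illustration (hypothetical values): with a scalar IV of 0, Step = 2, VF = 4
/// and UF = 1, the scalar steps recorded for the first vector iteration are
/// 0, 2, 4 and 6, one value per lane.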
  1997. static void buildScalarSteps(Value *ScalarIV, Value *Step,
  1998. const InductionDescriptor &ID, VPValue *Def,
  1999. VPTransformState &State) {
  2000. IRBuilderBase &Builder = State.Builder;
  2001. // Ensure step has the same type as that of scalar IV.
  2002. Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  2003. if (ScalarIVTy != Step->getType()) {
  2004. // TODO: Also use VPDerivedIVRecipe when only the step needs truncating, to
  2005. // avoid separate truncate here.
  2006. assert(Step->getType()->isIntegerTy() &&
  2007. "Truncation requires an integer step");
  2008. Step = State.Builder.CreateTrunc(Step, ScalarIVTy);
  2009. }
  2010. // We build scalar steps for both integer and floating-point induction
  2011. // variables. Here, we determine the kind of arithmetic we will perform.
  2012. Instruction::BinaryOps AddOp;
  2013. Instruction::BinaryOps MulOp;
  2014. if (ScalarIVTy->isIntegerTy()) {
  2015. AddOp = Instruction::Add;
  2016. MulOp = Instruction::Mul;
  2017. } else {
  2018. AddOp = ID.getInductionOpcode();
  2019. MulOp = Instruction::FMul;
  2020. }
  2021. // Determine the number of scalars we need to generate for each unroll
  2022. // iteration.
  2023. bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
  2024. // Compute the scalar steps and save the results in State.
  2025. Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
  2026. ScalarIVTy->getScalarSizeInBits());
  2027. Type *VecIVTy = nullptr;
  2028. Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  2029. if (!FirstLaneOnly && State.VF.isScalable()) {
  2030. VecIVTy = VectorType::get(ScalarIVTy, State.VF);
  2031. UnitStepVec =
  2032. Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
  2033. SplatStep = Builder.CreateVectorSplat(State.VF, Step);
  2034. SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
  2035. }
  2036. unsigned StartPart = 0;
  2037. unsigned EndPart = State.UF;
  2038. unsigned StartLane = 0;
  2039. unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
  2040. if (State.Instance) {
  2041. StartPart = State.Instance->Part;
  2042. EndPart = StartPart + 1;
  2043. StartLane = State.Instance->Lane.getKnownLane();
  2044. EndLane = StartLane + 1;
  2045. }
  2046. for (unsigned Part = StartPart; Part < EndPart; ++Part) {
  2047. Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
  2048. if (!FirstLaneOnly && State.VF.isScalable()) {
  2049. auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
  2050. auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
  2051. if (ScalarIVTy->isFloatingPointTy())
  2052. InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
  2053. auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
  2054. auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
  2055. State.set(Def, Add, Part);
  2056. // It's useful to record the lane values too for the known minimum number
  2057. // of elements so we do those below. This improves the code quality when
  2058. // trying to extract the first element, for example.
  2059. }
  2060. if (ScalarIVTy->isFloatingPointTy())
  2061. StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
  2062. for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
  2063. Value *StartIdx = Builder.CreateBinOp(
  2064. AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
  2065. // The step returned by `createStepForVF` is a runtime-evaluated value
  2066. // when VF is scalable. Otherwise, it should be folded into a Constant.
  2067. assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
  2068. "Expected StartIdx to be folded to a constant when VF is not "
  2069. "scalable");
  2070. auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
  2071. auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
  2072. State.set(Def, Add, VPIteration(Part, Lane));
  2073. }
  2074. }
  2075. }
  2076. // Generate code for the induction step. Note that induction steps are
2077. // required to be loop-invariant.
  2078. static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
  2079. Instruction *InsertBefore,
  2080. Loop *OrigLoop = nullptr) {
  2081. const DataLayout &DL = SE.getDataLayout();
  2082. assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
  2083. "Induction step should be loop invariant");
  2084. if (auto *E = dyn_cast<SCEVUnknown>(Step))
  2085. return E->getValue();
  2086. SCEVExpander Exp(SE, DL, "induction");
  2087. return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
  2088. }
  2089. /// Compute the transformed value of Index at offset StartValue using step
  2090. /// StepValue.
  2091. /// For integer induction, returns StartValue + Index * StepValue.
  2092. /// For pointer induction, returns StartValue[Index * StepValue].
  2093. /// FIXME: The newly created binary instructions should contain nsw/nuw
  2094. /// flags, which can be found from the original scalar operations.
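/// For illustration (hypothetical values): for an integer induction with
/// StartValue = 10, Step = 3 and Index = 4, this returns 10 + 4 * 3 = 22; for a
/// pointer induction it returns the address of StartValue[4 * 3].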
  2095. static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
  2096. Value *StartValue, Value *Step,
  2097. const InductionDescriptor &ID) {
  2098. Type *StepTy = Step->getType();
  2099. Value *CastedIndex = StepTy->isIntegerTy()
  2100. ? B.CreateSExtOrTrunc(Index, StepTy)
  2101. : B.CreateCast(Instruction::SIToFP, Index, StepTy);
  2102. if (CastedIndex != Index) {
  2103. CastedIndex->setName(CastedIndex->getName() + ".cast");
  2104. Index = CastedIndex;
  2105. }
  2106. // Note: the IR at this point is broken. We cannot use SE to create any new
  2107. // SCEV and then expand it, hoping that SCEV's simplification will give us
2108. // more optimal code. Unfortunately, attempting to do so on invalid IR may
  2109. // lead to various SCEV crashes. So all we can do is to use builder and rely
  2110. // on InstCombine for future simplifications. Here we handle some trivial
  2111. // cases only.
  2112. auto CreateAdd = [&B](Value *X, Value *Y) {
  2113. assert(X->getType() == Y->getType() && "Types don't match!");
  2114. if (auto *CX = dyn_cast<ConstantInt>(X))
  2115. if (CX->isZero())
  2116. return Y;
  2117. if (auto *CY = dyn_cast<ConstantInt>(Y))
  2118. if (CY->isZero())
  2119. return X;
  2120. return B.CreateAdd(X, Y);
  2121. };
  2122. // We allow X to be a vector type, in which case Y will potentially be
  2123. // splatted into a vector with the same element count.
  2124. auto CreateMul = [&B](Value *X, Value *Y) {
  2125. assert(X->getType()->getScalarType() == Y->getType() &&
  2126. "Types don't match!");
  2127. if (auto *CX = dyn_cast<ConstantInt>(X))
  2128. if (CX->isOne())
  2129. return Y;
  2130. if (auto *CY = dyn_cast<ConstantInt>(Y))
  2131. if (CY->isOne())
  2132. return X;
  2133. VectorType *XVTy = dyn_cast<VectorType>(X->getType());
  2134. if (XVTy && !isa<VectorType>(Y->getType()))
  2135. Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
  2136. return B.CreateMul(X, Y);
  2137. };
  2138. switch (ID.getKind()) {
  2139. case InductionDescriptor::IK_IntInduction: {
  2140. assert(!isa<VectorType>(Index->getType()) &&
  2141. "Vector indices not supported for integer inductions yet");
  2142. assert(Index->getType() == StartValue->getType() &&
  2143. "Index type does not match StartValue type");
  2144. if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
  2145. return B.CreateSub(StartValue, Index);
  2146. auto *Offset = CreateMul(Index, Step);
  2147. return CreateAdd(StartValue, Offset);
  2148. }
  2149. case InductionDescriptor::IK_PtrInduction: {
  2150. assert(isa<Constant>(Step) &&
  2151. "Expected constant step for pointer induction");
  2152. return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
  2153. }
  2154. case InductionDescriptor::IK_FpInduction: {
  2155. assert(!isa<VectorType>(Index->getType()) &&
  2156. "Vector indices not supported for FP inductions yet");
  2157. assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
  2158. auto InductionBinOp = ID.getInductionBinOp();
  2159. assert(InductionBinOp &&
  2160. (InductionBinOp->getOpcode() == Instruction::FAdd ||
  2161. InductionBinOp->getOpcode() == Instruction::FSub) &&
  2162. "Original bin op should be defined for FP induction");
  2163. Value *MulExp = B.CreateFMul(Step, Index);
  2164. return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
  2165. "induction");
  2166. }
  2167. case InductionDescriptor::IK_NoInduction:
  2168. return nullptr;
  2169. }
  2170. llvm_unreachable("invalid enum");
  2171. }
  2172. void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
  2173. const VPIteration &Instance,
  2174. VPTransformState &State) {
  2175. Value *ScalarInst = State.get(Def, Instance);
  2176. Value *VectorValue = State.get(Def, Instance.Part);
  2177. VectorValue = Builder.CreateInsertElement(
  2178. VectorValue, ScalarInst,
  2179. Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
  2180. State.set(Def, VectorValue, Instance.Part);
  2181. }
  2182. // Return whether we allow using masked interleave-groups (for dealing with
  2183. // strided loads/stores that reside in predicated blocks, or for dealing
  2184. // with gaps).
  2185. static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  2186. // If an override option has been passed in for interleaved accesses, use it.
  2187. if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
  2188. return EnableMaskedInterleavedMemAccesses;
  2189. return TTI.enableMaskedInterleavedAccessVectorization();
  2190. }
  2191. // Try to vectorize the interleave group that \p Instr belongs to.
  2192. //
  2193. // E.g. Translate following interleaved load group (factor = 3):
  2194. // for (i = 0; i < N; i+=3) {
  2195. // R = Pic[i]; // Member of index 0
  2196. // G = Pic[i+1]; // Member of index 1
  2197. // B = Pic[i+2]; // Member of index 2
  2198. // ... // do something to R, G, B
  2199. // }
  2200. // To:
  2201. // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
  2202. // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
  2203. // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
  2204. // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
  2205. //
  2206. // Or translate following interleaved store group (factor = 3):
  2207. // for (i = 0; i < N; i+=3) {
  2208. // ... do something to R, G, B
  2209. // Pic[i] = R; // Member of index 0
  2210. // Pic[i+1] = G; // Member of index 1
  2211. // Pic[i+2] = B; // Member of index 2
  2212. // }
  2213. // To:
  2214. // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
  2215. // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
  2216. // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
  2217. // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
  2218. // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
  2219. void InnerLoopVectorizer::vectorizeInterleaveGroup(
  2220. const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
  2221. VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
  2222. VPValue *BlockInMask) {
  2223. Instruction *Instr = Group->getInsertPos();
  2224. const DataLayout &DL = Instr->getModule()->getDataLayout();
  2225. // Prepare for the vector type of the interleaved load/store.
  2226. Type *ScalarTy = getLoadStoreType(Instr);
  2227. unsigned InterleaveFactor = Group->getFactor();
  2228. assert(!VF.isScalable() && "scalable vectors not yet supported.");
  2229. auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
  2230. // Prepare for the new pointers.
  2231. SmallVector<Value *, 2> AddrParts;
  2232. unsigned Index = Group->getIndex(Instr);
  2233. // TODO: extend the masked interleaved-group support to reversed access.
  2234. assert((!BlockInMask || !Group->isReverse()) &&
  2235. "Reversed masked interleave-group not supported.");
  2236. // If the group is reverse, adjust the index to refer to the last vector lane
  2237. // instead of the first. We adjust the index from the first vector lane,
  2238. // rather than directly getting the pointer for lane VF - 1, because the
  2239. // pointer operand of the interleaved access is supposed to be uniform. For
  2240. // uniform instructions, we're only required to generate a value for the
  2241. // first vector lane in each unroll iteration.
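// For illustration (hypothetical values): with VF = 4 and an interleave factor of
// 3, a reversed group bumps Index by (4 - 1) * 3 = 9, so the single address
// emitted per part still covers the tuples that the reversed lanes access.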
  2242. if (Group->isReverse())
  2243. Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
  2244. for (unsigned Part = 0; Part < UF; Part++) {
  2245. Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
  2246. State.setDebugLocFromInst(AddrPart);
2247. // Note that the current instruction could have any index in the group. We need
2248. // to adjust the address to the member of index 0.
  2249. //
  2250. // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
  2251. // b = A[i]; // Member of index 0
2252. // The current pointer points to A[i+1]; adjust it to A[i].
  2253. //
  2254. // E.g. A[i+1] = a; // Member of index 1
  2255. // A[i] = b; // Member of index 0
  2256. // A[i+2] = c; // Member of index 2 (Current instruction)
2257. // The current pointer points to A[i+2]; adjust it to A[i].
  2258. bool InBounds = false;
  2259. if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
  2260. InBounds = gep->isInBounds();
  2261. AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
  2262. cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
  2263. // Cast to the vector pointer type.
  2264. unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
  2265. Type *PtrTy = VecTy->getPointerTo(AddressSpace);
  2266. AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  2267. }
  2268. State.setDebugLocFromInst(Instr);
  2269. Value *PoisonVec = PoisonValue::get(VecTy);
  2270. Value *MaskForGaps = nullptr;
  2271. if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
  2272. MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
  2273. assert(MaskForGaps && "Mask for Gaps is required but it is null");
  2274. }
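// For illustration (hypothetical example): for a factor-3 load group with VF = 4
// whose member at index 2 is absent, the gap mask is
//   <1,1,0, 1,1,0, 1,1,0, 1,1,0>
// so the lanes that would read the missing member are never accessed.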
  2275. // Vectorize the interleaved load group.
  2276. if (isa<LoadInst>(Instr)) {
  2277. // For each unroll part, create a wide load for the group.
  2278. SmallVector<Value *, 2> NewLoads;
  2279. for (unsigned Part = 0; Part < UF; Part++) {
  2280. Instruction *NewLoad;
  2281. if (BlockInMask || MaskForGaps) {
  2282. assert(useMaskedInterleavedAccesses(*TTI) &&
  2283. "masked interleaved groups are not allowed.");
  2284. Value *GroupMask = MaskForGaps;
  2285. if (BlockInMask) {
  2286. Value *BlockInMaskPart = State.get(BlockInMask, Part);
  2287. Value *ShuffledMask = Builder.CreateShuffleVector(
  2288. BlockInMaskPart,
  2289. createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
  2290. "interleaved.mask");
  2291. GroupMask = MaskForGaps
  2292. ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
  2293. MaskForGaps)
  2294. : ShuffledMask;
  2295. }
  2296. NewLoad =
  2297. Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
  2298. GroupMask, PoisonVec, "wide.masked.vec");
  2299. }
  2300. else
  2301. NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
  2302. Group->getAlign(), "wide.vec");
  2303. Group->addMetadata(NewLoad);
  2304. NewLoads.push_back(NewLoad);
  2305. }
  2306. // For each member in the group, shuffle out the appropriate data from the
  2307. // wide loads.
  2308. unsigned J = 0;
  2309. for (unsigned I = 0; I < InterleaveFactor; ++I) {
  2310. Instruction *Member = Group->getMember(I);
  2311. // Skip the gaps in the group.
  2312. if (!Member)
  2313. continue;
  2314. auto StrideMask =
  2315. createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
  2316. for (unsigned Part = 0; Part < UF; Part++) {
  2317. Value *StridedVec = Builder.CreateShuffleVector(
  2318. NewLoads[Part], StrideMask, "strided.vec");
2319. // If this member has a different type, cast the result to that type.
  2320. if (Member->getType() != ScalarTy) {
  2321. assert(!VF.isScalable() && "VF is assumed to be non scalable.");
  2322. VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
  2323. StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
  2324. }
  2325. if (Group->isReverse())
  2326. StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
  2327. State.set(VPDefs[J], StridedVec, Part);
  2328. }
  2329. ++J;
  2330. }
  2331. return;
  2332. }
2333. // The subvector type for the current instruction.
  2334. auto *SubVT = VectorType::get(ScalarTy, VF);
  2335. // Vectorize the interleaved store group.
  2336. MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
  2337. assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
  2338. "masked interleaved groups are not allowed.");
  2339. assert((!MaskForGaps || !VF.isScalable()) &&
  2340. "masking gaps for scalable vectors is not yet supported.");
  2341. for (unsigned Part = 0; Part < UF; Part++) {
  2342. // Collect the stored vector from each member.
  2343. SmallVector<Value *, 4> StoredVecs;
  2344. unsigned StoredIdx = 0;
  2345. for (unsigned i = 0; i < InterleaveFactor; i++) {
  2346. assert((Group->getMember(i) || MaskForGaps) &&
  2347. "Fail to get a member from an interleaved store group");
  2348. Instruction *Member = Group->getMember(i);
  2349. // Skip the gaps in the group.
  2350. if (!Member) {
  2351. Value *Undef = PoisonValue::get(SubVT);
  2352. StoredVecs.push_back(Undef);
  2353. continue;
  2354. }
  2355. Value *StoredVec = State.get(StoredValues[StoredIdx], Part);
  2356. ++StoredIdx;
  2357. if (Group->isReverse())
  2358. StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2359. // If this member has a different type, cast it to a unified type.
  2360. if (StoredVec->getType() != SubVT)
  2361. StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
  2362. StoredVecs.push_back(StoredVec);
  2363. }
  2364. // Concatenate all vectors into a wide vector.
  2365. Value *WideVec = concatenateVectors(Builder, StoredVecs);
  2366. // Interleave the elements in the wide vector.
  2367. Value *IVec = Builder.CreateShuffleVector(
  2368. WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
  2369. "interleaved.vec");
  2370. Instruction *NewStoreInstr;
  2371. if (BlockInMask || MaskForGaps) {
  2372. Value *GroupMask = MaskForGaps;
  2373. if (BlockInMask) {
  2374. Value *BlockInMaskPart = State.get(BlockInMask, Part);
  2375. Value *ShuffledMask = Builder.CreateShuffleVector(
  2376. BlockInMaskPart,
  2377. createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
  2378. "interleaved.mask");
  2379. GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
  2380. ShuffledMask, MaskForGaps)
  2381. : ShuffledMask;
  2382. }
  2383. NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
  2384. Group->getAlign(), GroupMask);
  2385. } else
  2386. NewStoreInstr =
  2387. Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
  2388. Group->addMetadata(NewStoreInstr);
  2389. }
  2390. }
  2391. void InnerLoopVectorizer::scalarizeInstruction(const Instruction *Instr,
  2392. VPReplicateRecipe *RepRecipe,
  2393. const VPIteration &Instance,
  2394. bool IfPredicateInstr,
  2395. VPTransformState &State) {
  2396. assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  2397. // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
  2398. // the first lane and part.
  2399. if (isa<NoAliasScopeDeclInst>(Instr))
  2400. if (!Instance.isFirstIteration())
  2401. return;
2402. // Does this instruction return a value?
  2403. bool IsVoidRetTy = Instr->getType()->isVoidTy();
  2404. Instruction *Cloned = Instr->clone();
  2405. if (!IsVoidRetTy)
  2406. Cloned->setName(Instr->getName() + ".cloned");
  2407. // If the scalarized instruction contributes to the address computation of a
2408. // widened masked load/store which was in a basic block that needed predication
  2409. // and is not predicated after vectorization, we can't propagate
  2410. // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
2411. // instruction could feed a poison value to the base address of the widened
  2412. // load/store.
  2413. if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
  2414. Cloned->dropPoisonGeneratingFlags();
  2415. if (Instr->getDebugLoc())
  2416. State.setDebugLocFromInst(Instr);
  2417. // Replace the operands of the cloned instructions with their scalar
  2418. // equivalents in the new loop.
  2419. for (const auto &I : enumerate(RepRecipe->operands())) {
  2420. auto InputInstance = Instance;
  2421. VPValue *Operand = I.value();
  2422. if (vputils::isUniformAfterVectorization(Operand))
  2423. InputInstance.Lane = VPLane::getFirstLane();
  2424. Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
  2425. }
  2426. State.addNewMetadata(Cloned, Instr);
  2427. // Place the cloned scalar in the new loop.
  2428. State.Builder.Insert(Cloned);
  2429. State.set(RepRecipe, Cloned, Instance);
2430. // If we just cloned a new assumption, add it to the assumption cache.
  2431. if (auto *II = dyn_cast<AssumeInst>(Cloned))
  2432. AC->registerAssumption(II);
  2433. // End if-block.
  2434. if (IfPredicateInstr)
  2435. PredicatedInstructions.push_back(Cloned);
  2436. }
  2437. Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
  2438. if (TripCount)
  2439. return TripCount;
  2440. assert(InsertBlock);
  2441. IRBuilder<> Builder(InsertBlock->getTerminator());
  2442. // Find the loop boundaries.
  2443. Type *IdxTy = Legal->getWidestInductionType();
  2444. assert(IdxTy && "No type for induction");
  2445. const SCEV *ExitCount = createTripCountSCEV(IdxTy, PSE);
  2446. const DataLayout &DL = InsertBlock->getModule()->getDataLayout();
  2447. // Expand the trip count and place the new instructions in the preheader.
  2448. // Notice that the pre-header does not change, only the loop body.
  2449. SCEVExpander Exp(*PSE.getSE(), DL, "induction");
  2450. // Count holds the overall loop count (N).
  2451. TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
  2452. InsertBlock->getTerminator());
  2453. if (TripCount->getType()->isPointerTy())
  2454. TripCount =
  2455. CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
  2456. InsertBlock->getTerminator());
  2457. return TripCount;
  2458. }
  2459. Value *
  2460. InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
  2461. if (VectorTripCount)
  2462. return VectorTripCount;
  2463. Value *TC = getOrCreateTripCount(InsertBlock);
  2464. IRBuilder<> Builder(InsertBlock->getTerminator());
  2465. Type *Ty = TC->getType();
  2466. // This is where we can make the step a runtime constant.
  2467. Value *Step = createStepForVF(Builder, Ty, VF, UF);
  2468. // If the tail is to be folded by masking, round the number of iterations N
  2469. // up to a multiple of Step instead of rounding down. This is done by first
  2470. // adding Step-1 and then rounding down. Note that it's ok if this addition
  2471. // overflows: the vector induction variable will eventually wrap to zero given
  2472. // that it starts at zero and its Step is a power of two; the loop will then
  2473. // exit, with the last early-exit vector comparison also producing all-true.
  2474. // For scalable vectors the VF is not guaranteed to be a power of 2, but this
  2475. // is accounted for in emitIterationCountCheck that adds an overflow check.
  2476. if (Cost->foldTailByMasking()) {
  2477. assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
  2478. "VF*UF must be a power of 2 when folding tail by masking");
  2479. Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
  2480. TC = Builder.CreateAdd(
  2481. TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
  2482. }
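// Worked example (illustrative numbers): with VF * UF = 4 and N = 10, the
// rounded-up count is 10 + 3 = 13; the URem below then yields 13 % 4 = 1 and
// the vector trip count becomes 12, so the masked vector loop covers all 10
// original iterations in three vector steps.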
  2483. // Now we need to generate the expression for the part of the loop that the
  2484. // vectorized body will execute. This is equal to N - (N % Step) if scalar
  2485. // iterations are not required for correctness, or N - Step, otherwise. Step
  2486. // is equal to the vectorization factor (number of SIMD elements) times the
  2487. // unroll factor (number of SIMD instructions).
  2488. Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
  2489. // There are cases where we *must* run at least one iteration in the remainder
  2490. // loop. See the cost model for when this can happen. If the step evenly
  2491. // divides the trip count, we set the remainder to be equal to the step. If
  2492. // the step does not evenly divide the trip count, no adjustment is necessary
  2493. // since there will already be scalar iterations. Note that the minimum
  2494. // iterations check ensures that N >= Step.
  2495. if (Cost->requiresScalarEpilogue(VF)) {
  2496. auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
  2497. R = Builder.CreateSelect(IsZero, Step, R);
  2498. }
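// Illustrative case: if N = 16 and Step = 4, R would be 0 and the scalar
// epilogue would otherwise run zero iterations; forcing R to Step leaves one
// full vector step (4 iterations) for the required epilogue.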
  2499. VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
  2500. return VectorTripCount;
  2501. }
  2502. Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
  2503. const DataLayout &DL) {
  2504. // Verify that V is a vector type with same number of elements as DstVTy.
  2505. auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  2506. unsigned VF = DstFVTy->getNumElements();
  2507. auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  2508. assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
  2509. Type *SrcElemTy = SrcVecTy->getElementType();
  2510. Type *DstElemTy = DstFVTy->getElementType();
  2511. assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
  2512. "Vector elements must have same size");
  2513. // Do a direct cast if element types are castable.
  2514. if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
  2515. return Builder.CreateBitOrPointerCast(V, DstFVTy);
  2516. }
  2517. // V cannot be directly casted to desired vector type.
  2518. // May happen when V is a floating point vector but DstVTy is a vector of
  2519. // pointers or vice-versa. Handle this using a two-step bitcast using an
  2520. // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
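// Minimal sketch of the two-step case (assuming 64-bit pointers): casting
// <2 x double> to <2 x ptr> is emitted as <2 x double> -> <2 x i64> -> <2 x ptr>,
// because a single bitcast between floating-point and pointer vectors is not
// a valid IR cast.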
  2521. assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
  2522. "Only one type should be a pointer type");
  2523. assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
  2524. "Only one type should be a floating point type");
  2525. Type *IntTy =
  2526. IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  2527. auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  2528. Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  2529. return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
  2530. }
  2531. void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
  2532. Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  2533. // Reuse existing vector loop preheader for TC checks.
2534. // Note that a new preheader block is generated for the vector loop.
  2535. BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  2536. IRBuilder<> Builder(TCCheckBlock->getTerminator());
  2537. // Generate code to check if the loop's trip count is less than VF * UF, or
  2538. // equal to it in case a scalar epilogue is required; this implies that the
  2539. // vector trip count is zero. This check also covers the case where adding one
  2540. // to the backedge-taken count overflowed leading to an incorrect trip count
  2541. // of zero. In this case we will also jump to the scalar loop.
  2542. auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
  2543. : ICmpInst::ICMP_ULT;
  2544. // If tail is to be folded, vector loop takes care of all iterations.
  2545. Type *CountTy = Count->getType();
  2546. Value *CheckMinIters = Builder.getFalse();
  2547. auto CreateStep = [&]() -> Value * {
2548. // Create the step as max(MinProfitableTripCount, UF * VF).
  2549. if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
  2550. return createStepForVF(Builder, CountTy, VF, UF);
  2551. Value *MinProfTC =
  2552. createStepForVF(Builder, CountTy, MinProfitableTripCount, 1);
  2553. if (!VF.isScalable())
  2554. return MinProfTC;
  2555. return Builder.CreateBinaryIntrinsic(
  2556. Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
  2557. };
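// For instance (illustrative values): with VF = vscale x 4, UF = 2 and a
// minimum profitable trip count of 16, the step is umax(16, vscale * 8), so
// the minimum-iterations check built below sends smaller trip counts to the
// scalar loop.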
  2558. if (!Cost->foldTailByMasking())
  2559. CheckMinIters =
  2560. Builder.CreateICmp(P, Count, CreateStep(), "min.iters.check");
  2561. else if (VF.isScalable()) {
  2562. // vscale is not necessarily a power-of-2, which means we cannot guarantee
  2563. // an overflow to zero when updating induction variables and so an
  2564. // additional overflow check is required before entering the vector loop.
  2565. // Get the maximum unsigned value for the type.
  2566. Value *MaxUIntTripCount =
  2567. ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
  2568. Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
  2569. // Don't execute the vector loop if (UMax - n) < (VF * UF).
  2570. CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
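// Illustrative overflow case (i32 trip count): if Count = 0xFFFFFFFE and the
// runtime step is 8, then UMax - Count = 1 < 8, so the scalar loop is taken
// and the wrap-around that adding the step to the induction variable would
// cause is avoided.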
  2571. }
  2572. // Create new preheader for vector loop.
  2573. LoopVectorPreHeader =
  2574. SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
  2575. "vector.ph");
  2576. assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
  2577. DT->getNode(Bypass)->getIDom()) &&
  2578. "TC check is expected to dominate Bypass");
  2579. // Update dominator for Bypass & LoopExit (if needed).
  2580. DT->changeImmediateDominator(Bypass, TCCheckBlock);
  2581. if (!Cost->requiresScalarEpilogue(VF))
  2582. // If there is an epilogue which must run, there's no edge from the
  2583. // middle block to exit blocks and thus no need to update the immediate
  2584. // dominator of the exit blocks.
  2585. DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
  2586. ReplaceInstWithInst(
  2587. TCCheckBlock->getTerminator(),
  2588. BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  2589. LoopBypassBlocks.push_back(TCCheckBlock);
  2590. }
  2591. BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
  2592. BasicBlock *const SCEVCheckBlock =
  2593. RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
  2594. if (!SCEVCheckBlock)
  2595. return nullptr;
  2596. assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
  2597. (OptForSizeBasedOnProfile &&
  2598. Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
  2599. "Cannot SCEV check stride or overflow when optimizing for size");
  2600. // Update dominator only if this is first RT check.
  2601. if (LoopBypassBlocks.empty()) {
  2602. DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
  2603. if (!Cost->requiresScalarEpilogue(VF))
  2604. // If there is an epilogue which must run, there's no edge from the
  2605. // middle block to exit blocks and thus no need to update the immediate
  2606. // dominator of the exit blocks.
  2607. DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  2608. }
  2609. LoopBypassBlocks.push_back(SCEVCheckBlock);
  2610. AddedSafetyChecks = true;
  2611. return SCEVCheckBlock;
  2612. }
  2613. BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
  2614. // VPlan-native path does not do any analysis for runtime checks currently.
  2615. if (EnableVPlanNativePath)
  2616. return nullptr;
  2617. BasicBlock *const MemCheckBlock =
  2618. RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);
2619. // Check if we generated code that checks at runtime whether arrays overlap. We put
  2620. // the checks into a separate block to make the more common case of few
  2621. // elements faster.
  2622. if (!MemCheckBlock)
  2623. return nullptr;
  2624. if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
  2625. assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
  2626. "Cannot emit memory checks when optimizing for size, unless forced "
  2627. "to vectorize.");
  2628. ORE->emit([&]() {
  2629. return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
  2630. OrigLoop->getStartLoc(),
  2631. OrigLoop->getHeader())
  2632. << "Code-size may be reduced by not forcing "
  2633. "vectorization, or by source-code modifications "
  2634. "eliminating the need for runtime checks "
  2635. "(e.g., adding 'restrict').";
  2636. });
  2637. }
  2638. LoopBypassBlocks.push_back(MemCheckBlock);
  2639. AddedSafetyChecks = true;
  2640. return MemCheckBlock;
  2641. }
  2642. void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  2643. LoopScalarBody = OrigLoop->getHeader();
  2644. LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  2645. assert(LoopVectorPreHeader && "Invalid loop structure");
  2646. LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  2647. assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
  2648. "multiple exit loop without required epilogue?");
  2649. LoopMiddleBlock =
  2650. SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
  2651. LI, nullptr, Twine(Prefix) + "middle.block");
  2652. LoopScalarPreHeader =
  2653. SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
  2654. nullptr, Twine(Prefix) + "scalar.ph");
  2655. auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
  2656. // Set up the middle block terminator. Two cases:
  2657. // 1) If we know that we must execute the scalar epilogue, emit an
  2658. // unconditional branch.
  2659. // 2) Otherwise, we must have a single unique exit block (due to how we
  2660. // implement the multiple exit case). In this case, set up a conditional
  2661. // branch from the middle block to the loop scalar preheader, and the
  2662. // exit block. completeLoopSkeleton will update the condition to use an
  2663. // iteration check, if required to decide whether to execute the remainder.
  2664. BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
  2665. BranchInst::Create(LoopScalarPreHeader) :
  2666. BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
  2667. Builder.getTrue());
  2668. BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  2669. ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
  2670. // Update dominator for loop exit. During skeleton creation, only the vector
  2671. // pre-header and the middle block are created. The vector loop is entirely
2672. // created during VPlan execution.
  2673. if (!Cost->requiresScalarEpilogue(VF))
  2674. // If there is an epilogue which must run, there's no edge from the
  2675. // middle block to exit blocks and thus no need to update the immediate
  2676. // dominator of the exit blocks.
  2677. DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
  2678. }
  2679. PHINode *InnerLoopVectorizer::createInductionResumeValue(
  2680. PHINode *OrigPhi, const InductionDescriptor &II,
  2681. ArrayRef<BasicBlock *> BypassBlocks,
  2682. std::pair<BasicBlock *, Value *> AdditionalBypass) {
  2683. Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
  2684. assert(VectorTripCount && "Expected valid arguments");
  2685. Instruction *OldInduction = Legal->getPrimaryInduction();
  2686. Value *&EndValue = IVEndValues[OrigPhi];
  2687. Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
  2688. if (OrigPhi == OldInduction) {
  2689. // We know what the end value is.
  2690. EndValue = VectorTripCount;
  2691. } else {
  2692. IRBuilder<> B(LoopVectorPreHeader->getTerminator());
  2693. // Fast-math-flags propagate from the original induction instruction.
  2694. if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
  2695. B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
  2696. Value *Step =
  2697. CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
  2698. EndValue =
  2699. emitTransformedIndex(B, VectorTripCount, II.getStartValue(), Step, II);
  2700. EndValue->setName("ind.end");
  2701. // Compute the end value for the additional bypass (if applicable).
  2702. if (AdditionalBypass.first) {
  2703. B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
  2704. Value *Step =
  2705. CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
  2706. EndValueFromAdditionalBypass = emitTransformedIndex(
  2707. B, AdditionalBypass.second, II.getStartValue(), Step, II);
  2708. EndValueFromAdditionalBypass->setName("ind.end");
  2709. }
  2710. }
  2711. // Create phi nodes to merge from the backedge-taken check block.
  2712. PHINode *BCResumeVal = PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
  2713. LoopScalarPreHeader->getTerminator());
  2714. // Copy original phi DL over to the new one.
  2715. BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
  2716. // The new PHI merges the original incoming value, in case of a bypass,
  2717. // or the value at the end of the vectorized loop.
  2718. BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
  2719. // Fix the scalar body counter (PHI node).
  2720. // The old induction's phi node in the scalar body needs the truncated
  2721. // value.
  2722. for (BasicBlock *BB : BypassBlocks)
  2723. BCResumeVal->addIncoming(II.getStartValue(), BB);
  2724. if (AdditionalBypass.first)
  2725. BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
  2726. EndValueFromAdditionalBypass);
  2727. return BCResumeVal;
  2728. }
  2729. void InnerLoopVectorizer::createInductionResumeValues(
  2730. std::pair<BasicBlock *, Value *> AdditionalBypass) {
  2731. assert(((AdditionalBypass.first && AdditionalBypass.second) ||
  2732. (!AdditionalBypass.first && !AdditionalBypass.second)) &&
  2733. "Inconsistent information about additional bypass.");
  2734. // We are going to resume the execution of the scalar loop.
  2735. // Go over all of the induction variables that we found and fix the
  2736. // PHIs that are left in the scalar version of the loop.
  2737. // The starting values of PHI nodes depend on the counter of the last
  2738. // iteration in the vectorized loop.
  2739. // If we come from a bypass edge then we need to start from the original
  2740. // start value.
  2741. for (const auto &InductionEntry : Legal->getInductionVars()) {
  2742. PHINode *OrigPhi = InductionEntry.first;
  2743. const InductionDescriptor &II = InductionEntry.second;
  2744. PHINode *BCResumeVal = createInductionResumeValue(
  2745. OrigPhi, II, LoopBypassBlocks, AdditionalBypass);
  2746. OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  2747. }
  2748. }
  2749. BasicBlock *InnerLoopVectorizer::completeLoopSkeleton() {
  2750. // The trip counts should be cached by now.
  2751. Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  2752. Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
  2753. auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
  2754. // Add a check in the middle block to see if we have completed
  2755. // all of the iterations in the first vector loop. Three cases:
  2756. // 1) If we require a scalar epilogue, there is no conditional branch as
  2757. // we unconditionally branch to the scalar preheader. Do nothing.
  2758. // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
  2759. // Thus if tail is to be folded, we know we don't need to run the
  2760. // remainder and we can use the previous value for the condition (true).
  2761. // 3) Otherwise, construct a runtime check.
  2762. if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
  2763. Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
  2764. Count, VectorTripCount, "cmp.n",
  2765. LoopMiddleBlock->getTerminator());
  2766. // Here we use the same DebugLoc as the scalar loop latch terminator instead
  2767. // of the corresponding compare because they may have ended up with
  2768. // different line numbers and we want to avoid awkward line stepping while
  2769. // debugging. Eg. if the compare has got a line number inside the loop.
  2770. CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  2771. cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
  2772. }
  2773. #ifdef EXPENSIVE_CHECKS
  2774. assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  2775. #endif
  2776. return LoopVectorPreHeader;
  2777. }
  2778. std::pair<BasicBlock *, Value *>
  2779. InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  2780. /*
  2781. In this function we generate a new loop. The new loop will contain
  2782. the vectorized instructions while the old loop will continue to run the
  2783. scalar remainder.
  2784. [ ] <-- loop iteration number check.
  2785. / |
  2786. / v
  2787. | [ ] <-- vector loop bypass (may consist of multiple blocks).
  2788. | / |
  2789. | / v
  2790. || [ ] <-- vector pre header.
  2791. |/ |
  2792. | v
  2793. | [ ] \
  2794. | [ ]_| <-- vector loop (created during VPlan execution).
  2795. | |
  2796. | v
  2797. \ -[ ] <--- middle-block.
  2798. \/ |
  2799. /\ v
  2800. | ->[ ] <--- new preheader.
  2801. | |
  2802. (opt) v <-- edge from middle to exit iff epilogue is not required.
  2803. | [ ] \
  2804. | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue).
  2805. \ |
  2806. \ v
  2807. >[ ] <-- exit block(s).
  2808. ...
  2809. */
  2810. // Create an empty vector loop, and prepare basic blocks for the runtime
  2811. // checks.
  2812. createVectorLoopSkeleton("");
  2813. // Now, compare the new count to zero. If it is zero skip the vector loop and
  2814. // jump to the scalar loop. This check also covers the case where the
  2815. // backedge-taken count is uint##_max: adding one to it will overflow leading
  2816. // to an incorrect trip count of zero. In this (rare) case we will also jump
  2817. // to the scalar loop.
  2818. emitIterationCountCheck(LoopScalarPreHeader);
  2819. // Generate the code to check any assumptions that we've made for SCEV
  2820. // expressions.
  2821. emitSCEVChecks(LoopScalarPreHeader);
2822. // Generate the code that checks at runtime whether arrays overlap. We put the
  2823. // checks into a separate block to make the more common case of few elements
  2824. // faster.
  2825. emitMemRuntimeChecks(LoopScalarPreHeader);
  2826. // Emit phis for the new starting index of the scalar loop.
  2827. createInductionResumeValues();
  2828. return {completeLoopSkeleton(), nullptr};
  2829. }
  2830. // Fix up external users of the induction variable. At this point, we are
  2831. // in LCSSA form, with all external PHIs that use the IV having one input value,
  2832. // coming from the remainder loop. We need those PHIs to also have a correct
  2833. // value for the IV when arriving directly from the middle block.
  2834. void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
  2835. const InductionDescriptor &II,
  2836. Value *VectorTripCount, Value *EndValue,
  2837. BasicBlock *MiddleBlock,
  2838. BasicBlock *VectorHeader, VPlan &Plan) {
  2839. // There are two kinds of external IV usages - those that use the value
  2840. // computed in the last iteration (the PHI) and those that use the penultimate
  2841. // value (the value that feeds into the phi from the loop latch).
  2842. // We allow both, but they, obviously, have different values.
  2843. assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
  2844. DenseMap<Value *, Value *> MissingVals;
  2845. // An external user of the last iteration's value should see the value that
  2846. // the remainder loop uses to initialize its own IV.
  2847. Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  2848. for (User *U : PostInc->users()) {
  2849. Instruction *UI = cast<Instruction>(U);
  2850. if (!OrigLoop->contains(UI)) {
  2851. assert(isa<PHINode>(UI) && "Expected LCSSA form");
  2852. MissingVals[UI] = EndValue;
  2853. }
  2854. }
2855. // An external user of the penultimate value needs to see EndValue - Step.
  2856. // The simplest way to get this is to recompute it from the constituent SCEVs,
  2857. // that is Start + (Step * (CRD - 1)).
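// For example (illustrative induction): with Start = 0, Step = 1 and a vector
// trip count of 8, the escape value computed below is 0 + 1 * (8 - 1) = 7,
// i.e. the value the IV phi held during the last vector-executed iteration.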
  2858. for (User *U : OrigPhi->users()) {
  2859. auto *UI = cast<Instruction>(U);
  2860. if (!OrigLoop->contains(UI)) {
  2861. assert(isa<PHINode>(UI) && "Expected LCSSA form");
  2862. IRBuilder<> B(MiddleBlock->getTerminator());
  2863. // Fast-math-flags propagate from the original induction instruction.
  2864. if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
  2865. B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
  2866. Value *CountMinusOne = B.CreateSub(
  2867. VectorTripCount, ConstantInt::get(VectorTripCount->getType(), 1));
  2868. CountMinusOne->setName("cmo");
  2869. Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
  2870. VectorHeader->getTerminator());
  2871. Value *Escape =
  2872. emitTransformedIndex(B, CountMinusOne, II.getStartValue(), Step, II);
  2873. Escape->setName("ind.escape");
  2874. MissingVals[UI] = Escape;
  2875. }
  2876. }
  2877. for (auto &I : MissingVals) {
  2878. PHINode *PHI = cast<PHINode>(I.first);
2879. // One corner case we have to handle is two IVs "chasing" each other,
  2880. // that is %IV2 = phi [...], [ %IV1, %latch ]
  2881. // In this case, if IV1 has an external use, we need to avoid adding both
  2882. // "last value of IV1" and "penultimate value of IV2". So, verify that we
  2883. // don't already have an incoming value for the middle block.
  2884. if (PHI->getBasicBlockIndex(MiddleBlock) == -1) {
  2885. PHI->addIncoming(I.second, MiddleBlock);
  2886. Plan.removeLiveOut(PHI);
  2887. }
  2888. }
  2889. }
  2890. namespace {
  2891. struct CSEDenseMapInfo {
  2892. static bool canHandle(const Instruction *I) {
  2893. return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
  2894. isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  2895. }
  2896. static inline Instruction *getEmptyKey() {
  2897. return DenseMapInfo<Instruction *>::getEmptyKey();
  2898. }
  2899. static inline Instruction *getTombstoneKey() {
  2900. return DenseMapInfo<Instruction *>::getTombstoneKey();
  2901. }
  2902. static unsigned getHashValue(const Instruction *I) {
  2903. assert(canHandle(I) && "Unknown instruction!");
  2904. return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
  2905. I->value_op_end()));
  2906. }
  2907. static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
  2908. if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
  2909. LHS == getTombstoneKey() || RHS == getTombstoneKey())
  2910. return LHS == RHS;
  2911. return LHS->isIdenticalTo(RHS);
  2912. }
  2913. };
  2914. } // end anonymous namespace
2915. /// Perform CSE of induction variable instructions.
2916. static void cse(BasicBlock *BB) {
2917. // Perform simple CSE.
  2918. SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  2919. for (Instruction &In : llvm::make_early_inc_range(*BB)) {
  2920. if (!CSEDenseMapInfo::canHandle(&In))
  2921. continue;
  2922. // Check if we can replace this instruction with any of the
  2923. // visited instructions.
  2924. if (Instruction *V = CSEMap.lookup(&In)) {
  2925. In.replaceAllUsesWith(V);
  2926. In.eraseFromParent();
  2927. continue;
  2928. }
  2929. CSEMap[&In] = &In;
  2930. }
  2931. }
  2932. InstructionCost
  2933. LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
  2934. bool &NeedToScalarize) const {
  2935. Function *F = CI->getCalledFunction();
  2936. Type *ScalarRetTy = CI->getType();
  2937. SmallVector<Type *, 4> Tys, ScalarTys;
  2938. for (auto &ArgOp : CI->args())
  2939. ScalarTys.push_back(ArgOp->getType());
  2940. // Estimate cost of scalarized vector call. The source operands are assumed
  2941. // to be vectors, so we need to extract individual elements from there,
  2942. // execute VF scalar calls, and then gather the result into the vector return
  2943. // value.
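// Rough illustration (not exact target costs): for VF = 4 this models four
// scalar calls plus the extract/insert overhead of unpacking each vector
// argument and repacking the return value; that total is compared further
// below against the cost of a single vector call, when one is available.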
  2944. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  2945. InstructionCost ScalarCallCost =
  2946. TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, CostKind);
  2947. if (VF.isScalar())
  2948. return ScalarCallCost;
  2949. // Compute corresponding vector type for return value and arguments.
  2950. Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  2951. for (Type *ScalarTy : ScalarTys)
  2952. Tys.push_back(ToVectorTy(ScalarTy, VF));
  2953. // Compute costs of unpacking argument values for the scalar calls and
  2954. // packing the return values to a vector.
  2955. InstructionCost ScalarizationCost =
  2956. getScalarizationOverhead(CI, VF, CostKind);
  2957. InstructionCost Cost =
  2958. ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
  2959. // If we can't emit a vector call for this function, then the currently found
  2960. // cost is the cost we need to return.
  2961. NeedToScalarize = true;
  2962. VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  2963. Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
  2964. if (!TLI || CI->isNoBuiltin() || !VecFunc)
  2965. return Cost;
  2966. // If the corresponding vector cost is cheaper, return its cost.
  2967. InstructionCost VectorCallCost =
  2968. TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
  2969. if (VectorCallCost < Cost) {
  2970. NeedToScalarize = false;
  2971. Cost = VectorCallCost;
  2972. }
  2973. return Cost;
  2974. }
  2975. static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
  2976. if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
  2977. return Elt;
  2978. return VectorType::get(Elt, VF);
  2979. }
  2980. InstructionCost
  2981. LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
  2982. ElementCount VF) const {
  2983. Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  2984. assert(ID && "Expected intrinsic call!");
  2985. Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  2986. FastMathFlags FMF;
  2987. if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
  2988. FMF = FPMO->getFastMathFlags();
  2989. SmallVector<const Value *> Arguments(CI->args());
  2990. FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  2991. SmallVector<Type *> ParamTys;
  2992. std::transform(FTy->param_begin(), FTy->param_end(),
  2993. std::back_inserter(ParamTys),
  2994. [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
  2995. IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
  2996. dyn_cast<IntrinsicInst>(CI));
  2997. return TTI.getIntrinsicInstrCost(CostAttrs,
  2998. TargetTransformInfo::TCK_RecipThroughput);
  2999. }
  3000. static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  3001. auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  3002. auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  3003. return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
  3004. }
  3005. static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  3006. auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  3007. auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  3008. return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
  3009. }
  3010. void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  3011. // For every instruction `I` in MinBWs, truncate the operands, create a
  3012. // truncated version of `I` and reextend its result. InstCombine runs
  3013. // later and will remove any ext/trunc pairs.
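// Minimal sketch of the rewrite, assuming an i32 add whose result is known to
// need only 8 bits:
//   %a = add <4 x i32> %x, %y
// becomes
//   %xt = trunc <4 x i32> %x to <4 x i8>
//   %yt = trunc <4 x i32> %y to <4 x i8>
//   %at = add <4 x i8> %xt, %yt
//   %a  = zext <4 x i8> %at to <4 x i32>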
  3014. SmallPtrSet<Value *, 4> Erased;
  3015. for (const auto &KV : Cost->getMinimalBitwidths()) {
  3016. // If the value wasn't vectorized, we must maintain the original scalar
  3017. // type. The absence of the value from State indicates that it
  3018. // wasn't vectorized.
  3019. // FIXME: Should not rely on getVPValue at this point.
  3020. VPValue *Def = State.Plan->getVPValue(KV.first, true);
  3021. if (!State.hasAnyVectorValue(Def))
  3022. continue;
  3023. for (unsigned Part = 0; Part < UF; ++Part) {
  3024. Value *I = State.get(Def, Part);
  3025. if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
  3026. continue;
  3027. Type *OriginalTy = I->getType();
  3028. Type *ScalarTruncatedTy =
  3029. IntegerType::get(OriginalTy->getContext(), KV.second);
  3030. auto *TruncatedTy = VectorType::get(
  3031. ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
  3032. if (TruncatedTy == OriginalTy)
  3033. continue;
  3034. IRBuilder<> B(cast<Instruction>(I));
  3035. auto ShrinkOperand = [&](Value *V) -> Value * {
  3036. if (auto *ZI = dyn_cast<ZExtInst>(V))
  3037. if (ZI->getSrcTy() == TruncatedTy)
  3038. return ZI->getOperand(0);
  3039. return B.CreateZExtOrTrunc(V, TruncatedTy);
  3040. };
  3041. // The actual instruction modification depends on the instruction type,
  3042. // unfortunately.
  3043. Value *NewI = nullptr;
  3044. if (auto *BO = dyn_cast<BinaryOperator>(I)) {
  3045. NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
  3046. ShrinkOperand(BO->getOperand(1)));
  3047. // Any wrapping introduced by shrinking this operation shouldn't be
  3048. // considered undefined behavior. So, we can't unconditionally copy
  3049. // arithmetic wrapping flags to NewI.
  3050. cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
  3051. } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
  3052. NewI =
  3053. B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
  3054. ShrinkOperand(CI->getOperand(1)));
  3055. } else if (auto *SI = dyn_cast<SelectInst>(I)) {
  3056. NewI = B.CreateSelect(SI->getCondition(),
  3057. ShrinkOperand(SI->getTrueValue()),
  3058. ShrinkOperand(SI->getFalseValue()));
  3059. } else if (auto *CI = dyn_cast<CastInst>(I)) {
  3060. switch (CI->getOpcode()) {
  3061. default:
  3062. llvm_unreachable("Unhandled cast!");
  3063. case Instruction::Trunc:
  3064. NewI = ShrinkOperand(CI->getOperand(0));
  3065. break;
  3066. case Instruction::SExt:
  3067. NewI = B.CreateSExtOrTrunc(
  3068. CI->getOperand(0),
  3069. smallestIntegerVectorType(OriginalTy, TruncatedTy));
  3070. break;
  3071. case Instruction::ZExt:
  3072. NewI = B.CreateZExtOrTrunc(
  3073. CI->getOperand(0),
  3074. smallestIntegerVectorType(OriginalTy, TruncatedTy));
  3075. break;
  3076. }
  3077. } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
  3078. auto Elements0 =
  3079. cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
  3080. auto *O0 = B.CreateZExtOrTrunc(
  3081. SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
  3082. auto Elements1 =
  3083. cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
  3084. auto *O1 = B.CreateZExtOrTrunc(
  3085. SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
  3086. NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
  3087. } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
  3088. // Don't do anything with the operands, just extend the result.
  3089. continue;
  3090. } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
  3091. auto Elements =
  3092. cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
  3093. auto *O0 = B.CreateZExtOrTrunc(
  3094. IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
  3095. auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
  3096. NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
  3097. } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
  3098. auto Elements =
  3099. cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
  3100. auto *O0 = B.CreateZExtOrTrunc(
  3101. EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
  3102. NewI = B.CreateExtractElement(O0, EE->getOperand(2));
  3103. } else {
  3104. // If we don't know what to do, be conservative and don't do anything.
  3105. continue;
  3106. }
  3107. // Lastly, extend the result.
  3108. NewI->takeName(cast<Instruction>(I));
  3109. Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
  3110. I->replaceAllUsesWith(Res);
  3111. cast<Instruction>(I)->eraseFromParent();
  3112. Erased.insert(I);
  3113. State.reset(Def, Res, Part);
  3114. }
  3115. }
3116. // We may have created ZExts that are now dead. Clean them up.
  3117. for (const auto &KV : Cost->getMinimalBitwidths()) {
  3118. // If the value wasn't vectorized, we must maintain the original scalar
  3119. // type. The absence of the value from State indicates that it
  3120. // wasn't vectorized.
  3121. // FIXME: Should not rely on getVPValue at this point.
  3122. VPValue *Def = State.Plan->getVPValue(KV.first, true);
  3123. if (!State.hasAnyVectorValue(Def))
  3124. continue;
  3125. for (unsigned Part = 0; Part < UF; ++Part) {
  3126. Value *I = State.get(Def, Part);
  3127. ZExtInst *Inst = dyn_cast<ZExtInst>(I);
  3128. if (Inst && Inst->use_empty()) {
  3129. Value *NewI = Inst->getOperand(0);
  3130. Inst->eraseFromParent();
  3131. State.reset(Def, NewI, Part);
  3132. }
  3133. }
  3134. }
  3135. }
  3136. void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
  3137. VPlan &Plan) {
  3138. // Insert truncates and extends for any truncated instructions as hints to
  3139. // InstCombine.
  3140. if (VF.isVector())
  3141. truncateToMinimalBitwidths(State);
  3142. // Fix widened non-induction PHIs by setting up the PHI operands.
  3143. if (EnableVPlanNativePath)
  3144. fixNonInductionPHIs(Plan, State);
  3145. // At this point every instruction in the original loop is widened to a
  3146. // vector form. Now we need to fix the recurrences in the loop. These PHI
  3147. // nodes are currently empty because we did not want to introduce cycles.
  3148. // This is the second stage of vectorizing recurrences.
  3149. fixCrossIterationPHIs(State);
  3150. // Forget the original basic block.
  3151. PSE.getSE()->forgetLoop(OrigLoop);
  3152. VPBasicBlock *LatchVPBB = Plan.getVectorLoopRegion()->getExitingBasicBlock();
  3153. Loop *VectorLoop = LI->getLoopFor(State.CFG.VPBB2IRBB[LatchVPBB]);
  3154. if (Cost->requiresScalarEpilogue(VF)) {
  3155. // No edge from the middle block to the unique exit block has been inserted
  3156. // and there is nothing to fix from vector loop; phis should have incoming
  3157. // from scalar loop only.
  3158. Plan.clearLiveOuts();
  3159. } else {
  3160. // If we inserted an edge from the middle block to the unique exit block,
  3161. // update uses outside the loop (phis) to account for the newly inserted
  3162. // edge.
  3163. // Fix-up external users of the induction variables.
  3164. for (const auto &Entry : Legal->getInductionVars())
  3165. fixupIVUsers(Entry.first, Entry.second,
  3166. getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
  3167. IVEndValues[Entry.first], LoopMiddleBlock,
  3168. VectorLoop->getHeader(), Plan);
  3169. }
  3170. // Fix LCSSA phis not already fixed earlier. Extracts may need to be generated
  3171. // in the exit block, so update the builder.
  3172. State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHI());
  3173. for (const auto &KV : Plan.getLiveOuts())
  3174. KV.second->fixPhi(Plan, State);
  3175. for (Instruction *PI : PredicatedInstructions)
  3176. sinkScalarOperands(&*PI);
  3177. // Remove redundant induction instructions.
  3178. cse(VectorLoop->getHeader());
  3179. // Set/update profile weights for the vector and remainder loops as original
  3180. // loop iterations are now distributed among them. Note that original loop
  3181. // represented by LoopScalarBody becomes remainder loop after vectorization.
  3182. //
3183. // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3184. // end up with a slightly less precise result, but that should be OK since
  3185. // profile is not inherently precise anyway. Note also possible bypass of
  3186. // vector code caused by legality checks is ignored, assigning all the weight
  3187. // to the vector loop, optimistically.
  3188. //
  3189. // For scalable vectorization we can't know at compile time how many iterations
  3190. // of the loop are handled in one vector iteration, so instead assume a pessimistic
  3191. // vscale of '1'.
  3192. setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
  3193. LI->getLoopFor(LoopScalarBody),
  3194. VF.getKnownMinValue() * UF);
  3195. }
  3196. void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
  3197. // In order to support recurrences we need to be able to vectorize Phi nodes.
  3198. // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  3199. // stage #2: We now need to fix the recurrences by adding incoming edges to
  3200. // the currently empty PHI nodes. At this point every instruction in the
  3201. // original loop is widened to a vector form so we can use them to construct
  3202. // the incoming edges.
  3203. VPBasicBlock *Header =
  3204. State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
  3205. for (VPRecipeBase &R : Header->phis()) {
  3206. if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
  3207. fixReduction(ReductionPhi, State);
  3208. else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
  3209. fixFixedOrderRecurrence(FOR, State);
  3210. }
  3211. }
  3212. void InnerLoopVectorizer::fixFixedOrderRecurrence(
  3213. VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
  3214. // This is the second phase of vectorizing first-order recurrences. An
  3215. // overview of the transformation is described below. Suppose we have the
  3216. // following loop.
  3217. //
  3218. // for (int i = 0; i < n; ++i)
  3219. // b[i] = a[i] - a[i - 1];
  3220. //
  3221. // There is a first-order recurrence on "a". For this loop, the shorthand
  3222. // scalar IR looks like:
  3223. //
  3224. // scalar.ph:
  3225. // s_init = a[-1]
  3226. // br scalar.body
  3227. //
  3228. // scalar.body:
  3229. // i = phi [0, scalar.ph], [i+1, scalar.body]
  3230. // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  3231. // s2 = a[i]
  3232. // b[i] = s2 - s1
  3233. // br cond, scalar.body, ...
  3234. //
3235. // In this example, s1 is a recurrence because its value depends on the
  3236. // previous iteration. In the first phase of vectorization, we created a
  3237. // vector phi v1 for s1. We now complete the vectorization and produce the
  3238. // shorthand vector IR shown below (for VF = 4, UF = 1).
  3239. //
  3240. // vector.ph:
  3241. // v_init = vector(..., ..., ..., a[-1])
  3242. // br vector.body
  3243. //
  3244. // vector.body
  3245. // i = phi [0, vector.ph], [i+4, vector.body]
  3246. // v1 = phi [v_init, vector.ph], [v2, vector.body]
  3247. // v2 = a[i, i+1, i+2, i+3];
  3248. // v3 = vector(v1(3), v2(0, 1, 2))
  3249. // b[i, i+1, i+2, i+3] = v2 - v3
  3250. // br cond, vector.body, middle.block
  3251. //
  3252. // middle.block:
  3253. // x = v2(3)
  3254. // br scalar.ph
  3255. //
  3256. // scalar.ph:
  3257. // s_init = phi [x, middle.block], [a[-1], otherwise]
  3258. // br scalar.body
  3259. //
  3260. // After execution completes the vector loop, we extract the next value of
  3261. // the recurrence (x) to use as the initial value in the scalar loop.
  3262. // Extract the last vector element in the middle block. This will be the
  3263. // initial value for the recurrence when jumping to the scalar loop.
  3264. VPValue *PreviousDef = PhiR->getBackedgeValue();
  3265. Value *Incoming = State.get(PreviousDef, UF - 1);
  3266. auto *ExtractForScalar = Incoming;
  3267. auto *IdxTy = Builder.getInt32Ty();
  3268. if (VF.isVector()) {
  3269. auto *One = ConstantInt::get(IdxTy, 1);
  3270. Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
  3271. auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
  3272. auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
  3273. ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
  3274. "vector.recur.extract");
  3275. }
3276. // Extract the second-to-last element in the middle block if the
  3277. // Phi is used outside the loop. We need to extract the phi itself
  3278. // and not the last element (the phi update in the current iteration). This
  3279. // will be the value when jumping to the exit block from the LoopMiddleBlock,
  3280. // when the scalar loop is not run at all.
  3281. Value *ExtractForPhiUsedOutsideLoop = nullptr;
  3282. if (VF.isVector()) {
  3283. auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
  3284. auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
  3285. ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
  3286. Incoming, Idx, "vector.recur.extract.for.phi");
  3287. } else if (UF > 1)
3288. // When the loop is unrolled without vectorizing, initialize
3289. // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value
3290. // of `Incoming`. This is analogous to the vectorized case above: extracting
3291. // the second-to-last element when VF > 1.
  3292. ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
  3293. // Fix the initial value of the original recurrence in the scalar loop.
  3294. Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  3295. PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
  3296. auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  3297. auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
  3298. for (auto *BB : predecessors(LoopScalarPreHeader)) {
  3299. auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
  3300. Start->addIncoming(Incoming, BB);
  3301. }
  3302. Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  3303. Phi->setName("scalar.recur");
  3304. // Finally, fix users of the recurrence outside the loop. The users will need
  3305. // either the last value of the scalar recurrence or the last value of the
  3306. // vector recurrence we extracted in the middle block. Since the loop is in
  3307. // LCSSA form, we just need to find all the phi nodes for the original scalar
  3308. // recurrence in the exit block, and then add an edge for the middle block.
3309. // Note that LCSSA does not imply single entry when the original scalar loop
3310. // had multiple exiting edges (as we always run the last iteration in the
3311. // scalar epilogue); in that case, there is no edge from the middle block to
3312. // the exit block and thus no phis that need updating.
  3313. if (!Cost->requiresScalarEpilogue(VF))
  3314. for (PHINode &LCSSAPhi : LoopExitBlock->phis())
  3315. if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) {
  3316. LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
  3317. State.Plan->removeLiveOut(&LCSSAPhi);
  3318. }
  3319. }
  3320. void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
  3321. VPTransformState &State) {
  3322. PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
3323. // Get its reduction variable descriptor.
  3324. assert(Legal->isReductionVariable(OrigPhi) &&
  3325. "Unable to find the reduction variable");
  3326. const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
  3327. RecurKind RK = RdxDesc.getRecurrenceKind();
  3328. TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  3329. Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  3330. State.setDebugLocFromInst(ReductionStartValue);
  3331. VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
  3332. // This is the vector-clone of the value that leaves the loop.
  3333. Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
  3334. // Wrap flags are in general invalid after vectorization, clear them.
  3335. clearReductionWrapFlags(PhiR, State);
  3336. // Before each round, move the insertion point right between
  3337. // the PHIs and the values we are going to write.
  3338. // This allows us to write both PHINodes and the extractelement
  3339. // instructions.
  3340. Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
  3341. State.setDebugLocFromInst(LoopExitInst);
  3342. Type *PhiTy = OrigPhi->getType();
  3343. VPBasicBlock *LatchVPBB =
  3344. PhiR->getParent()->getEnclosingLoopRegion()->getExitingBasicBlock();
  3345. BasicBlock *VectorLoopLatch = State.CFG.VPBB2IRBB[LatchVPBB];
  3346. // If tail is folded by masking, the vector value to leave the loop should be
  3347. // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
  3348. // instead of the former. For an inloop reduction the reduction will already
  3349. // be predicated, and does not need to be handled here.
  3350. if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
  3351. for (unsigned Part = 0; Part < UF; ++Part) {
  3352. Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
  3353. SelectInst *Sel = nullptr;
  3354. for (User *U : VecLoopExitInst->users()) {
  3355. if (isa<SelectInst>(U)) {
  3356. assert(!Sel && "Reduction exit feeding two selects");
  3357. Sel = cast<SelectInst>(U);
  3358. } else
  3359. assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
  3360. }
  3361. assert(Sel && "Reduction exit feeds no select");
  3362. State.reset(LoopExitInstDef, Sel, Part);
  3363. if (isa<FPMathOperator>(Sel))
  3364. Sel->setFastMathFlags(RdxDesc.getFastMathFlags());
  3365. // If the target can create a predicated operator for the reduction at no
  3366. // extra cost in the loop (for example a predicated vadd), it can be
  3367. // cheaper for the select to remain in the loop than be sunk out of it,
  3368. // and so use the select value for the phi instead of the old
  3369. // LoopExitValue.
  3370. if (PreferPredicatedReductionSelect ||
  3371. TTI->preferPredicatedReductionSelect(
  3372. RdxDesc.getOpcode(), PhiTy,
  3373. TargetTransformInfo::ReductionFlags())) {
  3374. auto *VecRdxPhi =
  3375. cast<PHINode>(State.get(PhiR, Part));
  3376. VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
  3377. }
  3378. }
  3379. }
  3380. // If the vector reduction can be performed in a smaller type, we truncate
  3381. // then extend the loop exit value to enable InstCombine to evaluate the
  3382. // entire expression in the smaller type.
  3383. if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
  3384. assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
  3385. Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
  3386. Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
  3387. VectorParts RdxParts(UF);
  3388. for (unsigned Part = 0; Part < UF; ++Part) {
  3389. RdxParts[Part] = State.get(LoopExitInstDef, Part);
  3390. Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
  3391. Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
  3392. : Builder.CreateZExt(Trunc, VecTy);
  3393. for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
  3394. if (U != Trunc) {
  3395. U->replaceUsesOfWith(RdxParts[Part], Extnd);
  3396. RdxParts[Part] = Extnd;
  3397. }
  3398. }
  3399. Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
  3400. for (unsigned Part = 0; Part < UF; ++Part) {
  3401. RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
  3402. State.reset(LoopExitInstDef, RdxParts[Part], Part);
  3403. }
  3404. }
  3405. // Reduce all of the unrolled parts into a single vector.
  3406. Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
  3407. unsigned Op = RecurrenceDescriptor::getOpcode(RK);
  3408. // The middle block terminator has already been assigned a DebugLoc here (the
  3409. // OrigLoop's single latch terminator). We want the whole middle block to
  3410. // appear to execute on this line because: (a) it is all compiler generated,
  3411. // (b) these instructions are always executed after evaluating the latch
  3412. // conditional branch, and (c) other passes may add new predecessors which
  3413. // terminate on this line. This is the easiest way to ensure we don't
  3414. // accidentally cause an extra step back into the loop while debugging.
  3415. State.setDebugLocFromInst(LoopMiddleBlock->getTerminator());
  3416. if (PhiR->isOrdered())
  3417. ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
  3418. else {
  3419. // Floating-point operations should have some FMF to enable the reduction.
  3420. IRBuilderBase::FastMathFlagGuard FMFG(Builder);
  3421. Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
  3422. for (unsigned Part = 1; Part < UF; ++Part) {
  3423. Value *RdxPart = State.get(LoopExitInstDef, Part);
  3424. if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
  3425. ReducedPartRdx = Builder.CreateBinOp(
  3426. (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
  3427. } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
  3428. ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
  3429. ReducedPartRdx, RdxPart);
  3430. else
  3431. ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
  3432. }
  3433. }
  3434. // Create the reduction after the loop. Note that inloop reductions create the
  3435. // target reduction in the loop using a Reduction recipe.
  3436. if (VF.isVector() && !PhiR->isInLoop()) {
  3437. ReducedPartRdx =
  3438. createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
  3439. // If the reduction can be performed in a smaller type, we need to extend
  3440. // the reduction to the wider type before we branch to the original loop.
  3441. if (PhiTy != RdxDesc.getRecurrenceType())
  3442. ReducedPartRdx = RdxDesc.isSigned()
  3443. ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
  3444. : Builder.CreateZExt(ReducedPartRdx, PhiTy);
  3445. }
  3446. PHINode *ResumePhi =
  3447. dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
  3448. // Create a phi node that merges control-flow from the backedge-taken check
  3449. // block and the middle block.
  3450. PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
  3451. LoopScalarPreHeader->getTerminator());
  3452. // If we are fixing reductions in the epilogue loop then we should already
  3453. // have created a bc.merge.rdx Phi after the main vector body. Ensure that
  3454. // we carry over the incoming values correctly.
  3455. for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
  3456. if (Incoming == LoopMiddleBlock)
  3457. BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
  3458. else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
  3459. BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
  3460. Incoming);
  3461. else
  3462. BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
  3463. }
  3464. // Set the resume value for this reduction
  3465. ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
  3466. // If there were stores of the reduction value to a uniform memory address
  3467. // inside the loop, create the final store here.
  3468. if (StoreInst *SI = RdxDesc.IntermediateStore) {
  3469. StoreInst *NewSI =
  3470. Builder.CreateStore(ReducedPartRdx, SI->getPointerOperand());
  3471. propagateMetadata(NewSI, SI);
  3472. // If the reduction value is used in other places,
  3473. // then let the code below create PHI's for that.
  3474. }
  3475. // Now, we need to fix the users of the reduction variable
  3476. // inside and outside of the scalar remainder loop.
  3477. // We know that the loop is in LCSSA form. We need to update the PHI nodes
  3478. // in the exit blocks. See comment on analogous loop in
3479. // fixFixedOrderRecurrence for a more complete explanation of the logic.
  3480. if (!Cost->requiresScalarEpilogue(VF))
  3481. for (PHINode &LCSSAPhi : LoopExitBlock->phis())
  3482. if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) {
  3483. LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
  3484. State.Plan->removeLiveOut(&LCSSAPhi);
  3485. }
  3486. // Fix the scalar loop reduction variable with the incoming reduction sum
  3487. // from the vector body and from the backedge value.
  3488. int IncomingEdgeBlockIdx =
  3489. OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  3490. assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  3491. // Pick the other block.
  3492. int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  3493. OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  3494. OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  3495. }
  3496. void InnerLoopVectorizer::clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
  3497. VPTransformState &State) {
  3498. const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
  3499. RecurKind RK = RdxDesc.getRecurrenceKind();
  3500. if (RK != RecurKind::Add && RK != RecurKind::Mul)
  3501. return;
  3502. SmallVector<VPValue *, 8> Worklist;
  3503. SmallPtrSet<VPValue *, 8> Visited;
  3504. Worklist.push_back(PhiR);
  3505. Visited.insert(PhiR);
  3506. while (!Worklist.empty()) {
  3507. VPValue *Cur = Worklist.pop_back_val();
  3508. for (unsigned Part = 0; Part < UF; ++Part) {
  3509. Value *V = State.get(Cur, Part);
  3510. if (!isa<OverflowingBinaryOperator>(V))
  3511. break;
  3512. cast<Instruction>(V)->dropPoisonGeneratingFlags();
  3513. }
  3514. for (VPUser *U : Cur->users()) {
  3515. auto *UserRecipe = dyn_cast<VPRecipeBase>(U);
  3516. if (!UserRecipe)
  3517. continue;
  3518. for (VPValue *V : UserRecipe->definedValues())
  3519. if (Visited.insert(V).second)
  3520. Worklist.push_back(V);
  3521. }
  3522. }
  3523. }
  3524. void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  3525. // The basic block and loop containing the predicated instruction.
  3526. auto *PredBB = PredInst->getParent();
  3527. auto *VectorLoop = LI->getLoopFor(PredBB);
  3528. // Initialize a worklist with the operands of the predicated instruction.
  3529. SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
  3530. // Holds instructions that we need to analyze again. An instruction may be
  3531. // reanalyzed if we don't yet know if we can sink it or not.
  3532. SmallVector<Instruction *, 8> InstsToReanalyze;
  3533. // Returns true if a given use occurs in the predicated block. Phi nodes use
  3534. // their operands in their corresponding predecessor blocks.
  3535. auto isBlockOfUsePredicated = [&](Use &U) -> bool {
  3536. auto *I = cast<Instruction>(U.getUser());
  3537. BasicBlock *BB = I->getParent();
  3538. if (auto *Phi = dyn_cast<PHINode>(I))
  3539. BB = Phi->getIncomingBlock(
  3540. PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
  3541. return BB == PredBB;
  3542. };
  3543. // Iteratively sink the scalarized operands of the predicated instruction
3544. // into the block we created for it. When an instruction is sunk, its
3545. // operands are then added to the worklist. The algorithm ends once a full
3546. // pass through the worklist sinks no further instructions.
  3547. bool Changed;
  3548. do {
  3549. // Add the instructions that need to be reanalyzed to the worklist, and
  3550. // reset the changed indicator.
  3551. Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
  3552. InstsToReanalyze.clear();
  3553. Changed = false;
  3554. while (!Worklist.empty()) {
  3555. auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
  3556. // We can't sink an instruction if it is a phi node, is not in the loop,
  3557. // or may have side effects.
  3558. if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
  3559. I->mayHaveSideEffects())
  3560. continue;
  3561. // If the instruction is already in PredBB, check if we can sink its
  3562. // operands. In that case, VPlan's sinkScalarOperands() succeeded in
  3563. // sinking the scalar instruction I, hence it appears in PredBB; but it
  3564. // may have failed to sink I's operands (recursively), which we try
  3565. // (again) here.
  3566. if (I->getParent() == PredBB) {
  3567. Worklist.insert(I->op_begin(), I->op_end());
  3568. continue;
  3569. }
  3570. // It's legal to sink the instruction if all its uses occur in the
  3571. // predicated block. Otherwise, there's nothing to do yet, and we may
  3572. // need to reanalyze the instruction.
  3573. if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
  3574. InstsToReanalyze.push_back(I);
  3575. continue;
  3576. }
  3577. // Move the instruction to the beginning of the predicated block, and add
3578. // its operands to the worklist.
  3579. I->moveBefore(&*PredBB->getFirstInsertionPt());
  3580. Worklist.insert(I->op_begin(), I->op_end());
  3581. // The sinking may have enabled other instructions to be sunk, so we will
  3582. // need to iterate.
  3583. Changed = true;
  3584. }
  3585. } while (Changed);
  3586. }
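// Rough example of the sinking performed above, with illustrative names: if
// VPlan sank a predicated scalar store into its own block but its address
// computation stayed behind, e.g.
//
//   vector.body:
//     %gep = getelementptr i32, ptr %base, i64 %idx   ; only used by the store
//     br i1 %cond, label %pred.store.if, label %pred.store.continue
//   pred.store.if:
//     store i32 %v, ptr %gep
//
// then every use of %gep is inside the predicated block, so %gep is moved
// there, which in turn may allow its own operands to be sunk on the next
// pass over the worklist.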
  3587. void InnerLoopVectorizer::fixNonInductionPHIs(VPlan &Plan,
  3588. VPTransformState &State) {
  3589. auto Iter = vp_depth_first_deep(Plan.getEntry());
  3590. for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
  3591. for (VPRecipeBase &P : VPBB->phis()) {
  3592. VPWidenPHIRecipe *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
  3593. if (!VPPhi)
  3594. continue;
  3595. PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
  3596. // Make sure the builder has a valid insert point.
  3597. Builder.SetInsertPoint(NewPhi);
  3598. for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
  3599. VPValue *Inc = VPPhi->getIncomingValue(i);
  3600. VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
  3601. NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
  3602. }
  3603. }
  3604. }
  3605. }
  3606. bool InnerLoopVectorizer::useOrderedReductions(
  3607. const RecurrenceDescriptor &RdxDesc) {
  3608. return Cost->useOrderedReductions(RdxDesc);
  3609. }
  3610. void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
  3611. // We should not collect Scalars more than once per VF. Right now, this
  3612. // function is called from collectUniformsAndScalars(), which already does
  3613. // this check. Collecting Scalars for VF=1 does not make any sense.
  3614. assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
  3615. "This function should not be visited twice for the same VF");
  3616. // This avoids any chances of creating a REPLICATE recipe during planning
  3617. // since that would result in generation of scalarized code during execution,
  3618. // which is not supported for scalable vectors.
  3619. if (VF.isScalable()) {
  3620. Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
  3621. return;
  3622. }
  3623. SmallSetVector<Instruction *, 8> Worklist;
  3624. // These sets are used to seed the analysis with pointers used by memory
  3625. // accesses that will remain scalar.
  3626. SmallSetVector<Instruction *, 8> ScalarPtrs;
  3627. SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
  3628. auto *Latch = TheLoop->getLoopLatch();
  3629. // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  3630. // The pointer operands of loads and stores will be scalar as long as the
  3631. // memory access is not a gather or scatter operation. The value operand of a
  3632. // store will remain scalar if the store is scalarized.
  3633. auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
  3634. InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
  3635. assert(WideningDecision != CM_Unknown &&
  3636. "Widening decision should be ready at this moment");
  3637. if (auto *Store = dyn_cast<StoreInst>(MemAccess))
  3638. if (Ptr == Store->getValueOperand())
  3639. return WideningDecision == CM_Scalarize;
  3640. assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
  3641. "Ptr is neither a value or pointer operand");
  3642. return WideningDecision != CM_GatherScatter;
  3643. };
  3644. // A helper that returns true if the given value is a bitcast or
  3645. // getelementptr instruction contained in the loop.
  3646. auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
  3647. return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
  3648. isa<GetElementPtrInst>(V)) &&
  3649. !TheLoop->isLoopInvariant(V);
  3650. };
  3651. // A helper that evaluates a memory access's use of a pointer. If the use will
  3652. // be a scalar use and the pointer is only used by memory accesses, we place
  3653. // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  3654. // PossibleNonScalarPtrs.
  3655. auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
  3656. // We only care about bitcast and getelementptr instructions contained in
  3657. // the loop.
  3658. if (!isLoopVaryingBitCastOrGEP(Ptr))
  3659. return;
  3660. // If the pointer has already been identified as scalar (e.g., if it was
  3661. // also identified as uniform), there's nothing to do.
  3662. auto *I = cast<Instruction>(Ptr);
  3663. if (Worklist.count(I))
  3664. return;
  3665. // If the use of the pointer will be a scalar use, and all users of the
  3666. // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
  3667. // place the pointer in PossibleNonScalarPtrs.
  3668. if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
  3669. return isa<LoadInst>(U) || isa<StoreInst>(U);
  3670. }))
  3671. ScalarPtrs.insert(I);
  3672. else
  3673. PossibleNonScalarPtrs.insert(I);
  3674. };
3675. // We seed the scalars analysis with two classes of instructions: (1)
  3676. // instructions marked uniform-after-vectorization and (2) bitcast,
  3677. // getelementptr and (pointer) phi instructions used by memory accesses
  3678. // requiring a scalar use.
  3679. //
  3680. // (1) Add to the worklist all instructions that have been identified as
  3681. // uniform-after-vectorization.
  3682. Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
  3683. // (2) Add to the worklist all bitcast and getelementptr instructions used by
  3684. // memory accesses requiring a scalar use. The pointer operands of loads and
3685. // stores will be scalar as long as the memory access is not a gather or
  3686. // scatter operation. The value operand of a store will remain scalar if the
  3687. // store is scalarized.
  3688. for (auto *BB : TheLoop->blocks())
  3689. for (auto &I : *BB) {
  3690. if (auto *Load = dyn_cast<LoadInst>(&I)) {
  3691. evaluatePtrUse(Load, Load->getPointerOperand());
  3692. } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
  3693. evaluatePtrUse(Store, Store->getPointerOperand());
  3694. evaluatePtrUse(Store, Store->getValueOperand());
  3695. }
  3696. }
  3697. for (auto *I : ScalarPtrs)
  3698. if (!PossibleNonScalarPtrs.count(I)) {
  3699. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
  3700. Worklist.insert(I);
  3701. }
  3702. // Insert the forced scalars.
  3703. // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
  3704. // induction variable when the PHI user is scalarized.
  3705. auto ForcedScalar = ForcedScalars.find(VF);
  3706. if (ForcedScalar != ForcedScalars.end())
  3707. for (auto *I : ForcedScalar->second) {
  3708. LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
  3709. Worklist.insert(I);
  3710. }
  3711. // Expand the worklist by looking through any bitcasts and getelementptr
  3712. // instructions we've already identified as scalar. This is similar to the
  3713. // expansion step in collectLoopUniforms(); however, here we're only
  3714. // expanding to include additional bitcasts and getelementptr instructions.
  3715. unsigned Idx = 0;
  3716. while (Idx != Worklist.size()) {
  3717. Instruction *Dst = Worklist[Idx++];
  3718. if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
  3719. continue;
  3720. auto *Src = cast<Instruction>(Dst->getOperand(0));
  3721. if (llvm::all_of(Src->users(), [&](User *U) -> bool {
  3722. auto *J = cast<Instruction>(U);
  3723. return !TheLoop->contains(J) || Worklist.count(J) ||
  3724. ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
  3725. isScalarUse(J, Src));
  3726. })) {
  3727. Worklist.insert(Src);
  3728. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
  3729. }
  3730. }
  3731. // An induction variable will remain scalar if all users of the induction
  3732. // variable and induction variable update remain scalar.
  3733. for (const auto &Induction : Legal->getInductionVars()) {
  3734. auto *Ind = Induction.first;
  3735. auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
  3736. // If tail-folding is applied, the primary induction variable will be used
  3737. // to feed a vector compare.
  3738. if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
  3739. continue;
  3740. // Returns true if \p Indvar is a pointer induction that is used directly by
  3741. // load/store instruction \p I.
  3742. auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
  3743. Instruction *I) {
  3744. return Induction.second.getKind() ==
  3745. InductionDescriptor::IK_PtrInduction &&
  3746. (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
  3747. Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
  3748. };
  3749. // Determine if all users of the induction variable are scalar after
  3750. // vectorization.
  3751. auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
  3752. auto *I = cast<Instruction>(U);
  3753. return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
  3754. IsDirectLoadStoreFromPtrIndvar(Ind, I);
  3755. });
  3756. if (!ScalarInd)
  3757. continue;
  3758. // Determine if all users of the induction variable update instruction are
  3759. // scalar after vectorization.
  3760. auto ScalarIndUpdate =
  3761. llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
  3762. auto *I = cast<Instruction>(U);
  3763. return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
  3764. IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
  3765. });
  3766. if (!ScalarIndUpdate)
  3767. continue;
  3768. // The induction variable and its update instruction will remain scalar.
  3769. Worklist.insert(Ind);
  3770. Worklist.insert(IndUpdate);
  3771. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
  3772. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
  3773. << "\n");
  3774. }
  3775. Scalars[VF].insert(Worklist.begin(), Worklist.end());
  3776. }
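// Illustrative example of the analysis above, assuming a source loop like
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + 1;              // consecutive, widened loads/stores
//
// The GEPs computing &a[i] and &b[i] feed only widened (non-gather/scatter)
// memory accesses, so only their lane-0 value per unrolled part is needed
// and they end up in Scalars[VF], typically together with the induction
// variable i and its update, provided all their remaining users are scalar
// as well.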
  3777. bool LoopVectorizationCostModel::isScalarWithPredication(
  3778. Instruction *I, ElementCount VF) const {
  3779. if (!isPredicatedInst(I))
  3780. return false;
  3781. // Do we have a non-scalar lowering for this predicated
  3782. // instruction? No - it is scalar with predication.
  3783. switch(I->getOpcode()) {
  3784. default:
  3785. return true;
  3786. case Instruction::Load:
  3787. case Instruction::Store: {
  3788. auto *Ptr = getLoadStorePointerOperand(I);
  3789. auto *Ty = getLoadStoreType(I);
  3790. Type *VTy = Ty;
  3791. if (VF.isVector())
  3792. VTy = VectorType::get(Ty, VF);
  3793. const Align Alignment = getLoadStoreAlignment(I);
  3794. return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
  3795. TTI.isLegalMaskedGather(VTy, Alignment))
  3796. : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
  3797. TTI.isLegalMaskedScatter(VTy, Alignment));
  3798. }
  3799. case Instruction::UDiv:
  3800. case Instruction::SDiv:
  3801. case Instruction::SRem:
  3802. case Instruction::URem: {
  3803. // We have the option to use the safe-divisor idiom to avoid predication.
  3804. // The cost based decision here will always select safe-divisor for
  3805. // scalable vectors as scalarization isn't legal.
  3806. const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
  3807. return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
  3808. }
  3809. }
  3810. }
  3811. bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
  3812. if (!blockNeedsPredicationForAnyReason(I->getParent()))
  3813. return false;
  3814. // Can we prove this instruction is safe to unconditionally execute?
  3815. // If not, we must use some form of predication.
  3816. switch(I->getOpcode()) {
  3817. default:
  3818. return false;
  3819. case Instruction::Load:
  3820. case Instruction::Store: {
  3821. if (!Legal->isMaskRequired(I))
  3822. return false;
  3823. // When we know the load's address is loop invariant and the instruction
  3824. // in the original scalar loop was unconditionally executed then we
  3825. // don't need to mark it as a predicated instruction. Tail folding may
  3826. // introduce additional predication, but we're guaranteed to always have
  3827. // at least one active lane. We call Legal->blockNeedsPredication here
  3828. // because it doesn't query tail-folding. For stores, we need to prove
3829. // both speculation safety (which follows from the same argument as for
3830. // loads) and that the value being stored is correct. The easiest form of
3831. // the latter is to require that all values stored are the same.
  3832. if (Legal->isUniformMemOp(*I) &&
  3833. (isa<LoadInst>(I) ||
  3834. (isa<StoreInst>(I) &&
  3835. TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()))) &&
  3836. !Legal->blockNeedsPredication(I->getParent()))
  3837. return false;
  3838. return true;
  3839. }
  3840. case Instruction::UDiv:
  3841. case Instruction::SDiv:
  3842. case Instruction::SRem:
  3843. case Instruction::URem:
3844. // TODO: We can use the loop-preheader as a context point here and get
3845. // context-sensitive reasoning.
  3846. return !isSafeToSpeculativelyExecute(I);
  3847. }
  3848. }
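// Worked example of the predication reasoning above, assuming the tail of
//
//   for (int i = 0; i < n; ++i)
//     sum += *p;                    // p loop-invariant, load unconditional
//
// is folded by masking. The load's block then needs predication only because
// of tail folding, but at least one lane is always active, so dereferencing
// the invariant address is as safe as in the scalar loop and the load is not
// treated as a predicated instruction. A store to *p would additionally need
// a loop-invariant stored value for the same conclusion to hold.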
  3849. std::pair<InstructionCost, InstructionCost>
  3850. LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
  3851. ElementCount VF) const {
  3852. assert(I->getOpcode() == Instruction::UDiv ||
  3853. I->getOpcode() == Instruction::SDiv ||
  3854. I->getOpcode() == Instruction::SRem ||
  3855. I->getOpcode() == Instruction::URem);
  3856. assert(!isSafeToSpeculativelyExecute(I));
  3857. const TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  3858. // Scalarization isn't legal for scalable vector types
  3859. InstructionCost ScalarizationCost = InstructionCost::getInvalid();
  3860. if (!VF.isScalable()) {
  3861. // Get the scalarization cost and scale this amount by the probability of
  3862. // executing the predicated block. If the instruction is not predicated,
  3863. // we fall through to the next case.
  3864. ScalarizationCost = 0;
  3865. // These instructions have a non-void type, so account for the phi nodes
  3866. // that we will create. This cost is likely to be zero. The phi node
  3867. // cost, if any, should be scaled by the block probability because it
  3868. // models a copy at the end of each predicated block.
  3869. ScalarizationCost += VF.getKnownMinValue() *
  3870. TTI.getCFInstrCost(Instruction::PHI, CostKind);
  3871. // The cost of the non-predicated instruction.
  3872. ScalarizationCost += VF.getKnownMinValue() *
  3873. TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
  3874. // The cost of insertelement and extractelement instructions needed for
  3875. // scalarization.
  3876. ScalarizationCost += getScalarizationOverhead(I, VF, CostKind);
  3877. // Scale the cost by the probability of executing the predicated blocks.
  3878. // This assumes the predicated block for each vector lane is equally
  3879. // likely.
  3880. ScalarizationCost = ScalarizationCost / getReciprocalPredBlockProb();
  3881. }
  3882. InstructionCost SafeDivisorCost = 0;
  3883. auto *VecTy = ToVectorTy(I->getType(), VF);
  3884. // The cost of the select guard to ensure all lanes are well defined
  3885. // after we speculate above any internal control flow.
  3886. SafeDivisorCost += TTI.getCmpSelInstrCost(
  3887. Instruction::Select, VecTy,
  3888. ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
  3889. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  3890. // Certain instructions can be cheaper to vectorize if they have a constant
  3891. // second vector operand. One example of this are shifts on x86.
  3892. Value *Op2 = I->getOperand(1);
  3893. auto Op2Info = TTI.getOperandInfo(Op2);
  3894. if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
  3895. Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
  3896. SmallVector<const Value *, 4> Operands(I->operand_values());
  3897. SafeDivisorCost += TTI.getArithmeticInstrCost(
  3898. I->getOpcode(), VecTy, CostKind,
  3899. {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  3900. Op2Info, Operands, I);
  3901. return {ScalarizationCost, SafeDivisorCost};
  3902. }
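// Hypothetical numbers to illustrate the trade-off computed above (real
// costs are entirely target-dependent): with VF = 4, scalarizing a udiv
// might cost 4 * (phi + udiv) plus insert/extract overhead, scaled by the
// default block probability of 1/2, e.g. (4 * 5 + 8) / 2 = 14, while the
// safe-divisor form costs one vector select plus one vector udiv, e.g.
// 1 + 10 = 11, in which case the caller prefers the safe-divisor idiom.
// For scalable VFs the scalarization cost is Invalid, so safe-divisor is
// always chosen there.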
  3903. bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
  3904. Instruction *I, ElementCount VF) {
  3905. assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  3906. assert(getWideningDecision(I, VF) == CM_Unknown &&
  3907. "Decision should not be set yet.");
  3908. auto *Group = getInterleavedAccessGroup(I);
  3909. assert(Group && "Must have a group.");
3910. // If the instruction's allocated size doesn't equal its type size, it
  3911. // requires padding and will be scalarized.
  3912. auto &DL = I->getModule()->getDataLayout();
  3913. auto *ScalarTy = getLoadStoreType(I);
  3914. if (hasIrregularType(ScalarTy, DL))
  3915. return false;
  3916. // If the group involves a non-integral pointer, we may not be able to
  3917. // losslessly cast all values to a common type.
  3918. unsigned InterleaveFactor = Group->getFactor();
  3919. bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
  3920. for (unsigned i = 0; i < InterleaveFactor; i++) {
  3921. Instruction *Member = Group->getMember(i);
  3922. if (!Member)
  3923. continue;
  3924. auto *MemberTy = getLoadStoreType(Member);
  3925. bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
  3926. // Don't coerce non-integral pointers to integers or vice versa.
  3927. if (MemberNI != ScalarNI) {
  3928. // TODO: Consider adding special nullptr value case here
  3929. return false;
  3930. } else if (MemberNI && ScalarNI &&
  3931. ScalarTy->getPointerAddressSpace() !=
  3932. MemberTy->getPointerAddressSpace()) {
  3933. return false;
  3934. }
  3935. }
  3936. // Check if masking is required.
  3937. // A Group may need masking for one of two reasons: it resides in a block that
  3938. // needs predication, or it was decided to use masking to deal with gaps
  3939. // (either a gap at the end of a load-access that may result in a speculative
  3940. // load, or any gaps in a store-access).
  3941. bool PredicatedAccessRequiresMasking =
  3942. blockNeedsPredicationForAnyReason(I->getParent()) &&
  3943. Legal->isMaskRequired(I);
  3944. bool LoadAccessWithGapsRequiresEpilogMasking =
  3945. isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
  3946. !isScalarEpilogueAllowed();
  3947. bool StoreAccessWithGapsRequiresMasking =
  3948. isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
  3949. if (!PredicatedAccessRequiresMasking &&
  3950. !LoadAccessWithGapsRequiresEpilogMasking &&
  3951. !StoreAccessWithGapsRequiresMasking)
  3952. return true;
  3953. // If masked interleaving is required, we expect that the user/target had
  3954. // enabled it, because otherwise it either wouldn't have been created or
  3955. // it should have been invalidated by the CostModel.
  3956. assert(useMaskedInterleavedAccesses(TTI) &&
  3957. "Masked interleave-groups for predicated accesses are not enabled.");
  3958. if (Group->isReverse())
  3959. return false;
  3960. auto *Ty = getLoadStoreType(I);
  3961. const Align Alignment = getLoadStoreAlignment(I);
  3962. return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
  3963. : TTI.isLegalMaskedStore(Ty, Alignment);
  3964. }
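// Illustrative example of the masking conditions above, assuming
//
//   for (int i = 0; i < n; ++i)
//     x += a[2 * i];                // interleave group of factor 2 with a gap
//
// Such a load group requires a scalar epilogue because the last wide load
// may speculatively read past the end of the accessed data; if a scalar
// epilogue is not allowed, the group can only be widened when masked
// interleaving is enabled and the target supports masked loads of the
// element type (and the group is not reversed).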
  3965. bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
  3966. Instruction *I, ElementCount VF) {
  3967. // Get and ensure we have a valid memory instruction.
  3968. assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
  3969. auto *Ptr = getLoadStorePointerOperand(I);
  3970. auto *ScalarTy = getLoadStoreType(I);
  3971. // In order to be widened, the pointer should be consecutive, first of all.
  3972. if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
  3973. return false;
  3974. // If the instruction is a store located in a predicated block, it will be
  3975. // scalarized.
  3976. if (isScalarWithPredication(I, VF))
  3977. return false;
3978. // If the instruction's allocated size doesn't equal its type size, it
  3979. // requires padding and will be scalarized.
  3980. auto &DL = I->getModule()->getDataLayout();
  3981. if (hasIrregularType(ScalarTy, DL))
  3982. return false;
  3983. return true;
  3984. }
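// Illustrative examples for the checks above: a load of a[i] with a
// unit-stride induction i has a consecutive pointer and, if it is not
// predicated-and-scalarized and its type has no padding (e.g. i32, unlike an
// i24 that is allocated in 32 bits), it can be widened into a single vector
// load. A load of a[3 * i] is not consecutive and would instead be handled
// as a gather or scalarized.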
  3985. void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
  3986. // We should not collect Uniforms more than once per VF. Right now,
  3987. // this function is called from collectUniformsAndScalars(), which
  3988. // already does this check. Collecting Uniforms for VF=1 does not make any
  3989. // sense.
  3990. assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
  3991. "This function should not be visited twice for the same VF");
3992. // Visit the list of Uniforms. Even if we do not find any uniform value, we
3993. // will not analyze it again: Uniforms.count(VF) will still return 1.
  3994. Uniforms[VF].clear();
  3995. // We now know that the loop is vectorizable!
  3996. // Collect instructions inside the loop that will remain uniform after
  3997. // vectorization.
  3998. // Global values, params and instructions outside of current loop are out of
  3999. // scope.
  4000. auto isOutOfScope = [&](Value *V) -> bool {
  4001. Instruction *I = dyn_cast<Instruction>(V);
  4002. return (!I || !TheLoop->contains(I));
  4003. };
  4004. // Worklist containing uniform instructions demanding lane 0.
  4005. SetVector<Instruction *> Worklist;
  4006. BasicBlock *Latch = TheLoop->getLoopLatch();
  4007. // Add uniform instructions demanding lane 0 to the worklist. Instructions
  4008. // that are scalar with predication must not be considered uniform after
  4009. // vectorization, because that would create an erroneous replicating region
  4010. // where only a single instance out of VF should be formed.
  4011. // TODO: optimize such seldom cases if found important, see PR40816.
  4012. auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
  4013. if (isOutOfScope(I)) {
  4014. LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
  4015. << *I << "\n");
  4016. return;
  4017. }
  4018. if (isScalarWithPredication(I, VF)) {
  4019. LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
  4020. << *I << "\n");
  4021. return;
  4022. }
  4023. LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
  4024. Worklist.insert(I);
  4025. };
  4026. // Start with the conditional branch. If the branch condition is an
  4027. // instruction contained in the loop that is only used by the branch, it is
  4028. // uniform.
  4029. auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  4030. if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
  4031. addToWorklistIfAllowed(Cmp);
  4032. // Return true if all lanes perform the same memory operation, and we can
4033. // thus choose to execute only one.
  4034. auto isUniformMemOpUse = [&](Instruction *I) {
  4035. if (!Legal->isUniformMemOp(*I))
  4036. return false;
  4037. if (isa<LoadInst>(I))
  4038. // Loading the same address always produces the same result - at least
  4039. // assuming aliasing and ordering which have already been checked.
  4040. return true;
  4041. // Storing the same value on every iteration.
  4042. return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
  4043. };
  4044. auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
  4045. InstWidening WideningDecision = getWideningDecision(I, VF);
  4046. assert(WideningDecision != CM_Unknown &&
  4047. "Widening decision should be ready at this moment");
  4048. if (isUniformMemOpUse(I))
  4049. return true;
  4050. return (WideningDecision == CM_Widen ||
  4051. WideningDecision == CM_Widen_Reverse ||
  4052. WideningDecision == CM_Interleave);
  4053. };
  4054. // Returns true if Ptr is the pointer operand of a memory access instruction
  4055. // I, and I is known to not require scalarization.
  4056. auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
  4057. return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  4058. };
  4059. // Holds a list of values which are known to have at least one uniform use.
  4060. // Note that there may be other uses which aren't uniform. A "uniform use"
  4061. // here is something which only demands lane 0 of the unrolled iterations;
  4062. // it does not imply that all lanes produce the same value (e.g. this is not
  4063. // the usual meaning of uniform)
  4064. SetVector<Value *> HasUniformUse;
  4065. // Scan the loop for instructions which are either a) known to have only
  4066. // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  4067. for (auto *BB : TheLoop->blocks())
  4068. for (auto &I : *BB) {
  4069. if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
  4070. switch (II->getIntrinsicID()) {
  4071. case Intrinsic::sideeffect:
  4072. case Intrinsic::experimental_noalias_scope_decl:
  4073. case Intrinsic::assume:
  4074. case Intrinsic::lifetime_start:
  4075. case Intrinsic::lifetime_end:
  4076. if (TheLoop->hasLoopInvariantOperands(&I))
  4077. addToWorklistIfAllowed(&I);
  4078. break;
  4079. default:
  4080. break;
  4081. }
  4082. }
  4083. // ExtractValue instructions must be uniform, because the operands are
  4084. // known to be loop-invariant.
  4085. if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
  4086. assert(isOutOfScope(EVI->getAggregateOperand()) &&
  4087. "Expected aggregate value to be loop invariant");
  4088. addToWorklistIfAllowed(EVI);
  4089. continue;
  4090. }
  4091. // If there's no pointer operand, there's nothing to do.
  4092. auto *Ptr = getLoadStorePointerOperand(&I);
  4093. if (!Ptr)
  4094. continue;
  4095. if (isUniformMemOpUse(&I))
  4096. addToWorklistIfAllowed(&I);
  4097. if (isUniformDecision(&I, VF)) {
  4098. assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
  4099. HasUniformUse.insert(Ptr);
  4100. }
  4101. }
  4102. // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  4103. // demanding) users. Since loops are assumed to be in LCSSA form, this
  4104. // disallows uses outside the loop as well.
  4105. for (auto *V : HasUniformUse) {
  4106. if (isOutOfScope(V))
  4107. continue;
  4108. auto *I = cast<Instruction>(V);
  4109. auto UsersAreMemAccesses =
  4110. llvm::all_of(I->users(), [&](User *U) -> bool {
  4111. return isVectorizedMemAccessUse(cast<Instruction>(U), V);
  4112. });
  4113. if (UsersAreMemAccesses)
  4114. addToWorklistIfAllowed(I);
  4115. }
  4116. // Expand Worklist in topological order: whenever a new instruction
4117. // is added, its users should already be inside the Worklist. This ensures
4118. // that a uniform instruction will only be used by uniform instructions.
  4119. unsigned idx = 0;
  4120. while (idx != Worklist.size()) {
  4121. Instruction *I = Worklist[idx++];
  4122. for (auto *OV : I->operand_values()) {
  4123. // isOutOfScope operands cannot be uniform instructions.
  4124. if (isOutOfScope(OV))
  4125. continue;
  4126. // First order recurrence Phi's should typically be considered
  4127. // non-uniform.
  4128. auto *OP = dyn_cast<PHINode>(OV);
  4129. if (OP && Legal->isFixedOrderRecurrence(OP))
  4130. continue;
  4131. // If all the users of the operand are uniform, then add the
  4132. // operand into the uniform worklist.
  4133. auto *OI = cast<Instruction>(OV);
  4134. if (llvm::all_of(OI->users(), [&](User *U) -> bool {
  4135. auto *J = cast<Instruction>(U);
  4136. return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
  4137. }))
  4138. addToWorklistIfAllowed(OI);
  4139. }
  4140. }
  4141. // For an instruction to be added into Worklist above, all its users inside
  4142. // the loop should also be in Worklist. However, this condition cannot be
  4143. // true for phi nodes that form a cyclic dependence. We must process phi
  4144. // nodes separately. An induction variable will remain uniform if all users
  4145. // of the induction variable and induction variable update remain uniform.
  4146. // The code below handles both pointer and non-pointer induction variables.
  4147. for (const auto &Induction : Legal->getInductionVars()) {
  4148. auto *Ind = Induction.first;
  4149. auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
  4150. // Determine if all users of the induction variable are uniform after
  4151. // vectorization.
  4152. auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
  4153. auto *I = cast<Instruction>(U);
  4154. return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
  4155. isVectorizedMemAccessUse(I, Ind);
  4156. });
  4157. if (!UniformInd)
  4158. continue;
  4159. // Determine if all users of the induction variable update instruction are
  4160. // uniform after vectorization.
  4161. auto UniformIndUpdate =
  4162. llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
  4163. auto *I = cast<Instruction>(U);
  4164. return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
  4165. isVectorizedMemAccessUse(I, IndUpdate);
  4166. });
  4167. if (!UniformIndUpdate)
  4168. continue;
  4169. // The induction variable and its update instruction will remain uniform.
  4170. addToWorklistIfAllowed(Ind);
  4171. addToWorklistIfAllowed(IndUpdate);
  4172. }
  4173. Uniforms[VF].insert(Worklist.begin(), Worklist.end());
  4174. }
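// Illustrative example of a uniform-after-vectorization value for the
// analysis above, assuming
//
//   for (int i = 0; i < n; ++i)
//     a[i] = *q;                    // q loop-invariant
//
// The address computation for &a[i] feeds only a consecutive, widened store
// which demands just its lane-0 value, so it is added to Uniforms[VF]; the
// uniform load of *q likewise only needs lane 0 and is executed once per
// unrolled part. A pointer feeding a gather/scatter would not qualify.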
  4175. bool LoopVectorizationCostModel::runtimeChecksRequired() {
  4176. LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
  4177. if (Legal->getRuntimePointerChecking()->Need) {
  4178. reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
  4179. "runtime pointer checks needed. Enable vectorization of this "
  4180. "loop with '#pragma clang loop vectorize(enable)' when "
  4181. "compiling with -Os/-Oz",
  4182. "CantVersionLoopWithOptForSize", ORE, TheLoop);
  4183. return true;
  4184. }
  4185. if (!PSE.getPredicate().isAlwaysTrue()) {
  4186. reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
  4187. "runtime SCEV checks needed. Enable vectorization of this "
  4188. "loop with '#pragma clang loop vectorize(enable)' when "
  4189. "compiling with -Os/-Oz",
  4190. "CantVersionLoopWithOptForSize", ORE, TheLoop);
  4191. return true;
  4192. }
  4193. // FIXME: Avoid specializing for stride==1 instead of bailing out.
  4194. if (!Legal->getLAI()->getSymbolicStrides().empty()) {
  4195. reportVectorizationFailure("Runtime stride check for small trip count",
  4196. "runtime stride == 1 checks needed. Enable vectorization of "
  4197. "this loop without such check by compiling with -Os/-Oz",
  4198. "CantVersionLoopWithOptForSize", ORE, TheLoop);
  4199. return true;
  4200. }
  4201. return false;
  4202. }
  4203. ElementCount
  4204. LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
  4205. if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
  4206. return ElementCount::getScalable(0);
  4207. if (Hints->isScalableVectorizationDisabled()) {
  4208. reportVectorizationInfo("Scalable vectorization is explicitly disabled",
  4209. "ScalableVectorizationDisabled", ORE, TheLoop);
  4210. return ElementCount::getScalable(0);
  4211. }
  4212. LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
  4213. auto MaxScalableVF = ElementCount::getScalable(
  4214. std::numeric_limits<ElementCount::ScalarTy>::max());
  4215. // Test that the loop-vectorizer can legalize all operations for this MaxVF.
  4216. // FIXME: While for scalable vectors this is currently sufficient, this should
  4217. // be replaced by a more detailed mechanism that filters out specific VFs,
  4218. // instead of invalidating vectorization for a whole set of VFs based on the
  4219. // MaxVF.
  4220. // Disable scalable vectorization if the loop contains unsupported reductions.
  4221. if (!canVectorizeReductions(MaxScalableVF)) {
  4222. reportVectorizationInfo(
  4223. "Scalable vectorization not supported for the reduction "
  4224. "operations found in this loop.",
  4225. "ScalableVFUnfeasible", ORE, TheLoop);
  4226. return ElementCount::getScalable(0);
  4227. }
  4228. // Disable scalable vectorization if the loop contains any instructions
  4229. // with element types not supported for scalable vectors.
  4230. if (any_of(ElementTypesInLoop, [&](Type *Ty) {
  4231. return !Ty->isVoidTy() &&
  4232. !this->TTI.isElementTypeLegalForScalableVector(Ty);
  4233. })) {
  4234. reportVectorizationInfo("Scalable vectorization is not supported "
  4235. "for all element types found in this loop.",
  4236. "ScalableVFUnfeasible", ORE, TheLoop);
  4237. return ElementCount::getScalable(0);
  4238. }
  4239. if (Legal->isSafeForAnyVectorWidth())
  4240. return MaxScalableVF;
  4241. // Limit MaxScalableVF by the maximum safe dependence distance.
  4242. std::optional<unsigned> MaxVScale = TTI.getMaxVScale();
  4243. if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
  4244. MaxVScale =
  4245. TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
  4246. MaxScalableVF =
  4247. ElementCount::getScalable(MaxVScale ? (MaxSafeElements / *MaxVScale) : 0);
  4248. if (!MaxScalableVF)
  4249. reportVectorizationInfo(
  4250. "Max legal vector width too small, scalable vectorization "
  4251. "unfeasible.",
  4252. "ScalableVFUnfeasible", ORE, TheLoop);
  4253. return MaxScalableVF;
  4254. }
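// Worked example of the clamping above (numbers are illustrative): if LAA
// reports MaxSafeElements = 32 and the target advertises a maximum vscale of
// 16, the largest safe scalable VF is vscale x (32 / 16) = vscale x 2. If no
// maximum vscale is known at all, the result is vscale x 0, i.e. scalable
// vectorization is reported as unfeasible for this loop.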
  4255. FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
  4256. unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
  4257. MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  4258. unsigned SmallestType, WidestType;
  4259. std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  4260. // Get the maximum safe dependence distance in bits computed by LAA.
  4261. // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  4262. // the memory accesses that is most restrictive (involved in the smallest
  4263. // dependence distance).
  4264. unsigned MaxSafeElements =
  4265. PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
  4266. auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
  4267. auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
  4268. LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
  4269. << ".\n");
  4270. LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
  4271. << ".\n");
  4272. // First analyze the UserVF, fall back if the UserVF should be ignored.
  4273. if (UserVF) {
  4274. auto MaxSafeUserVF =
  4275. UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
  4276. if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
  4277. // If `VF=vscale x N` is safe, then so is `VF=N`
  4278. if (UserVF.isScalable())
  4279. return FixedScalableVFPair(
  4280. ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
  4281. else
  4282. return UserVF;
  4283. }
  4284. assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
  4285. // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
  4286. // is better to ignore the hint and let the compiler choose a suitable VF.
  4287. if (!UserVF.isScalable()) {
  4288. LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
  4289. << " is unsafe, clamping to max safe VF="
  4290. << MaxSafeFixedVF << ".\n");
  4291. ORE->emit([&]() {
  4292. return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
  4293. TheLoop->getStartLoc(),
  4294. TheLoop->getHeader())
  4295. << "User-specified vectorization factor "
  4296. << ore::NV("UserVectorizationFactor", UserVF)
  4297. << " is unsafe, clamping to maximum safe vectorization factor "
  4298. << ore::NV("VectorizationFactor", MaxSafeFixedVF);
  4299. });
  4300. return MaxSafeFixedVF;
  4301. }
  4302. if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
  4303. LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
  4304. << " is ignored because scalable vectors are not "
  4305. "available.\n");
  4306. ORE->emit([&]() {
  4307. return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
  4308. TheLoop->getStartLoc(),
  4309. TheLoop->getHeader())
  4310. << "User-specified vectorization factor "
  4311. << ore::NV("UserVectorizationFactor", UserVF)
  4312. << " is ignored because the target does not support scalable "
  4313. "vectors. The compiler will pick a more suitable value.";
  4314. });
  4315. } else {
  4316. LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
  4317. << " is unsafe. Ignoring scalable UserVF.\n");
  4318. ORE->emit([&]() {
  4319. return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
  4320. TheLoop->getStartLoc(),
  4321. TheLoop->getHeader())
  4322. << "User-specified vectorization factor "
  4323. << ore::NV("UserVectorizationFactor", UserVF)
  4324. << " is unsafe. Ignoring the hint to let the compiler pick a "
  4325. "more suitable value.";
  4326. });
  4327. }
  4328. }
  4329. LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
  4330. << " / " << WidestType << " bits.\n");
  4331. FixedScalableVFPair Result(ElementCount::getFixed(1),
  4332. ElementCount::getScalable(0));
  4333. if (auto MaxVF =
  4334. getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
  4335. MaxSafeFixedVF, FoldTailByMasking))
  4336. Result.FixedVF = MaxVF;
  4337. if (auto MaxVF =
  4338. getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
  4339. MaxSafeScalableVF, FoldTailByMasking))
  4340. if (MaxVF.isScalable()) {
  4341. Result.ScalableVF = MaxVF;
  4342. LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
  4343. << "\n");
  4344. }
  4345. return Result;
  4346. }
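// Illustrative examples of the UserVF handling above: with a maximum safe
// fixed VF of 8, a hint of VF=16 is clamped to 8 and a remark is emitted,
// whereas an unsafe scalable hint such as VF=vscale x 16 is ignored entirely
// and the compiler picks its own factor. A safe scalable hint like
// VF=vscale x 4 is returned together with the fixed VF=4 fallback.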
  4347. FixedScalableVFPair
  4348. LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  4349. if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4350. // TODO: It may be useful to do this, since the check is still likely to be
4351. // dynamically uniform if the target can skip it.
  4352. reportVectorizationFailure(
  4353. "Not inserting runtime ptr check for divergent target",
  4354. "runtime pointer checks needed. Not enabled for divergent target",
  4355. "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
  4356. return FixedScalableVFPair::getNone();
  4357. }
  4358. unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  4359. LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  4360. if (TC == 1) {
  4361. reportVectorizationFailure("Single iteration (non) loop",
  4362. "loop trip count is one, irrelevant for vectorization",
  4363. "SingleIterationLoop", ORE, TheLoop);
  4364. return FixedScalableVFPair::getNone();
  4365. }
  4366. switch (ScalarEpilogueStatus) {
  4367. case CM_ScalarEpilogueAllowed:
  4368. return computeFeasibleMaxVF(TC, UserVF, false);
  4369. case CM_ScalarEpilogueNotAllowedUsePredicate:
  4370. [[fallthrough]];
  4371. case CM_ScalarEpilogueNotNeededUsePredicate:
  4372. LLVM_DEBUG(
  4373. dbgs() << "LV: vector predicate hint/switch found.\n"
  4374. << "LV: Not allowing scalar epilogue, creating predicated "
  4375. << "vector loop.\n");
  4376. break;
  4377. case CM_ScalarEpilogueNotAllowedLowTripLoop:
  4378. // fallthrough as a special case of OptForSize
  4379. case CM_ScalarEpilogueNotAllowedOptSize:
  4380. if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
  4381. LLVM_DEBUG(
  4382. dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
  4383. else
  4384. LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
  4385. << "count.\n");
  4386. // Bail if runtime checks are required, which are not good when optimising
  4387. // for size.
  4388. if (runtimeChecksRequired())
  4389. return FixedScalableVFPair::getNone();
  4390. break;
  4391. }
4392. // The only loops we can vectorize without a scalar epilogue are loops with
  4393. // a bottom-test and a single exiting block. We'd have to handle the fact
  4394. // that not every instruction executes on the last iteration. This will
  4395. // require a lane mask which varies through the vector loop body. (TODO)
  4396. if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
  4397. // If there was a tail-folding hint/switch, but we can't fold the tail by
  4398. // masking, fallback to a vectorization with a scalar epilogue.
  4399. if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
  4400. LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
  4401. "scalar epilogue instead.\n");
  4402. ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
  4403. return computeFeasibleMaxVF(TC, UserVF, false);
  4404. }
  4405. return FixedScalableVFPair::getNone();
  4406. }
4407. // Now try tail folding.
  4408. // Invalidate interleave groups that require an epilogue if we can't mask
  4409. // the interleave-group.
  4410. if (!useMaskedInterleavedAccesses(TTI)) {
  4411. assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
  4412. "No decisions should have been taken at this point");
  4413. // Note: There is no need to invalidate any cost modeling decisions here, as
4414. // none were taken so far.
  4415. InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  4416. }
  4417. FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
  4418. // Avoid tail folding if the trip count is known to be a multiple of any VF
  4419. // we chose.
  4420. // FIXME: The condition below pessimises the case for fixed-width vectors,
  4421. // when scalable VFs are also candidates for vectorization.
  4422. if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
  4423. ElementCount MaxFixedVF = MaxFactors.FixedVF;
  4424. assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
  4425. "MaxFixedVF must be a power of 2");
  4426. unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
  4427. : MaxFixedVF.getFixedValue();
  4428. ScalarEvolution *SE = PSE.getSE();
  4429. const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  4430. const SCEV *ExitCount = SE->getAddExpr(
  4431. BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
  4432. const SCEV *Rem = SE->getURemExpr(
  4433. SE->applyLoopGuards(ExitCount, TheLoop),
  4434. SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
  4435. if (Rem->isZero()) {
  4436. // Accept MaxFixedVF if we do not have a tail.
  4437. LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
  4438. return MaxFactors;
  4439. }
  4440. }
  4441. // If we don't know the precise trip count, or if the trip count that we
  4442. // found modulo the vectorization factor is not zero, try to fold the tail
  4443. // by masking.
  4444. // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  4445. if (Legal->prepareToFoldTailByMasking()) {
  4446. FoldTailByMasking = true;
  4447. return MaxFactors;
  4448. }
  4449. // If there was a tail-folding hint/switch, but we can't fold the tail by
  4450. // masking, fallback to a vectorization with a scalar epilogue.
  4451. if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
  4452. LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
  4453. "scalar epilogue instead.\n");
  4454. ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
  4455. return MaxFactors;
  4456. }
  4457. if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
  4458. LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
  4459. return FixedScalableVFPair::getNone();
  4460. }
  4461. if (TC == 0) {
  4462. reportVectorizationFailure(
  4463. "Unable to calculate the loop count due to complex control flow",
  4464. "unable to calculate the loop count due to complex control flow",
  4465. "UnknownLoopCountComplexCFG", ORE, TheLoop);
  4466. return FixedScalableVFPair::getNone();
  4467. }
  4468. reportVectorizationFailure(
  4469. "Cannot optimize for size and vectorize at the same time.",
  4470. "cannot optimize for size and vectorize at the same time. "
  4471. "Enable vectorization of this loop with '#pragma clang loop "
  4472. "vectorize(enable)' when compiling with -Os/-Oz",
  4473. "NoTailLoopWithOptForSize", ORE, TheLoop);
  4474. return FixedScalableVFPair::getNone();
  4475. }
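// Illustrative example of the tail-handling decision above: with a known
// trip count of 64, a maximum fixed VF of 8 and no scalable candidate,
// 64 % 8 == 0, so no tail remains and no folding is needed. With a trip
// count of 70 the remainder is non-zero, so the tail is either folded by
// masking (when legality allows it) or left to a scalar epilogue loop; if
// neither is possible under the current scalar-epilogue policy,
// vectorization is abandoned.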
  4476. ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
  4477. unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
  4478. ElementCount MaxSafeVF, bool FoldTailByMasking) {
  4479. bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
  4480. const TypeSize WidestRegister = TTI.getRegisterBitWidth(
  4481. ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
  4482. : TargetTransformInfo::RGK_FixedWidthVector);
  4483. // Convenience function to return the minimum of two ElementCounts.
  4484. auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
  4485. assert((LHS.isScalable() == RHS.isScalable()) &&
  4486. "Scalable flags must match");
  4487. return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
  4488. };
  4489. // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
4490. // Note that both WidestRegister and WidestType may not be powers of 2.
  4491. auto MaxVectorElementCount = ElementCount::get(
  4492. PowerOf2Floor(WidestRegister.getKnownMinValue() / WidestType),
  4493. ComputeScalableMaxVF);
  4494. MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
  4495. LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
  4496. << (MaxVectorElementCount * WidestType) << " bits.\n");
  4497. if (!MaxVectorElementCount) {
  4498. LLVM_DEBUG(dbgs() << "LV: The target has no "
  4499. << (ComputeScalableMaxVF ? "scalable" : "fixed")
  4500. << " vector registers.\n");
  4501. return ElementCount::getFixed(1);
  4502. }
  4503. unsigned WidestRegisterMinEC = MaxVectorElementCount.getKnownMinValue();
  4504. if (MaxVectorElementCount.isScalable() &&
  4505. TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
  4506. auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
  4507. auto Min = Attr.getVScaleRangeMin();
  4508. WidestRegisterMinEC *= Min;
  4509. }
  4510. if (ConstTripCount && ConstTripCount <= WidestRegisterMinEC &&
  4511. (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
  4512. // If loop trip count (TC) is known at compile time there is no point in
  4513. // choosing VF greater than TC (as done in the loop below). Select maximum
  4514. // power of two which doesn't exceed TC.
  4515. // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
  4516. // when the TC is less than or equal to the known number of lanes.
  4517. auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
  4518. LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
  4519. "exceeding the constant trip count: "
  4520. << ClampedConstTripCount << "\n");
  4521. return ElementCount::getFixed(ClampedConstTripCount);
  4522. }
  4523. TargetTransformInfo::RegisterKind RegKind =
  4524. ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
  4525. : TargetTransformInfo::RGK_FixedWidthVector;
  4526. ElementCount MaxVF = MaxVectorElementCount;
  4527. if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
  4528. TTI.shouldMaximizeVectorBandwidth(RegKind))) {
  4529. auto MaxVectorElementCountMaxBW = ElementCount::get(
  4530. PowerOf2Floor(WidestRegister.getKnownMinValue() / SmallestType),
  4531. ComputeScalableMaxVF);
  4532. MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
  4533. // Collect all viable vectorization factors larger than the default MaxVF
  4534. // (i.e. MaxVectorElementCount).
  4535. SmallVector<ElementCount, 8> VFs;
  4536. for (ElementCount VS = MaxVectorElementCount * 2;
  4537. ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
  4538. VFs.push_back(VS);
  4539. // For each VF calculate its register usage.
  4540. auto RUs = calculateRegisterUsage(VFs);
  4541. // Select the largest VF which doesn't require more registers than existing
  4542. // ones.
  4543. for (int i = RUs.size() - 1; i >= 0; --i) {
  4544. bool Selected = true;
  4545. for (auto &pair : RUs[i].MaxLocalUsers) {
  4546. unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
  4547. if (pair.second > TargetNumRegisters)
  4548. Selected = false;
  4549. }
  4550. if (Selected) {
  4551. MaxVF = VFs[i];
  4552. break;
  4553. }
  4554. }
  4555. if (ElementCount MinVF =
  4556. TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
  4557. if (ElementCount::isKnownLT(MaxVF, MinVF)) {
  4558. LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
  4559. << ") with target's minimum: " << MinVF << '\n');
  4560. MaxVF = MinVF;
  4561. }
  4562. }
  4563. // Invalidate any widening decisions we might have made, in case the loop
  4564. // requires prediction (decided later), but we have already made some
  4565. // load/store widening decisions.
  4566. invalidateCostModelingDecisions();
  4567. }
  4568. return MaxVF;
  4569. }
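// Worked example of the VF maximization above (illustrative): with 128-bit
// vector registers and a widest loop type of 32 bits, MaxVectorElementCount
// is 4. A constant trip count of 3 then clamps the result to the largest
// power of two not exceeding it, i.e. VF=2, since VF=4 would exceed the trip
// count. When maximizing bandwidth applies, larger VFs such as 8 or 16 are
// also considered, subject to the register-usage check above.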
  4570. std::optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
  4571. if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
  4572. auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
  4573. auto Min = Attr.getVScaleRangeMin();
  4574. auto Max = Attr.getVScaleRangeMax();
  4575. if (Max && Min == Max)
  4576. return Max;
  4577. }
  4578. return TTI.getVScaleForTuning();
  4579. }
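// For example (for the query above), an AArch64 function built with
// -msve-vector-bits=256 carries vscale_range(2,2) under clang's usual
// lowering, so this returns 2; with only a lower bound such as
// vscale_range(1,16) the target's own tuning value is used instead.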
  4580. bool LoopVectorizationCostModel::isMoreProfitable(
  4581. const VectorizationFactor &A, const VectorizationFactor &B) const {
  4582. InstructionCost CostA = A.Cost;
  4583. InstructionCost CostB = B.Cost;
  4584. unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
  4585. if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
  4586. MaxTripCount) {
  4587. // If we are folding the tail and the trip count is a known (possibly small)
  4588. // constant, the trip count will be rounded up to an integer number of
  4589. // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
  4590. // which we compare directly. When not folding the tail, the total cost will
  4591. // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
  4592. // approximated with the per-lane cost below instead of using the tripcount
  4593. // as here.
  4594. auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
  4595. auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
  4596. return RTCostA < RTCostB;
  4597. }
  4598. // Improve estimate for the vector width if it is scalable.
  4599. unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  4600. unsigned EstimatedWidthB = B.Width.getKnownMinValue();
  4601. if (std::optional<unsigned> VScale = getVScaleForTuning()) {
  4602. if (A.Width.isScalable())
  4603. EstimatedWidthA *= *VScale;
  4604. if (B.Width.isScalable())
  4605. EstimatedWidthB *= *VScale;
  4606. }
  4607. // Assume vscale may be larger than 1 (or the value being tuned for),
  4608. // so that scalable vectorization is slightly favorable over fixed-width
  4609. // vectorization.
  4610. if (A.Width.isScalable() && !B.Width.isScalable())
  4611. return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
  4612. // To avoid the need for FP division:
  4613. // (CostA / A.Width) < (CostB / B.Width)
  4614. // <=> (CostA * B.Width) < (CostB * A.Width)
  4615. return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
  4616. }
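// Worked comparison for the profitability check above, with illustrative
// numbers: if A is VF=vscale x 4 with cost 10, B is VF=8 with cost 10, and
// vscale is tuned as 2, then both sides of the comparison equal 80 and A,
// the scalable factor, is preferred, which is the intended slight bias.
// With tail folding and a known maximum trip count of 20, two fixed factors
// are instead compared via cost * ceil(20 / VF), which penalizes wide
// factors that waste most of their final iteration.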
  4617. VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
  4618. const ElementCountSet &VFCandidates) {
  4619. InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  4620. LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  4621. assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
  4622. assert(VFCandidates.count(ElementCount::getFixed(1)) &&
  4623. "Expected Scalar VF to be a candidate");
  4624. const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
  4625. ExpectedCost);
  4626. VectorizationFactor ChosenFactor = ScalarCost;
  4627. bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  4628. if (ForceVectorization && VFCandidates.size() > 1) {
  4629. // Ignore scalar width, because the user explicitly wants vectorization.
  4630. // Initialize cost to max so that VF = 2 is, at least, chosen during cost
  4631. // evaluation.
  4632. ChosenFactor.Cost = InstructionCost::getMax();
  4633. }
  4634. SmallVector<InstructionVFPair> InvalidCosts;
  4635. for (const auto &i : VFCandidates) {
  4636. // The cost for scalar VF=1 is already calculated, so ignore it.
  4637. if (i.isScalar())
  4638. continue;
  4639. VectorizationCostTy C = expectedCost(i, &InvalidCosts);
  4640. VectorizationFactor Candidate(i, C.first, ScalarCost.ScalarCost);
  4641. #ifndef NDEBUG
  4642. unsigned AssumedMinimumVscale = 1;
  4643. if (std::optional<unsigned> VScale = getVScaleForTuning())
  4644. AssumedMinimumVscale = *VScale;
  4645. unsigned Width =
  4646. Candidate.Width.isScalable()
  4647. ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
  4648. : Candidate.Width.getFixedValue();
  4649. LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
  4650. << " costs: " << (Candidate.Cost / Width));
  4651. if (i.isScalable())
  4652. LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
  4653. << AssumedMinimumVscale << ")");
  4654. LLVM_DEBUG(dbgs() << ".\n");
  4655. #endif
  4656. if (!C.second && !ForceVectorization) {
  4657. LLVM_DEBUG(
  4658. dbgs() << "LV: Not considering vector loop of width " << i
  4659. << " because it will not generate any vector instructions.\n");
  4660. continue;
  4661. }
4662. // If profitable, add it to the ProfitableVFs list.
  4663. if (isMoreProfitable(Candidate, ScalarCost))
  4664. ProfitableVFs.push_back(Candidate);
  4665. if (isMoreProfitable(Candidate, ChosenFactor))
  4666. ChosenFactor = Candidate;
  4667. }
  4668. // Emit a report of VFs with invalid costs in the loop.
  4669. if (!InvalidCosts.empty()) {
  4670. // Group the remarks per instruction, keeping the instruction order from
  4671. // InvalidCosts.
  4672. std::map<Instruction *, unsigned> Numbering;
  4673. unsigned I = 0;
  4674. for (auto &Pair : InvalidCosts)
  4675. if (!Numbering.count(Pair.first))
  4676. Numbering[Pair.first] = I++;
4677. // Sort the list, first on instruction number, then on VF.
  4678. llvm::sort(InvalidCosts,
  4679. [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
  4680. if (Numbering[A.first] != Numbering[B.first])
  4681. return Numbering[A.first] < Numbering[B.first];
  4682. ElementCountComparator ECC;
  4683. return ECC(A.second, B.second);
  4684. });
  4685. // For a list of ordered instruction-vf pairs:
  4686. // [(load, vf1), (load, vf2), (store, vf1)]
  4687. // Group the instructions together to emit separate remarks for:
  4688. // load (vf1, vf2)
  4689. // store (vf1)
  4690. auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
  4691. auto Subset = ArrayRef<InstructionVFPair>();
  4692. do {
  4693. if (Subset.empty())
  4694. Subset = Tail.take_front(1);
  4695. Instruction *I = Subset.front().first;
  4696. // If the next instruction is different, or if there are no other pairs,
  4697. // emit a remark for the collated subset. e.g.
4698. // [(load, vf1), (load, vf2)]
4699. // to emit:
4700. // remark: invalid costs for 'load' at VF=(vf1, vf2)
  4701. if (Subset == Tail || Tail[Subset.size()].first != I) {
  4702. std::string OutString;
  4703. raw_string_ostream OS(OutString);
  4704. assert(!Subset.empty() && "Unexpected empty range");
  4705. OS << "Instruction with invalid costs prevented vectorization at VF=(";
  4706. for (const auto &Pair : Subset)
  4707. OS << (Pair.second == Subset.front().second ? "" : ", ")
  4708. << Pair.second;
  4709. OS << "):";
  4710. if (auto *CI = dyn_cast<CallInst>(I))
  4711. OS << " call to " << CI->getCalledFunction()->getName();
  4712. else
  4713. OS << " " << I->getOpcodeName();
  4714. OS.flush();
  4715. reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
  4716. Tail = Tail.drop_front(Subset.size());
  4717. Subset = {};
  4718. } else
  4719. // Grow the subset by one element
  4720. Subset = Tail.take_front(Subset.size() + 1);
  4721. } while (!Tail.empty());
  4722. }
  4723. if (!EnableCondStoresVectorization && NumPredStores) {
  4724. reportVectorizationFailure("There are conditional stores.",
  4725. "store that is conditionally executed prevents vectorization",
  4726. "ConditionalStore", ORE, TheLoop);
  4727. ChosenFactor = ScalarCost;
  4728. }
  4729. LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
  4730. !isMoreProfitable(ChosenFactor, ScalarCost)) dbgs()
  4731. << "LV: Vectorization seems to be not beneficial, "
  4732. << "but was forced by a user.\n");
  4733. LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
  4734. return ChosenFactor;
  4735. }
  4736. bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
  4737. const Loop &L, ElementCount VF) const {
  4738. // Cross iteration phis such as reductions need special handling and are
  4739. // currently unsupported.
  4740. if (any_of(L.getHeader()->phis(),
  4741. [&](PHINode &Phi) { return Legal->isFixedOrderRecurrence(&Phi); }))
  4742. return false;
  4743. // Phis with uses outside of the loop require special handling and are
  4744. // currently unsupported.
  4745. for (const auto &Entry : Legal->getInductionVars()) {
  4746. // Look for uses of the value of the induction at the last iteration.
  4747. Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
  4748. for (User *U : PostInc->users())
  4749. if (!L.contains(cast<Instruction>(U)))
  4750. return false;
  4751. // Look for uses of penultimate value of the induction.
  4752. for (User *U : Entry.first->users())
  4753. if (!L.contains(cast<Instruction>(U)))
  4754. return false;
  4755. }
4756. // Epilogue vectorization code has not been audited to ensure it handles
4757. // non-latch exits properly. It may be fine, but it needs to be audited and
4758. // tested.
  4759. if (L.getExitingBlock() != L.getLoopLatch())
  4760. return false;
  4761. return true;
  4762. }
  4763. bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
  4764. const ElementCount VF) const {
  4765. // FIXME: We need a much better cost-model to take different parameters such
  4766. // as register pressure, code size increase and cost of extra branches into
  4767. // account. For now we apply a very crude heuristic and only consider loops
  4768. // with vectorization factors larger than a certain value.
  4769. // Allow the target to opt out entirely.
  4770. if (!TTI.preferEpilogueVectorization())
  4771. return false;
  4772. // We also consider epilogue vectorization unprofitable for targets that don't
4773. // consider interleaving beneficial (e.g. MVE).
  4774. if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
  4775. return false;
  4776. // FIXME: We should consider changing the threshold for scalable
  4777. // vectors to take VScaleForTuning into account.
  4778. if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
  4779. return true;
  4780. return false;
  4781. }
  4782. VectorizationFactor
  4783. LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
  4784. const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  4785. VectorizationFactor Result = VectorizationFactor::Disabled();
  4786. if (!EnableEpilogueVectorization) {
  4787. LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
  4788. return Result;
  4789. }
  4790. if (!isScalarEpilogueAllowed()) {
  4791. LLVM_DEBUG(
  4792. dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
  4793. "allowed.\n";);
  4794. return Result;
  4795. }
  4796. // Not really a cost consideration, but check for unsupported cases here to
  4797. // simplify the logic.
  4798. if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
  4799. LLVM_DEBUG(
  4800. dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
  4801. "not a supported candidate.\n";);
  4802. return Result;
  4803. }
  4804. if (EpilogueVectorizationForceVF > 1) {
  4805. LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
  4806. ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
  4807. if (LVP.hasPlanWithVF(ForcedEC))
  4808. return {ForcedEC, 0, 0};
  4809. else {
  4810. LLVM_DEBUG(
  4811. dbgs()
  4812. << "LEV: Epilogue vectorization forced factor is not viable.\n";);
  4813. return Result;
  4814. }
  4815. }
  4816. if (TheLoop->getHeader()->getParent()->hasOptSize() ||
  4817. TheLoop->getHeader()->getParent()->hasMinSize()) {
  4818. LLVM_DEBUG(
  4819. dbgs()
  4820. << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
  4821. return Result;
  4822. }
  4823. if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
  4824. LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
  4825. "this loop\n");
  4826. return Result;
  4827. }
  4828. // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
  4829. // the main loop handles 8 lanes per iteration. We could still benefit from
  4830. // vectorizing the epilogue loop with VF=4.
  4831. ElementCount EstimatedRuntimeVF = MainLoopVF;
  4832. if (MainLoopVF.isScalable()) {
  4833. EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
  4834. if (std::optional<unsigned> VScale = getVScaleForTuning())
  4835. EstimatedRuntimeVF *= *VScale;
  4836. }
  4837. for (auto &NextVF : ProfitableVFs)
  4838. if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
  4839. ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
  4840. ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
  4841. (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
  4842. LVP.hasPlanWithVF(NextVF.Width))
  4843. Result = NextVF;
  4844. if (Result != VectorizationFactor::Disabled())
  4845. LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
  4846. << Result.Width << "\n";);
  4847. return Result;
  4848. }
  4849. std::pair<unsigned, unsigned>
  4850. LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  4851. unsigned MinWidth = -1U;
  4852. unsigned MaxWidth = 8;
  4853. const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  4854. // For in-loop reductions, no element types are added to ElementTypesInLoop
  4855. // if there are no loads/stores in the loop. In this case, check through the
  4856. // reduction variables to determine the maximum width.
  4857. if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
  4858. // Reset MaxWidth so that we can find the smallest type used by recurrences
  4859. // in the loop.
  4860. MaxWidth = -1U;
  4861. for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
  4862. const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
  4863. // When finding the min width used by the recurrence we need to account
  4864. // for casts on the input operands of the recurrence.
  4865. MaxWidth = std::min<unsigned>(
  4866. MaxWidth, std::min<unsigned>(
  4867. RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
  4868. RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
  4869. }
  4870. } else {
  4871. for (Type *T : ElementTypesInLoop) {
  4872. MinWidth = std::min<unsigned>(
  4873. MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
  4874. MaxWidth = std::max<unsigned>(
  4875. MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
  4876. }
  4877. }
  4878. return {MinWidth, MaxWidth};
  4879. }
  4880. void LoopVectorizationCostModel::collectElementTypesForWidening() {
  4881. ElementTypesInLoop.clear();
  4882. // For each block.
  4883. for (BasicBlock *BB : TheLoop->blocks()) {
  4884. // For each instruction in the loop.
  4885. for (Instruction &I : BB->instructionsWithoutDebug()) {
  4886. Type *T = I.getType();
  4887. // Skip ignored values.
  4888. if (ValuesToIgnore.count(&I))
  4889. continue;
  4890. // Only examine Loads, Stores and PHINodes.
  4891. if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
  4892. continue;
  4893. // Examine PHI nodes that are reduction variables. Update the type to
  4894. // account for the recurrence type.
  4895. if (auto *PN = dyn_cast<PHINode>(&I)) {
  4896. if (!Legal->isReductionVariable(PN))
  4897. continue;
  4898. const RecurrenceDescriptor &RdxDesc =
  4899. Legal->getReductionVars().find(PN)->second;
  4900. if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
  4901. TTI.preferInLoopReduction(RdxDesc.getOpcode(),
  4902. RdxDesc.getRecurrenceType(),
  4903. TargetTransformInfo::ReductionFlags()))
  4904. continue;
  4905. T = RdxDesc.getRecurrenceType();
  4906. }
  4907. // Examine the stored values.
  4908. if (auto *ST = dyn_cast<StoreInst>(&I))
  4909. T = ST->getValueOperand()->getType();
  4910. assert(T->isSized() &&
  4911. "Expected the load/store/recurrence type to be sized");
  4912. ElementTypesInLoop.insert(T);
  4913. }
  4914. }
  4915. }
  4916. unsigned
  4917. LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
  4918. InstructionCost LoopCost) {
  4919. // -- The interleave heuristics --
  4920. // We interleave the loop in order to expose ILP and reduce the loop overhead.
  4921. // There are many micro-architectural considerations that we can't predict
  4922. // at this level. For example, frontend pressure (on decode or fetch) due to
  4923. // code size, or the number and capabilities of the execution ports.
  4924. //
  4925. // We use the following heuristics to select the interleave count:
  4926. // 1. If the code has reductions, then we interleave to break the cross
  4927. // iteration dependency.
  4928. // 2. If the loop is really small, then we interleave to reduce the loop
  4929. // overhead.
  4930. // 3. We don't interleave if we think that we will spill registers to memory
  4931. // due to the increased register pressure.
  4932. if (!isScalarEpilogueAllowed())
  4933. return 1;
4934. // The maximum safe dependence distance was already used to limit the width,
4935. // so do not interleave on top of it.
  4935. if (Legal->getMaxSafeDepDistBytes() != -1U)
  4936. return 1;
  4937. auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  4938. const bool HasReductions = !Legal->getReductionVars().empty();
  4939. // Do not interleave loops with a relatively small known or estimated trip
  4940. // count. But we will interleave when InterleaveSmallLoopScalarReduction is
4941. // enabled and the code has scalar reductions (HasReductions && VF == 1),
4942. // because under those conditions interleaving can expose ILP and break
4943. // cross-iteration dependences for reductions.
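// For example, with an estimated trip count below the threshold (128 by
// default, assuming TinyTripCountInterleaveThreshold retains its default) and
// no scalar-reduction exception, the interleave count stays at 1.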
  4944. if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
  4945. !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
  4946. return 1;
  4947. // If we did not calculate the cost for VF (because the user selected the VF)
  4948. // then we calculate the cost of VF here.
  4949. if (LoopCost == 0) {
  4950. LoopCost = expectedCost(VF).first;
  4951. assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
  4952. // Loop body is free and there is no need for interleaving.
  4953. if (LoopCost == 0)
  4954. return 1;
  4955. }
  4956. RegisterUsage R = calculateRegisterUsage({VF})[0];
4957. // We divide by these constants, so assume that we have at least one
4958. // instruction that uses at least one register.
  4959. for (auto& pair : R.MaxLocalUsers) {
  4960. pair.second = std::max(pair.second, 1U);
  4961. }
  4962. // We calculate the interleave count using the following formula.
  4963. // Subtract the number of loop invariants from the number of available
  4964. // registers. These registers are used by all of the interleaved instances.
  4965. // Next, divide the remaining registers by the number of registers that is
  4966. // required by the loop, in order to estimate how many parallel instances
  4967. // fit without causing spills. All of this is rounded down if necessary to be
4968. // a power of two. We want a power-of-two interleave count to simplify any
4969. // addressing operations or alignment considerations.
4970. // We also want power-of-two interleave counts to ensure that the induction
4971. // variable of the vector loop wraps to zero when the tail is folded by
4972. // masking; this currently happens when optimizing for size, in which case IC is set to 1 above.
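// For example (illustrative numbers only): with 32 registers in a class, 2 of
// them loop invariant, and a max local usage of 6, the estimate below is
// PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4 interleaved instances.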
  4973. unsigned IC = UINT_MAX;
  4974. for (auto& pair : R.MaxLocalUsers) {
  4975. unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
  4976. LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
  4977. << " registers of "
  4978. << TTI.getRegisterClassName(pair.first) << " register class\n");
  4979. if (VF.isScalar()) {
  4980. if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
  4981. TargetNumRegisters = ForceTargetNumScalarRegs;
  4982. } else {
  4983. if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
  4984. TargetNumRegisters = ForceTargetNumVectorRegs;
  4985. }
  4986. unsigned MaxLocalUsers = pair.second;
  4987. unsigned LoopInvariantRegs = 0;
  4988. if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
  4989. LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
  4990. unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
  4991. // Don't count the induction variable as interleaved.
  4992. if (EnableIndVarRegisterHeur) {
  4993. TmpIC =
  4994. PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
  4995. std::max(1U, (MaxLocalUsers - 1)));
  4996. }
  4997. IC = std::min(IC, TmpIC);
  4998. }
  4999. // Clamp the interleave ranges to reasonable counts.
  5000. unsigned MaxInterleaveCount =
  5001. TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
  5002. // Check if the user has overridden the max.
  5003. if (VF.isScalar()) {
  5004. if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
  5005. MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  5006. } else {
  5007. if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
  5008. MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  5009. }
5010. // If the trip count is a known or estimated compile-time constant, limit the
5011. // interleave count to at most the trip count divided by VF, while keeping it
5012. // at least 1.
  5013. //
  5014. // For scalable vectors we can't know if interleaving is beneficial. It may
5015. // not be beneficial for small loops if none of the lanes in the second vector
5016. // iteration are enabled. However, for larger loops, there is likely to be a
  5017. // similar benefit as for fixed-width vectors. For now, we choose to leave
  5018. // the InterleaveCount as if vscale is '1', although if some information about
  5019. // the vector is known (e.g. min vector size), we can make a better decision.
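// For example, a best known trip count of 24 with VF = 8 clamps
// MaxInterleaveCount to at most 24 / 8 = 3.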
  5020. if (BestKnownTC) {
  5021. MaxInterleaveCount =
  5022. std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
  5023. // Make sure MaxInterleaveCount is greater than 0.
  5024. MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  5025. }
  5026. assert(MaxInterleaveCount > 0 &&
  5027. "Maximum interleave count must be greater than 0");
5028. // Clamp the calculated IC to be between 1 and the max interleave count
  5029. // that the target and trip count allows.
  5030. if (IC > MaxInterleaveCount)
  5031. IC = MaxInterleaveCount;
  5032. else
  5033. // Make sure IC is greater than 0.
  5034. IC = std::max(1u, IC);
  5035. assert(IC > 0 && "Interleave count must be greater than 0.");
  5036. // Interleave if we vectorized this loop and there is a reduction that could
  5037. // benefit from interleaving.
  5038. if (VF.isVector() && HasReductions) {
  5039. LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
  5040. return IC;
  5041. }
  5042. // For any scalar loop that either requires runtime checks or predication we
  5043. // are better off leaving this to the unroller. Note that if we've already
  5044. // vectorized the loop we will have done the runtime check and so interleaving
  5045. // won't require further checks.
  5046. bool ScalarInterleavingRequiresPredication =
  5047. (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
  5048. return Legal->blockNeedsPredication(BB);
  5049. }));
  5050. bool ScalarInterleavingRequiresRuntimePointerCheck =
  5051. (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
  5052. // We want to interleave small loops in order to reduce the loop overhead and
  5053. // potentially expose ILP opportunities.
  5054. LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
  5055. << "LV: IC is " << IC << '\n'
  5056. << "LV: VF is " << VF << '\n');
  5057. const bool AggressivelyInterleaveReductions =
  5058. TTI.enableAggressiveInterleaving(HasReductions);
  5059. if (!ScalarInterleavingRequiresRuntimePointerCheck &&
  5060. !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
  5061. // We assume that the cost overhead is 1 and we use the cost model
  5062. // to estimate the cost of the loop and interleave until the cost of the
  5063. // loop overhead is about 5% of the cost of the loop.
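// For example, assuming SmallLoopCost keeps its default of 20, a loop cost of
// 6 gives SmallIC = min(IC, PowerOf2Floor(20 / 6)) = min(IC, 2).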
  5064. unsigned SmallIC = std::min(
  5065. IC, (unsigned)PowerOf2Floor(SmallLoopCost / *LoopCost.getValue()));
  5066. // Interleave until store/load ports (estimated by max interleave count) are
  5067. // saturated.
  5068. unsigned NumStores = Legal->getNumStores();
  5069. unsigned NumLoads = Legal->getNumLoads();
  5070. unsigned StoresIC = IC / (NumStores ? NumStores : 1);
  5071. unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
  5072. // There is little point in interleaving for reductions containing selects
  5073. // and compares when VF=1 since it may just create more overhead than it's
  5074. // worth for loops with small trip counts. This is because we still have to
  5075. // do the final reduction after the loop.
  5076. bool HasSelectCmpReductions =
  5077. HasReductions &&
  5078. any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
  5079. const RecurrenceDescriptor &RdxDesc = Reduction.second;
  5080. return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
  5081. RdxDesc.getRecurrenceKind());
  5082. });
  5083. if (HasSelectCmpReductions) {
  5084. LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
  5085. return 1;
  5086. }
  5087. // If we have a scalar reduction (vector reductions are already dealt with
  5088. // by this point), we can increase the critical path length if the loop
  5089. // we're interleaving is inside another loop. For tree-wise reductions
  5090. // set the limit to 2, and for ordered reductions it's best to disable
  5091. // interleaving entirely.
  5092. if (HasReductions && TheLoop->getLoopDepth() > 1) {
  5093. bool HasOrderedReductions =
  5094. any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
  5095. const RecurrenceDescriptor &RdxDesc = Reduction.second;
  5096. return RdxDesc.isOrdered();
  5097. });
  5098. if (HasOrderedReductions) {
  5099. LLVM_DEBUG(
  5100. dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
  5101. return 1;
  5102. }
  5103. unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
  5104. SmallIC = std::min(SmallIC, F);
  5105. StoresIC = std::min(StoresIC, F);
  5106. LoadsIC = std::min(LoadsIC, F);
  5107. }
  5108. if (EnableLoadStoreRuntimeInterleave &&
  5109. std::max(StoresIC, LoadsIC) > SmallIC) {
  5110. LLVM_DEBUG(
  5111. dbgs() << "LV: Interleaving to saturate store or load ports.\n");
  5112. return std::max(StoresIC, LoadsIC);
  5113. }
  5114. // If there are scalar reductions and TTI has enabled aggressive
  5115. // interleaving for reductions, we will interleave to expose ILP.
  5116. if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
  5117. AggressivelyInterleaveReductions) {
  5118. LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5119. // Interleave at least SmallIC, but not as aggressively as the normal IC,
5120. // to handle the rare situation when resources are too limited.
  5121. return std::max(IC / 2, SmallIC);
  5122. } else {
  5123. LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
  5124. return SmallIC;
  5125. }
  5126. }
  5127. // Interleave if this is a large loop (small loops are already dealt with by
  5128. // this point) that could benefit from interleaving.
  5129. if (AggressivelyInterleaveReductions) {
  5130. LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
  5131. return IC;
  5132. }
  5133. LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  5134. return 1;
  5135. }
  5136. SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
  5137. LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  5138. // This function calculates the register usage by measuring the highest number
  5139. // of values that are alive at a single location. Obviously, this is a very
5140. // rough estimation. We scan the loop in topological order and
  5141. // assign a number to each instruction. We use RPO to ensure that defs are
  5142. // met before their users. We assume that each instruction that has in-loop
  5143. // users starts an interval. We record every time that an in-loop value is
  5144. // used, so we have a list of the first and last occurrences of each
  5145. // instruction. Next, we transpose this data structure into a multi map that
  5146. // holds the list of intervals that *end* at a specific location. This multi
  5147. // map allows us to perform a linear search. We scan the instructions linearly
  5148. // and record each time that a new interval starts, by placing it in a set.
  5149. // If we find this value in the multi-map then we remove it from the set.
  5150. // The max register usage is the maximum size of the set.
  5151. // We also search for instructions that are defined outside the loop, but are
  5152. // used inside the loop. We need this number separately from the max-interval
  5153. // usage number because when we unroll, loop-invariant values do not take
5154. // more registers.
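// For example, if values %a and %b have both been defined but their last uses
// have not yet been reached, both intervals are open at that point and the two
// values are counted as simultaneously live.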
  5155. LoopBlocksDFS DFS(TheLoop);
  5156. DFS.perform(LI);
  5157. RegisterUsage RU;
  5158. // Each 'key' in the map opens a new interval. The values
  5159. // of the map are the index of the 'last seen' usage of the
  5160. // instruction that is the key.
  5161. using IntervalMap = DenseMap<Instruction *, unsigned>;
  5162. // Maps instruction to its index.
  5163. SmallVector<Instruction *, 64> IdxToInstr;
  5164. // Marks the end of each interval.
  5165. IntervalMap EndPoint;
5166. // Saves the set of instructions that are used within the loop.
  5167. SmallPtrSet<Instruction *, 8> Ends;
  5168. // Saves the list of values that are used in the loop but are defined outside
  5169. // the loop (not including non-instruction values such as arguments and
  5170. // constants).
  5171. SmallPtrSet<Instruction *, 8> LoopInvariants;
  5172. for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
  5173. for (Instruction &I : BB->instructionsWithoutDebug()) {
  5174. IdxToInstr.push_back(&I);
  5175. // Save the end location of each USE.
  5176. for (Value *U : I.operands()) {
  5177. auto *Instr = dyn_cast<Instruction>(U);
  5178. // Ignore non-instruction values such as arguments, constants, etc.
  5179. // FIXME: Might need some motivation why these values are ignored. If
  5180. // for example an argument is used inside the loop it will increase the
  5181. // register pressure (so shouldn't we add it to LoopInvariants).
  5182. if (!Instr)
  5183. continue;
  5184. // If this instruction is outside the loop then record it and continue.
  5185. if (!TheLoop->contains(Instr)) {
  5186. LoopInvariants.insert(Instr);
  5187. continue;
  5188. }
  5189. // Overwrite previous end points.
  5190. EndPoint[Instr] = IdxToInstr.size();
  5191. Ends.insert(Instr);
  5192. }
  5193. }
  5194. }
  5195. // Saves the list of intervals that end with the index in 'key'.
  5196. using InstrList = SmallVector<Instruction *, 2>;
  5197. DenseMap<unsigned, InstrList> TransposeEnds;
  5198. // Transpose the EndPoints to a list of values that end at each index.
  5199. for (auto &Interval : EndPoint)
  5200. TransposeEnds[Interval.second].push_back(Interval.first);
  5201. SmallPtrSet<Instruction *, 8> OpenIntervals;
  5202. SmallVector<RegisterUsage, 8> RUs(VFs.size());
  5203. SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
  5204. LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
  5205. const auto &TTICapture = TTI;
  5206. auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
  5207. if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
  5208. return 0;
  5209. return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
  5210. };
  5211. for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
  5212. Instruction *I = IdxToInstr[i];
  5213. // Remove all of the instructions that end at this location.
  5214. InstrList &List = TransposeEnds[i];
  5215. for (Instruction *ToRemove : List)
  5216. OpenIntervals.erase(ToRemove);
  5217. // Ignore instructions that are never used within the loop.
  5218. if (!Ends.count(I))
  5219. continue;
  5220. // Skip ignored values.
  5221. if (ValuesToIgnore.count(I))
  5222. continue;
  5223. // For each VF find the maximum usage of registers.
  5224. for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
  5225. // Count the number of registers used, per register class, given all open
  5226. // intervals.
  5227. // Note that elements in this SmallMapVector will be default constructed
  5228. // as 0. So we can use "RegUsage[ClassID] += n" in the code below even if
  5229. // there is no previous entry for ClassID.
  5230. SmallMapVector<unsigned, unsigned, 4> RegUsage;
  5231. if (VFs[j].isScalar()) {
  5232. for (auto *Inst : OpenIntervals) {
  5233. unsigned ClassID =
  5234. TTI.getRegisterClassForType(false, Inst->getType());
  5235. // FIXME: The target might use more than one register for the type
  5236. // even in the scalar case.
  5237. RegUsage[ClassID] += 1;
  5238. }
  5239. } else {
  5240. collectUniformsAndScalars(VFs[j]);
  5241. for (auto *Inst : OpenIntervals) {
  5242. // Skip ignored values for VF > 1.
  5243. if (VecValuesToIgnore.count(Inst))
  5244. continue;
  5245. if (isScalarAfterVectorization(Inst, VFs[j])) {
  5246. unsigned ClassID =
  5247. TTI.getRegisterClassForType(false, Inst->getType());
  5248. // FIXME: The target might use more than one register for the type
  5249. // even in the scalar case.
  5250. RegUsage[ClassID] += 1;
  5251. } else {
  5252. unsigned ClassID =
  5253. TTI.getRegisterClassForType(true, Inst->getType());
  5254. RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
  5255. }
  5256. }
  5257. }
  5258. for (auto& pair : RegUsage) {
  5259. auto &Entry = MaxUsages[j][pair.first];
  5260. Entry = std::max(Entry, pair.second);
  5261. }
  5262. }
  5263. LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
  5264. << OpenIntervals.size() << '\n');
  5265. // Add the current instruction to the list of open intervals.
  5266. OpenIntervals.insert(I);
  5267. }
  5268. for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
  5269. // Note that elements in this SmallMapVector will be default constructed
  5270. // as 0. So we can use "Invariant[ClassID] += n" in the code below even if
  5271. // there is no previous entry for ClassID.
  5272. SmallMapVector<unsigned, unsigned, 4> Invariant;
  5273. for (auto *Inst : LoopInvariants) {
  5274. // FIXME: The target might use more than one register for the type
  5275. // even in the scalar case.
  5276. bool IsScalar = all_of(Inst->users(), [&](User *U) {
  5277. auto *I = cast<Instruction>(U);
  5278. return TheLoop != LI->getLoopFor(I->getParent()) ||
  5279. isScalarAfterVectorization(I, VFs[i]);
  5280. });
  5281. ElementCount VF = IsScalar ? ElementCount::getFixed(1) : VFs[i];
  5282. unsigned ClassID =
  5283. TTI.getRegisterClassForType(VF.isVector(), Inst->getType());
  5284. Invariant[ClassID] += GetRegUsage(Inst->getType(), VF);
  5285. }
  5286. LLVM_DEBUG({
  5287. dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
  5288. dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
  5289. << " item\n";
  5290. for (const auto &pair : MaxUsages[i]) {
  5291. dbgs() << "LV(REG): RegisterClass: "
  5292. << TTI.getRegisterClassName(pair.first) << ", " << pair.second
  5293. << " registers\n";
  5294. }
  5295. dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
  5296. << " item\n";
  5297. for (const auto &pair : Invariant) {
  5298. dbgs() << "LV(REG): RegisterClass: "
  5299. << TTI.getRegisterClassName(pair.first) << ", " << pair.second
  5300. << " registers\n";
  5301. }
  5302. });
  5303. RU.LoopInvariantRegs = Invariant;
  5304. RU.MaxLocalUsers = MaxUsages[i];
  5305. RUs[i] = RU;
  5306. }
  5307. return RUs;
  5308. }
  5309. bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
  5310. ElementCount VF) {
  5311. // TODO: Cost model for emulated masked load/store is completely
  5312. // broken. This hack guides the cost model to use an artificially
  5313. // high enough value to practically disable vectorization with such
5314. // operations, except where the previously deployed legality hack allowed
5315. // using very low cost values. This is to avoid regressions coming simply
5316. // from moving the "masked load/store" check from legality to the cost model.
  5317. // Masked Load/Gather emulation was previously never allowed.
5318. // A limited amount of Masked Store/Scatter emulation was allowed.
  5319. assert((isPredicatedInst(I)) &&
  5320. "Expecting a scalar emulated instruction");
  5321. return isa<LoadInst>(I) ||
  5322. (isa<StoreInst>(I) &&
  5323. NumPredStores > NumberOfStoresToPredicate);
  5324. }
  5325. void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  5326. // If we aren't vectorizing the loop, or if we've already collected the
  5327. // instructions to scalarize, there's nothing to do. Collection may already
  5328. // have occurred if we have a user-selected VF and are now computing the
  5329. // expected cost for interleaving.
  5330. if (VF.isScalar() || VF.isZero() ||
  5331. InstsToScalarize.find(VF) != InstsToScalarize.end())
  5332. return;
5333. // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  5334. // not profitable to scalarize any instructions, the presence of VF in the
  5335. // map will indicate that we've analyzed it already.
  5336. ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
  5337. PredicatedBBsAfterVectorization[VF].clear();
  5338. // Find all the instructions that are scalar with predication in the loop and
  5339. // determine if it would be better to not if-convert the blocks they are in.
  5340. // If so, we also record the instructions to scalarize.
  5341. for (BasicBlock *BB : TheLoop->blocks()) {
  5342. if (!blockNeedsPredicationForAnyReason(BB))
  5343. continue;
  5344. for (Instruction &I : *BB)
  5345. if (isScalarWithPredication(&I, VF)) {
  5346. ScalarCostsTy ScalarCosts;
  5347. // Do not apply discount if scalable, because that would lead to
  5348. // invalid scalarization costs.
  5349. // Do not apply discount logic if hacked cost is needed
  5350. // for emulated masked memrefs.
  5351. if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
  5352. computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
  5353. ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
  5354. // Remember that BB will remain after vectorization.
  5355. PredicatedBBsAfterVectorization[VF].insert(BB);
  5356. }
  5357. }
  5358. }
  5359. InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
  5360. Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
  5361. assert(!isUniformAfterVectorization(PredInst, VF) &&
  5362. "Instruction marked uniform-after-vectorization will be predicated");
  5363. // Initialize the discount to zero, meaning that the scalar version and the
  5364. // vector version cost the same.
  5365. InstructionCost Discount = 0;
  5366. // Holds instructions to analyze. The instructions we visit are mapped in
  5367. // ScalarCosts. Those instructions are the ones that would be scalarized if
  5368. // we find that the scalar version costs less.
  5369. SmallVector<Instruction *, 8> Worklist;
  5370. // Returns true if the given instruction can be scalarized.
  5371. auto canBeScalarized = [&](Instruction *I) -> bool {
  5372. // We only attempt to scalarize instructions forming a single-use chain
  5373. // from the original predicated block that would otherwise be vectorized.
  5374. // Although not strictly necessary, we give up on instructions we know will
  5375. // already be scalar to avoid traversing chains that are unlikely to be
  5376. // beneficial.
  5377. if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
  5378. isScalarAfterVectorization(I, VF))
  5379. return false;
  5380. // If the instruction is scalar with predication, it will be analyzed
  5381. // separately. We ignore it within the context of PredInst.
  5382. if (isScalarWithPredication(I, VF))
  5383. return false;
  5384. // If any of the instruction's operands are uniform after vectorization,
  5385. // the instruction cannot be scalarized. This prevents, for example, a
  5386. // masked load from being scalarized.
  5387. //
  5388. // We assume we will only emit a value for lane zero of an instruction
  5389. // marked uniform after vectorization, rather than VF identical values.
  5390. // Thus, if we scalarize an instruction that uses a uniform, we would
  5391. // create uses of values corresponding to the lanes we aren't emitting code
  5392. // for. This behavior can be changed by allowing getScalarValue to clone
  5393. // the lane zero values for uniforms rather than asserting.
  5394. for (Use &U : I->operands())
  5395. if (auto *J = dyn_cast<Instruction>(U.get()))
  5396. if (isUniformAfterVectorization(J, VF))
  5397. return false;
  5398. // Otherwise, we can scalarize the instruction.
  5399. return true;
  5400. };
  5401. // Compute the expected cost discount from scalarizing the entire expression
  5402. // feeding the predicated instruction. We currently only consider expressions
  5403. // that are single-use instruction chains.
  5404. Worklist.push_back(PredInst);
  5405. while (!Worklist.empty()) {
  5406. Instruction *I = Worklist.pop_back_val();
  5407. // If we've already analyzed the instruction, there's nothing to do.
  5408. if (ScalarCosts.find(I) != ScalarCosts.end())
  5409. continue;
  5410. // Compute the cost of the vector instruction. Note that this cost already
  5411. // includes the scalarization overhead of the predicated instruction.
  5412. InstructionCost VectorCost = getInstructionCost(I, VF).first;
  5413. // Compute the cost of the scalarized instruction. This cost is the cost of
  5414. // the instruction as if it wasn't if-converted and instead remained in the
  5415. // predicated block. We will scale this cost by block probability after
  5416. // computing the scalarization overhead.
  5417. InstructionCost ScalarCost =
  5418. VF.getFixedValue() *
  5419. getInstructionCost(I, ElementCount::getFixed(1)).first;
  5420. // Compute the scalarization overhead of needed insertelement instructions
  5421. // and phi nodes.
  5422. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  5423. if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
  5424. ScalarCost += TTI.getScalarizationOverhead(
  5425. cast<VectorType>(ToVectorTy(I->getType(), VF)),
  5426. APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ true,
  5427. /*Extract*/ false, CostKind);
  5428. ScalarCost +=
  5429. VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
  5430. }
  5431. // Compute the scalarization overhead of needed extractelement
  5432. // instructions. For each of the instruction's operands, if the operand can
  5433. // be scalarized, add it to the worklist; otherwise, account for the
  5434. // overhead.
  5435. for (Use &U : I->operands())
  5436. if (auto *J = dyn_cast<Instruction>(U.get())) {
  5437. assert(VectorType::isValidElementType(J->getType()) &&
  5438. "Instruction has non-scalar type");
  5439. if (canBeScalarized(J))
  5440. Worklist.push_back(J);
  5441. else if (needsExtract(J, VF)) {
  5442. ScalarCost += TTI.getScalarizationOverhead(
  5443. cast<VectorType>(ToVectorTy(J->getType(), VF)),
  5444. APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
  5445. /*Extract*/ true, CostKind);
  5446. }
  5447. }
  5448. // Scale the total scalar cost by block probability.
  5449. ScalarCost /= getReciprocalPredBlockProb();
  5450. // Compute the discount. A non-negative discount means the vector version
  5451. // of the instruction costs more, and scalarizing would be beneficial.
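// For example, a VectorCost of 10 against a probability-scaled ScalarCost of 6
// increases the discount by 4, favoring scalarization of this chain.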
  5452. Discount += VectorCost - ScalarCost;
  5453. ScalarCosts[I] = ScalarCost;
  5454. }
  5455. return Discount;
  5456. }
  5457. LoopVectorizationCostModel::VectorizationCostTy
  5458. LoopVectorizationCostModel::expectedCost(
  5459. ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
  5460. VectorizationCostTy Cost;
  5461. // For each block.
  5462. for (BasicBlock *BB : TheLoop->blocks()) {
  5463. VectorizationCostTy BlockCost;
  5464. // For each instruction in the old loop.
  5465. for (Instruction &I : BB->instructionsWithoutDebug()) {
  5466. // Skip ignored values.
  5467. if (ValuesToIgnore.count(&I) ||
  5468. (VF.isVector() && VecValuesToIgnore.count(&I)))
  5469. continue;
  5470. VectorizationCostTy C = getInstructionCost(&I, VF);
  5471. // Check if we should override the cost.
  5472. if (C.first.isValid() &&
  5473. ForceTargetInstructionCost.getNumOccurrences() > 0)
  5474. C.first = InstructionCost(ForceTargetInstructionCost);
  5475. // Keep a list of instructions with invalid costs.
  5476. if (Invalid && !C.first.isValid())
  5477. Invalid->emplace_back(&I, VF);
  5478. BlockCost.first += C.first;
  5479. BlockCost.second |= C.second;
  5480. LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
  5481. << " for VF " << VF << " For instruction: " << I
  5482. << '\n');
  5483. }
  5484. // If we are vectorizing a predicated block, it will have been
  5485. // if-converted. This means that the block's instructions (aside from
  5486. // stores and instructions that may divide by zero) will now be
  5487. // unconditionally executed. For the scalar case, we may not always execute
  5488. // the predicated block, if it is an if-else block. Thus, scale the block's
  5489. // cost by the probability of executing it. blockNeedsPredication from
  5490. // Legal is used so as to not include all blocks in tail folded loops.
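// For example, an if-then block with cost 8 contributes
// 8 / getReciprocalPredBlockProb() to the scalar loop cost, i.e. 4 assuming
// the default reciprocal probability of 2.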
  5491. if (VF.isScalar() && Legal->blockNeedsPredication(BB))
  5492. BlockCost.first /= getReciprocalPredBlockProb();
  5493. Cost.first += BlockCost.first;
  5494. Cost.second |= BlockCost.second;
  5495. }
  5496. return Cost;
  5497. }
  5498. /// Gets Address Access SCEV after verifying that the access pattern
  5499. /// is loop invariant except the induction variable dependence.
  5500. ///
  5501. /// This SCEV can be sent to the Target in order to estimate the address
  5502. /// calculation cost.
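/// For example, a GEP such as 'getelementptr %base, %inv, %iv', where %inv is
/// loop invariant and %iv is an induction variable, is accepted; any other
/// loop-varying index makes this return nullptr.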
  5503. static const SCEV *getAddressAccessSCEV(
  5504. Value *Ptr,
  5505. LoopVectorizationLegality *Legal,
  5506. PredicatedScalarEvolution &PSE,
  5507. const Loop *TheLoop) {
  5508. auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  5509. if (!Gep)
  5510. return nullptr;
  5511. // We are looking for a gep with all loop invariant indices except for one
  5512. // which should be an induction variable.
  5513. auto SE = PSE.getSE();
  5514. unsigned NumOperands = Gep->getNumOperands();
  5515. for (unsigned i = 1; i < NumOperands; ++i) {
  5516. Value *Opd = Gep->getOperand(i);
  5517. if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
  5518. !Legal->isInductionVariable(Opd))
  5519. return nullptr;
  5520. }
5521. // Now we know we have a GEP of the form (ptr, %inv, %ind, %inv). Return the Ptr SCEV.
  5522. return PSE.getSCEV(Ptr);
  5523. }
  5524. static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  5525. return Legal->hasStride(I->getOperand(0)) ||
  5526. Legal->hasStride(I->getOperand(1));
  5527. }
  5528. InstructionCost
  5529. LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
  5530. ElementCount VF) {
  5531. assert(VF.isVector() &&
  5532. "Scalarization cost of instruction implies vectorization.");
  5533. if (VF.isScalable())
  5534. return InstructionCost::getInvalid();
  5535. Type *ValTy = getLoadStoreType(I);
  5536. auto SE = PSE.getSE();
  5537. unsigned AS = getLoadStoreAddressSpace(I);
  5538. Value *Ptr = getLoadStorePointerOperand(I);
  5539. Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
  5540. // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
  5541. // that it is being called from this specific place.
  5542. // Figure out whether the access is strided and get the stride value
5543. // if it's known at compile time.
  5544. const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
  5545. // Get the cost of the scalar memory instruction and address computation.
  5546. InstructionCost Cost =
  5547. VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
  5548. // Don't pass *I here, since it is scalar but will actually be part of a
  5549. // vectorized loop where the user of it is a vectorized instruction.
  5550. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  5551. const Align Alignment = getLoadStoreAlignment(I);
  5552. Cost += VF.getKnownMinValue() * TTI.getMemoryOpCost(I->getOpcode(),
  5553. ValTy->getScalarType(),
  5554. Alignment, AS, CostKind);
  5555. // Get the overhead of the extractelement and insertelement instructions
  5556. // we might create due to scalarization.
  5557. Cost += getScalarizationOverhead(I, VF, CostKind);
  5558. // If we have a predicated load/store, it will need extra i1 extracts and
  5559. // conditional branches, but may not be executed for each vector lane. Scale
  5560. // the cost by the probability of executing the predicated block.
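// For example, assuming the default reciprocal predicated-block probability of
// 2, the scalarized cost accumulated above is halved before the i1 extract and
// branch overhead is added below.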
  5561. if (isPredicatedInst(I)) {
  5562. Cost /= getReciprocalPredBlockProb();
  5563. // Add the cost of an i1 extract and a branch
  5564. auto *Vec_i1Ty =
  5565. VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
  5566. Cost += TTI.getScalarizationOverhead(
  5567. Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
  5568. /*Insert=*/false, /*Extract=*/true, CostKind);
  5569. Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
  5570. if (useEmulatedMaskMemRefHack(I, VF))
  5571. // Artificially setting to a high enough value to practically disable
  5572. // vectorization with such operations.
  5573. Cost = 3000000;
  5574. }
  5575. return Cost;
  5576. }
  5577. InstructionCost
  5578. LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
  5579. ElementCount VF) {
  5580. Type *ValTy = getLoadStoreType(I);
  5581. auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  5582. Value *Ptr = getLoadStorePointerOperand(I);
  5583. unsigned AS = getLoadStoreAddressSpace(I);
  5584. int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
  5585. enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  5586. assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
  5587. "Stride should be 1 or -1 for consecutive memory access");
  5588. const Align Alignment = getLoadStoreAlignment(I);
  5589. InstructionCost Cost = 0;
  5590. if (Legal->isMaskRequired(I)) {
  5591. Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
  5592. CostKind);
  5593. } else {
  5594. TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
  5595. Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
  5596. CostKind, OpInfo, I);
  5597. }
  5598. bool Reverse = ConsecutiveStride < 0;
  5599. if (Reverse)
  5600. Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
  5601. std::nullopt, CostKind, 0);
  5602. return Cost;
  5603. }
  5604. InstructionCost
  5605. LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
  5606. ElementCount VF) {
  5607. assert(Legal->isUniformMemOp(*I));
  5608. Type *ValTy = getLoadStoreType(I);
  5609. auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  5610. const Align Alignment = getLoadStoreAlignment(I);
  5611. unsigned AS = getLoadStoreAddressSpace(I);
  5612. enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  5613. if (isa<LoadInst>(I)) {
  5614. return TTI.getAddressComputationCost(ValTy) +
  5615. TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
  5616. CostKind) +
  5617. TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  5618. }
  5619. StoreInst *SI = cast<StoreInst>(I);
  5620. bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  5621. return TTI.getAddressComputationCost(ValTy) +
  5622. TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
  5623. CostKind) +
  5624. (isLoopInvariantStoreValue
  5625. ? 0
  5626. : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
  5627. CostKind, VF.getKnownMinValue() - 1));
  5628. }
  5629. InstructionCost
  5630. LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
  5631. ElementCount VF) {
  5632. Type *ValTy = getLoadStoreType(I);
  5633. auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  5634. const Align Alignment = getLoadStoreAlignment(I);
  5635. const Value *Ptr = getLoadStorePointerOperand(I);
  5636. return TTI.getAddressComputationCost(VectorTy) +
  5637. TTI.getGatherScatterOpCost(
  5638. I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
  5639. TargetTransformInfo::TCK_RecipThroughput, I);
  5640. }
  5641. InstructionCost
  5642. LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
  5643. ElementCount VF) {
  5644. // TODO: Once we have support for interleaving with scalable vectors
  5645. // we can calculate the cost properly here.
  5646. if (VF.isScalable())
  5647. return InstructionCost::getInvalid();
  5648. Type *ValTy = getLoadStoreType(I);
  5649. auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  5650. unsigned AS = getLoadStoreAddressSpace(I);
  5651. enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  5652. auto Group = getInterleavedAccessGroup(I);
  5653. assert(Group && "Fail to get an interleaved access group.");
  5654. unsigned InterleaveFactor = Group->getFactor();
  5655. auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
  5656. // Holds the indices of existing members in the interleaved group.
  5657. SmallVector<unsigned, 4> Indices;
  5658. for (unsigned IF = 0; IF < InterleaveFactor; IF++)
  5659. if (Group->getMember(IF))
  5660. Indices.push_back(IF);
  5661. // Calculate the cost of the whole interleaved group.
  5662. bool UseMaskForGaps =
  5663. (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
  5664. (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
  5665. InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
  5666. I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
  5667. AS, CostKind, Legal->isMaskRequired(I), UseMaskForGaps);
  5668. if (Group->isReverse()) {
  5669. // TODO: Add support for reversed masked interleaved access.
  5670. assert(!Legal->isMaskRequired(I) &&
  5671. "Reverse masked interleaved access not supported.");
  5672. Cost += Group->getNumMembers() *
  5673. TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
  5674. std::nullopt, CostKind, 0);
  5675. }
  5676. return Cost;
  5677. }
  5678. std::optional<InstructionCost>
  5679. LoopVectorizationCostModel::getReductionPatternCost(
  5680. Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  5681. using namespace llvm::PatternMatch;
5682. // Early exit if there are no in-loop reductions.
  5683. if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
  5684. return std::nullopt;
  5685. auto *VectorTy = cast<VectorType>(Ty);
  5686. // We are looking for a pattern of, and finding the minimal acceptable cost:
  5687. // reduce(mul(ext(A), ext(B))) or
  5688. // reduce(mul(A, B)) or
  5689. // reduce(ext(A)) or
  5690. // reduce(A).
  5691. // The basic idea is that we walk down the tree to do that, finding the root
  5692. // reduction instruction in InLoopReductionImmediateChains. From there we find
  5693. // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5694. // of the components. If the reduction cost is lower, we return it for the
5695. // reduction instruction and 0 for the other instructions in the pattern.
5696. // Otherwise we return an invalid cost, specifying that the original cost
5697. // model should be used.
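// For example, for reduce.add(mul(sext(A), sext(B))) the cost returned by
// TTI::getMulAccReductionCost is compared against the summed costs of the
// individual ext, mul and reduction instructions, and the cheaper alternative
// is used.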
  5698. Instruction *RetI = I;
  5699. if (match(RetI, m_ZExtOrSExt(m_Value()))) {
  5700. if (!RetI->hasOneUser())
  5701. return std::nullopt;
  5702. RetI = RetI->user_back();
  5703. }
  5704. if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
  5705. RetI->user_back()->getOpcode() == Instruction::Add) {
  5706. RetI = RetI->user_back();
  5707. }
  5708. // Test if the found instruction is a reduction, and if not return an invalid
  5709. // cost specifying the parent to use the original cost modelling.
  5710. if (!InLoopReductionImmediateChains.count(RetI))
  5711. return std::nullopt;
  5712. // Find the reduction this chain is a part of and calculate the basic cost of
  5713. // the reduction on its own.
  5714. Instruction *LastChain = InLoopReductionImmediateChains[RetI];
  5715. Instruction *ReductionPhi = LastChain;
  5716. while (!isa<PHINode>(ReductionPhi))
  5717. ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
  5718. const RecurrenceDescriptor &RdxDesc =
  5719. Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
  5720. InstructionCost BaseCost = TTI.getArithmeticReductionCost(
  5721. RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
  5722. // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
  5723. // normal fmul instruction to the cost of the fadd reduction.
  5724. if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
  5725. BaseCost +=
  5726. TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
  5727. // If we're using ordered reductions then we can just return the base cost
  5728. // here, since getArithmeticReductionCost calculates the full ordered
  5729. // reduction cost when FP reassociation is not allowed.
  5730. if (useOrderedReductions(RdxDesc))
  5731. return BaseCost;
  5732. // Get the operand that was not the reduction chain and match it to one of the
  5733. // patterns, returning the better cost if it is found.
  5734. Instruction *RedOp = RetI->getOperand(1) == LastChain
  5735. ? dyn_cast<Instruction>(RetI->getOperand(0))
  5736. : dyn_cast<Instruction>(RetI->getOperand(1));
  5737. VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
  5738. Instruction *Op0, *Op1;
  5739. if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
  5740. match(RedOp,
  5741. m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
  5742. match(Op0, m_ZExtOrSExt(m_Value())) &&
  5743. Op0->getOpcode() == Op1->getOpcode() &&
  5744. Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
  5745. !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
  5746. (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5747. // Matched reduce.add(ext(mul(ext(A), ext(B))))
  5748. // Note that the extend opcodes need to all match, or if A==B they will have
  5749. // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
  5750. // which is equally fine.
  5751. bool IsUnsigned = isa<ZExtInst>(Op0);
  5752. auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
  5753. auto *MulType = VectorType::get(Op0->getType(), VectorTy);
  5754. InstructionCost ExtCost =
  5755. TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
  5756. TTI::CastContextHint::None, CostKind, Op0);
  5757. InstructionCost MulCost =
  5758. TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
  5759. InstructionCost Ext2Cost =
  5760. TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
  5761. TTI::CastContextHint::None, CostKind, RedOp);
  5762. InstructionCost RedCost = TTI.getMulAccReductionCost(
  5763. IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, CostKind);
  5764. if (RedCost.isValid() &&
  5765. RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
  5766. return I == RetI ? RedCost : 0;
  5767. } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
  5768. !TheLoop->isLoopInvariant(RedOp)) {
  5769. // Matched reduce(ext(A))
  5770. bool IsUnsigned = isa<ZExtInst>(RedOp);
  5771. auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
  5772. InstructionCost RedCost = TTI.getExtendedReductionCost(
  5773. RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
  5774. RdxDesc.getFastMathFlags(), CostKind);
  5775. InstructionCost ExtCost =
  5776. TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
  5777. TTI::CastContextHint::None, CostKind, RedOp);
  5778. if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
  5779. return I == RetI ? RedCost : 0;
  5780. } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
  5781. match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
  5782. if (match(Op0, m_ZExtOrSExt(m_Value())) &&
  5783. Op0->getOpcode() == Op1->getOpcode() &&
  5784. !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
  5785. bool IsUnsigned = isa<ZExtInst>(Op0);
  5786. Type *Op0Ty = Op0->getOperand(0)->getType();
  5787. Type *Op1Ty = Op1->getOperand(0)->getType();
  5788. Type *LargestOpTy =
  5789. Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
  5790. : Op0Ty;
  5791. auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5792. // Matched reduce.add(mul(ext(A), ext(B))), where the two exts may be of
5793. // different sizes. We take the largest type as the ext to reduce, and add
5794. // the remaining cost as, for example, reduce.add(mul(ext(ext(A)), ext(B))).
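// For illustration only (types assumed): with A extended from <8 x i8> and B
// from <8 x i16>, both multiplied in <8 x i32>, ExtType is <8 x i16> and the
// narrower operand A is charged an extra ext from <8 x i8> to <8 x i16>.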
  5795. InstructionCost ExtCost0 = TTI.getCastInstrCost(
  5796. Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
  5797. TTI::CastContextHint::None, CostKind, Op0);
  5798. InstructionCost ExtCost1 = TTI.getCastInstrCost(
  5799. Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
  5800. TTI::CastContextHint::None, CostKind, Op1);
  5801. InstructionCost MulCost =
  5802. TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  5803. InstructionCost RedCost = TTI.getMulAccReductionCost(
  5804. IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, CostKind);
  5805. InstructionCost ExtraExtCost = 0;
  5806. if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
  5807. Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
  5808. ExtraExtCost = TTI.getCastInstrCost(
  5809. ExtraExtOp->getOpcode(), ExtType,
  5810. VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
  5811. TTI::CastContextHint::None, CostKind, ExtraExtOp);
  5812. }
  5813. if (RedCost.isValid() &&
  5814. (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
  5815. return I == RetI ? RedCost : 0;
  5816. } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
  5817. // Matched reduce.add(mul())
  5818. InstructionCost MulCost =
  5819. TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  5820. InstructionCost RedCost = TTI.getMulAccReductionCost(
  5821. true, RdxDesc.getRecurrenceType(), VectorTy, CostKind);
  5822. if (RedCost.isValid() && RedCost < MulCost + BaseCost)
  5823. return I == RetI ? RedCost : 0;
  5824. }
  5825. }
  5826. return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
  5827. }
  5828. InstructionCost
  5829. LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
  5830. ElementCount VF) {
5831. // Calculate the scalar cost only. The vectorization cost should already be
5832. // computed at this point.
  5833. if (VF.isScalar()) {
  5834. Type *ValTy = getLoadStoreType(I);
  5835. const Align Alignment = getLoadStoreAlignment(I);
  5836. unsigned AS = getLoadStoreAddressSpace(I);
  5837. TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
  5838. return TTI.getAddressComputationCost(ValTy) +
  5839. TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
  5840. TTI::TCK_RecipThroughput, OpInfo, I);
  5841. }
  5842. return getWideningCost(I, VF);
  5843. }
  5844. LoopVectorizationCostModel::VectorizationCostTy
  5845. LoopVectorizationCostModel::getInstructionCost(Instruction *I,
  5846. ElementCount VF) {
  5847. // If we know that this instruction will remain uniform, check the cost of
  5848. // the scalar version.
  5849. if (isUniformAfterVectorization(I, VF))
  5850. VF = ElementCount::getFixed(1);
  5851. if (VF.isVector() && isProfitableToScalarize(I, VF))
  5852. return VectorizationCostTy(InstsToScalarize[VF][I], false);
  5853. // Forced scalars do not have any scalarization overhead.
  5854. auto ForcedScalar = ForcedScalars.find(VF);
  5855. if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
  5856. auto InstSet = ForcedScalar->second;
  5857. if (InstSet.count(I))
  5858. return VectorizationCostTy(
  5859. (getInstructionCost(I, ElementCount::getFixed(1)).first *
  5860. VF.getKnownMinValue()),
  5861. false);
  5862. }
  5863. Type *VectorTy;
  5864. InstructionCost C = getInstructionCost(I, VF, VectorTy);
  5865. bool TypeNotScalarized = false;
  5866. if (VF.isVector() && VectorTy->isVectorTy()) {
  5867. if (unsigned NumParts = TTI.getNumberOfParts(VectorTy)) {
  5868. if (VF.isScalable())
  5869. // <vscale x 1 x iN> is assumed to be profitable over iN because
  5870. // scalable registers are a distinct register class from scalar ones.
  5871. // If we ever find a target which wants to lower scalable vectors
  5872. // back to scalars, we'll need to update this code to explicitly
  5873. // ask TTI about the register class uses for each part.
  5874. TypeNotScalarized = NumParts <= VF.getKnownMinValue();
  5875. else
  5876. TypeNotScalarized = NumParts < VF.getKnownMinValue();
  5877. } else
  5878. C = InstructionCost::getInvalid();
  5879. }
  5880. return VectorizationCostTy(C, TypeNotScalarized);
  5881. }
  5882. InstructionCost LoopVectorizationCostModel::getScalarizationOverhead(
  5883. Instruction *I, ElementCount VF, TTI::TargetCostKind CostKind) const {
  5884. // There is no mechanism yet to create a scalable scalarization loop,
  5885. // so this is currently Invalid.
  5886. if (VF.isScalable())
  5887. return InstructionCost::getInvalid();
  5888. if (VF.isScalar())
  5889. return 0;
  5890. InstructionCost Cost = 0;
  5891. Type *RetTy = ToVectorTy(I->getType(), VF);
  5892. if (!RetTy->isVoidTy() &&
  5893. (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
  5894. Cost += TTI.getScalarizationOverhead(
  5895. cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()),
  5896. /*Insert*/ true,
  5897. /*Extract*/ false, CostKind);
  5898. // Some targets keep addresses scalar.
  5899. if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
  5900. return Cost;
  5901. // Some targets support efficient element stores.
  5902. if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
  5903. return Cost;
  5904. // Collect operands to consider.
  5905. CallInst *CI = dyn_cast<CallInst>(I);
  5906. Instruction::op_range Ops = CI ? CI->args() : I->operands();
  5907. // Skip operands that do not require extraction/scalarization and do not incur
  5908. // any overhead.
  5909. SmallVector<Type *> Tys;
  5910. for (auto *V : filterExtractingOperands(Ops, VF))
  5911. Tys.push_back(MaybeVectorizeType(V->getType(), VF));
  5912. return Cost + TTI.getOperandsScalarizationOverhead(
  5913. filterExtractingOperands(Ops, VF), Tys, CostKind);
  5914. }
  5915. void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  5916. if (VF.isScalar())
  5917. return;
  5918. NumPredStores = 0;
  5919. for (BasicBlock *BB : TheLoop->blocks()) {
  5920. // For each instruction in the old loop.
  5921. for (Instruction &I : *BB) {
  5922. Value *Ptr = getLoadStorePointerOperand(&I);
  5923. if (!Ptr)
  5924. continue;
  5925. // TODO: We should generate better code and update the cost model for
  5926. // predicated uniform stores. Today they are treated as any other
  5927. // predicated store (see added test cases in
  5928. // invariant-store-vectorization.ll).
  5929. if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
  5930. NumPredStores++;
  5931. if (Legal->isUniformMemOp(I)) {
  5932. auto isLegalToScalarize = [&]() {
  5933. if (!VF.isScalable())
  5934. // Scalarization of fixed length vectors "just works".
  5935. return true;
  5936. // We have dedicated lowering for unpredicated uniform loads and
  5937. // stores. Note that even with tail folding we know that at least
  5938. // one lane is active (i.e. generalized predication is not possible
  5939. // here), and the logic below depends on this fact.
  5940. if (!foldTailByMasking())
  5941. return true;
  5942. // For scalable vectors, a uniform memop load is always
  5943. // uniform-by-parts and we know how to scalarize that.
  5944. if (isa<LoadInst>(I))
  5945. return true;
5946. // A uniform store isn't necessarily uniform-by-parts
  5947. // and we can't assume scalarization.
  5948. auto &SI = cast<StoreInst>(I);
  5949. return TheLoop->isLoopInvariant(SI.getValueOperand());
  5950. };
  5951. const InstructionCost GatherScatterCost =
  5952. isLegalGatherOrScatter(&I, VF) ?
  5953. getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
  5954. // Load: Scalar load + broadcast
  5955. // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
  5956. // FIXME: This cost is a significant under-estimate for tail folded
  5957. // memory ops.
  5958. const InstructionCost ScalarizationCost = isLegalToScalarize() ?
  5959. getUniformMemOpCost(&I, VF) : InstructionCost::getInvalid();
5960. // Choose the better solution for the current VF. Note that Invalid
5961. // costs compare as maximally large. If both are invalid, the result is
5962. // an invalid cost, which signals a failure and a vectorization abort.
  5963. if (GatherScatterCost < ScalarizationCost)
  5964. setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
  5965. else
  5966. setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
  5967. continue;
  5968. }
  5969. // We assume that widening is the best solution when possible.
  5970. if (memoryInstructionCanBeWidened(&I, VF)) {
  5971. InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
  5972. int ConsecutiveStride = Legal->isConsecutivePtr(
  5973. getLoadStoreType(&I), getLoadStorePointerOperand(&I));
  5974. assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
  5975. "Expected consecutive stride.");
  5976. InstWidening Decision =
  5977. ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
  5978. setWideningDecision(&I, VF, Decision, Cost);
  5979. continue;
  5980. }
  5981. // Choose between Interleaving, Gather/Scatter or Scalarization.
  5982. InstructionCost InterleaveCost = InstructionCost::getInvalid();
  5983. unsigned NumAccesses = 1;
  5984. if (isAccessInterleaved(&I)) {
  5985. auto Group = getInterleavedAccessGroup(&I);
5986. assert(Group && "Failed to get an interleaved access group.");
  5987. // Make one decision for the whole group.
  5988. if (getWideningDecision(&I, VF) != CM_Unknown)
  5989. continue;
  5990. NumAccesses = Group->getNumMembers();
  5991. if (interleavedAccessCanBeWidened(&I, VF))
  5992. InterleaveCost = getInterleaveGroupCost(&I, VF);
  5993. }
  5994. InstructionCost GatherScatterCost =
  5995. isLegalGatherOrScatter(&I, VF)
  5996. ? getGatherScatterCost(&I, VF) * NumAccesses
  5997. : InstructionCost::getInvalid();
  5998. InstructionCost ScalarizationCost =
  5999. getMemInstScalarizationCost(&I, VF) * NumAccesses;
6000. // Choose the better solution for the current VF,
6001. // record this decision, and use it during vectorization.
  6002. InstructionCost Cost;
  6003. InstWidening Decision;
  6004. if (InterleaveCost <= GatherScatterCost &&
  6005. InterleaveCost < ScalarizationCost) {
  6006. Decision = CM_Interleave;
  6007. Cost = InterleaveCost;
  6008. } else if (GatherScatterCost < ScalarizationCost) {
  6009. Decision = CM_GatherScatter;
  6010. Cost = GatherScatterCost;
  6011. } else {
  6012. Decision = CM_Scalarize;
  6013. Cost = ScalarizationCost;
  6014. }
6015. // If the instruction belongs to an interleave group, the whole group
6016. // receives the same decision. The cost is computed for the whole group,
6017. // but it will actually be assigned to a single member instruction.
  6018. if (auto Group = getInterleavedAccessGroup(&I))
  6019. setWideningDecision(Group, VF, Decision, Cost);
  6020. else
  6021. setWideningDecision(&I, VF, Decision, Cost);
  6022. }
  6023. }
6024. // Make sure that any load of an address and any other address computation
  6025. // remains scalar unless there is gather/scatter support. This avoids
  6026. // inevitable extracts into address registers, and also has the benefit of
  6027. // activating LSR more, since that pass can't optimize vectorized
  6028. // addresses.
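// For example, a loaded value that is only used to compute the address of a
// subsequent load or store is kept scalar here, so the address can live in a
// scalar register rather than requiring a per-lane extract from a vector.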
  6029. if (TTI.prefersVectorizedAddressing())
  6030. return;
  6031. // Start with all scalar pointer uses.
  6032. SmallPtrSet<Instruction *, 8> AddrDefs;
  6033. for (BasicBlock *BB : TheLoop->blocks())
  6034. for (Instruction &I : *BB) {
  6035. Instruction *PtrDef =
  6036. dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
  6037. if (PtrDef && TheLoop->contains(PtrDef) &&
  6038. getWideningDecision(&I, VF) != CM_GatherScatter)
  6039. AddrDefs.insert(PtrDef);
  6040. }
  6041. // Add all instructions used to generate the addresses.
  6042. SmallVector<Instruction *, 4> Worklist;
  6043. append_range(Worklist, AddrDefs);
  6044. while (!Worklist.empty()) {
  6045. Instruction *I = Worklist.pop_back_val();
  6046. for (auto &Op : I->operands())
  6047. if (auto *InstOp = dyn_cast<Instruction>(Op))
  6048. if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
  6049. AddrDefs.insert(InstOp).second)
  6050. Worklist.push_back(InstOp);
  6051. }
  6052. for (auto *I : AddrDefs) {
  6053. if (isa<LoadInst>(I)) {
6054. // Setting the desired widening decision should ideally be handled by
6055. // cost functions, but since this involves the task of finding out
  6056. // if the loaded register is involved in an address computation, it is
  6057. // instead changed here when we know this is the case.
  6058. InstWidening Decision = getWideningDecision(I, VF);
  6059. if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
  6060. // Scalarize a widened load of address.
  6061. setWideningDecision(
  6062. I, VF, CM_Scalarize,
  6063. (VF.getKnownMinValue() *
  6064. getMemoryInstructionCost(I, ElementCount::getFixed(1))));
  6065. else if (auto Group = getInterleavedAccessGroup(I)) {
  6066. // Scalarize an interleave group of address loads.
  6067. for (unsigned I = 0; I < Group->getFactor(); ++I) {
  6068. if (Instruction *Member = Group->getMember(I))
  6069. setWideningDecision(
  6070. Member, VF, CM_Scalarize,
  6071. (VF.getKnownMinValue() *
  6072. getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
  6073. }
  6074. }
  6075. } else
6076. // Make sure I gets scalarized and receives a cost estimate without
  6077. // scalarization overhead.
  6078. ForcedScalars[VF].insert(I);
  6079. }
  6080. }
  6081. InstructionCost
  6082. LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
  6083. Type *&VectorTy) {
  6084. Type *RetTy = I->getType();
  6085. if (canTruncateToMinimalBitwidth(I, VF))
  6086. RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  6087. auto SE = PSE.getSE();
  6088. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  6089. auto hasSingleCopyAfterVectorization = [this](Instruction *I,
  6090. ElementCount VF) -> bool {
  6091. if (VF.isScalar())
  6092. return true;
  6093. auto Scalarized = InstsToScalarize.find(VF);
  6094. assert(Scalarized != InstsToScalarize.end() &&
  6095. "VF not yet analyzed for scalarization profitability");
  6096. return !Scalarized->second.count(I) &&
  6097. llvm::all_of(I->users(), [&](User *U) {
  6098. auto *UI = cast<Instruction>(U);
  6099. return !Scalarized->second.count(UI);
  6100. });
  6101. };
  6102. (void) hasSingleCopyAfterVectorization;
  6103. if (isScalarAfterVectorization(I, VF)) {
  6104. // With the exception of GEPs and PHIs, after scalarization there should
  6105. // only be one copy of the instruction generated in the loop. This is
  6106. // because the VF is either 1, or any instructions that need scalarizing
6107. // have already been dealt with by the time we get here. As a result,
  6108. // it means we don't have to multiply the instruction cost by VF.
  6109. assert(I->getOpcode() == Instruction::GetElementPtr ||
  6110. I->getOpcode() == Instruction::PHI ||
  6111. (I->getOpcode() == Instruction::BitCast &&
  6112. I->getType()->isPointerTy()) ||
  6113. hasSingleCopyAfterVectorization(I, VF));
  6114. VectorTy = RetTy;
  6115. } else
  6116. VectorTy = ToVectorTy(RetTy, VF);
  6117. // TODO: We need to estimate the cost of intrinsic calls.
  6118. switch (I->getOpcode()) {
  6119. case Instruction::GetElementPtr:
  6120. // We mark this instruction as zero-cost because the cost of GEPs in
  6121. // vectorized code depends on whether the corresponding memory instruction
  6122. // is scalarized or not. Therefore, we handle GEPs with the memory
  6123. // instruction cost.
  6124. return 0;
  6125. case Instruction::Br: {
  6126. // In cases of scalarized and predicated instructions, there will be VF
  6127. // predicated blocks in the vectorized loop. Each branch around these
6128. // blocks also requires an extract of its vector compare i1 element.
  6129. bool ScalarPredicatedBB = false;
  6130. BranchInst *BI = cast<BranchInst>(I);
  6131. if (VF.isVector() && BI->isConditional() &&
  6132. (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
  6133. PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))))
  6134. ScalarPredicatedBB = true;
  6135. if (ScalarPredicatedBB) {
6136. // Not possible to scalarize a scalable vector with predicated instructions.
  6137. if (VF.isScalable())
  6138. return InstructionCost::getInvalid();
  6139. // Return cost for branches around scalarized and predicated blocks.
  6140. auto *Vec_i1Ty =
  6141. VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
  6142. return (
  6143. TTI.getScalarizationOverhead(
  6144. Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()),
  6145. /*Insert*/ false, /*Extract*/ true, CostKind) +
  6146. (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
  6147. } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
  6148. // The back-edge branch will remain, as will all scalar branches.
  6149. return TTI.getCFInstrCost(Instruction::Br, CostKind);
  6150. else
  6151. // This branch will be eliminated by if-conversion.
  6152. return 0;
  6153. // Note: We currently assume zero cost for an unconditional branch inside
  6154. // a predicated block since it will become a fall-through, although we
  6155. // may decide in the future to call TTI for all branches.
  6156. }
  6157. case Instruction::PHI: {
  6158. auto *Phi = cast<PHINode>(I);
  6159. // First-order recurrences are replaced by vector shuffles inside the loop.
  6160. if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
  6161. SmallVector<int> Mask(VF.getKnownMinValue());
  6162. std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
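// For example, with VF=4 the splice mask is <3, 4, 5, 6>: the last lane of
// the vector from the previous iteration followed by the first three lanes
// of the current one.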
  6163. return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
  6164. cast<VectorType>(VectorTy), Mask, CostKind,
  6165. VF.getKnownMinValue() - 1);
  6166. }
  6167. // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
  6168. // converted into select instructions. We require N - 1 selects per phi
  6169. // node, where N is the number of incoming values.
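// For example, a phi with three incoming values is costed as two vector
// selects, each select folding in one more incoming value.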
  6170. if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
  6171. return (Phi->getNumIncomingValues() - 1) *
  6172. TTI.getCmpSelInstrCost(
  6173. Instruction::Select, ToVectorTy(Phi->getType(), VF),
  6174. ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
  6175. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  6176. return TTI.getCFInstrCost(Instruction::PHI, CostKind);
  6177. }
  6178. case Instruction::UDiv:
  6179. case Instruction::SDiv:
  6180. case Instruction::URem:
  6181. case Instruction::SRem:
  6182. if (VF.isVector() && isPredicatedInst(I)) {
  6183. const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
  6184. return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
  6185. ScalarCost : SafeDivisorCost;
  6186. }
  6187. // We've proven all lanes safe to speculate, fall through.
  6188. [[fallthrough]];
  6189. case Instruction::Add:
  6190. case Instruction::FAdd:
  6191. case Instruction::Sub:
  6192. case Instruction::FSub:
  6193. case Instruction::Mul:
  6194. case Instruction::FMul:
  6195. case Instruction::FDiv:
  6196. case Instruction::FRem:
  6197. case Instruction::Shl:
  6198. case Instruction::LShr:
  6199. case Instruction::AShr:
  6200. case Instruction::And:
  6201. case Instruction::Or:
  6202. case Instruction::Xor: {
  6203. // Since we will replace the stride by 1 the multiplication should go away.
  6204. if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
  6205. return 0;
  6206. // Detect reduction patterns
  6207. if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
  6208. return *RedCost;
  6209. // Certain instructions can be cheaper to vectorize if they have a constant
  6210. // second vector operand. One example of this are shifts on x86.
  6211. Value *Op2 = I->getOperand(1);
  6212. auto Op2Info = TTI.getOperandInfo(Op2);
  6213. if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
  6214. Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
  6215. SmallVector<const Value *, 4> Operands(I->operand_values());
  6216. return TTI.getArithmeticInstrCost(
  6217. I->getOpcode(), VectorTy, CostKind,
  6218. {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  6219. Op2Info, Operands, I);
  6220. }
  6221. case Instruction::FNeg: {
  6222. return TTI.getArithmeticInstrCost(
  6223. I->getOpcode(), VectorTy, CostKind,
  6224. {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  6225. {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
  6226. I->getOperand(0), I);
  6227. }
  6228. case Instruction::Select: {
  6229. SelectInst *SI = cast<SelectInst>(I);
  6230. const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
  6231. bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
  6232. const Value *Op0, *Op1;
  6233. using namespace llvm::PatternMatch;
  6234. if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
  6235. match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
  6236. // select x, y, false --> x & y
  6237. // select x, true, y --> x | y
  6238. const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
  6239. const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
  6240. assert(Op0->getType()->getScalarSizeInBits() == 1 &&
  6241. Op1->getType()->getScalarSizeInBits() == 1);
  6242. SmallVector<const Value *, 2> Operands{Op0, Op1};
  6243. return TTI.getArithmeticInstrCost(
  6244. match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
  6245. CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, Operands, I);
  6246. }
  6247. Type *CondTy = SI->getCondition()->getType();
  6248. if (!ScalarCond)
  6249. CondTy = VectorType::get(CondTy, VF);
  6250. CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  6251. if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
  6252. Pred = Cmp->getPredicate();
  6253. return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
  6254. CostKind, I);
  6255. }
  6256. case Instruction::ICmp:
  6257. case Instruction::FCmp: {
  6258. Type *ValTy = I->getOperand(0)->getType();
  6259. Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
  6260. if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
  6261. ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
  6262. VectorTy = ToVectorTy(ValTy, VF);
  6263. return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
  6264. cast<CmpInst>(I)->getPredicate(), CostKind,
  6265. I);
  6266. }
  6267. case Instruction::Store:
  6268. case Instruction::Load: {
  6269. ElementCount Width = VF;
  6270. if (Width.isVector()) {
  6271. InstWidening Decision = getWideningDecision(I, Width);
  6272. assert(Decision != CM_Unknown &&
  6273. "CM decision should be taken at this point");
  6274. if (getWideningCost(I, VF) == InstructionCost::getInvalid())
  6275. return InstructionCost::getInvalid();
  6276. if (Decision == CM_Scalarize)
  6277. Width = ElementCount::getFixed(1);
  6278. }
  6279. VectorTy = ToVectorTy(getLoadStoreType(I), Width);
  6280. return getMemoryInstructionCost(I, VF);
  6281. }
  6282. case Instruction::BitCast:
  6283. if (I->getType()->isPointerTy())
  6284. return 0;
  6285. [[fallthrough]];
  6286. case Instruction::ZExt:
  6287. case Instruction::SExt:
  6288. case Instruction::FPToUI:
  6289. case Instruction::FPToSI:
  6290. case Instruction::FPExt:
  6291. case Instruction::PtrToInt:
  6292. case Instruction::IntToPtr:
  6293. case Instruction::SIToFP:
  6294. case Instruction::UIToFP:
  6295. case Instruction::Trunc:
  6296. case Instruction::FPTrunc: {
  6297. // Computes the CastContextHint from a Load/Store instruction.
  6298. auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
  6299. assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
  6300. "Expected a load or a store!");
  6301. if (VF.isScalar() || !TheLoop->contains(I))
  6302. return TTI::CastContextHint::Normal;
  6303. switch (getWideningDecision(I, VF)) {
  6304. case LoopVectorizationCostModel::CM_GatherScatter:
  6305. return TTI::CastContextHint::GatherScatter;
  6306. case LoopVectorizationCostModel::CM_Interleave:
  6307. return TTI::CastContextHint::Interleave;
  6308. case LoopVectorizationCostModel::CM_Scalarize:
  6309. case LoopVectorizationCostModel::CM_Widen:
  6310. return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
  6311. : TTI::CastContextHint::Normal;
  6312. case LoopVectorizationCostModel::CM_Widen_Reverse:
  6313. return TTI::CastContextHint::Reversed;
  6314. case LoopVectorizationCostModel::CM_Unknown:
  6315. llvm_unreachable("Instr did not go through cost modelling?");
  6316. }
  6317. llvm_unreachable("Unhandled case!");
  6318. };
  6319. unsigned Opcode = I->getOpcode();
  6320. TTI::CastContextHint CCH = TTI::CastContextHint::None;
  6321. // For Trunc, the context is the only user, which must be a StoreInst.
  6322. if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
  6323. if (I->hasOneUse())
  6324. if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
  6325. CCH = ComputeCCH(Store);
  6326. }
  6327. // For Z/Sext, the context is the operand, which must be a LoadInst.
  6328. else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
  6329. Opcode == Instruction::FPExt) {
  6330. if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
  6331. CCH = ComputeCCH(Load);
  6332. }
  6333. // We optimize the truncation of induction variables having constant
  6334. // integer steps. The cost of these truncations is the same as the scalar
  6335. // operation.
  6336. if (isOptimizableIVTruncate(I, VF)) {
  6337. auto *Trunc = cast<TruncInst>(I);
  6338. return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
  6339. Trunc->getSrcTy(), CCH, CostKind, Trunc);
  6340. }
  6341. // Detect reduction patterns
  6342. if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
  6343. return *RedCost;
  6344. Type *SrcScalarTy = I->getOperand(0)->getType();
  6345. Type *SrcVecTy =
  6346. VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
  6347. if (canTruncateToMinimalBitwidth(I, VF)) {
  6348. // This cast is going to be shrunk. This may remove the cast or it might
6349. // turn it into a slightly different cast. For example, if MinBW == 16,
  6350. // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
  6351. //
  6352. // Calculate the modified src and dest types.
  6353. Type *MinVecTy = VectorTy;
  6354. if (Opcode == Instruction::Trunc) {
  6355. SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
  6356. VectorTy =
  6357. largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
  6358. } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
  6359. SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
  6360. VectorTy =
  6361. smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
  6362. }
  6363. }
  6364. return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
  6365. }
  6366. case Instruction::Call: {
  6367. if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
  6368. if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
  6369. return *RedCost;
  6370. bool NeedToScalarize;
  6371. CallInst *CI = cast<CallInst>(I);
  6372. InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
  6373. if (getVectorIntrinsicIDForCall(CI, TLI)) {
  6374. InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
  6375. return std::min(CallCost, IntrinsicCost);
  6376. }
  6377. return CallCost;
  6378. }
  6379. case Instruction::ExtractValue:
  6380. return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
  6381. case Instruction::Alloca:
  6382. // We cannot easily widen alloca to a scalable alloca, as
  6383. // the result would need to be a vector of pointers.
  6384. if (VF.isScalable())
  6385. return InstructionCost::getInvalid();
  6386. [[fallthrough]];
  6387. default:
  6388. // This opcode is unknown. Assume that it is the same as 'mul'.
  6389. return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  6390. } // end of switch.
  6391. }
  6392. char LoopVectorize::ID = 0;
  6393. static const char lv_name[] = "Loop Vectorization";
  6394. INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
  6395. INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
  6396. INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
  6397. INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
  6398. INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
  6399. INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
  6400. INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
  6401. INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
  6402. INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
  6403. INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
  6404. INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
  6405. INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
  6406. INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
  6407. INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
  6408. INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
  6409. namespace llvm {
  6410. Pass *createLoopVectorizePass() { return new LoopVectorize(); }
  6411. Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
  6412. bool VectorizeOnlyWhenForced) {
  6413. return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
  6414. }
  6415. } // end namespace llvm
  6416. void LoopVectorizationCostModel::collectValuesToIgnore() {
  6417. // Ignore ephemeral values.
  6418. CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
  6419. // Find all stores to invariant variables. Since they are going to sink
6420. // outside the loop, we do not need to calculate their cost.
  6421. for (BasicBlock *BB : TheLoop->blocks())
  6422. for (Instruction &I : *BB) {
  6423. StoreInst *SI;
  6424. if ((SI = dyn_cast<StoreInst>(&I)) &&
  6425. Legal->isInvariantAddressOfReduction(SI->getPointerOperand()))
  6426. ValuesToIgnore.insert(&I);
  6427. }
  6428. // Ignore type-promoting instructions we identified during reduction
  6429. // detection.
  6430. for (const auto &Reduction : Legal->getReductionVars()) {
  6431. const RecurrenceDescriptor &RedDes = Reduction.second;
  6432. const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
  6433. VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  6434. }
  6435. // Ignore type-casting instructions we identified during induction
  6436. // detection.
  6437. for (const auto &Induction : Legal->getInductionVars()) {
  6438. const InductionDescriptor &IndDes = Induction.second;
  6439. const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
  6440. VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  6441. }
  6442. }
  6443. void LoopVectorizationCostModel::collectInLoopReductions() {
  6444. for (const auto &Reduction : Legal->getReductionVars()) {
  6445. PHINode *Phi = Reduction.first;
  6446. const RecurrenceDescriptor &RdxDesc = Reduction.second;
  6447. // We don't collect reductions that are type promoted (yet).
  6448. if (RdxDesc.getRecurrenceType() != Phi->getType())
  6449. continue;
  6450. // If the target would prefer this reduction to happen "in-loop", then we
  6451. // want to record it as such.
  6452. unsigned Opcode = RdxDesc.getOpcode();
  6453. if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
  6454. !TTI.preferInLoopReduction(Opcode, Phi->getType(),
  6455. TargetTransformInfo::ReductionFlags()))
  6456. continue;
  6457. // Check that we can correctly put the reductions into the loop, by
  6458. // finding the chain of operations that leads from the phi to the loop
  6459. // exit value.
  6460. SmallVector<Instruction *, 4> ReductionOperations =
  6461. RdxDesc.getReductionOpChain(Phi, TheLoop);
  6462. bool InLoop = !ReductionOperations.empty();
  6463. if (InLoop) {
  6464. InLoopReductionChains[Phi] = ReductionOperations;
  6465. // Add the elements to InLoopReductionImmediateChains for cost modelling.
  6466. Instruction *LastChain = Phi;
  6467. for (auto *I : ReductionOperations) {
  6468. InLoopReductionImmediateChains[I] = LastChain;
  6469. LastChain = I;
  6470. }
  6471. }
  6472. LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
  6473. << " reduction for phi: " << *Phi << "\n");
  6474. }
  6475. }
  6476. // TODO: we could return a pair of values that specify the max VF and
  6477. // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6478. // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
  6479. // doesn't have a cost model that can choose which plan to execute if
  6480. // more than one is generated.
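// For example, with 256-bit wide vector registers and a widest scalar type of
// 32 bits in the loop, the returned VPlan VF is 256 / 32 = 8.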
  6481. static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
  6482. LoopVectorizationCostModel &CM) {
  6483. unsigned WidestType;
  6484. std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  6485. return WidestVectorRegBits / WidestType;
  6486. }
  6487. VectorizationFactor
  6488. LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  6489. assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  6490. ElementCount VF = UserVF;
  6491. // Outer loop handling: They may require CFG and instruction level
  6492. // transformations before even evaluating whether vectorization is profitable.
  6493. // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  6494. // the vectorization pipeline.
  6495. if (!OrigLoop->isInnermost()) {
  6496. // If the user doesn't provide a vectorization factor, determine a
  6497. // reasonable one.
  6498. if (UserVF.isZero()) {
  6499. VF = ElementCount::getFixed(determineVPlanVF(
  6500. TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
  6501. .getFixedValue(),
  6502. CM));
  6503. LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
  6504. // Make sure we have a VF > 1 for stress testing.
  6505. if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
  6506. LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
  6507. << "overriding computed VF.\n");
  6508. VF = ElementCount::getFixed(4);
  6509. }
  6510. }
  6511. assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
  6512. assert(isPowerOf2_32(VF.getKnownMinValue()) &&
  6513. "VF needs to be a power of two");
  6514. LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
  6515. << "VF " << VF << " to build VPlans.\n");
  6516. buildVPlans(VF, VF);
  6517. // For VPlan build stress testing, we bail out after VPlan construction.
  6518. if (VPlanBuildStressTest)
  6519. return VectorizationFactor::Disabled();
  6520. return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
  6521. }
  6522. LLVM_DEBUG(
  6523. dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
  6524. "VPlan-native path.\n");
  6525. return VectorizationFactor::Disabled();
  6526. }
  6527. std::optional<VectorizationFactor>
  6528. LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  6529. assert(OrigLoop->isInnermost() && "Inner loop expected.");
  6530. FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6531. if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
  6532. return std::nullopt;
6533. // Invalidate interleave groups if all blocks of the loop will be predicated.
  6534. if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
  6535. !useMaskedInterleavedAccesses(*TTI)) {
  6536. LLVM_DEBUG(
  6537. dbgs()
  6538. << "LV: Invalidate all interleaved groups due to fold-tail by masking "
  6539. "which requires masked-interleaved support.\n");
  6540. if (CM.InterleaveInfo.invalidateGroups())
  6541. // Invalidating interleave groups also requires invalidating all decisions
  6542. // based on them, which includes widening decisions and uniform and scalar
  6543. // values.
  6544. CM.invalidateCostModelingDecisions();
  6545. }
  6546. ElementCount MaxUserVF =
  6547. UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
  6548. bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
  6549. if (!UserVF.isZero() && UserVFIsLegal) {
  6550. assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
  6551. "VF needs to be a power of two");
  6552. // Collect the instructions (and their associated costs) that will be more
  6553. // profitable to scalarize.
  6554. if (CM.selectUserVectorizationFactor(UserVF)) {
  6555. LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
  6556. CM.collectInLoopReductions();
  6557. buildVPlansWithVPRecipes(UserVF, UserVF);
  6558. LLVM_DEBUG(printPlans(dbgs()));
  6559. return {{UserVF, 0, 0}};
  6560. } else
  6561. reportVectorizationInfo("UserVF ignored because of invalid costs.",
  6562. "InvalidCost", ORE, OrigLoop);
  6563. }
  6564. // Populate the set of Vectorization Factor Candidates.
  6565. ElementCountSet VFCandidates;
  6566. for (auto VF = ElementCount::getFixed(1);
  6567. ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
  6568. VFCandidates.insert(VF);
  6569. for (auto VF = ElementCount::getScalable(1);
  6570. ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
  6571. VFCandidates.insert(VF);
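// For example, with MaxFactors.FixedVF = 8 and MaxFactors.ScalableVF = 4 the
// candidate set is {1, 2, 4, 8, vscale x 1, vscale x 2, vscale x 4}.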
  6572. for (const auto &VF : VFCandidates) {
  6573. // Collect Uniform and Scalar instructions after vectorization with VF.
  6574. CM.collectUniformsAndScalars(VF);
  6575. // Collect the instructions (and their associated costs) that will be more
  6576. // profitable to scalarize.
  6577. if (VF.isVector())
  6578. CM.collectInstsToScalarize(VF);
  6579. }
  6580. CM.collectInLoopReductions();
  6581. buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
  6582. buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
  6583. LLVM_DEBUG(printPlans(dbgs()));
  6584. if (!MaxFactors.hasVector())
  6585. return VectorizationFactor::Disabled();
  6586. // Select the optimal vectorization factor.
  6587. VectorizationFactor VF = CM.selectVectorizationFactor(VFCandidates);
  6588. assert((VF.Width.isScalar() || VF.ScalarCost > 0) && "when vectorizing, the scalar cost must be non-zero.");
  6589. return VF;
  6590. }
  6591. VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
  6592. assert(count_if(VPlans,
  6593. [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
  6594. 1 &&
  6595. "Best VF has not a single VPlan.");
  6596. for (const VPlanPtr &Plan : VPlans) {
  6597. if (Plan->hasVF(VF))
  6598. return *Plan.get();
  6599. }
  6600. llvm_unreachable("No plan found!");
  6601. }
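// Attach metadata to the loop that disables runtime unrolling of the already
// vectorized loop. The resulting loop ID is roughly of the form (the exact
// operands depend on the loop's existing metadata):
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}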
  6602. static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  6603. SmallVector<Metadata *, 4> MDs;
  6604. // Reserve first location for self reference to the LoopID metadata node.
  6605. MDs.push_back(nullptr);
  6606. bool IsUnrollMetadata = false;
  6607. MDNode *LoopID = L->getLoopID();
  6608. if (LoopID) {
  6609. // First find existing loop unrolling disable metadata.
  6610. for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
  6611. auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
  6612. if (MD) {
  6613. const auto *S = dyn_cast<MDString>(MD->getOperand(0));
  6614. IsUnrollMetadata =
  6615. S && S->getString().startswith("llvm.loop.unroll.disable");
  6616. }
  6617. MDs.push_back(LoopID->getOperand(i));
  6618. }
  6619. }
  6620. if (!IsUnrollMetadata) {
  6621. // Add runtime unroll disable metadata.
  6622. LLVMContext &Context = L->getHeader()->getContext();
  6623. SmallVector<Metadata *, 1> DisableOperands;
  6624. DisableOperands.push_back(
  6625. MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
  6626. MDNode *DisableNode = MDNode::get(Context, DisableOperands);
  6627. MDs.push_back(DisableNode);
  6628. MDNode *NewLoopID = MDNode::get(Context, MDs);
  6629. // Set operand 0 to refer to the loop id itself.
  6630. NewLoopID->replaceOperandWith(0, NewLoopID);
  6631. L->setLoopID(NewLoopID);
  6632. }
  6633. }
  6634. void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
  6635. VPlan &BestVPlan,
  6636. InnerLoopVectorizer &ILV,
  6637. DominatorTree *DT,
  6638. bool IsEpilogueVectorization) {
  6639. assert(BestVPlan.hasVF(BestVF) &&
  6640. "Trying to execute plan with unsupported VF");
  6641. assert(BestVPlan.hasUF(BestUF) &&
  6642. "Trying to execute plan with unsupported UF");
  6643. LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
  6644. << '\n');
  6645. // Workaround! Compute the trip count of the original loop and cache it
  6646. // before we start modifying the CFG. This code has a systemic problem
  6647. // wherein it tries to run analysis over partially constructed IR; this is
  6648. // wrong, and not simply for SCEV. The trip count of the original loop
  6649. // simply happens to be prone to hitting this in practice. In theory, we
  6650. // can hit the same issue for any SCEV, or ValueTracking query done during
  6651. // mutation. See PR49900.
  6652. ILV.getOrCreateTripCount(OrigLoop->getLoopPreheader());
  6653. if (!IsEpilogueVectorization)
  6654. VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
  6655. // Perform the actual loop transformation.
  6656. // 1. Set up the skeleton for vectorization, including vector pre-header and
  6657. // middle block. The vector loop is created during VPlan execution.
  6658. VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
  6659. Value *CanonicalIVStartValue;
  6660. std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
  6661. ILV.createVectorizedLoopSkeleton();
  6662. // Only use noalias metadata when using memory checks guaranteeing no overlap
  6663. // across all iterations.
  6664. const LoopAccessInfo *LAI = ILV.Legal->getLAI();
  6665. if (LAI && !LAI->getRuntimePointerChecking()->getChecks().empty() &&
  6666. !LAI->getRuntimePointerChecking()->getDiffChecks()) {
  6667. // We currently don't use LoopVersioning for the actual loop cloning but we
  6668. // still use it to add the noalias metadata.
  6669. // TODO: Find a better way to re-use LoopVersioning functionality to add
  6670. // metadata.
  6671. State.LVer = std::make_unique<LoopVersioning>(
  6672. *LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, DT,
  6673. PSE.getSE());
  6674. State.LVer->prepareNoAliasMetadata();
  6675. }
  6676. ILV.collectPoisonGeneratingRecipes(State);
  6677. ILV.printDebugTracesAtStart();
  6678. //===------------------------------------------------===//
  6679. //
6680. // Notice: any optimization or new instruction that goes
  6681. // into the code below should also be implemented in
  6682. // the cost-model.
  6683. //
  6684. //===------------------------------------------------===//
  6685. // 2. Copy and widen instructions from the old loop into the new loop.
  6686. BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
  6687. ILV.getOrCreateVectorTripCount(nullptr),
  6688. CanonicalIVStartValue, State,
  6689. IsEpilogueVectorization);
  6690. BestVPlan.execute(&State);
  6693. MDNode *OrigLoopID = OrigLoop->getLoopID();
  6694. std::optional<MDNode *> VectorizedLoopID =
  6695. makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
  6696. LLVMLoopVectorizeFollowupVectorized});
  6697. VPBasicBlock *HeaderVPBB =
  6698. BestVPlan.getVectorLoopRegion()->getEntryBasicBlock();
  6699. Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]);
  6700. if (VectorizedLoopID)
  6701. L->setLoopID(*VectorizedLoopID);
  6702. else {
  6703. // Keep all loop hints from the original loop on the vector loop (we'll
  6704. // replace the vectorizer-specific hints below).
  6705. if (MDNode *LID = OrigLoop->getLoopID())
  6706. L->setLoopID(LID);
  6707. LoopVectorizeHints Hints(L, true, *ORE);
  6708. Hints.setAlreadyVectorized();
  6709. }
  6710. AddRuntimeUnrollDisableMetaData(L);
  6711. // 3. Fix the vectorized code: take care of header phi's, live-outs,
  6712. // predication, updating analyses.
  6713. ILV.fixVectorizedLoop(State, BestVPlan);
  6714. ILV.printDebugTracesAtEnd();
  6715. }
  6716. #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  6717. void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  6718. for (const auto &Plan : VPlans)
  6719. if (PrintVPlansInDotFormat)
  6720. Plan->printDOT(O);
  6721. else
  6722. Plan->print(O);
  6723. }
  6724. #endif
  6725. Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
  6726. //===--------------------------------------------------------------------===//
  6727. // EpilogueVectorizerMainLoop
  6728. //===--------------------------------------------------------------------===//
  6729. /// This function is partially responsible for generating the control flow
  6730. /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
  6731. std::pair<BasicBlock *, Value *>
  6732. EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  6733. createVectorLoopSkeleton("");
  6734. // Generate the code to check the minimum iteration count of the vector
  6735. // epilogue (see below).
  6736. EPI.EpilogueIterationCountCheck =
  6737. emitIterationCountCheck(LoopScalarPreHeader, true);
  6738. EPI.EpilogueIterationCountCheck->setName("iter.check");
  6739. // Generate the code to check any assumptions that we've made for SCEV
  6740. // expressions.
  6741. EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
  6742. // Generate the code that checks at runtime if arrays overlap. We put the
  6743. // checks into a separate block to make the more common case of few elements
  6744. // faster.
  6745. EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader);
  6746. // Generate the iteration count check for the main loop, *after* the check
  6747. // for the epilogue loop, so that the path-length is shorter for the case
  6748. // that goes directly through the vector epilogue. The longer-path length for
6749. // the main loop is compensated for by the gain from vectorizing the larger
  6750. // trip count. Note: the branch will get updated later on when we vectorize
  6751. // the epilogue.
  6752. EPI.MainLoopIterationCountCheck =
  6753. emitIterationCountCheck(LoopScalarPreHeader, false);
  6754. // Generate the induction variable.
  6755. EPI.VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
  6756. // Skip induction resume value creation here because they will be created in
  6757. // the second pass for the scalar loop. The induction resume values for the
  6758. // inductions in the epilogue loop are created before executing the plan for
  6759. // the epilogue loop.
  6760. return {completeLoopSkeleton(), nullptr};
  6761. }
  6762. void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  6763. LLVM_DEBUG({
  6764. dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
  6765. << "Main Loop VF:" << EPI.MainLoopVF
  6766. << ", Main Loop UF:" << EPI.MainLoopUF
  6767. << ", Epilogue Loop VF:" << EPI.EpilogueVF
  6768. << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  6769. });
  6770. }
  6771. void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  6772. DEBUG_WITH_TYPE(VerboseDebug, {
  6773. dbgs() << "intermediate fn:\n"
  6774. << *OrigLoop->getHeader()->getParent() << "\n";
  6775. });
  6776. }
  6777. BasicBlock *
  6778. EpilogueVectorizerMainLoop::emitIterationCountCheck(BasicBlock *Bypass,
  6779. bool ForEpilogue) {
  6780. assert(Bypass && "Expected valid bypass basic block.");
  6781. ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
  6782. unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  6783. Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
  6784. // Reuse existing vector loop preheader for TC checks.
  6785. // Note that new preheader block is generated for vector loop.
  6786. BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  6787. IRBuilder<> Builder(TCCheckBlock->getTerminator());
6788. // Generate code to check if the loop's trip count is less than VF * UF
6789. // of the vector loop being checked (main or, when ForEpilogue, epilogue).
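// For example, when checking the main loop with VF=8 and UF=2, the emitted IR
// is roughly (with Bypass being the scalar preheader):
//   %min.iters.check = icmp ult i64 %trip.count, 16
//   br i1 %min.iters.check, label %Bypass, label %vector.ph
// (ult becomes ule when a scalar epilogue is required).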
  6790. auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
  6791. ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
  6792. Value *CheckMinIters = Builder.CreateICmp(
  6793. P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
  6794. "min.iters.check");
  6795. if (!ForEpilogue)
  6796. TCCheckBlock->setName("vector.main.loop.iter.check");
  6797. // Create new preheader for vector loop.
  6798. LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
  6799. DT, LI, nullptr, "vector.ph");
  6800. if (ForEpilogue) {
  6801. assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
  6802. DT->getNode(Bypass)->getIDom()) &&
  6803. "TC check is expected to dominate Bypass");
  6804. // Update dominator for Bypass & LoopExit.
  6805. DT->changeImmediateDominator(Bypass, TCCheckBlock);
  6806. if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
  6807. // For loops with multiple exits, there's no edge from the middle block
  6808. // to exit blocks (as the epilogue must run) and thus no need to update
  6809. // the immediate dominator of the exit blocks.
  6810. DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
  6811. LoopBypassBlocks.push_back(TCCheckBlock);
  6812. // Save the trip count so we don't have to regenerate it in the
  6813. // vec.epilog.iter.check. This is safe to do because the trip count
  6814. // generated here dominates the vector epilog iter check.
  6815. EPI.TripCount = Count;
  6816. }
  6817. ReplaceInstWithInst(
  6818. TCCheckBlock->getTerminator(),
  6819. BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  6820. return TCCheckBlock;
  6821. }
  6822. //===--------------------------------------------------------------------===//
  6823. // EpilogueVectorizerEpilogueLoop
  6824. //===--------------------------------------------------------------------===//
  6825. /// This function is partially responsible for generating the control flow
  6826. /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
  6827. std::pair<BasicBlock *, Value *>
  6828. EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
  6829. createVectorLoopSkeleton("vec.epilog.");
6830. // Now, compare the remaining count and, if there aren't enough iterations
6831. // to execute the vectorized epilogue, skip to the scalar part.
  6832. BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
  6833. VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
  6834. LoopVectorPreHeader =
  6835. SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
  6836. LI, nullptr, "vec.epilog.ph");
  6837. emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
  6838. VecEpilogueIterationCountCheck);
  6839. // Adjust the control flow taking the state info from the main loop
  6840. // vectorization into account.
  6841. assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
  6842. "expected this to be saved from the previous pass.");
  6843. EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
  6844. VecEpilogueIterationCountCheck, LoopVectorPreHeader);
  6845. DT->changeImmediateDominator(LoopVectorPreHeader,
  6846. EPI.MainLoopIterationCountCheck);
  6847. EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
  6848. VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  6849. if (EPI.SCEVSafetyCheck)
  6850. EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
  6851. VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  6852. if (EPI.MemSafetyCheck)
  6853. EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
  6854. VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  6855. DT->changeImmediateDominator(
  6856. VecEpilogueIterationCountCheck,
  6857. VecEpilogueIterationCountCheck->getSinglePredecessor());
  6858. DT->changeImmediateDominator(LoopScalarPreHeader,
  6859. EPI.EpilogueIterationCountCheck);
  6860. if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
  6861. // If there is an epilogue which must run, there's no edge from the
  6862. // middle block to exit blocks and thus no need to update the immediate
  6863. // dominator of the exit blocks.
  6864. DT->changeImmediateDominator(LoopExitBlock,
  6865. EPI.EpilogueIterationCountCheck);
  6866. // Keep track of bypass blocks, as they feed start values to the induction and
  6867. // reduction phis in the scalar loop preheader.
  6868. if (EPI.SCEVSafetyCheck)
  6869. LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  6870. if (EPI.MemSafetyCheck)
  6871. LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  6872. LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
  6873. // The vec.epilog.iter.check block may contain Phi nodes from inductions or
  6874. // reductions which merge control-flow from the latch block and the middle
  6875. // block. Update the incoming values here and move the Phi into the preheader.
  6876. SmallVector<PHINode *, 4> PhisInBlock;
  6877. for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
  6878. PhisInBlock.push_back(&Phi);
  6879. for (PHINode *Phi : PhisInBlock) {
  6880. Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
  6881. Phi->replaceIncomingBlockWith(
  6882. VecEpilogueIterationCountCheck->getSinglePredecessor(),
  6883. VecEpilogueIterationCountCheck);
  6884. // If the phi doesn't have an incoming value from the
  6885. // EpilogueIterationCountCheck, we are done. Otherwise remove the incoming
  6886. // value and also those from other check blocks. This is needed for
  6887. // reduction phis only.
  6888. if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
  6889. return EPI.EpilogueIterationCountCheck == IncB;
  6890. }))
  6891. continue;
  6892. Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
  6893. if (EPI.SCEVSafetyCheck)
  6894. Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
  6895. if (EPI.MemSafetyCheck)
  6896. Phi->removeIncomingValue(EPI.MemSafetyCheck);
  6897. }
  6898. // Generate a resume induction for the vector epilogue and put it in the
6899. // vector epilogue preheader.
  6900. Type *IdxTy = Legal->getWidestInductionType();
  6901. PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
  6902. LoopVectorPreHeader->getFirstNonPHI());
  6903. EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  6904. EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
  6905. EPI.MainLoopIterationCountCheck);
  6906. // Generate induction resume values. These variables save the new starting
  6907. // indexes for the scalar loop. They are used to test if there are any tail
  6908. // iterations left once the vector loop has completed.
6909. // Note that when the vectorized epilogue is skipped due to the iteration
6910. // count check, the resume value for the induction variable comes from
  6911. // the trip count of the main vector loop, hence passing the AdditionalBypass
  6912. // argument.
  6913. createInductionResumeValues({VecEpilogueIterationCountCheck,
  6914. EPI.VectorTripCount} /* AdditionalBypass */);
  6915. return {completeLoopSkeleton(), EPResumeVal};
  6916. }
  6917. BasicBlock *
  6918. EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
  6919. BasicBlock *Bypass, BasicBlock *Insert) {
  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  6922. assert(
  6923. (!isa<Instruction>(EPI.TripCount) ||
  6924. DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
  6925. "saved trip count does not dominate insertion point.");
  6926. Value *TC = EPI.TripCount;
  6927. IRBuilder<> Builder(Insert->getTerminator());
  6928. Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
  6929. // Generate code to check if the loop's trip count is less than VF * UF of the
  6930. // vector epilogue loop.
  6931. auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
  6932. ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
  6933. Value *CheckMinIters =
  6934. Builder.CreateICmp(P, Count,
  6935. createStepForVF(Builder, Count->getType(),
  6936. EPI.EpilogueVF, EPI.EpilogueUF),
  6937. "min.epilog.iters.check");
  6938. ReplaceInstWithInst(
  6939. Insert->getTerminator(),
  6940. BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  6941. LoopBypassBlocks.push_back(Insert);
  6942. return Insert;
  6943. }
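// Illustrative sketch only (block and value names are approximate, not
// emitted verbatim by this function): for a loop with trip count %tc, a main
// vector trip count %vtc and an epilogue with VF=4, UF=1, the check built
// above is roughly
//
//   %n.vec.remaining = sub i64 %tc, %vtc
//   %min.epilog.iters.check = icmp ult i64 %n.vec.remaining, 4
//   br i1 %min.epilog.iters.check, label %scalar.ph, label %vec.epilog.ph
//
// i.e. branch to the scalar remainder (Bypass) when too few iterations are
// left for one epilogue vector step. When a scalar epilogue is required the
// predicate becomes ule so at least one scalar iteration always remains.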
  6944. void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  6945. LLVM_DEBUG({
  6946. dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
  6947. << "Epilogue Loop VF:" << EPI.EpilogueVF
  6948. << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  6949. });
  6950. }
  6951. void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  6952. DEBUG_WITH_TYPE(VerboseDebug, {
  6953. dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
  6954. });
  6955. }
  6956. bool LoopVectorizationPlanner::getDecisionAndClampRange(
  6957. const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  6958. assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  6959. bool PredicateAtRangeStart = Predicate(Range.Start);
  6960. for (ElementCount TmpVF = Range.Start * 2;
  6961. ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
  6962. if (Predicate(TmpVF) != PredicateAtRangeStart) {
  6963. Range.End = TmpVF;
  6964. break;
  6965. }
  6966. return PredicateAtRangeStart;
  6967. }
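// Worked usage example (illustration only; the predicate is hypothetical):
//
//   VFRange Range = {ElementCount::getFixed(4), ElementCount::getFixed(32)};
//   // Suppose Predicate returns true for VF=4 and VF=8 but false for VF=16.
//   bool R = LoopVectorizationPlanner::getDecisionAndClampRange(Predicate,
//                                                               Range);
//   // R == true and Range.End == 16: the decision taken at Range.Start holds
//   // uniformly for every VF in the clamped half-open range [4, 16).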
  6968. /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
  6969. /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
  6970. /// of VF's starting at a given VF and extending it as much as possible. Each
  6971. /// vectorization decision can potentially shorten this sub-range during
  6972. /// buildVPlan().
  6973. void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
  6974. ElementCount MaxVF) {
  6975. auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  6976. for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
  6977. VFRange SubRange = {VF, MaxVFPlusOne};
  6978. VPlans.push_back(buildVPlan(SubRange));
  6979. VF = SubRange.End;
  6980. }
  6981. }
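// Example (illustration only): with MinVF=2 and MaxVF=16 the iteration space
// is the half-open range [2, 17). If buildVPlan clamps the first sub-range
// {2, 17} down to {2, 8}, a second plan is built for {8, 17}; together the
// plans cover VF = 2 and 4 (first plan) and VF = 8 and 16 (second plan)
// without overlap.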
  6982. VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
  6983. VPlanPtr &Plan) {
  6984. assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
  6985. // Look for cached value.
  6986. std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  6987. EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
  6988. if (ECEntryIt != EdgeMaskCache.end())
  6989. return ECEntryIt->second;
  6990. VPValue *SrcMask = createBlockInMask(Src, Plan);
  6991. // The terminator has to be a branch inst!
  6992. BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  6993. assert(BI && "Unexpected terminator found");
  6994. if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
  6995. return EdgeMaskCache[Edge] = SrcMask;
  6996. // If source is an exiting block, we know the exit edge is dynamically dead
  6997. // in the vector loop, and thus we don't need to restrict the mask. Avoid
  6998. // adding uses of an otherwise potentially dead instruction.
  6999. if (OrigLoop->isLoopExiting(Src))
  7000. return EdgeMaskCache[Edge] = SrcMask;
  7001. VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
  7002. assert(EdgeMask && "No Edge Mask found for condition");
  7003. if (BI->getSuccessor(0) != Dst)
  7004. EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
  7005. if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
  7006. // The condition is 'SrcMask && EdgeMask', which is equivalent to
  7007. // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
  7008. // The select version does not introduce new UB if SrcMask is false and
  7009. // EdgeMask is poison. Using 'and' here introduces undefined behavior.
  7010. VPValue *False = Plan->getOrAddVPValue(
  7011. ConstantInt::getFalse(BI->getCondition()->getType()));
  7012. EdgeMask =
  7013. Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
  7014. }
  7015. return EdgeMaskCache[Edge] = EdgeMask;
  7016. }
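// Sketch of the mask produced above (illustrative, names made up): for a
// true edge leaving a block whose in-mask is %src.mask through a conditional
// branch on %cond, the resulting mask is roughly
//
//   %edge.mask = select i1 %src.mask, i1 %cond, i1 false
//
// while the corresponding false edge uses the negated condition in place of
// %cond.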
  7017. VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
  7018. assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
  7019. // Look for cached value.
  7020. BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
  7021. if (BCEntryIt != BlockMaskCache.end())
  7022. return BCEntryIt->second;
  7023. // All-one mask is modelled as no-mask following the convention for masked
  7024. // load/store/gather/scatter. Initialize BlockMask to no-mask.
  7025. VPValue *BlockMask = nullptr;
  7026. if (OrigLoop->getHeader() == BB) {
  7027. if (!CM.blockNeedsPredicationForAnyReason(BB))
  7028. return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
  7029. assert(CM.foldTailByMasking() && "must fold the tail");
  7030. // If we're using the active lane mask for control flow, then we get the
  7031. // mask from the active lane mask PHI that is cached in the VPlan.
  7032. PredicationStyle EmitGetActiveLaneMask = CM.TTI.emitGetActiveLaneMask();
  7033. if (EmitGetActiveLaneMask == PredicationStyle::DataAndControlFlow)
  7034. return BlockMaskCache[BB] = Plan->getActiveLaneMaskPhi();
  7035. // Introduce the early-exit compare IV <= BTC to form header block mask.
  7036. // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
  7037. // constructing the desired canonical IV in the header block as its first
  7038. // non-phi instructions.
  7039. VPBasicBlock *HeaderVPBB =
  7040. Plan->getVectorLoopRegion()->getEntryBasicBlock();
  7041. auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
  7042. auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
  7043. HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
  7044. VPBuilder::InsertPointGuard Guard(Builder);
  7045. Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
  7046. if (EmitGetActiveLaneMask != PredicationStyle::None) {
  7047. VPValue *TC = Plan->getOrCreateTripCount();
  7048. BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC},
  7049. nullptr, "active.lane.mask");
  7050. } else {
  7051. VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
  7052. BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
  7053. }
  7054. return BlockMaskCache[BB] = BlockMask;
  7055. }
  7056. // This is the block mask. We OR all incoming edges.
  7057. for (auto *Predecessor : predecessors(BB)) {
  7058. VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
  7059. if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
  7060. return BlockMaskCache[BB] = EdgeMask;
  7061. if (!BlockMask) { // BlockMask has its initialized nullptr value.
  7062. BlockMask = EdgeMask;
  7063. continue;
  7064. }
  7065. BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
  7066. }
  7067. return BlockMaskCache[BB] = BlockMask;
  7068. }
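// Sketch of the resulting block masks (illustration only). Under tail
// folding the header mask is either
//
//   %m = icmp ule <widened canonical IV>, <backedge-taken count>
//
// or, when an active-lane-mask style is in use,
//
//   %m = active.lane.mask(<widened canonical IV>, <trip count>)
//
// For every other block the mask is the OR of its incoming edge masks, and a
// nullptr mask consistently stands for an all-true mask.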
  7069. VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
  7070. ArrayRef<VPValue *> Operands,
  7071. VFRange &Range,
  7072. VPlanPtr &Plan) {
  7073. assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
  7074. "Must be called with either a load or store");
  7075. auto willWiden = [&](ElementCount VF) -> bool {
  7076. LoopVectorizationCostModel::InstWidening Decision =
  7077. CM.getWideningDecision(I, VF);
  7078. assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
  7079. "CM decision should be taken at this point.");
  7080. if (Decision == LoopVectorizationCostModel::CM_Interleave)
  7081. return true;
  7082. if (CM.isScalarAfterVectorization(I, VF) ||
  7083. CM.isProfitableToScalarize(I, VF))
  7084. return false;
  7085. return Decision != LoopVectorizationCostModel::CM_Scalarize;
  7086. };
  7087. if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
  7088. return nullptr;
  7089. VPValue *Mask = nullptr;
  7090. if (Legal->isMaskRequired(I))
  7091. Mask = createBlockInMask(I->getParent(), Plan);
  7092. // Determine if the pointer operand of the access is either consecutive or
  7093. // reverse consecutive.
  7094. LoopVectorizationCostModel::InstWidening Decision =
  7095. CM.getWideningDecision(I, Range.Start);
  7096. bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
  7097. bool Consecutive =
  7098. Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
  7099. if (LoadInst *Load = dyn_cast<LoadInst>(I))
  7100. return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
  7101. Consecutive, Reverse);
  7102. StoreInst *Store = cast<StoreInst>(I);
  7103. return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
  7104. Mask, Consecutive, Reverse);
  7105. }
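// Example of the widening decisions consumed above (illustrative): a load of
// a[i] with unit stride is typically CM_Widen and becomes one wide load; a
// load of a[N-i] is typically CM_Widen_Reverse and becomes a wide load plus a
// lane reverse; any other widened access is emitted as a gather/scatter. A
// block-in mask is attached only when the access requires predication.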
/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
/// insert a recipe to expand the step for the induction recipe.
  7108. static VPWidenIntOrFpInductionRecipe *createWidenInductionRecipes(
  7109. PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start,
  7110. const InductionDescriptor &IndDesc, LoopVectorizationCostModel &CM,
  7111. VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range) {
  7112. // Returns true if an instruction \p I should be scalarized instead of
  7113. // vectorized for the chosen vectorization factor.
  7114. auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
  7115. return CM.isScalarAfterVectorization(I, VF) ||
  7116. CM.isProfitableToScalarize(I, VF);
  7117. };
  7118. bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
  7119. [&](ElementCount VF) {
  7120. return ShouldScalarizeInstruction(PhiOrTrunc, VF);
  7121. },
  7122. Range);
  7123. assert(IndDesc.getStartValue() ==
  7124. Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
  7125. assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
  7126. "step must be loop invariant");
  7127. VPValue *Step =
  7128. vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep(), SE);
  7129. if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
  7130. return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, TruncI,
  7131. !NeedsScalarIVOnly);
  7132. }
  7133. assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
  7134. return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc,
  7135. !NeedsScalarIVOnly);
  7136. }
  7137. VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI(
  7138. PHINode *Phi, ArrayRef<VPValue *> Operands, VPlan &Plan, VFRange &Range) {
  7139. // Check if this is an integer or fp induction. If so, build the recipe that
  7140. // produces its scalar and vector values.
  7141. if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
  7142. return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, CM, Plan,
  7143. *PSE.getSE(), *OrigLoop, Range);
  7144. // Check if this is pointer induction. If so, build the recipe for it.
  7145. if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
  7146. VPValue *Step = vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep(),
  7147. *PSE.getSE());
  7148. assert(isa<SCEVConstant>(II->getStep()));
  7149. return new VPWidenPointerInductionRecipe(
  7150. Phi, Operands[0], Step, *II,
  7151. LoopVectorizationPlanner::getDecisionAndClampRange(
  7152. [&](ElementCount VF) {
  7153. return CM.isScalarAfterVectorization(Phi, VF);
  7154. },
  7155. Range));
  7156. }
  7157. return nullptr;
  7158. }
  7159. VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
  7160. TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, VPlan &Plan) {
  7161. // Optimize the special case where the source is a constant integer
  7162. // induction variable. Notice that we can only optimize the 'trunc' case
  7163. // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
  7164. // (c) other casts depend on pointer size.
  7165. // Determine whether \p K is a truncation based on an induction variable that
  7166. // can be optimized.
  7167. auto isOptimizableIVTruncate =
  7168. [&](Instruction *K) -> std::function<bool(ElementCount)> {
  7169. return [=](ElementCount VF) -> bool {
  7170. return CM.isOptimizableIVTruncate(K, VF);
  7171. };
  7172. };
  7173. if (LoopVectorizationPlanner::getDecisionAndClampRange(
  7174. isOptimizableIVTruncate(I), Range)) {
  7175. auto *Phi = cast<PHINode>(I->getOperand(0));
  7176. const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
  7177. VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
  7178. return createWidenInductionRecipes(Phi, I, Start, II, CM, Plan,
  7179. *PSE.getSE(), *OrigLoop, Range);
  7180. }
  7181. return nullptr;
  7182. }
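// Example (illustration only): given
//
//   %iv    = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
//   %iv.32 = trunc i64 %iv to i32
//
// the trunc is folded into the widened induction, so a single i32 vector
// induction is produced instead of a wide i64 induction followed by a vector
// truncate, for every VF in the clamped range where the cost model reports
// the truncate as optimizable.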
  7183. VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
  7184. ArrayRef<VPValue *> Operands,
  7185. VPlanPtr &Plan) {
  7186. // If all incoming values are equal, the incoming VPValue can be used directly
  7187. // instead of creating a new VPBlendRecipe.
  7188. if (llvm::all_equal(Operands))
  7189. return Operands[0];
  7190. unsigned NumIncoming = Phi->getNumIncomingValues();
  7191. // For in-loop reductions, we do not need to create an additional select.
  7192. VPValue *InLoopVal = nullptr;
  7193. for (unsigned In = 0; In < NumIncoming; In++) {
  7194. PHINode *PhiOp =
  7195. dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
  7196. if (PhiOp && CM.isInLoopReduction(PhiOp)) {
  7197. assert(!InLoopVal && "Found more than one in-loop reduction!");
  7198. InLoopVal = Operands[In];
  7199. }
  7200. }
  7201. assert((!InLoopVal || NumIncoming == 2) &&
  7202. "Found an in-loop reduction for PHI with unexpected number of "
  7203. "incoming values");
  7204. if (InLoopVal)
  7205. return Operands[Operands[0] == InLoopVal ? 1 : 0];
  7206. // We know that all PHIs in non-header blocks are converted into selects, so
  7207. // we don't have to worry about the insertion order and we can just use the
  7208. // builder. At this point we generate the predication tree. There may be
  7209. // duplications since this is a simple recursive scan, but future
  7210. // optimizations will clean it up.
  7211. SmallVector<VPValue *, 2> OperandsWithMask;
  7212. for (unsigned In = 0; In < NumIncoming; In++) {
  7213. VPValue *EdgeMask =
  7214. createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
  7215. assert((EdgeMask || NumIncoming == 1) &&
  7216. "Multiple predecessors with one having a full mask");
  7217. OperandsWithMask.push_back(Operands[In]);
  7218. if (EdgeMask)
  7219. OperandsWithMask.push_back(EdgeMask);
  7220. }
  7221. return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
  7222. }
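// Operand layout of the blend built above (illustrative): for a phi with
// incoming values v0, v1, v2 the recipe operands are
//
//   (v0, mask0, v1, mask1, v2, mask2)
//
// where a mask may only be omitted when that edge is known all-true, which
// the assert above restricts to single-predecessor phis. The blend is later
// lowered to a chain of per-lane selects over the incoming values.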
  7223. VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
  7224. ArrayRef<VPValue *> Operands,
  7225. VFRange &Range) const {
  7226. bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
  7227. [this, CI](ElementCount VF) {
  7228. return CM.isScalarWithPredication(CI, VF);
  7229. },
  7230. Range);
  7231. if (IsPredicated)
  7232. return nullptr;
  7233. Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  7234. if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
  7235. ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
  7236. ID == Intrinsic::pseudoprobe ||
  7237. ID == Intrinsic::experimental_noalias_scope_decl))
  7238. return nullptr;
  7239. ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
  7240. // Is it beneficial to perform intrinsic call compared to lib call?
  7241. bool ShouldUseVectorIntrinsic =
  7242. ID && LoopVectorizationPlanner::getDecisionAndClampRange(
  7243. [&](ElementCount VF) -> bool {
  7244. bool NeedToScalarize = false;
  7245. // Is it beneficial to perform intrinsic call compared to lib
  7246. // call?
  7247. InstructionCost CallCost =
  7248. CM.getVectorCallCost(CI, VF, NeedToScalarize);
  7249. InstructionCost IntrinsicCost =
  7250. CM.getVectorIntrinsicCost(CI, VF);
  7251. return IntrinsicCost <= CallCost;
  7252. },
  7253. Range);
  7254. if (ShouldUseVectorIntrinsic)
  7255. return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()), ID);
  // Is it better to call a vectorized version of the function than to
  // scalarize the call?
  7258. auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
  7259. [&](ElementCount VF) -> bool {
        // The following case may be scalarized depending on the VF.
        // The flag shows whether we can use a plain call for the vectorized
        // version of the instruction.
  7263. bool NeedToScalarize = false;
  7264. CM.getVectorCallCost(CI, VF, NeedToScalarize);
  7265. return !NeedToScalarize;
  7266. },
  7267. Range);
  7268. if (ShouldUseVectorCall)
  7269. return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()),
  7270. Intrinsic::not_intrinsic);
  7271. return nullptr;
  7272. }
  7273. bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
  7274. assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
  7275. !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  7276. // Instruction should be widened, unless it is scalar after vectorization,
  7277. // scalarization is profitable or it is predicated.
  7278. auto WillScalarize = [this, I](ElementCount VF) -> bool {
  7279. return CM.isScalarAfterVectorization(I, VF) ||
  7280. CM.isProfitableToScalarize(I, VF) ||
  7281. CM.isScalarWithPredication(I, VF);
  7282. };
  7283. return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
  7284. Range);
  7285. }
  7286. VPRecipeBase *VPRecipeBuilder::tryToWiden(Instruction *I,
  7287. ArrayRef<VPValue *> Operands,
  7288. VPBasicBlock *VPBB, VPlanPtr &Plan) {
  7289. switch (I->getOpcode()) {
  7290. default:
  7291. return nullptr;
  7292. case Instruction::SDiv:
  7293. case Instruction::UDiv:
  7294. case Instruction::SRem:
  7295. case Instruction::URem: {
  7296. // If not provably safe, use a select to form a safe divisor before widening the
  7297. // div/rem operation itself. Otherwise fall through to general handling below.
  7298. if (CM.isPredicatedInst(I)) {
  7299. SmallVector<VPValue *> Ops(Operands.begin(), Operands.end());
  7300. VPValue *Mask = createBlockInMask(I->getParent(), Plan);
  7301. VPValue *One =
  7302. Plan->getOrAddExternalDef(ConstantInt::get(I->getType(), 1u, false));
  7303. auto *SafeRHS =
  7304. new VPInstruction(Instruction::Select, {Mask, Ops[1], One},
  7305. I->getDebugLoc());
  7306. VPBB->appendRecipe(SafeRHS);
  7307. Ops[1] = SafeRHS;
  7308. return new VPWidenRecipe(*I, make_range(Ops.begin(), Ops.end()));
  7309. }
  7310. LLVM_FALLTHROUGH;
  7311. }
  7312. case Instruction::Add:
  7313. case Instruction::And:
  7314. case Instruction::AShr:
  7315. case Instruction::BitCast:
  7316. case Instruction::FAdd:
  7317. case Instruction::FCmp:
  7318. case Instruction::FDiv:
  7319. case Instruction::FMul:
  7320. case Instruction::FNeg:
  7321. case Instruction::FPExt:
  7322. case Instruction::FPToSI:
  7323. case Instruction::FPToUI:
  7324. case Instruction::FPTrunc:
  7325. case Instruction::FRem:
  7326. case Instruction::FSub:
  7327. case Instruction::ICmp:
  7328. case Instruction::IntToPtr:
  7329. case Instruction::LShr:
  7330. case Instruction::Mul:
  7331. case Instruction::Or:
  7332. case Instruction::PtrToInt:
  7333. case Instruction::Select:
  7334. case Instruction::SExt:
  7335. case Instruction::Shl:
  7336. case Instruction::SIToFP:
  7337. case Instruction::Sub:
  7338. case Instruction::Trunc:
  7339. case Instruction::UIToFP:
  7340. case Instruction::Xor:
  7341. case Instruction::ZExt:
  7342. case Instruction::Freeze:
  7343. return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
  7344. };
  7345. }
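// Sketch of the safe-divisor rewrite above (illustrative types and names):
// a predicated
//
//   %r = udiv i32 %x, %y
//
// is widened roughly as
//
//   %safe.y = select <VF x i1> %mask, <VF x i32> %y, <VF x i32> splat(1)
//   %r      = udiv <VF x i32> %x, %safe.y
//
// so inactive lanes divide by 1 instead of a possibly zero or poison %y, and
// no scalarized, branch-guarded division is required.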
  7346. void VPRecipeBuilder::fixHeaderPhis() {
  7347. BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  7348. for (VPHeaderPHIRecipe *R : PhisToFix) {
  7349. auto *PN = cast<PHINode>(R->getUnderlyingValue());
  7350. VPRecipeBase *IncR =
  7351. getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
  7352. R->addOperand(IncR->getVPSingleValue());
  7353. }
  7354. }
  7355. VPBasicBlock *VPRecipeBuilder::handleReplication(
  7356. Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
  7357. VPlanPtr &Plan) {
  7358. bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
  7359. [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
  7360. Range);
  7361. bool IsPredicated = CM.isPredicatedInst(I);
  7362. // Even if the instruction is not marked as uniform, there are certain
  7363. // intrinsic calls that can be effectively treated as such, so we check for
  7364. // them here. Conservatively, we only do this for scalable vectors, since
  7365. // for fixed-width VFs we can always fall back on full scalarization.
  7366. if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
  7367. switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
  7368. case Intrinsic::assume:
  7369. case Intrinsic::lifetime_start:
  7370. case Intrinsic::lifetime_end:
      // For scalable vectors if one of the operands is variant then we still
      // want to mark as uniform, which will generate one instruction for just
      // the first lane of the vector. We can't scalarize the call in the same
      // way as for fixed-width vectors because we don't know how many lanes
      // there are.
      //
      // The reasons for doing it this way for scalable vectors are:
      //   1. For the assume intrinsic generating the instruction for the first
      //      lane is still better than not generating any at all. For
      //      example, the input may be a splat across all lanes.
      //   2. For the lifetime start/end intrinsics the pointer operand only
      //      does anything useful when the input comes from a stack object,
      //      which suggests it should always be uniform. For non-stack objects
      //      the effect is to poison the object, which still allows us to
      //      remove the call.
  7386. IsUniform = true;
  7387. break;
  7388. default:
  7389. break;
  7390. }
  7391. }
  7392. auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
  7393. IsUniform, IsPredicated);
  7394. // Find if I uses a predicated instruction. If so, it will use its scalar
  7395. // value. Avoid hoisting the insert-element which packs the scalar value into
  7396. // a vector value, as that happens iff all users use the vector value.
  7397. for (VPValue *Op : Recipe->operands()) {
  7398. auto *PredR =
  7399. dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDefiningRecipe());
  7400. if (!PredR)
  7401. continue;
  7402. auto *RepR = cast<VPReplicateRecipe>(
  7403. PredR->getOperand(0)->getDefiningRecipe());
  7404. assert(RepR->isPredicated() &&
  7405. "expected Replicate recipe to be predicated");
  7406. RepR->setAlsoPack(false);
  7407. }
  7408. // Finalize the recipe for Instr, first if it is not predicated.
  7409. if (!IsPredicated) {
  7410. LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
  7411. setRecipe(I, Recipe);
  7412. Plan->addVPValue(I, Recipe);
  7413. VPBB->appendRecipe(Recipe);
  7414. return VPBB;
  7415. }
  7416. LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
  7417. VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
  7418. assert(SingleSucc && "VPBB must have a single successor when handling "
  7419. "predicated replication.");
  7420. VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
  7421. // Record predicated instructions for above packing optimizations.
  7422. VPBlockBase *Region = createReplicateRegion(Recipe, Plan);
  7423. VPBlockUtils::insertBlockAfter(Region, VPBB);
  7424. auto *RegSucc = new VPBasicBlock();
  7425. VPBlockUtils::insertBlockAfter(RegSucc, Region);
  7426. VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
  7427. return RegSucc;
  7428. }
  7429. VPRegionBlock *
  7430. VPRecipeBuilder::createReplicateRegion(VPReplicateRecipe *PredRecipe,
  7431. VPlanPtr &Plan) {
  7432. Instruction *Instr = PredRecipe->getUnderlyingInstr();
  7433. // Instructions marked for predication are replicated and placed under an
  7434. // if-then construct to prevent side-effects.
  7435. // Generate recipes to compute the block mask for this region.
  7436. VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
  7437. // Build the triangular if-then region.
  7438. std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  7439. assert(Instr->getParent() && "Predicated instruction not in any basic block");
  7440. auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  7441. auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  7442. auto *PHIRecipe = Instr->getType()->isVoidTy()
  7443. ? nullptr
  7444. : new VPPredInstPHIRecipe(PredRecipe);
  7445. if (PHIRecipe) {
  7446. setRecipe(Instr, PHIRecipe);
  7447. Plan->addVPValue(Instr, PHIRecipe);
  7448. } else {
  7449. setRecipe(Instr, PredRecipe);
  7450. Plan->addVPValue(Instr, PredRecipe);
  7451. }
  7452. auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  7453. auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  7454. VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true);
  7455. // Note: first set Entry as region entry and then connect successors starting
  7456. // from it in order, to propagate the "parent" of each VPBasicBlock.
  7457. VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  7458. VPBlockUtils::connectBlocks(Pred, Exiting);
  7459. return Region;
  7460. }
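// Shape of the region created above (illustrative sketch, e.g. for a
// predicated load; recipe spellings are approximate):
//
//   pred.load.entry:    BRANCH-ON-MASK %block.in.mask
//   pred.load.if:       REPLICATE %lane.val = load ...      (predicated)
//   pred.load.continue: PHI-PREDICATED-INSTRUCTION %merged = %lane.val
//
// The region is marked as a replicator, so it is executed once per lane, and
// the continue block's phi recipe merges the per-lane results back into a
// vector value for any user that still needs one.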
  7461. VPRecipeOrVPValueTy
  7462. VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
  7463. ArrayRef<VPValue *> Operands,
  7464. VFRange &Range, VPBasicBlock *VPBB,
  7465. VPlanPtr &Plan) {
  7466. // First, check for specific widening recipes that deal with inductions, Phi
  7467. // nodes, calls and memory operations.
  7468. VPRecipeBase *Recipe;
  7469. if (auto Phi = dyn_cast<PHINode>(Instr)) {
  7470. if (Phi->getParent() != OrigLoop->getHeader())
  7471. return tryToBlend(Phi, Operands, Plan);
  7472. // Always record recipes for header phis. Later first-order recurrence phis
  7473. // can have earlier phis as incoming values.
  7474. recordRecipeOf(Phi);
  7475. if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range)))
  7476. return toVPRecipeResult(Recipe);
  7477. VPHeaderPHIRecipe *PhiRecipe = nullptr;
  7478. assert((Legal->isReductionVariable(Phi) ||
  7479. Legal->isFixedOrderRecurrence(Phi)) &&
  7480. "can only widen reductions and fixed-order recurrences here");
  7481. VPValue *StartV = Operands[0];
  7482. if (Legal->isReductionVariable(Phi)) {
  7483. const RecurrenceDescriptor &RdxDesc =
  7484. Legal->getReductionVars().find(Phi)->second;
  7485. assert(RdxDesc.getRecurrenceStartValue() ==
  7486. Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
  7487. PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
  7488. CM.isInLoopReduction(Phi),
  7489. CM.useOrderedReductions(RdxDesc));
  7490. } else {
  7491. // TODO: Currently fixed-order recurrences are modeled as chains of
  7492. // first-order recurrences. If there are no users of the intermediate
  7493. // recurrences in the chain, the fixed order recurrence should be modeled
  7494. // directly, enabling more efficient codegen.
  7495. PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
  7496. }
  7497. // Record the incoming value from the backedge, so we can add the incoming
  7498. // value from the backedge after all recipes have been created.
  7499. auto *Inc = cast<Instruction>(
  7500. Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
  7501. auto RecipeIter = Ingredient2Recipe.find(Inc);
  7502. if (RecipeIter == Ingredient2Recipe.end())
  7503. recordRecipeOf(Inc);
  7504. PhisToFix.push_back(PhiRecipe);
  7505. return toVPRecipeResult(PhiRecipe);
  7506. }
  7507. if (isa<TruncInst>(Instr) &&
  7508. (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
  7509. Range, *Plan)))
  7510. return toVPRecipeResult(Recipe);
  7511. // All widen recipes below deal only with VF > 1.
  7512. if (LoopVectorizationPlanner::getDecisionAndClampRange(
  7513. [&](ElementCount VF) { return VF.isScalar(); }, Range))
  7514. return nullptr;
  7515. if (auto *CI = dyn_cast<CallInst>(Instr))
  7516. return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
  7517. if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
  7518. return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
  7519. if (!shouldWiden(Instr, Range))
  7520. return nullptr;
  7521. if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
  7522. return toVPRecipeResult(new VPWidenGEPRecipe(
  7523. GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
  7524. if (auto *SI = dyn_cast<SelectInst>(Instr)) {
  7525. bool InvariantCond =
  7526. PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
  7527. return toVPRecipeResult(new VPWidenSelectRecipe(
  7528. *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  7529. }
  7530. return toVPRecipeResult(tryToWiden(Instr, Operands, VPBB, Plan));
  7531. }
  7532. void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
  7533. ElementCount MaxVF) {
  7534. assert(OrigLoop->isInnermost() && "Inner loop expected.");
  7535. // Add assume instructions we need to drop to DeadInstructions, to prevent
  7536. // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  7538. // control flow is preserved, we should keep them.
  7539. SmallPtrSet<Instruction *, 4> DeadInstructions;
  7540. auto &ConditionalAssumes = Legal->getConditionalAssumes();
  7541. DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
  7542. MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  7543. // Dead instructions do not need sinking. Remove them from SinkAfter.
  7544. for (Instruction *I : DeadInstructions)
  7545. SinkAfter.erase(I);
  7546. // Cannot sink instructions after dead instructions (there won't be any
  7547. // recipes for them). Instead, find the first non-dead previous instruction.
  7548. for (auto &P : Legal->getSinkAfter()) {
  7549. Instruction *SinkTarget = P.second;
  7550. Instruction *FirstInst = &*SinkTarget->getParent()->begin();
  7551. (void)FirstInst;
  7552. while (DeadInstructions.contains(SinkTarget)) {
  7553. assert(
  7554. SinkTarget != FirstInst &&
  7555. "Must find a live instruction (at least the one feeding the "
  7556. "fixed-order recurrence PHI) before reaching beginning of the block");
  7557. SinkTarget = SinkTarget->getPrevNode();
  7558. assert(SinkTarget != P.first &&
  7559. "sink source equals target, no sinking required");
  7560. }
  7561. P.second = SinkTarget;
  7562. }
  7563. auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  7564. for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
  7565. VFRange SubRange = {VF, MaxVFPlusOne};
  7566. VPlans.push_back(
  7567. buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
  7568. VF = SubRange.End;
  7569. }
  7570. }
  7571. // Add the necessary canonical IV and branch recipes required to control the
  7572. // loop.
  7573. static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
  7574. bool HasNUW,
  7575. bool UseLaneMaskForLoopControlFlow) {
  7576. Value *StartIdx = ConstantInt::get(IdxTy, 0);
  7577. auto *StartV = Plan.getOrAddVPValue(StartIdx);
  7578. // Add a VPCanonicalIVPHIRecipe starting at 0 to the header.
  7579. auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
  7580. VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  7581. VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
  7582. Header->insert(CanonicalIVPHI, Header->begin());
  7583. // Add a CanonicalIVIncrement{NUW} VPInstruction to increment the scalar
  7584. // IV by VF * UF.
  7585. auto *CanonicalIVIncrement =
  7586. new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
  7587. : VPInstruction::CanonicalIVIncrement,
  7588. {CanonicalIVPHI}, DL, "index.next");
  7589. CanonicalIVPHI->addOperand(CanonicalIVIncrement);
  7590. VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  7591. EB->appendRecipe(CanonicalIVIncrement);
  7592. if (UseLaneMaskForLoopControlFlow) {
  7593. // Create the active lane mask instruction in the vplan preheader.
  7594. VPBasicBlock *Preheader = Plan.getEntry()->getEntryBasicBlock();
  7595. // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
  7596. // we have to take unrolling into account. Each part needs to start at
  7597. // Part * VF
  7598. auto *CanonicalIVIncrementParts =
  7599. new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementForPartNUW
  7600. : VPInstruction::CanonicalIVIncrementForPart,
  7601. {StartV}, DL, "index.part.next");
  7602. Preheader->appendRecipe(CanonicalIVIncrementParts);
  7603. // Create the ActiveLaneMask instruction using the correct start values.
  7604. VPValue *TC = Plan.getOrCreateTripCount();
  7605. auto *EntryALM = new VPInstruction(VPInstruction::ActiveLaneMask,
  7606. {CanonicalIVIncrementParts, TC}, DL,
  7607. "active.lane.mask.entry");
  7608. Preheader->appendRecipe(EntryALM);
  7609. // Now create the ActiveLaneMaskPhi recipe in the main loop using the
  7610. // preheader ActiveLaneMask instruction.
  7611. auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
  7612. Header->insert(LaneMaskPhi, Header->getFirstNonPhi());
  7613. // Create the active lane mask for the next iteration of the loop.
  7614. CanonicalIVIncrementParts =
  7615. new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementForPartNUW
  7616. : VPInstruction::CanonicalIVIncrementForPart,
  7617. {CanonicalIVIncrement}, DL);
  7618. EB->appendRecipe(CanonicalIVIncrementParts);
  7619. auto *ALM = new VPInstruction(VPInstruction::ActiveLaneMask,
  7620. {CanonicalIVIncrementParts, TC}, DL,
  7621. "active.lane.mask.next");
  7622. EB->appendRecipe(ALM);
  7623. LaneMaskPhi->addOperand(ALM);
  7624. // We have to invert the mask here because a true condition means jumping
  7625. // to the exit block.
  7626. auto *NotMask = new VPInstruction(VPInstruction::Not, ALM, DL);
  7627. EB->appendRecipe(NotMask);
  7628. VPInstruction *BranchBack =
  7629. new VPInstruction(VPInstruction::BranchOnCond, {NotMask}, DL);
  7630. EB->appendRecipe(BranchBack);
  7631. } else {
  7632. // Add the BranchOnCount VPInstruction to the latch.
  7633. VPInstruction *BranchBack = new VPInstruction(
  7634. VPInstruction::BranchOnCount,
  7635. {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
  7636. EB->appendRecipe(BranchBack);
  7637. }
  7638. }
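// Resulting loop-control recipes (illustrative sketch of the VPlan, not an
// exact dump). Without a lane mask for control flow:
//
//   vector.body:
//     %index = CANONICAL-INDUCTION ir<0>, %index.next
//     ...
//   vector.latch:
//     %index.next = canonical-iv-increment %index        ; by VF * UF
//     branch-on-count %index.next, vector-trip-count
//
// With UseLaneMaskForLoopControlFlow an active-lane-mask phi is added to the
// header, the mask for the next iteration is recomputed in the latch from the
// incremented IV, and the terminator becomes branch-on-cond of the negated
// mask.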
  7639. // Add exit values to \p Plan. VPLiveOuts are added for each LCSSA phi in the
  7640. // original exit block.
  7641. static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB,
  7642. VPBasicBlock *MiddleVPBB, Loop *OrigLoop,
  7643. VPlan &Plan) {
  7644. BasicBlock *ExitBB = OrigLoop->getUniqueExitBlock();
  7645. BasicBlock *ExitingBB = OrigLoop->getExitingBlock();
  7646. // Only handle single-exit loops with unique exit blocks for now.
  7647. if (!ExitBB || !ExitBB->getSinglePredecessor() || !ExitingBB)
  7648. return;
  7649. // Introduce VPUsers modeling the exit values.
  7650. for (PHINode &ExitPhi : ExitBB->phis()) {
  7651. Value *IncomingValue =
  7652. ExitPhi.getIncomingValueForBlock(ExitingBB);
  7653. VPValue *V = Plan.getOrAddVPValue(IncomingValue, true);
  7654. Plan.addLiveOut(&ExitPhi, V);
  7655. }
  7656. }
  7657. VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
  7658. VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
  7659. const MapVector<Instruction *, Instruction *> &SinkAfter) {
  7660. SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
  7661. VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
  7662. // ---------------------------------------------------------------------------
  7663. // Pre-construction: record ingredients whose recipes we'll need to further
  7664. // process after constructing the initial VPlan.
  7665. // ---------------------------------------------------------------------------
  7666. // Mark instructions we'll need to sink later and their targets as
  7667. // ingredients whose recipe we'll need to record.
  7668. for (const auto &Entry : SinkAfter) {
  7669. RecipeBuilder.recordRecipeOf(Entry.first);
  7670. RecipeBuilder.recordRecipeOf(Entry.second);
  7671. }
  7672. for (const auto &Reduction : CM.getInLoopReductionChains()) {
  7673. PHINode *Phi = Reduction.first;
  7674. RecurKind Kind =
  7675. Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
  7676. const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
  7677. RecipeBuilder.recordRecipeOf(Phi);
  7678. for (const auto &R : ReductionOperations) {
  7679. RecipeBuilder.recordRecipeOf(R);
  7680. // For min/max reductions, where we have a pair of icmp/select, we also
  7681. // need to record the ICmp recipe, so it can be removed later.
  7682. assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
  7683. "Only min/max recurrences allowed for inloop reductions");
  7684. if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
  7685. RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
  7686. }
  7687. }
  7688. // For each interleave group which is relevant for this (possibly trimmed)
  7689. // Range, add it to the set of groups to be later applied to the VPlan and add
  7690. // placeholders for its members' Recipes which we'll be replacing with a
  7691. // single VPInterleaveRecipe.
  7692. for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
  7693. auto applyIG = [IG, this](ElementCount VF) -> bool {
  7694. return (VF.isVector() && // Query is illegal for VF == 1
  7695. CM.getWideningDecision(IG->getInsertPos(), VF) ==
  7696. LoopVectorizationCostModel::CM_Interleave);
  7697. };
  7698. if (!getDecisionAndClampRange(applyIG, Range))
  7699. continue;
  7700. InterleaveGroups.insert(IG);
  7701. for (unsigned i = 0; i < IG->getFactor(); i++)
  7702. if (Instruction *Member = IG->getMember(i))
  7703. RecipeBuilder.recordRecipeOf(Member);
  7704. };
  7705. // ---------------------------------------------------------------------------
  7706. // Build initial VPlan: Scan the body of the loop in a topological order to
  7707. // visit each basic block after having visited its predecessor basic blocks.
  7708. // ---------------------------------------------------------------------------
  7709. // Create initial VPlan skeleton, starting with a block for the pre-header,
  7710. // followed by a region for the vector loop, followed by the middle block. The
  7711. // skeleton vector loop region contains a header and latch block.
  7712. VPBasicBlock *Preheader = new VPBasicBlock("vector.ph");
  7713. auto Plan = std::make_unique<VPlan>(Preheader);
  7714. VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body");
  7715. VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
  7716. VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
  7717. auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
  7718. VPBlockUtils::insertBlockAfter(TopRegion, Preheader);
  7719. VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
  7720. VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion);
  7721. Instruction *DLInst =
  7722. getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
  7723. addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
  7724. DLInst ? DLInst->getDebugLoc() : DebugLoc(),
  7725. !CM.foldTailByMasking(),
  7726. CM.useActiveLaneMaskForControlFlow());
  7727. // Scan the body of the loop in a topological order to visit each basic block
  7728. // after having visited its predecessor basic blocks.
  7729. LoopBlocksDFS DFS(OrigLoop);
  7730. DFS.perform(LI);
  7731. VPBasicBlock *VPBB = HeaderVPBB;
  7732. SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  7733. for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
  7734. // Relevant instructions from basic block BB will be grouped into VPRecipe
  7735. // ingredients and fill a new VPBasicBlock.
  7736. unsigned VPBBsForBB = 0;
  7737. if (VPBB != HeaderVPBB)
  7738. VPBB->setName(BB->getName());
  7739. Builder.setInsertPoint(VPBB);
  7740. // Introduce each ingredient into VPlan.
  7741. // TODO: Model and preserve debug intrinsics in VPlan.
  7742. for (Instruction &I : BB->instructionsWithoutDebug()) {
  7743. Instruction *Instr = &I;
  7744. // First filter out irrelevant instructions, to ensure no recipes are
  7745. // built for them.
  7746. if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
  7747. continue;
  7748. SmallVector<VPValue *, 4> Operands;
  7749. auto *Phi = dyn_cast<PHINode>(Instr);
  7750. if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
  7751. Operands.push_back(Plan->getOrAddVPValue(
  7752. Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
  7753. } else {
  7754. auto OpRange = Plan->mapToVPValues(Instr->operands());
  7755. Operands = {OpRange.begin(), OpRange.end()};
  7756. }
  7757. // Invariant stores inside loop will be deleted and a single store
  7758. // with the final reduction value will be added to the exit block
  7759. StoreInst *SI;
  7760. if ((SI = dyn_cast<StoreInst>(&I)) &&
  7761. Legal->isInvariantAddressOfReduction(SI->getPointerOperand()))
  7762. continue;
  7763. if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
  7764. Instr, Operands, Range, VPBB, Plan)) {
  7765. // If Instr can be simplified to an existing VPValue, use it.
  7766. if (RecipeOrValue.is<VPValue *>()) {
  7767. auto *VPV = RecipeOrValue.get<VPValue *>();
  7768. Plan->addVPValue(Instr, VPV);
  7769. // If the re-used value is a recipe, register the recipe for the
  7770. // instruction, in case the recipe for Instr needs to be recorded.
  7771. if (VPRecipeBase *R = VPV->getDefiningRecipe())
  7772. RecipeBuilder.setRecipe(Instr, R);
  7773. continue;
  7774. }
  7775. // Otherwise, add the new recipe.
  7776. VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
  7777. for (auto *Def : Recipe->definedValues()) {
  7778. auto *UV = Def->getUnderlyingValue();
  7779. Plan->addVPValue(UV, Def);
  7780. }
  7781. if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
  7782. HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
  7783. // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
  7784. // of the header block. That can happen for truncates of induction
  7785. // variables. Those recipes are moved to the phi section of the header
  7786. // block after applying SinkAfter, which relies on the original
  7787. // position of the trunc.
  7788. assert(isa<TruncInst>(Instr));
  7789. InductionsToMove.push_back(
  7790. cast<VPWidenIntOrFpInductionRecipe>(Recipe));
  7791. }
  7792. RecipeBuilder.setRecipe(Instr, Recipe);
  7793. VPBB->appendRecipe(Recipe);
  7794. continue;
  7795. }
  7796. // Otherwise, if all widening options failed, Instruction is to be
  7797. // replicated. This may create a successor for VPBB.
  7798. VPBasicBlock *NextVPBB =
  7799. RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
  7800. if (NextVPBB != VPBB) {
  7801. VPBB = NextVPBB;
  7802. VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
  7803. : "");
  7804. }
  7805. }
  7806. VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
  7807. VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
  7808. }
  7809. // After here, VPBB should not be used.
  7810. VPBB = nullptr;
  7811. addUsersInExitBlock(HeaderVPBB, MiddleVPBB, OrigLoop, *Plan);
  7812. assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
  7813. !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
  7814. "entry block must be set to a VPRegionBlock having a non-empty entry "
  7815. "VPBasicBlock");
  7816. RecipeBuilder.fixHeaderPhis();
  7817. // ---------------------------------------------------------------------------
  7818. // Transform initial VPlan: Apply previously taken decisions, in order, to
  7819. // bring the VPlan to its final state.
  7820. // ---------------------------------------------------------------------------
  7821. // Apply Sink-After legal constraints.
  7822. auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
  7823. auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
  7824. if (Region && Region->isReplicator()) {
  7825. assert(Region->getNumSuccessors() == 1 &&
  7826. Region->getNumPredecessors() == 1 && "Expected SESE region!");
  7827. assert(R->getParent()->size() == 1 &&
  7828. "A recipe in an original replicator region must be the only "
  7829. "recipe in its block");
  7830. return Region;
  7831. }
  7832. return nullptr;
  7833. };
  7834. for (const auto &Entry : SinkAfter) {
  7835. VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
  7836. VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
  7837. auto *TargetRegion = GetReplicateRegion(Target);
  7838. auto *SinkRegion = GetReplicateRegion(Sink);
  7839. if (!SinkRegion) {
  7840. // If the sink source is not a replicate region, sink the recipe directly.
  7841. if (TargetRegion) {
  7842. // The target is in a replication region, make sure to move Sink to
  7843. // the block after it, not into the replication region itself.
  7844. VPBasicBlock *NextBlock =
  7845. cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
  7846. Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
  7847. } else
  7848. Sink->moveAfter(Target);
  7849. continue;
  7850. }
  7851. // The sink source is in a replicate region. Unhook the region from the CFG.
  7852. auto *SinkPred = SinkRegion->getSinglePredecessor();
  7853. auto *SinkSucc = SinkRegion->getSingleSuccessor();
  7854. VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
  7855. VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
  7856. VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
  7857. if (TargetRegion) {
  7858. // The target recipe is also in a replicate region, move the sink region
  7859. // after the target region.
  7860. auto *TargetSucc = TargetRegion->getSingleSuccessor();
  7861. VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
  7862. VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
  7863. VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
  7864. } else {
  7865. // The sink source is in a replicate region, we need to move the whole
  7866. // replicate region, which should only contain a single recipe in the
  7867. // main block.
  7868. auto *SplitBlock =
  7869. Target->getParent()->splitAt(std::next(Target->getIterator()));
  7870. auto *SplitPred = SplitBlock->getSinglePredecessor();
  7871. VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
  7872. VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
  7873. VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
  7874. }
  7875. }
  7876. VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
  7877. VPlanTransforms::removeRedundantInductionCasts(*Plan);
  7878. // Now that sink-after is done, move induction recipes for optimized truncates
  7879. // to the phi section of the header block.
  7880. for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
  7881. Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
  7882. // Adjust the recipes for any inloop reductions.
  7883. adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExiting()), Plan,
  7884. RecipeBuilder, Range.Start);
  7885. // Introduce a recipe to combine the incoming and previous values of a
  7886. // fixed-order recurrence.
  7887. for (VPRecipeBase &R :
  7888. Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
  7889. auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
  7890. if (!RecurPhi)
  7891. continue;
  7892. VPRecipeBase *PrevRecipe = &RecurPhi->getBackedgeRecipe();
  7893. // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
  7894. // to terminate.
  7895. while (auto *PrevPhi =
  7896. dyn_cast<VPFirstOrderRecurrencePHIRecipe>(PrevRecipe))
  7897. PrevRecipe = &PrevPhi->getBackedgeRecipe();
  7898. VPBasicBlock *InsertBlock = PrevRecipe->getParent();
  7899. auto *Region = GetReplicateRegion(PrevRecipe);
  7900. if (Region)
  7901. InsertBlock = dyn_cast<VPBasicBlock>(Region->getSingleSuccessor());
  7902. if (!InsertBlock) {
  7903. InsertBlock = new VPBasicBlock(Region->getName() + ".succ");
  7904. VPBlockUtils::insertBlockAfter(InsertBlock, Region);
  7905. }
  7906. if (Region || PrevRecipe->isPhi())
  7907. Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
  7908. else
  7909. Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
  7910. auto *RecurSplice = cast<VPInstruction>(
  7911. Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
  7912. {RecurPhi, RecurPhi->getBackedgeValue()}));
  7913. RecurPhi->replaceAllUsesWith(RecurSplice);
  7914. // Set the first operand of RecurSplice to RecurPhi again, after replacing
  7915. // all users.
  7916. RecurSplice->setOperand(0, RecurPhi);
  7917. }
  7918. // Interleave memory: for each Interleave Group we marked earlier as relevant
  7919. // for this VPlan, replace the Recipes widening its memory instructions with a
  7920. // single VPInterleaveRecipe at its insertion point.
  7921. for (const auto *IG : InterleaveGroups) {
  7922. auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
  7923. RecipeBuilder.getRecipe(IG->getInsertPos()));
  7924. SmallVector<VPValue *, 4> StoredValues;
  7925. for (unsigned i = 0; i < IG->getFactor(); ++i)
  7926. if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
  7927. auto *StoreR =
  7928. cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
  7929. StoredValues.push_back(StoreR->getStoredValue());
  7930. }
  7931. auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
  7932. Recipe->getMask());
  7933. VPIG->insertBefore(Recipe);
  7934. unsigned J = 0;
  7935. for (unsigned i = 0; i < IG->getFactor(); ++i)
  7936. if (Instruction *Member = IG->getMember(i)) {
  7937. if (!Member->getType()->isVoidTy()) {
  7938. VPValue *OriginalV = Plan->getVPValue(Member);
  7939. Plan->removeVPValueFor(Member);
  7940. Plan->addVPValue(Member, VPIG->getVPValue(J));
  7941. OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
  7942. J++;
  7943. }
  7944. RecipeBuilder.getRecipe(Member)->eraseFromParent();
  7945. }
  7946. }
  7947. for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
  7948. VF *= 2)
  7949. Plan->addVF(VF);
  7950. Plan->setName("Initial VPlan");
  7951. // From this point onwards, VPlan-to-VPlan transformations may change the plan
  7952. // in ways that accessing values using original IR values is incorrect.
  7953. Plan->disableValue2VPValue();
  7954. VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
  7955. VPlanTransforms::removeDeadRecipes(*Plan);
  7956. bool ShouldSimplify = true;
  7957. while (ShouldSimplify) {
  7958. ShouldSimplify = VPlanTransforms::sinkScalarOperands(*Plan);
  7959. ShouldSimplify |=
  7960. VPlanTransforms::mergeReplicateRegionsIntoSuccessors(*Plan);
  7961. ShouldSimplify |= VPlanTransforms::mergeBlocksIntoPredecessors(*Plan);
  7962. }
  7963. VPlanTransforms::removeRedundantExpandSCEVRecipes(*Plan);
  7964. VPlanTransforms::mergeBlocksIntoPredecessors(*Plan);
  7965. assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
  7966. return Plan;
  7967. }
  7968. VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  7969. // Outer loop handling: They may require CFG and instruction level
  7970. // transformations before even evaluating whether vectorization is profitable.
  7971. // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  7972. // the vectorization pipeline.
  7973. assert(!OrigLoop->isInnermost());
  7974. assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
  7975. // Create new empty VPlan
  7976. auto Plan = std::make_unique<VPlan>();
  7977. // Build hierarchical CFG
  7978. VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  7979. HCFGBuilder.buildHierarchicalCFG();
  7980. for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
  7981. VF *= 2)
  7982. Plan->addVF(VF);
  7983. SmallPtrSet<Instruction *, 1> DeadInstructions;
  7984. VPlanTransforms::VPInstructionsToVPRecipes(
  7985. OrigLoop, Plan,
  7986. [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
  7987. DeadInstructions, *PSE.getSE(), *TLI);
  7988. // Remove the existing terminator of the exiting block of the top-most region.
  7989. // A BranchOnCount will be added instead when adding the canonical IV recipes.
  7990. auto *Term =
  7991. Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
  7992. Term->eraseFromParent();
  7993. addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
  7994. true, CM.useActiveLaneMaskForControlFlow());
  7995. return Plan;
  7996. }
  7997. // Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be converted
  7999. // to reductions, with one operand being vector and the other being the scalar
  8000. // reduction chain. For other reductions, a select is introduced between the phi
  8001. // and live-out recipes when folding the tail.
  8002. void LoopVectorizationPlanner::adjustRecipesForReductions(
  8003. VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
  8004. ElementCount MinVF) {
  8005. for (const auto &Reduction : CM.getInLoopReductionChains()) {
  8006. PHINode *Phi = Reduction.first;
  8007. const RecurrenceDescriptor &RdxDesc =
  8008. Legal->getReductionVars().find(Phi)->second;
  8009. const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
  8010. if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
  8011. continue;
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
  8014. // which of the two operands will remain scalar and which will be reduced.
  8015. // For minmax the chain will be the select instructions.
  8016. Instruction *Chain = Phi;
  8017. for (Instruction *R : ReductionOperations) {
  8018. VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
  8019. RecurKind Kind = RdxDesc.getRecurrenceKind();
  8020. VPValue *ChainOp = Plan->getVPValue(Chain);
  8021. unsigned FirstOpId;
  8022. assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
  8023. "Only min/max recurrences allowed for inloop reductions");
  8024. // Recognize a call to the llvm.fmuladd intrinsic.
  8025. bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
  8026. assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
  8027. "Expected instruction to be a call to the llvm.fmuladd intrinsic");
  8028. if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
  8029. assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
  8030. "Expected to replace a VPWidenSelectSC");
  8031. FirstOpId = 1;
  8032. } else {
  8033. assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
  8034. (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
  8035. "Expected to replace a VPWidenSC");
  8036. FirstOpId = 0;
  8037. }
  8038. unsigned VecOpId =
  8039. R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
  8040. VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
  8041. VPValue *CondOp = nullptr;
  8042. if (CM.blockNeedsPredicationForAnyReason(R->getParent())) {
  8043. VPBuilder::InsertPointGuard Guard(Builder);
  8044. Builder.setInsertPoint(WidenRecipe->getParent(),
  8045. WidenRecipe->getIterator());
  8046. CondOp = RecipeBuilder.createBlockInMask(R->getParent(), Plan);
  8047. }
  8048. if (IsFMulAdd) {
  8049. // If the instruction is a call to the llvm.fmuladd intrinsic then we
  8050. // need to create an fmul recipe to use as the vector operand for the
  8051. // fadd reduction.
  8052. VPInstruction *FMulRecipe = new VPInstruction(
  8053. Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
  8054. FMulRecipe->setFastMathFlags(R->getFastMathFlags());
  8055. WidenRecipe->getParent()->insert(FMulRecipe,
  8056. WidenRecipe->getIterator());
  8057. VecOp = FMulRecipe;
  8058. }
  8059. VPReductionRecipe *RedRecipe =
  8060. new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
  8061. WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
  8062. Plan->removeVPValueFor(R);
  8063. Plan->addVPValue(R, RedRecipe);
  8064. // Append the recipe to the end of the VPBasicBlock because we need to
8065. // ensure that it comes after all of its inputs, including CondOp.
  8066. WidenRecipe->getParent()->appendRecipe(RedRecipe);
  8068. WidenRecipe->eraseFromParent();
  8069. if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
  8070. VPRecipeBase *CompareRecipe =
  8071. RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
  8072. assert(isa<VPWidenRecipe>(CompareRecipe) &&
  8073. "Expected to replace a VPWidenSC");
  8074. assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
  8075. "Expected no remaining users");
  8076. CompareRecipe->eraseFromParent();
  8077. }
  8078. Chain = R;
  8079. }
  8080. }
  8081. // If tail is folded by masking, introduce selects between the phi
  8082. // and the live-out instruction of each reduction, at the beginning of the
  8083. // dedicated latch block.
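// Illustration: with tail folding, the select keeps the incoming phi value
// for lanes that are masked off, roughly
//   %rdx.sel = select <header mask>, %rdx.backedge, %rdx.phi
// so inactive lanes of the final iteration do not corrupt the reduction.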
  8084. if (CM.foldTailByMasking()) {
  8085. Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
  8086. for (VPRecipeBase &R :
  8087. Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
  8088. VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
  8089. if (!PhiR || PhiR->isInLoop())
  8090. continue;
  8091. VPValue *Cond =
  8092. RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
  8093. VPValue *Red = PhiR->getBackedgeValue();
  8094. assert(Red->getDefiningRecipe()->getParent() != LatchVPBB &&
  8095. "reduction recipe must be defined before latch");
  8096. Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
  8097. }
  8098. }
  8099. }
  8100. #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  8101. void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
  8102. VPSlotTracker &SlotTracker) const {
  8103. O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  8104. IG->getInsertPos()->printAsOperand(O, false);
  8105. O << ", ";
  8106. getAddr()->printAsOperand(O, SlotTracker);
  8107. VPValue *Mask = getMask();
  8108. if (Mask) {
  8109. O << ", ";
  8110. Mask->printAsOperand(O, SlotTracker);
  8111. }
  8112. unsigned OpIdx = 0;
  8113. for (unsigned i = 0; i < IG->getFactor(); ++i) {
  8114. if (!IG->getMember(i))
  8115. continue;
  8116. if (getNumStoreOperands() > 0) {
  8117. O << "\n" << Indent << " store ";
  8118. getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
  8119. O << " to index " << i;
  8120. } else {
  8121. O << "\n" << Indent << " ";
  8122. getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
  8123. O << " = load from index " << i;
  8124. }
  8125. ++OpIdx;
  8126. }
  8127. }
  8128. #endif
  8129. void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  8130. assert(!State.Instance && "Int or FP induction being replicated.");
  8131. Value *Start = getStartValue()->getLiveInIRValue();
  8132. const InductionDescriptor &ID = getInductionDescriptor();
  8133. TruncInst *Trunc = getTruncInst();
  8134. IRBuilderBase &Builder = State.Builder;
  8135. assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
  8136. assert(State.VF.isVector() && "must have vector VF");
  8137. // The value from the original loop to which we are mapping the new induction
  8138. // variable.
  8139. Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
  8140. // Fast-math-flags propagate from the original induction instruction.
  8141. IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  8142. if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
  8143. Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
  8144. // Now do the actual transformations, and start with fetching the step value.
  8145. Value *Step = State.get(getStepValue(), VPIteration(0, 0));
  8146. assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
  8147. "Expected either an induction phi-node or a truncate of it!");
  8148. // Construct the initial value of the vector IV in the vector loop preheader
  8149. auto CurrIP = Builder.saveIP();
  8150. BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
  8151. Builder.SetInsertPoint(VectorPH->getTerminator());
  8152. if (isa<TruncInst>(EntryVal)) {
  8153. assert(Start->getType()->isIntegerTy() &&
  8154. "Truncation requires an integer type");
  8155. auto *TruncType = cast<IntegerType>(EntryVal->getType());
  8156. Step = Builder.CreateTrunc(Step, TruncType);
  8157. Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  8158. }
  8159. Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
  8160. Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
  8161. Value *SteppedStart = getStepVector(
  8162. SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
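// Illustration (hypothetical values): for an integer induction with start 7,
// step 2 and a fixed VF of 4, SteppedStart is the vector <7, 9, 11, 13>.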
  8163. // We create vector phi nodes for both integer and floating-point induction
  8164. // variables. Here, we determine the kind of arithmetic we will perform.
  8165. Instruction::BinaryOps AddOp;
  8166. Instruction::BinaryOps MulOp;
  8167. if (Step->getType()->isIntegerTy()) {
  8168. AddOp = Instruction::Add;
  8169. MulOp = Instruction::Mul;
  8170. } else {
  8171. AddOp = ID.getInductionOpcode();
  8172. MulOp = Instruction::FMul;
  8173. }
  8174. // Multiply the vectorization factor by the step using integer or
  8175. // floating-point arithmetic as appropriate.
  8176. Type *StepType = Step->getType();
  8177. Value *RuntimeVF;
  8178. if (Step->getType()->isFloatingPointTy())
  8179. RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
  8180. else
  8181. RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
  8182. Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
  8183. // Create a vector splat to use in the induction update.
  8184. //
  8185. // FIXME: If the step is non-constant, we create the vector splat with
  8186. // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  8187. // handle a constant vector splat.
  8188. Value *SplatVF = isa<Constant>(Mul)
  8189. ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
  8190. : Builder.CreateVectorSplat(State.VF, Mul);
  8191. Builder.restoreIP(CurrIP);
  8192. // We may need to add the step a number of times, depending on the unroll
  8193. // factor. The last of those goes into the PHI.
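// Illustration (hypothetical values): with a fixed VF of 4, UF of 2 and an
// integer step S, part 0 uses the phi %vec.ind, part 1 uses
// %vec.ind + <4*S, 4*S, 4*S, 4*S>, and the final add %vec.ind + splat(8*S)
// becomes %vec.ind.next, which feeds back into the phi.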
  8194. PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
  8195. &*State.CFG.PrevBB->getFirstInsertionPt());
  8196. VecInd->setDebugLoc(EntryVal->getDebugLoc());
  8197. Instruction *LastInduction = VecInd;
  8198. for (unsigned Part = 0; Part < State.UF; ++Part) {
  8199. State.set(this, LastInduction, Part);
  8200. if (isa<TruncInst>(EntryVal))
  8201. State.addMetadata(LastInduction, EntryVal);
  8202. LastInduction = cast<Instruction>(
  8203. Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
  8204. LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  8205. }
  8206. LastInduction->setName("vec.ind.next");
  8207. VecInd->addIncoming(SteppedStart, VectorPH);
  8208. // Add induction update using an incorrect block temporarily. The phi node
  8209. // will be fixed after VPlan execution. Note that at this point the latch
  8210. // block cannot be used, as it does not exist yet.
  8211. // TODO: Model increment value in VPlan, by turning the recipe into a
  8212. // multi-def and a subclass of VPHeaderPHIRecipe.
  8213. VecInd->addIncoming(LastInduction, VectorPH);
  8214. }
  8215. void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
  8216. assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
  8217. "Not a pointer induction according to InductionDescriptor!");
  8218. assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
  8219. "Unexpected type.");
  8220. auto *IVR = getParent()->getPlan()->getCanonicalIV();
  8221. PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
  8222. if (onlyScalarsGenerated(State.VF)) {
  8223. // This is the normalized GEP that starts counting at zero.
  8224. Value *PtrInd = State.Builder.CreateSExtOrTrunc(
  8225. CanonicalIV, IndDesc.getStep()->getType());
  8226. // Determine the number of scalars we need to generate for each unroll
  8227. // iteration. If the instruction is uniform, we only need to generate the
  8228. // first lane. Otherwise, we generate all VF values.
  8229. bool IsUniform = vputils::onlyFirstLaneUsed(this);
  8230. assert((IsUniform || !State.VF.isScalable()) &&
  8231. "Cannot scalarize a scalable VF");
  8232. unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
  8233. for (unsigned Part = 0; Part < State.UF; ++Part) {
  8234. Value *PartStart =
  8235. createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part);
  8236. for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
  8237. Value *Idx = State.Builder.CreateAdd(
  8238. PartStart, ConstantInt::get(PtrInd->getType(), Lane));
  8239. Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx);
  8240. Value *Step = State.get(getOperand(1), VPIteration(0, Part));
  8241. Value *SclrGep = emitTransformedIndex(
  8242. State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc);
  8243. SclrGep->setName("next.gep");
  8244. State.set(this, SclrGep, VPIteration(Part, Lane));
  8245. }
  8246. }
  8247. return;
  8248. }
  8249. assert(isa<SCEVConstant>(IndDesc.getStep()) &&
  8250. "Induction step not a SCEV constant!");
  8251. Type *PhiType = IndDesc.getStep()->getType();
  8252. // Build a pointer phi
  8253. Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
  8254. Type *ScStValueType = ScalarStartValue->getType();
  8255. PHINode *NewPointerPhi =
  8256. PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
  8257. BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
  8258. NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
  8259. // A pointer induction, performed by using a gep
  8260. Instruction *InductionLoc = &*State.Builder.GetInsertPoint();
  8261. Value *ScalarStepValue = State.get(getOperand(1), VPIteration(0, 0));
  8262. Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
  8263. Value *NumUnrolledElems =
  8264. State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
  8265. Value *InductionGEP = GetElementPtrInst::Create(
  8266. IndDesc.getElementType(), NewPointerPhi,
  8267. State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
  8268. InductionLoc);
  8269. // Add induction update using an incorrect block temporarily. The phi node
  8270. // will be fixed after VPlan execution. Note that at this point the latch
  8271. // block cannot be used, as it does not exist yet.
  8272. // TODO: Model increment value in VPlan, by turning the recipe into a
  8273. // multi-def and a subclass of VPHeaderPHIRecipe.
  8274. NewPointerPhi->addIncoming(InductionGEP, VectorPH);
  8275. // Create UF many actual address geps that use the pointer
  8276. // phi as base and a vectorized version of the step value
  8277. // (<step*0, ..., step*N>) as offset.
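// Illustration (hypothetical values): with a fixed VF of 4, UF of 2 and an
// element step of 1, part 0 uses offsets <0, 1, 2, 3> and part 1 uses
// <4, 5, 6, 7> relative to %pointer.phi, while %ptr.ind above advances the
// phi by 8 elements per vector iteration.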
  8278. for (unsigned Part = 0; Part < State.UF; ++Part) {
  8279. Type *VecPhiType = VectorType::get(PhiType, State.VF);
  8280. Value *StartOffsetScalar =
  8281. State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
  8282. Value *StartOffset =
  8283. State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
  8284. // Create a vector of consecutive numbers from zero to VF.
  8285. StartOffset = State.Builder.CreateAdd(
  8286. StartOffset, State.Builder.CreateStepVector(VecPhiType));
  8287. assert(ScalarStepValue == State.get(getOperand(1), VPIteration(0, Part)) &&
  8288. "scalar step must be the same across all parts");
  8289. Value *GEP = State.Builder.CreateGEP(
  8290. IndDesc.getElementType(), NewPointerPhi,
  8291. State.Builder.CreateMul(
  8292. StartOffset,
  8293. State.Builder.CreateVectorSplat(State.VF, ScalarStepValue),
  8294. "vector.gep"));
  8295. State.set(this, GEP, Part);
  8296. }
  8297. }
  8298. void VPDerivedIVRecipe::execute(VPTransformState &State) {
  8299. assert(!State.Instance && "VPDerivedIVRecipe being replicated.");
  8300. // Fast-math-flags propagate from the original induction instruction.
  8301. IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
  8302. if (IndDesc.getInductionBinOp() &&
  8303. isa<FPMathOperator>(IndDesc.getInductionBinOp()))
  8304. State.Builder.setFastMathFlags(
  8305. IndDesc.getInductionBinOp()->getFastMathFlags());
  8306. Value *Step = State.get(getStepValue(), VPIteration(0, 0));
  8307. Value *CanonicalIV = State.get(getCanonicalIV(), VPIteration(0, 0));
  8308. Value *DerivedIV =
  8309. emitTransformedIndex(State.Builder, CanonicalIV,
  8310. getStartValue()->getLiveInIRValue(), Step, IndDesc);
  8311. DerivedIV->setName("offset.idx");
  8312. if (ResultTy != DerivedIV->getType()) {
  8313. assert(Step->getType()->isIntegerTy() &&
  8314. "Truncation requires an integer step");
  8315. DerivedIV = State.Builder.CreateTrunc(DerivedIV, ResultTy);
  8316. }
  8317. assert(DerivedIV != CanonicalIV && "IV didn't need transforming?");
  8318. State.set(this, DerivedIV, VPIteration(0, 0));
  8319. }
  8320. void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
  8321. // Fast-math-flags propagate from the original induction instruction.
  8322. IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
  8323. if (IndDesc.getInductionBinOp() &&
  8324. isa<FPMathOperator>(IndDesc.getInductionBinOp()))
  8325. State.Builder.setFastMathFlags(
  8326. IndDesc.getInductionBinOp()->getFastMathFlags());
  8327. Value *BaseIV = State.get(getOperand(0), VPIteration(0, 0));
  8328. Value *Step = State.get(getStepValue(), VPIteration(0, 0));
  8329. buildScalarSteps(BaseIV, Step, IndDesc, this, State);
  8330. }
  8331. void VPInterleaveRecipe::execute(VPTransformState &State) {
  8332. assert(!State.Instance && "Interleave group being replicated.");
  8333. State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
  8334. getStoredValues(), getMask());
  8335. }
  8336. void VPReductionRecipe::execute(VPTransformState &State) {
  8337. assert(!State.Instance && "Reduction being replicated.");
  8338. Value *PrevInChain = State.get(getChainOp(), 0);
  8339. RecurKind Kind = RdxDesc->getRecurrenceKind();
  8340. bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
  8341. // Propagate the fast-math flags carried by the underlying instruction.
  8342. IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  8343. State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
  8344. for (unsigned Part = 0; Part < State.UF; ++Part) {
  8345. Value *NewVecOp = State.get(getVecOp(), Part);
  8346. if (VPValue *Cond = getCondOp()) {
  8347. Value *NewCond = State.get(Cond, Part);
  8348. VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
  8349. Value *Iden = RdxDesc->getRecurrenceIdentity(
  8350. Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
  8351. Value *IdenVec =
  8352. State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
  8353. Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
  8354. NewVecOp = Select;
  8355. }
  8356. Value *NewRed;
  8357. Value *NextInChain;
  8358. if (IsOrdered) {
  8359. if (State.VF.isVector())
  8360. NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
  8361. PrevInChain);
  8362. else
  8363. NewRed = State.Builder.CreateBinOp(
  8364. (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
  8365. NewVecOp);
  8366. PrevInChain = NewRed;
  8367. } else {
  8368. PrevInChain = State.get(getChainOp(), Part);
  8369. NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
  8370. }
  8371. if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
  8372. NextInChain =
  8373. createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
  8374. NewRed, PrevInChain);
  8375. } else if (IsOrdered)
  8376. NextInChain = NewRed;
  8377. else
  8378. NextInChain = State.Builder.CreateBinOp(
  8379. (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
  8380. PrevInChain);
  8381. State.set(this, NextInChain, Part);
  8382. }
  8383. }
  8384. void VPReplicateRecipe::execute(VPTransformState &State) {
  8385. Instruction *UI = getUnderlyingInstr();
  8386. if (State.Instance) { // Generate a single instance.
  8387. assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
  8388. State.ILV->scalarizeInstruction(UI, this, *State.Instance,
  8389. IsPredicated, State);
  8390. // Insert scalar instance packing it into a vector.
  8391. if (AlsoPack && State.VF.isVector()) {
  8392. // If we're constructing lane 0, initialize to start from poison.
  8393. if (State.Instance->Lane.isFirstLane()) {
  8394. assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
  8395. Value *Poison = PoisonValue::get(
  8396. VectorType::get(UI->getType(), State.VF));
  8397. State.set(this, Poison, State.Instance->Part);
  8398. }
  8399. State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
  8400. }
  8401. return;
  8402. }
  8403. if (IsUniform) {
  8404. // If the recipe is uniform across all parts (instead of just per VF), only
  8405. // generate a single instance.
  8406. if ((isa<LoadInst>(UI) || isa<StoreInst>(UI)) &&
  8407. all_of(operands(), [](VPValue *Op) {
  8408. return Op->isDefinedOutsideVectorRegions();
  8409. })) {
  8410. State.ILV->scalarizeInstruction(UI, this, VPIteration(0, 0), IsPredicated,
  8411. State);
  8412. if (user_begin() != user_end()) {
  8413. for (unsigned Part = 1; Part < State.UF; ++Part)
  8414. State.set(this, State.get(this, VPIteration(0, 0)),
  8415. VPIteration(Part, 0));
  8416. }
  8417. return;
  8418. }
  8419. // Uniform within VL means we need to generate lane 0 only for each
  8420. // unrolled copy.
  8421. for (unsigned Part = 0; Part < State.UF; ++Part)
  8422. State.ILV->scalarizeInstruction(UI, this, VPIteration(Part, 0),
  8423. IsPredicated, State);
  8424. return;
  8425. }
8426. // A store of a loop-varying value to a loop-invariant address only
8427. // needs the last copy of the store.
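// Illustration: for a loop that repeatedly executes `*Q = A[i]` with Q loop
// invariant, only the value stored by the final scalar iteration is
// observable, so a single store for the last part's last lane suffices.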
  8428. if (isa<StoreInst>(UI) && !getOperand(1)->hasDefiningRecipe()) {
  8429. auto Lane = VPLane::getLastLaneForVF(State.VF);
8430. State.ILV->scalarizeInstruction(UI, this, VPIteration(State.UF - 1, Lane),
8431. IsPredicated, State);
  8432. return;
  8433. }
  8434. // Generate scalar instances for all VF lanes of all UF parts.
  8435. assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
  8436. const unsigned EndLane = State.VF.getKnownMinValue();
  8437. for (unsigned Part = 0; Part < State.UF; ++Part)
  8438. for (unsigned Lane = 0; Lane < EndLane; ++Lane)
  8439. State.ILV->scalarizeInstruction(UI, this, VPIteration(Part, Lane),
  8440. IsPredicated, State);
  8441. }
  8442. void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  8443. VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
  8444. // Attempt to issue a wide load.
  8445. LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
  8446. StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
  8447. assert((LI || SI) && "Invalid Load/Store instruction");
  8448. assert((!SI || StoredValue) && "No stored value provided for widened store");
  8449. assert((!LI || !StoredValue) && "Stored value provided for widened load");
  8450. Type *ScalarDataTy = getLoadStoreType(&Ingredient);
  8451. auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
  8452. const Align Alignment = getLoadStoreAlignment(&Ingredient);
  8453. bool CreateGatherScatter = !Consecutive;
  8454. auto &Builder = State.Builder;
  8455. InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
  8456. bool isMaskRequired = getMask();
  8457. if (isMaskRequired)
  8458. for (unsigned Part = 0; Part < State.UF; ++Part)
  8459. BlockInMaskParts[Part] = State.get(getMask(), Part);
  8460. const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
  8461. // Calculate the pointer for the specific unroll-part.
  8462. GetElementPtrInst *PartPtr = nullptr;
  8463. bool InBounds = false;
  8464. if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
  8465. InBounds = gep->isInBounds();
  8466. if (Reverse) {
  8467. // If the address is consecutive but reversed, then the
  8468. // wide store needs to start at the last vector element.
  8469. // RunTimeVF = VScale * VF.getKnownMinValue()
8470. // For fixed-width vectors, VScale is 1, so RunTimeVF = VF.getKnownMinValue()
  8471. Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
  8472. // NumElt = -Part * RunTimeVF
  8473. Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
  8474. // LastLane = 1 - RunTimeVF
  8475. Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
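// Illustration (hypothetical values): for a fixed VF of 4, RunTimeVF is 4;
// part 0 gets NumElt = 0 and LastLane = -3, so the wide access covers
// Ptr[-3..0], and part 1 gets NumElt = -4, covering Ptr[-7..-4]. The loaded
// or stored data itself is reversed separately.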
  8476. PartPtr =
  8477. cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
  8478. PartPtr->setIsInBounds(InBounds);
  8479. PartPtr = cast<GetElementPtrInst>(
  8480. Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
  8481. PartPtr->setIsInBounds(InBounds);
  8482. if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
  8483. BlockInMaskParts[Part] =
  8484. Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
  8485. } else {
  8486. Value *Increment =
  8487. createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
  8488. PartPtr = cast<GetElementPtrInst>(
  8489. Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
  8490. PartPtr->setIsInBounds(InBounds);
  8491. }
  8492. unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
  8493. return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  8494. };
  8495. // Handle Stores:
  8496. if (SI) {
  8497. State.setDebugLocFromInst(SI);
  8498. for (unsigned Part = 0; Part < State.UF; ++Part) {
  8499. Instruction *NewSI = nullptr;
  8500. Value *StoredVal = State.get(StoredValue, Part);
  8501. if (CreateGatherScatter) {
  8502. Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
  8503. Value *VectorGep = State.get(getAddr(), Part);
  8504. NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
  8505. MaskPart);
  8506. } else {
  8507. if (Reverse) {
  8508. // If we store to reverse consecutive memory locations, then we need
  8509. // to reverse the order of elements in the stored value.
  8510. StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
  8511. // We don't want to update the value in the map as it might be used in
  8512. // another expression. So don't call resetVectorValue(StoredVal).
  8513. }
  8514. auto *VecPtr =
  8515. CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
  8516. if (isMaskRequired)
  8517. NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
  8518. BlockInMaskParts[Part]);
  8519. else
  8520. NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
  8521. }
  8522. State.addMetadata(NewSI, SI);
  8523. }
  8524. return;
  8525. }
  8526. // Handle loads.
  8527. assert(LI && "Must have a load instruction");
  8528. State.setDebugLocFromInst(LI);
  8529. for (unsigned Part = 0; Part < State.UF; ++Part) {
  8530. Value *NewLI;
  8531. if (CreateGatherScatter) {
  8532. Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
  8533. Value *VectorGep = State.get(getAddr(), Part);
  8534. NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
  8535. nullptr, "wide.masked.gather");
  8536. State.addMetadata(NewLI, LI);
  8537. } else {
  8538. auto *VecPtr =
  8539. CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
  8540. if (isMaskRequired)
  8541. NewLI = Builder.CreateMaskedLoad(
  8542. DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
  8543. PoisonValue::get(DataTy), "wide.masked.load");
  8544. else
  8545. NewLI =
  8546. Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
  8547. // Add metadata to the load, but setVectorValue to the reverse shuffle.
  8548. State.addMetadata(NewLI, LI);
  8549. if (Reverse)
  8550. NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
  8551. }
  8552. State.set(getVPSingleValue(), NewLI, Part);
  8553. }
  8554. }
  8555. // Determine how to lower the scalar epilogue, which depends on 1) optimising
  8556. // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
  8557. // predication, and 4) a TTI hook that analyses whether the loop is suitable
  8558. // for predication.
  8559. static ScalarEpilogueLowering getScalarEpilogueLowering(
  8560. Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
  8561. BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
  8562. AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
  8563. LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI) {
  8564. // 1) OptSize takes precedence over all other options, i.e. if this is set,
  8565. // don't look at hints or options, and don't request a scalar epilogue.
  8566. // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  8567. // LoopAccessInfo (due to code dependency and not being able to reliably get
  8568. // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  8569. // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  8570. // versioning when the vectorization is forced, unlike hasOptSize. So revert
  8571. // back to the old way and vectorize with versioning when forced. See D81345.)
  8572. if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
  8573. PGSOQueryType::IRPass) &&
  8574. Hints.getForce() != LoopVectorizeHints::FK_Enabled))
  8575. return CM_ScalarEpilogueNotAllowedOptSize;
  8576. // 2) If set, obey the directives
  8577. if (PreferPredicateOverEpilogue.getNumOccurrences()) {
  8578. switch (PreferPredicateOverEpilogue) {
  8579. case PreferPredicateTy::ScalarEpilogue:
  8580. return CM_ScalarEpilogueAllowed;
  8581. case PreferPredicateTy::PredicateElseScalarEpilogue:
  8582. return CM_ScalarEpilogueNotNeededUsePredicate;
  8583. case PreferPredicateTy::PredicateOrDontVectorize:
  8584. return CM_ScalarEpilogueNotAllowedUsePredicate;
8585. }
  8586. }
  8587. // 3) If set, obey the hints
  8588. switch (Hints.getPredicate()) {
  8589. case LoopVectorizeHints::FK_Enabled:
  8590. return CM_ScalarEpilogueNotNeededUsePredicate;
  8591. case LoopVectorizeHints::FK_Disabled:
  8592. return CM_ScalarEpilogueAllowed;
8593. }
  8594. // 4) if the TTI hook indicates this is profitable, request predication.
  8595. if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, &LVL, IAI))
  8596. return CM_ScalarEpilogueNotNeededUsePredicate;
  8597. return CM_ScalarEpilogueAllowed;
  8598. }
  8599. Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  8600. // If Values have been set for this Def return the one relevant for \p Part.
  8601. if (hasVectorValue(Def, Part))
  8602. return Data.PerPartOutput[Def][Part];
  8603. if (!hasScalarValue(Def, {Part, 0})) {
  8604. Value *IRV = Def->getLiveInIRValue();
  8605. Value *B = ILV->getBroadcastInstrs(IRV);
  8606. set(Def, B, Part);
  8607. return B;
  8608. }
  8609. Value *ScalarValue = get(Def, {Part, 0});
  8610. // If we aren't vectorizing, we can just copy the scalar map values over
  8611. // to the vector map.
  8612. if (VF.isScalar()) {
  8613. set(Def, ScalarValue, Part);
  8614. return ScalarValue;
  8615. }
  8616. bool IsUniform = vputils::isUniformAfterVectorization(Def);
  8617. unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  8618. // Check if there is a scalar value for the selected lane.
  8619. if (!hasScalarValue(Def, {Part, LastLane})) {
  8620. // At the moment, VPWidenIntOrFpInductionRecipes and VPScalarIVStepsRecipes can also be uniform.
  8621. assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
  8622. isa<VPScalarIVStepsRecipe>(Def->getDefiningRecipe())) &&
  8623. "unexpected recipe found to be invariant");
  8624. IsUniform = true;
  8625. LastLane = 0;
  8626. }
  8627. auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  8628. // Set the insert point after the last scalarized instruction or after the
  8629. // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  8630. // will directly follow the scalar definitions.
  8631. auto OldIP = Builder.saveIP();
  8632. auto NewIP =
  8633. isa<PHINode>(LastInst)
  8634. ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
  8635. : std::next(BasicBlock::iterator(LastInst));
  8636. Builder.SetInsertPoint(&*NewIP);
  8637. // However, if we are vectorizing, we need to construct the vector values.
  8638. // If the value is known to be uniform after vectorization, we can just
  8639. // broadcast the scalar value corresponding to lane zero for each unroll
  8640. // iteration. Otherwise, we construct the vector values using
  8641. // insertelement instructions. Since the resulting vectors are stored in
  8642. // State, we will only generate the insertelements once.
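// Illustration: for a non-uniform definition and a fixed VF of 4, the vector
// value is built lane by lane, roughly
//   %v0 = insertelement <4 x ty> poison, ty %s0, i32 0
//   %v1 = insertelement <4 x ty> %v0,    ty %s1, i32 1
//   ... and so on up to lane 3.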
  8643. Value *VectorValue = nullptr;
  8644. if (IsUniform) {
  8645. VectorValue = ILV->getBroadcastInstrs(ScalarValue);
  8646. set(Def, VectorValue, Part);
  8647. } else {
8648. // Initialize packing with insertelements to start from poison.
8649. assert(!VF.isScalable() && "VF is assumed to be non scalable.");
8650. Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
8651. set(Def, Poison, Part);
  8652. for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
  8653. ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
  8654. VectorValue = get(Def, Part);
  8655. }
  8656. Builder.restoreIP(OldIP);
  8657. return VectorValue;
  8658. }
  8659. // Process the loop in the VPlan-native vectorization path. This path builds
8660. // VPlan upfront in the vectorization pipeline, which allows applying
  8661. // VPlan-to-VPlan transformations from the very beginning without modifying the
  8662. // input LLVM IR.
  8663. static bool processLoopInVPlanNativePath(
  8664. Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
  8665. LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
  8666. TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
  8667. OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
  8668. ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
  8669. LoopVectorizationRequirements &Requirements) {
  8670. if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
  8671. LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
  8672. return false;
  8673. }
  8674. assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  8675. Function *F = L->getHeader()->getParent();
  8676. InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
  8677. ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
  8678. F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL, &IAI);
  8679. LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
  8680. &Hints, IAI);
  8681. // Use the planner for outer loop vectorization.
  8682. // TODO: CM is not used at this point inside the planner. Turn CM into an
  8683. // optional argument if we don't need it in the future.
  8684. LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, ORE);
  8685. // Get user vectorization factor.
  8686. ElementCount UserVF = Hints.getWidth();
  8687. CM.collectElementTypesForWidening();
  8688. // Plan how to best vectorize, return the best VF and its cost.
  8689. const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
  8690. // If we are stress testing VPlan builds, do not attempt to generate vector
  8691. // code. Masked vector code generation support will follow soon.
  8692. // Also, do not attempt to vectorize if no vector code will be produced.
  8693. if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
  8694. return false;
  8695. VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
  8696. {
  8697. GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
  8698. F->getParent()->getDataLayout());
  8699. InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width,
  8700. VF.Width, 1, LVL, &CM, BFI, PSI, Checks);
  8701. LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
  8702. << L->getHeader()->getParent()->getName() << "\"\n");
  8703. LVP.executePlan(VF.Width, 1, BestPlan, LB, DT, false);
  8704. }
  8705. // Mark the loop as already vectorized to avoid vectorizing again.
  8706. Hints.setAlreadyVectorized();
  8707. assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  8708. return true;
  8709. }
  8710. // Emit a remark if there are stores to floats that required a floating point
8711. // extension. If the vectorized loop operates on a wider floating-point type,
8712. // there will be a performance penalty from the conversion overhead and the
8713. // change in the vector width.
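// Illustration: if a loop stores `float` values that were computed via
// `double` (an fpext somewhere in the chain feeding the store), the
// vectorized loop needs <N x double> arithmetic plus conversions, halving the
// effective vector width; the walk below reports the offending fpext.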
  8714. static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  8715. SmallVector<Instruction *, 4> Worklist;
  8716. for (BasicBlock *BB : L->getBlocks()) {
  8717. for (Instruction &Inst : *BB) {
  8718. if (auto *S = dyn_cast<StoreInst>(&Inst)) {
  8719. if (S->getValueOperand()->getType()->isFloatTy())
  8720. Worklist.push_back(S);
  8721. }
  8722. }
  8723. }
8724. // Traverse the floating point stores upwards, searching for floating point
  8725. // conversions.
  8726. SmallPtrSet<const Instruction *, 4> Visited;
  8727. SmallPtrSet<const Instruction *, 4> EmittedRemark;
  8728. while (!Worklist.empty()) {
  8729. auto *I = Worklist.pop_back_val();
  8730. if (!L->contains(I))
  8731. continue;
  8732. if (!Visited.insert(I).second)
  8733. continue;
  8734. // Emit a remark if the floating point store required a floating
  8735. // point conversion.
  8736. // TODO: More work could be done to identify the root cause such as a
  8737. // constant or a function return type and point the user to it.
  8738. if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
  8739. ORE->emit([&]() {
  8740. return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
  8741. I->getDebugLoc(), L->getHeader())
  8742. << "floating point conversion changes vector width. "
  8743. << "Mixed floating point precision requires an up/down "
  8744. << "cast that will negatively impact performance.";
  8745. });
  8746. for (Use &Op : I->operands())
  8747. if (auto *OpI = dyn_cast<Instruction>(Op))
  8748. Worklist.push_back(OpI);
  8749. }
  8750. }
  8751. static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
  8752. VectorizationFactor &VF,
  8753. std::optional<unsigned> VScale, Loop *L,
  8754. ScalarEvolution &SE) {
  8755. InstructionCost CheckCost = Checks.getCost();
  8756. if (!CheckCost.isValid())
  8757. return false;
8758. // When only interleaving, the scalar and vector cost will be equal, which in
8759. // turn would lead to a divide by 0. Fall back to the hard threshold.
  8760. if (VF.Width.isScalar()) {
  8761. if (CheckCost > VectorizeMemoryCheckThreshold) {
  8762. LLVM_DEBUG(
  8763. dbgs()
  8764. << "LV: Interleaving only is not profitable due to runtime checks\n");
  8765. return false;
  8766. }
  8767. return true;
  8768. }
8769. // The scalar cost should only be 0 when vectorizing with a user specified VF/IC.
// In those cases, runtime checks should always be generated.
  8770. double ScalarC = *VF.ScalarCost.getValue();
  8771. if (ScalarC == 0)
  8772. return true;
  8773. // First, compute the minimum iteration count required so that the vector
  8774. // loop outperforms the scalar loop.
  8775. // The total cost of the scalar loop is
  8776. // ScalarC * TC
  8777. // where
  8778. // * TC is the actual trip count of the loop.
  8779. // * ScalarC is the cost of a single scalar iteration.
  8780. //
  8781. // The total cost of the vector loop is
  8782. // RtC + VecC * (TC / VF) + EpiC
  8783. // where
  8784. // * RtC is the cost of the generated runtime checks
  8785. // * VecC is the cost of a single vector iteration.
  8786. // * TC is the actual trip count of the loop
  8787. // * VF is the vectorization factor
8788. // * EpiC is the cost of the generated epilogue, including the cost
  8789. // of the remaining scalar operations.
  8790. //
  8791. // Vectorization is profitable once the total vector cost is less than the
  8792. // total scalar cost:
  8793. // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
  8794. //
  8795. // Now we can compute the minimum required trip count TC as
  8796. // (RtC + EpiC) / (ScalarC - (VecC / VF)) < TC
  8797. //
  8798. // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
  8799. // the computations are performed on doubles, not integers and the result
  8800. // is rounded up, hence we get an upper estimate of the TC.
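// Illustration (hypothetical costs): with ScalarC = 4, VecC = 8, RtC = 20 and
// a fixed VF of 4, VecC / VF = 2 and MinTC1 = 20 / (4 - 2) = 10 iterations.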
  8801. unsigned IntVF = VF.Width.getKnownMinValue();
  8802. if (VF.Width.isScalable()) {
  8803. unsigned AssumedMinimumVscale = 1;
  8804. if (VScale)
  8805. AssumedMinimumVscale = *VScale;
  8806. IntVF *= AssumedMinimumVscale;
  8807. }
  8808. double VecCOverVF = double(*VF.Cost.getValue()) / IntVF;
  8809. double RtC = *CheckCost.getValue();
  8810. double MinTC1 = RtC / (ScalarC - VecCOverVF);
  8811. // Second, compute a minimum iteration count so that the cost of the
  8812. // runtime checks is only a fraction of the total scalar loop cost. This
  8813. // adds a loop-dependent bound on the overhead incurred if the runtime
  8814. // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
  8815. // * TC. To bound the runtime check to be a fraction 1/X of the scalar
  8816. // cost, compute
  8817. // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
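// Note: X is currently hard-coded to 10 below. Continuing the hypothetical
// example above, MinTC2 = 20 * 10 / 4 = 50, and the final minimum trip count
// becomes max(10, 50) = 50, rounded up to the next multiple of VF (52).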
  8818. double MinTC2 = RtC * 10 / ScalarC;
  8819. // Now pick the larger minimum. If it is not a multiple of VF, choose the
  8820. // next closest multiple of VF. This should partly compensate for ignoring
  8821. // the epilogue cost.
  8822. uint64_t MinTC = std::ceil(std::max(MinTC1, MinTC2));
  8823. VF.MinProfitableTripCount = ElementCount::getFixed(alignTo(MinTC, IntVF));
  8824. LLVM_DEBUG(
  8825. dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
  8826. << VF.MinProfitableTripCount << "\n");
  8827. // Skip vectorization if the expected trip count is less than the minimum
  8828. // required trip count.
  8829. if (auto ExpectedTC = getSmallBestKnownTC(SE, L)) {
  8830. if (ElementCount::isKnownLT(ElementCount::getFixed(*ExpectedTC),
  8831. VF.MinProfitableTripCount)) {
  8832. LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
  8833. "trip count < minimum profitable VF ("
  8834. << *ExpectedTC << " < " << VF.MinProfitableTripCount
  8835. << ")\n");
  8836. return false;
  8837. }
  8838. }
  8839. return true;
  8840. }
  8841. LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
  8842. : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
  8843. !EnableLoopInterleaving),
  8844. VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
  8845. !EnableLoopVectorization) {}
  8846. bool LoopVectorizePass::processLoop(Loop *L) {
  8847. assert((EnableVPlanNativePath || L->isInnermost()) &&
  8848. "VPlan-native path is not enabled. Only process inner loops.");
  8849. #ifndef NDEBUG
  8850. const std::string DebugLocStr = getDebugLocString(L);
  8851. #endif /* NDEBUG */
  8852. LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
  8853. << L->getHeader()->getParent()->getName() << "' from "
  8854. << DebugLocStr << "\n");
  8855. LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
  8856. LLVM_DEBUG(
  8857. dbgs() << "LV: Loop hints:"
  8858. << " force="
  8859. << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
  8860. ? "disabled"
  8861. : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
  8862. ? "enabled"
  8863. : "?"))
  8864. << " width=" << Hints.getWidth()
  8865. << " interleave=" << Hints.getInterleave() << "\n");
  8866. // Function containing loop
  8867. Function *F = L->getHeader()->getParent();
  8868. // Looking at the diagnostic output is the only way to determine if a loop
  8869. // was vectorized (other than looking at the IR or machine code), so it
  8870. // is important to generate an optimization remark for each loop. Most of
  8871. // these messages are generated as OptimizationRemarkAnalysis. Remarks
8872. // generated as OptimizationRemark and OptimizationRemarkMissed are less
8873. // verbose and report vectorized loops and unvectorized loops that may
8874. // benefit from vectorization, respectively.
  8875. if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
  8876. LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
  8877. return false;
  8878. }
  8879. PredicatedScalarEvolution PSE(*SE, *L);
  8880. // Check if it is legal to vectorize the loop.
  8881. LoopVectorizationRequirements Requirements;
  8882. LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
  8883. &Requirements, &Hints, DB, AC, BFI, PSI);
  8884. if (!LVL.canVectorize(EnableVPlanNativePath)) {
  8885. LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
  8886. Hints.emitRemarkWithHints();
  8887. return false;
  8888. }
  8889. // Entrance to the VPlan-native vectorization path. Outer loops are processed
  8890. // here. They may require CFG and instruction level transformations before
  8891. // even evaluating whether vectorization is profitable. Since we cannot modify
  8892. // the incoming IR, we need to build VPlan upfront in the vectorization
  8893. // pipeline.
  8894. if (!L->isInnermost())
  8895. return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
  8896. ORE, BFI, PSI, Hints, Requirements);
  8897. assert(L->isInnermost() && "Inner loop expected.");
  8898. InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
  8899. bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  8900. // If an override option has been passed in for interleaved accesses, use it.
  8901. if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
  8902. UseInterleaved = EnableInterleavedMemAccesses;
  8903. // Analyze interleaved memory accesses.
  8904. if (UseInterleaved)
  8905. IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  8906. // Check the function attributes and profiles to find out if this function
  8907. // should be optimized for size.
  8908. ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
  8909. F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL, &IAI);
  8910. // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  8911. // count by optimizing for size, to minimize overheads.
  8912. auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  8913. if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
  8914. LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
  8915. << "This loop is worth vectorizing only if no scalar "
  8916. << "iteration overheads are incurred.");
  8917. if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
  8918. LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
  8919. else {
  8920. if (*ExpectedTC > TTI->getMinTripCountTailFoldingThreshold()) {
  8921. LLVM_DEBUG(dbgs() << "\n");
  8922. SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
  8923. } else {
  8924. LLVM_DEBUG(dbgs() << " But the target considers the trip count too "
  8925. "small to consider vectorizing.\n");
  8926. reportVectorizationFailure(
  8927. "The trip count is below the minial threshold value.",
  8928. "loop trip count is too low, avoiding vectorization",
  8929. "LowTripCount", ORE, L);
  8930. Hints.emitRemarkWithHints();
  8931. return false;
  8932. }
  8933. }
  8934. }
  8935. // Check the function attributes to see if implicit floats or vectors are
  8936. // allowed.
  8937. if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
  8938. reportVectorizationFailure(
  8939. "Can't vectorize when the NoImplicitFloat attribute is used",
  8940. "loop not vectorized due to NoImplicitFloat attribute",
  8941. "NoImplicitFloat", ORE, L);
  8942. Hints.emitRemarkWithHints();
  8943. return false;
  8944. }
  8945. // Check if the target supports potentially unsafe FP vectorization.
  8946. // FIXME: Add a check for the type of safety issue (denormal, signaling)
  8947. // for the target we're vectorizing for, to make sure none of the
  8948. // additional fp-math flags can help.
  8949. if (Hints.isPotentiallyUnsafe() &&
  8950. TTI->isFPVectorizationPotentiallyUnsafe()) {
  8951. reportVectorizationFailure(
  8952. "Potentially unsafe FP op prevents vectorization",
  8953. "loop not vectorized due to unsafe FP support.",
  8954. "UnsafeFP", ORE, L);
  8955. Hints.emitRemarkWithHints();
  8956. return false;
  8957. }
  8958. bool AllowOrderedReductions;
  8959. // If the flag is set, use that instead and override the TTI behaviour.
  8960. if (ForceOrderedReductions.getNumOccurrences() > 0)
  8961. AllowOrderedReductions = ForceOrderedReductions;
  8962. else
  8963. AllowOrderedReductions = TTI->enableOrderedReductions();
  8964. if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
  8965. ORE->emit([&]() {
  8966. auto *ExactFPMathInst = Requirements.getExactFPInst();
  8967. return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
  8968. ExactFPMathInst->getDebugLoc(),
  8969. ExactFPMathInst->getParent())
  8970. << "loop not vectorized: cannot prove it is safe to reorder "
  8971. "floating-point operations";
  8972. });
  8973. LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
  8974. "reorder floating-point operations\n");
  8975. Hints.emitRemarkWithHints();
  8976. return false;
  8977. }
  8978. // Use the cost model.
  8979. LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
  8980. F, &Hints, IAI);
  8981. CM.collectValuesToIgnore();
  8982. CM.collectElementTypesForWidening();
  8983. // Use the planner for vectorization.
  8984. LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, ORE);
  8985. // Get user vectorization factor and interleave count.
  8986. ElementCount UserVF = Hints.getWidth();
  8987. unsigned UserIC = Hints.getInterleave();
  8988. // Plan how to best vectorize, return the best VF and its cost.
  8989. std::optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
  8990. VectorizationFactor VF = VectorizationFactor::Disabled();
  8991. unsigned IC = 1;
  8992. GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
  8993. F->getParent()->getDataLayout());
  8994. if (MaybeVF) {
  8995. VF = *MaybeVF;
  8996. // Select the interleave count.
  8997. IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  8998. unsigned SelectedIC = std::max(IC, UserIC);
  8999. // Optimistically generate runtime checks if they are needed. Drop them if
  9000. // they turn out to not be profitable.
  9001. if (VF.Width.isVector() || SelectedIC > 1)
  9002. Checks.Create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);
  9003. // Check if it is profitable to vectorize with runtime checks.
  9004. bool ForceVectorization =
  9005. Hints.getForce() == LoopVectorizeHints::FK_Enabled;
  9006. if (!ForceVectorization &&
  9007. !areRuntimeChecksProfitable(Checks, VF, CM.getVScaleForTuning(), L,
  9008. *PSE.getSE())) {
  9009. ORE->emit([&]() {
  9010. return OptimizationRemarkAnalysisAliasing(
  9011. DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
  9012. L->getHeader())
  9013. << "loop not vectorized: cannot prove it is safe to reorder "
  9014. "memory operations";
  9015. });
  9016. LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
  9017. Hints.emitRemarkWithHints();
  9018. return false;
  9019. }
  9020. }
  9021. // Identify the diagnostic messages that should be produced.
  9022. std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  9023. bool VectorizeLoop = true, InterleaveLoop = true;
  9024. if (VF.Width.isScalar()) {
  9025. LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
  9026. VecDiagMsg = std::make_pair(
  9027. "VectorizationNotBeneficial",
  9028. "the cost-model indicates that vectorization is not beneficial");
  9029. VectorizeLoop = false;
  9030. }
  9031. if (!MaybeVF && UserIC > 1) {
  9032. // Tell the user interleaving was avoided up-front, despite being explicitly
  9033. // requested.
  9034. LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
  9035. "interleaving should be avoided up front\n");
  9036. IntDiagMsg = std::make_pair(
  9037. "InterleavingAvoided",
  9038. "Ignoring UserIC, because interleaving was avoided up front");
  9039. InterleaveLoop = false;
  9040. } else if (IC == 1 && UserIC <= 1) {
  9041. // Tell the user interleaving is not beneficial.
  9042. LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
  9043. IntDiagMsg = std::make_pair(
  9044. "InterleavingNotBeneficial",
  9045. "the cost-model indicates that interleaving is not beneficial");
  9046. InterleaveLoop = false;
  9047. if (UserIC == 1) {
  9048. IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
  9049. IntDiagMsg.second +=
  9050. " and is explicitly disabled or interleave count is set to 1";
  9051. }
  9052. } else if (IC > 1 && UserIC == 1) {
9053. // Tell the user interleaving is beneficial, but it is explicitly disabled.
  9054. LLVM_DEBUG(
  9055. dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
  9056. IntDiagMsg = std::make_pair(
  9057. "InterleavingBeneficialButDisabled",
  9058. "the cost-model indicates that interleaving is beneficial "
  9059. "but is explicitly disabled or interleave count is set to 1");
  9060. InterleaveLoop = false;
  9061. }
  9062. // Override IC if user provided an interleave count.
  9063. IC = UserIC > 0 ? UserIC : IC;
  9064. // Emit diagnostic messages, if any.
  9065. const char *VAPassName = Hints.vectorizeAnalysisPassName();
  9066. if (!VectorizeLoop && !InterleaveLoop) {
9067. // Do not vectorize or interleave the loop.
  9068. ORE->emit([&]() {
  9069. return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
  9070. L->getStartLoc(), L->getHeader())
  9071. << VecDiagMsg.second;
  9072. });
  9073. ORE->emit([&]() {
  9074. return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
  9075. L->getStartLoc(), L->getHeader())
  9076. << IntDiagMsg.second;
  9077. });
  9078. return false;
  9079. } else if (!VectorizeLoop && InterleaveLoop) {
  9080. LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  9081. ORE->emit([&]() {
  9082. return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
  9083. L->getStartLoc(), L->getHeader())
  9084. << VecDiagMsg.second;
  9085. });
  9086. } else if (VectorizeLoop && !InterleaveLoop) {
  9087. LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
  9088. << ") in " << DebugLocStr << '\n');
  9089. ORE->emit([&]() {
  9090. return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
  9091. L->getStartLoc(), L->getHeader())
  9092. << IntDiagMsg.second;
  9093. });
  9094. } else if (VectorizeLoop && InterleaveLoop) {
  9095. LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
  9096. << ") in " << DebugLocStr << '\n');
  9097. LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  9098. }
  9099. bool DisableRuntimeUnroll = false;
  9100. MDNode *OrigLoopID = L->getLoopID();
  9101. {
  9102. using namespace ore;
  9103. if (!VectorizeLoop) {
  9104. assert(IC > 1 && "interleave count should not be 1 or 0");
9105. // If we decided that it is not profitable to vectorize the loop, then
  9106. // interleave it.
  9107. InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
  9108. &CM, BFI, PSI, Checks);
  9109. VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
  9110. LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT, false);
  9111. ORE->emit([&]() {
  9112. return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
  9113. L->getHeader())
  9114. << "interleaved loop (interleaved count: "
  9115. << NV("InterleaveCount", IC) << ")";
  9116. });
  9117. } else {
9118. // If we decided that it is *profitable* to vectorize the loop, then do it.
  9119. // Consider vectorizing the epilogue too if it's profitable.
  9120. VectorizationFactor EpilogueVF =
  9121. CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
  9122. if (EpilogueVF.Width.isVector()) {
  9123. // The first pass vectorizes the main loop and creates a scalar epilogue
  9124. // to be vectorized by executing the plan (potentially with a different
  9125. // factor) again shortly afterwards.
  9126. EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
  9127. EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
  9128. EPI, &LVL, &CM, BFI, PSI, Checks);
  9129. VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
  9130. LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
  9131. DT, true);
  9132. ++LoopsVectorized;
  9133. // Second pass vectorizes the epilogue and adjusts the control flow
  9134. // edges from the first pass.
  9135. EPI.MainLoopVF = EPI.EpilogueVF;
  9136. EPI.MainLoopUF = EPI.EpilogueUF;
  9137. EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
  9138. ORE, EPI, &LVL, &CM, BFI, PSI,
  9139. Checks);
  9140. VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
  9141. VPRegionBlock *VectorLoop = BestEpiPlan.getVectorLoopRegion();
  9142. VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
  9143. Header->setName("vec.epilog.vector.body");
  9144. // Ensure that the start values for any VPWidenIntOrFpInductionRecipe,
  9145. // VPWidenPointerInductionRecipe and VPReductionPHIRecipes are updated
  9146. // before vectorizing the epilogue loop.
  9147. for (VPRecipeBase &R : Header->phis()) {
  9148. if (isa<VPCanonicalIVPHIRecipe>(&R))
  9149. continue;
  9150. Value *ResumeV = nullptr;
  9151. // TODO: Move setting of resume values to prepareToExecute.
  9152. if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
  9153. ResumeV = MainILV.getReductionResumeValue(
  9154. ReductionPhi->getRecurrenceDescriptor());
  9155. } else {
  9156. // Create induction resume values for both widened pointer and
  9157. // integer/fp inductions and update the start value of the induction
  9158. // recipes to use the resume value.
  9159. PHINode *IndPhi = nullptr;
  9160. const InductionDescriptor *ID;
  9161. if (auto *Ind = dyn_cast<VPWidenPointerInductionRecipe>(&R)) {
  9162. IndPhi = cast<PHINode>(Ind->getUnderlyingValue());
  9163. ID = &Ind->getInductionDescriptor();
  9164. } else {
  9165. auto *WidenInd = cast<VPWidenIntOrFpInductionRecipe>(&R);
  9166. IndPhi = WidenInd->getPHINode();
  9167. ID = &WidenInd->getInductionDescriptor();
  9168. }
  9169. ResumeV = MainILV.createInductionResumeValue(
  9170. IndPhi, *ID, {EPI.MainLoopIterationCountCheck});
  9171. }
  9172. assert(ResumeV && "Must have a resume value");
  9173. VPValue *StartVal = BestEpiPlan.getOrAddExternalDef(ResumeV);
  9174. cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
  9175. }
  9176. LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
  9177. DT, true);
  9178. ++LoopsEpilogueVectorized;
  9179. if (!MainILV.areSafetyChecksAdded())
  9180. DisableRuntimeUnroll = true;
  9181. } else {
  9182. InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width,
  9183. VF.MinProfitableTripCount, IC, &LVL, &CM, BFI,
  9184. PSI, Checks);
  9185. VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
  9186. LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
  9187. ++LoopsVectorized;
  9188. // Add metadata to disable runtime unrolling a scalar loop when there
  9189. // are no runtime checks about strides and memory. A scalar loop that is
  9190. // rarely used is not worth unrolling.
  9191. if (!LB.areSafetyChecksAdded())
  9192. DisableRuntimeUnroll = true;
  9193. }
  9194. // Report the vectorization decision.
  9195. ORE->emit([&]() {
  9196. return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
  9197. L->getHeader())
  9198. << "vectorized loop (vectorization width: "
  9199. << NV("VectorizationFactor", VF.Width)
  9200. << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
  9201. });
  9202. }
  9203. if (ORE->allowExtraAnalysis(LV_NAME))
  9204. checkMixedPrecision(L, ORE);
  9205. }
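  // Transfer any follow-up metadata (LLVMLoopVectorizeFollowupAll /
  // LLVMLoopVectorizeFollowupEpilogue) from the original loop to the scalar
  // remainder loop. If none was requested, mark the remainder loop as already
  // vectorized and, when no runtime safety checks were emitted, also disable
  // runtime unrolling of it.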
  std::optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID) {
    L->setLoopID(*RemainderLoopID);
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}
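// Driver for a whole-function run: cache the analysis results on the pass
// object, bail out early when the target reports no vector registers and no
// useful interleave factor, bring all loops into simplified form, collect the
// supported inner loops into a worklist, and then vectorize or interleave
// each one via processLoop().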
LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AssumptionCache &AC_, LoopAccessInfoManager &LAIs_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AC = &AC_;
  LAIs = &LAIs_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (const auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);

    if (Changed)
      LAIs->clear();
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}
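// New pass manager entry point: fetch the analyses the vectorizer needs (plus
// a cached ProfileSummaryInfo from the module proxy), run the implementation,
// and report which analyses remain preserved depending on whether the CFG was
// changed.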
PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  // There are no loops in the function. Return before computing other
  // expensive analyses.
  if (LI.empty())
    return PreservedAnalyses::all();
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  LoopAccessInfoManager &LAIs = AM.getResult<LoopAccessAnalysis>(F);

  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AC, LAIs, ORE, PSI);

  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for the VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
}
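// Print this pass's parameters in pipeline-text form. With the default
// options this is expected to look roughly like
// "loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only;>";
// the exact pass name is supplied by the pass registry, so treat the example
// as illustrative only.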
void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}