12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
2622126231262412625126261262712628126291263012631126321263312634126351263612637126381263912640126411264212643126441264512646126471264812649126501265112652126531265412655126561265712658126591266012661126621266312664126651266612667126681266912670126711267212673126741267512676126771267812679126801268112682126831268412685 |
- //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This provides a class for OpenMP runtime code generation.
- //
- //===----------------------------------------------------------------------===//
- #include "CGOpenMPRuntime.h"
- #include "CGCXXABI.h"
- #include "CGCleanup.h"
- #include "CGRecordLayout.h"
- #include "CodeGenFunction.h"
- #include "TargetInfo.h"
- #include "clang/AST/APValue.h"
- #include "clang/AST/Attr.h"
- #include "clang/AST/Decl.h"
- #include "clang/AST/OpenMPClause.h"
- #include "clang/AST/StmtOpenMP.h"
- #include "clang/AST/StmtVisitor.h"
- #include "clang/Basic/BitmaskEnum.h"
- #include "clang/Basic/FileManager.h"
- #include "clang/Basic/OpenMPKinds.h"
- #include "clang/Basic/SourceManager.h"
- #include "clang/CodeGen/ConstantInitBuilder.h"
- #include "llvm/ADT/ArrayRef.h"
- #include "llvm/ADT/SetOperations.h"
- #include "llvm/ADT/SmallBitVector.h"
- #include "llvm/ADT/StringExtras.h"
- #include "llvm/Bitcode/BitcodeReader.h"
- #include "llvm/IR/Constants.h"
- #include "llvm/IR/DerivedTypes.h"
- #include "llvm/IR/GlobalValue.h"
- #include "llvm/IR/InstrTypes.h"
- #include "llvm/IR/Value.h"
- #include "llvm/Support/AtomicOrdering.h"
- #include "llvm/Support/Format.h"
- #include "llvm/Support/raw_ostream.h"
- #include <cassert>
- #include <numeric>
- #include <optional>
- using namespace clang;
- using namespace CodeGen;
- using namespace llvm::omp;
- namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  /// Build region info that is associated with the captured statement \p CS.
  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  /// Build region info that is not tied to a particular captured statement
  /// (used for inlined regions).
  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  /// Emit the code that switches between parts of an untied task.
  /// Default implementation is a no-op; overridden by task regions.
  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  /// Return which of the four region flavors this object represents.
  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  /// Return the OpenMP directive kind this region was created for.
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  /// Return whether the region was created with cancellation enabled.
  bool hasCancel() const { return HasCancel; }

  /// RTTI support: every captured-stmt info with kind CR_OpenMP is one of
  /// the CGOpenMPRegionInfo subclasses.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  /// Flavor of this region (see CGOpenMPRegionKind).
  CGOpenMPRegionKind RegionKind;
  /// Callback used to emit the code of the region.
  RegionCodeGenTy CodeGen;
  /// Directive kind of the construct that created this region.
  OpenMPDirectiveKind Kind;
  /// Whether cancellation is possible in this region (set at construction).
  bool HasCancel;
};
- /// API for captured statement code generation in OpenMP constructs.
- class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
- public:
- CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
- const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind, bool HasCancel,
- StringRef HelperName)
- : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
- HasCancel),
- ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
- assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
- }
- /// Get a variable or parameter for storing global thread id
- /// inside OpenMP construct.
- const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
- /// Get the name of the capture helper.
- StringRef getHelperName() const override { return HelperName; }
- static bool classof(const CGCapturedStmtInfo *Info) {
- return CGOpenMPRegionInfo::classof(Info) &&
- cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
- ParallelOutlinedRegion;
- }
- private:
- /// A variable or parameter storing global thread id for OpenMP
- /// constructs.
- const VarDecl *ThreadIDVar;
- StringRef HelperName;
- };
/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// Action emitting the dispatch logic needed for 'untied' tasks: a switch
  /// over the task part id that lets a resumed task jump to the point where
  /// it was previously suspended.
  class UntiedTaskActionTy final : public PrePostActionTy {
    // True when the task is 'untied' (note: the constructor takes 'Tied').
    bool Untied;
    // Variable holding a pointer to the current task part id.
    const VarDecl *PartIDVar;
    // Extra code emitted at each suspension point, before jumping out.
    const RegionCodeGenTy UntiedCodeGen;
    // Switch over the part id; created in Enter() for untied tasks only.
    llvm::SwitchInst *UntiedSwitch = nullptr;
  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    /// For untied tasks, emit the entry dispatch: load the part id, create
    /// a switch on it whose default is a "done" block that exits through
    /// cleanups, then register case 0 for the first task part.
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        llvm::Value *Res =
            CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
        llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    /// Emit a suspension point for an untied task: store the id of the next
    /// part into *PartIDVar, run the UntiedCodeGen callback, branch to the
    /// function exit, and open a new switch case where execution resumes.
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        // The id of the next part equals the number of cases emitted so far.
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranch(CGF.ReturnBlock.getBlock());
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    /// Number of task parts emitted so far (one per switch case). Only
    /// meaningful for untied tasks, where UntiedSwitch was created in Enter.
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }
  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }
  /// Delegate untied-task part switching to the associated action.
  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }
private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};
- /// API for inlined captured statement code generation in OpenMP
- /// constructs.
- class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
- public:
- CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
- const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind, bool HasCancel)
- : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
- OldCSI(OldCSI),
- OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
- // Retrieve the value of the context parameter.
- llvm::Value *getContextValue() const override {
- if (OuterRegionInfo)
- return OuterRegionInfo->getContextValue();
- llvm_unreachable("No context value for inlined OpenMP region");
- }
- void setContextValue(llvm::Value *V) override {
- if (OuterRegionInfo) {
- OuterRegionInfo->setContextValue(V);
- return;
- }
- llvm_unreachable("No context value for inlined OpenMP region");
- }
- /// Lookup the captured field decl for a variable.
- const FieldDecl *lookup(const VarDecl *VD) const override {
- if (OuterRegionInfo)
- return OuterRegionInfo->lookup(VD);
- // If there is no outer outlined region,no need to lookup in a list of
- // captured variables, we can use the original one.
- return nullptr;
- }
- FieldDecl *getThisFieldDecl() const override {
- if (OuterRegionInfo)
- return OuterRegionInfo->getThisFieldDecl();
- return nullptr;
- }
- /// Get a variable or parameter for storing global thread id
- /// inside OpenMP construct.
- const VarDecl *getThreadIDVariable() const override {
- if (OuterRegionInfo)
- return OuterRegionInfo->getThreadIDVariable();
- return nullptr;
- }
- /// Get an LValue for the current ThreadID variable.
- LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
- if (OuterRegionInfo)
- return OuterRegionInfo->getThreadIDVariableLValue(CGF);
- llvm_unreachable("No LValue for inlined OpenMP construct");
- }
- /// Get the name of the capture helper.
- StringRef getHelperName() const override {
- if (auto *OuterRegionInfo = getOldCSI())
- return OuterRegionInfo->getHelperName();
- llvm_unreachable("No helper name for inlined OpenMP construct");
- }
- void emitUntiedSwitch(CodeGenFunction &CGF) override {
- if (OuterRegionInfo)
- OuterRegionInfo->emitUntiedSwitch(CGF);
- }
- CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
- static bool classof(const CGCapturedStmtInfo *Info) {
- return CGOpenMPRegionInfo::classof(Info) &&
- cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
- }
- ~CGOpenMPInlinedRegionInfo() override = default;
- private:
- /// CodeGen info about outer OpenMP region.
- CodeGenFunction::CGCapturedStmtInfo *OldCSI;
- CGOpenMPRegionInfo *OuterRegionInfo;
- };
- /// API for captured statement code generation in OpenMP target
- /// constructs. For this captures, implicit parameters are used instead of the
- /// captured fields. The name of the target region has to be unique in a given
- /// application so it is provided by the client, because only the client has
- /// the information to generate that.
- class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
- public:
- CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
- const RegionCodeGenTy &CodeGen, StringRef HelperName)
- : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
- /*HasCancel=*/false),
- HelperName(HelperName) {}
- /// This is unused for target regions because each starts executing
- /// with a single thread.
- const VarDecl *getThreadIDVariable() const override { return nullptr; }
- /// Get the name of the capture helper.
- StringRef getHelperName() const override { return HelperName; }
- static bool classof(const CGCapturedStmtInfo *Info) {
- return CGOpenMPRegionInfo::classof(Info) &&
- cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
- }
- private:
- StringRef HelperName;
- };
/// Placeholder region-codegen callback for contexts (expression captures)
/// where a code-generation sequence is required syntactically but must never
/// actually run.
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}
/// API for generation of expressions captured in a innermost OpenMP
/// region.
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
      : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
                                  OMPD_unknown,
                                  /*HasCancel=*/false),
        PrivScope(CGF) {
    // Make sure the globals captured in the provided statement are local by
    // using the privatization logic. We assume the same variable is not
    // captured more than once.
    for (const auto &C : CS.captures()) {
      // Only variable captures matter; skip 'this' and VLA captures.
      if (!C.capturesVariable() && !C.capturesVariableByCopy())
        continue;
      const VarDecl *VD = C.getCapturedVar();
      // Locals and parameters are already function-local; nothing to do.
      if (VD->isLocalVarDeclOrParm())
        continue;
      // Build a reference to the global so its current address can be bound
      // as the "private" copy.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      VD->getType().getNonReferenceType(), VK_LValue,
                      C.getLocation());
      PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
    }
    (void)PrivScope.Privatize();
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
      return FD;
    return nullptr;
  }

  /// Emit the captured statement body. Never valid for expression captures.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
    llvm_unreachable("No body for expressions");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct. Never valid for expression captures.
  const VarDecl *getThreadIDVariable() const override {
    llvm_unreachable("No thread id for expressions");
  }

  /// Get the name of the capture helper. Never valid for expression captures.
  StringRef getHelperName() const override {
    llvm_unreachable("No helper name for expressions");
  }

  static bool classof(const CGCapturedStmtInfo *Info) { return false; }

private:
  /// Private scope to capture global variables.
  CodeGenFunction::OMPPrivateScope PrivScope;
};
/// RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  // Stashed lambda/block capture state of CGF, restored on destruction when
  // NoInheritance is set.
  llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
  bool NoInheritance = false;

public:
  /// Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  /// \param NoInheritance When true, the inlined region does not inherit the
  /// enclosing function's lambda-capture and block state; it is saved here
  /// and restored by the destructor.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel,
                          bool NoInheritance = true)
      : CGF(CGF), NoInheritance(NoInheritance) {
    // Start emission for the construct.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    if (NoInheritance) {
      // Clear CGF's lambda/block capture state, keeping a copy to restore.
      std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
      LambdaThisCaptureField = CGF.LambdaThisCaptureField;
      CGF.LambdaThisCaptureField = nullptr;
      BlockInfo = CGF.BlockInfo;
      CGF.BlockInfo = nullptr;
    }
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    if (NoInheritance) {
      // Put back the lambda/block capture state stashed by the constructor.
      std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
      CGF.LambdaThisCaptureField = LambdaThisCaptureField;
      CGF.BlockInfo = BlockInfo;
    }
  }
};
/// Values for bit flags used in the ident_t to describe the fields.
/// All enum elements are named and described in accordance with the code
/// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive.
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

/// Values for bit flags for marking which requires clauses have been used.
enum OpenMPOffloadingRequiresDirFlags : int64_t {
  /// flag undefined.
  OMP_REQ_UNDEFINED = 0x000,
  /// no requires clause present.
  OMP_REQ_NONE = 0x001,
  /// reverse_offload clause.
  OMP_REQ_REVERSE_OFFLOAD = 0x002,
  /// unified_address clause.
  OMP_REQ_UNIFIED_ADDRESS = 0x004,
  /// unified_shared_memory clause.
  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
  /// dynamic_allocators clause.
  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
};

enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
} // anonymous namespace
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++  */
///    char const *psource;    /**< String describing the source location.
///                            The string is composed of semi-colon separated
///                            fields which describe the source file,
///                            the function and a pair of line numbers that
///                            delimit the construct.
///                            */
/// } ident_t;
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};

/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class CleanupTy final : public EHScopeStack::Cleanup {
  PrePostActionTy *Action;

public:
  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
  /// Runs the registered exit action when the cleanup fires; skipped if the
  /// current insertion point has already been cleared.
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
    if (!CGF.HaveInsertPoint())
      return;
    Action->Exit(CGF);
  }
};
} // anonymous namespace
- void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- if (PrePostAction) {
- CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
- Callback(CodeGen, CGF, *PrePostAction);
- } else {
- PrePostActionTy Action;
- Callback(CodeGen, CGF, Action);
- }
- }
- /// Check if the combiner is a call to UDR combiner and if it is so return the
- /// UDR decl used for reduction.
- static const OMPDeclareReductionDecl *
- getReductionInit(const Expr *ReductionOp) {
- if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
- if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
- if (const auto *DRE =
- dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
- if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
- return DRD;
- return nullptr;
- }
/// Initializes the private reduction copy at \p Private: either by evaluating
/// the user-defined reduction's initializer clause, or — when \p DRD has no
/// initializer — by copying a null constant of type \p Ty into it.
static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    // Bind the initializer call's two pointer arguments to the private and
    // original addresses, then evaluate the call against the emitted
    // initializer function.
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast<CallExpr>(InitOp);
    const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    const auto *RHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()), Private);
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()), Original);
    (void)PrivateScope.Privatize();
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    // No initializer clause: materialize a private constant null of Ty and
    // copy it into the private location.
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate: {
      // Aggregates are copied directly from the global's lvalue.
      OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
      CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                           /*IsInitializer=*/false);
      return;
    }
    }
    // Scalar/complex: store the loaded rvalue into the private location.
    OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param EmitDeclareReductionInit True when each element must be initialized
/// through the user-defined reduction initializer \p DRD.
/// \param Init Initial expression of array.
/// \param DRD Declare-reduction declaration, or null.
/// \param SrcAddr Address of the original array; only consulted when \p DRD
/// is non-null.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, bool EmitDeclareReductionInit,
                                 const Expr *Init,
                                 const OMPDeclareReductionDecl *DRD,
                                 Address SrcAddr = Address::invalid()) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  if (DRD)
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd =
      CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  // Skip the body entirely for a zero-length array.
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);
  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
  // PHIs track the current source (when DRD is used) and destination element.
  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI, SrcAddr.getElementType(),
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  // Emit copy.
  {
    // Cleanups from each element's initialization run before the increment.
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }
  if (DRD) {
    // Shift the address forward by one element.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
        "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }
  // Shift the address forward by one element.
  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
      "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
/// Emits the lvalue of the shared (original) reduction item expression.
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}
- LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
- const Expr *E) {
- if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
- return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
- return LValue();
- }
/// Emits element-wise initialization of an array reduction item.
void ReductionCodeGen::emitAggregateInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
    const OMPDeclareReductionDecl *DRD) {
  // Emit VarDecl with copy init for arrays.
  // Get the address of the original variable captured in current
  // captured region.
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  // Use the UDR path when the UDR declares an initializer, or when the
  // private copy has no default initializer of its own.
  bool EmitDeclareReductionInit =
      DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
                       EmitDeclareReductionInit,
                       EmitDeclareReductionInit ? ClausesData[N].ReductionOp
                                                : PrivateVD->getInit(),
                       DRD, SharedAddr);
}
- ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
- ArrayRef<const Expr *> Origs,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> ReductionOps) {
- ClausesData.reserve(Shareds.size());
- SharedAddresses.reserve(Shareds.size());
- Sizes.reserve(Shareds.size());
- BaseDecls.reserve(Shareds.size());
- const auto *IOrig = Origs.begin();
- const auto *IPriv = Privates.begin();
- const auto *IRed = ReductionOps.begin();
- for (const Expr *Ref : Shareds) {
- ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
- std::advance(IOrig, 1);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- }
- }
- void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
- assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
- "Number of generated lvalues must be exactly N.");
- LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
- LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
- SharedAddresses.emplace_back(First, Second);
- if (ClausesData[N].Shared == ClausesData[N].Ref) {
- OrigAddresses.emplace_back(First, Second);
- } else {
- LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
- LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
- OrigAddresses.emplace_back(First, Second);
- }
- }
/// Computes the size (in bytes and in elements) of reduction item \p N and
/// records it in Sizes; for variably-modified private types the element
/// count is also bound to the VLA size expression.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  QualType PrivateType = getPrivateType(N);
  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    // Statically-sized item: byte size only, no runtime element count.
    Sizes.emplace_back(
        CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    // Elements = (UB - LB) + 1; bytes = elements * sizeof(element).
    Size = CGF.Builder.CreatePtrDiff(ElemType,
                                     OrigAddresses[N].second.getPointer(CGF),
                                     OrigAddresses[N].first.getPointer(CGF));
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    // Whole object: bytes from the type, elements = bytes / sizeof(element).
    SizeInChars =
        CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  // Bind the VLA size expression to the computed element count so the
  // variably-modified private type can be emitted.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
/// Emits the variably-modified private type of reduction item \p N using an
/// externally provided element count \p Size. For non-VLA items \p Size must
/// be null and nothing is emitted.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
                                         llvm::Value *Size) {
  QualType PrivateType = getPrivateType(N);
  if (!PrivateType->isVariablyModifiedType()) {
    assert(!Size && !Sizes[N].second &&
           "Size should be nullptr for non-variably modified reduction "
           "items.");
    return;
  }
  // Bind the VLA size expression to the provided element count before
  // emitting the variably-modified type.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
/// Initializes the private copy of reduction item \p N, choosing between
/// element-wise array initialization, the user-defined reduction
/// initializer, and the private variable's own initializer.
void ReductionCodeGen::emitInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
    llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  assert(SharedAddresses.size() > N && "No variable was generated");
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
    // Arrays: element-by-element initialization.
    if (DRD && DRD->getInitializer())
      (void)DefaultInit(CGF);
    emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
    // Non-array with a user-defined reduction initializer (or no default
    // init of its own).
    (void)DefaultInit(CGF);
    QualType SharedType = SharedAddresses[N].first.getType();
    emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
                                     PrivateAddr, SharedAddr, SharedType);
  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
             !CGF.isTrivialInitializer(PrivateVD->getInit())) {
    // Fall back to the private copy's own non-trivial initializer when the
    // default-init callback did not handle initialization.
    CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
                         PrivateVD->getType().getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
- bool ReductionCodeGen::needCleanups(unsigned N) {
- QualType PrivateType = getPrivateType(N);
- QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
- return DTorKind != QualType::DK_none;
- }
- void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
- Address PrivateAddr) {
- QualType PrivateType = getPrivateType(N);
- QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
- if (needCleanups(N)) {
- PrivateAddr = CGF.Builder.CreateElementBitCast(
- PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
- CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
- }
- }
/// Follows pointer/reference indirections from \p BaseLV (declared type
/// \p BaseTy) until the pointee type matches \p ElTy, then returns an lvalue
/// for that address re-cast to the memory representation of \p ElTy.
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    // Dereference one level: pointers and references need different loads.
    if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
    } else {
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  return CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
                                       CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
/// Rebuilds the chain of pointer indirections described by \p BaseTy around
/// the adjusted pointer \p Addr: one memory temporary is created per level of
/// indirection between \p BaseTy and \p ElTy, each storing the next level,
/// and the outermost temporary is returned. With no indirection, \p Addr is
/// simply cast back to the original base address type.
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          Address OriginalBaseAddress, llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    // Allocate a temporary for this level and link it from the previous one.
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  if (Tmp.isValid()) {
    // Store the adjusted pointer into the innermost temporary and hand back
    // the outermost one.
    Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        Addr, Tmp.getElementType());
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  // No indirection: return the original base address carrying the adjusted
  // pointer.
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr, OriginalBaseAddress.getType());
  return OriginalBaseAddress.withPointer(Addr);
}
- static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
- const VarDecl *OrigVD = nullptr;
- if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
- const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
- Base = TempOASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
- Base = TempASE->getBase()->IgnoreParenImpCasts();
- DE = cast<DeclRefExpr>(Base);
- OrigVD = cast<VarDecl>(DE->getDecl());
- } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
- const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
- Base = TempASE->getBase()->IgnoreParenImpCasts();
- DE = cast<DeclRefExpr>(Base);
- OrigVD = cast<VarDecl>(DE->getDecl());
- }
- return OrigVD;
- }
/// Adjusts the private copy's address for array-section/subscript reduction
/// items: offsets the private pointer by the same distance the section start
/// has from its base variable, then rebuilds the base's indirection chain.
/// For plain variable items the address is returned unchanged.
Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                                               Address PrivateAddr) {
  const DeclRefExpr *DE;
  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
    BaseDecls.emplace_back(OrigVD);
    LValue OriginalBaseLValue = CGF.EmitLValue(DE);
    // Walk from the base variable down to the element type of the shared
    // item.
    LValue BaseLValue =
        loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                    OriginalBaseLValue);
    Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
    // Adjustment = base pointer - section start, in elements.
    llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
        SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
        SharedAddr.getPointer());
    llvm::Value *PrivatePointer =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            PrivateAddr.getPointer(), SharedAddr.getType());
    llvm::Value *Ptr = CGF.Builder.CreateGEP(
        SharedAddr.getElementType(), PrivatePointer, Adjustment);
    return castToBase(CGF, OrigVD->getType(),
                      SharedAddresses[N].first.getType(),
                      OriginalBaseLValue.getAddress(CGF), Ptr);
  }
  // Plain variable reference: no adjustment needed.
  BaseDecls.emplace_back(
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  return PrivateAddr;
}
- bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
- const OMPDeclareReductionDecl *DRD =
- getReductionInit(ClausesData[N].ReductionOp);
- return DRD && DRD->getInitializer();
- }
/// The thread-id variable is declared as a pointer; load through it to get
/// the lvalue of the actual thread id.
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  return CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(getThreadIDVariable()),
      getThreadIDVariable()->getType()->castAs<PointerType>());
}

/// Emits the region body inside a terminate scope, so that any exception
/// escaping the structured block terminates instead of unwinding out of it.
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  if (S)
    CGF.incrementProfileCounter(S);
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}

/// For task-outlined regions the thread id is a direct local, so its address
/// is used as-is (no load through a pointer).
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
    CodeGenFunction &CGF) {
  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
                            getThreadIDVariable()->getType(),
                            AlignmentSource::Decl);
}
- static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
- QualType FieldTy) {
- auto *Field = FieldDecl::Create(
- C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
- C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- DC->addDecl(Field);
- return Field;
- }
/// Sets up the OpenMP runtime support: critical-name type, OpenMPIRBuilder
/// configuration, and previously recorded offload metadata.
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
    : CGM(CGM), OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager() {
  // An array of 8 i32 — matches kmp_critical_name in the OpenMP runtime's
  // kmp.h (see the references at the OpenMPLocationFlags enum).
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
  llvm::OpenMPIRBuilderConfig Config(CGM.getLangOpts().OpenMPIsDevice, false,
                                     hasRequiresUnifiedSharedMemory(),
                                     CGM.getLangOpts().OpenMPOffloadMandatory);
  // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
  OMPBuilder.initialize();
  OMPBuilder.setConfig(Config);
  OffloadEntriesInfoManager.setConfig(Config);
  loadOffloadInfoMetadata();
}
- void CGOpenMPRuntime::clear() {
- InternalVars.clear();
- // Clean non-target variable declarations possibly used only in debug info.
- for (const auto &Data : EmittedNonTargetVariables) {
- if (!Data.getValue().pointsToAliveValue())
- continue;
- auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
- if (!GV)
- continue;
- if (!GV->isDeclaration() || GV->getNumUses() > 0)
- continue;
- GV->eraseFromParent();
- }
- }
/// Builds a platform-specific mangled name from \p Parts via the
/// OpenMPIRBuilder.
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
  return OMPBuilder.createPlatformSpecificName(Parts);
}
/// Emits the outlined helper function for a 'declare reduction' combiner or
/// initializer.
/// \param Ty Reduction element type.
/// \param CombinerInitializer Expression to evaluate inside the helper, or
/// null when only \p Out's own initializer should run.
/// \param In Variable mapped onto the first (input) pointer parameter.
/// \param Out Variable mapped onto the second (output) pointer parameter.
/// \param IsCombiner Selects the helper's name ("omp_combiner" vs.
/// "omp_initializer") and whether \p Out's own initializer is emitted.
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  ASTContext &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName(
      {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  if (CGM.getLangOpts().Optimize) {
    // In optimized builds force these tiny helpers to be inlined.
    Fn->removeFnAttr(llvm::Attribute::NoInline);
    Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
    Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(
      In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
              .getAddress(CGF));
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(
      Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
               .getAddress(CGF));
  (void)Scope.Privatize();
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    // Initializer helper: run the out-variable's own non-trivial initializer
    // first.
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
/// Emits the combiner (and, if present, initializer) helper functions for a
/// 'declare reduction' declaration and caches them in UDRMap. Each UDR is
/// emitted at most once.
void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (const Expr *Init = D->getInitializer()) {
    // For non-call-style init the initializer expression is not passed; the
    // helper then relies on the priv variable's own initializer.
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
                                                                     : nullptr,
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
        /*IsCombiner=*/false);
  }
  UDRMap.try_emplace(D, Combiner, Initializer);
  if (CGF) {
    // Remember which UDRs were requested while emitting CGF's function.
    auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}
- std::pair<llvm::Function *, llvm::Function *>
- CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
- auto I = UDRMap.find(D);
- if (I != UDRMap.end())
- return I->second;
- emitUserDefinedReduction(/*CGF=*/nullptr, D);
- return UDRMap.lookup(D);
- }
namespace {
// Temporary RAII solution to perform a push/pop stack event on the OpenMP IR
// Builder if one is present. A no-op when no builder is supplied.
struct PushAndPopStackRAII {
  PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
                      bool HasCancel, llvm::omp::Directive Kind)
      : OMPBuilder(OMPBuilder) {
    if (!OMPBuilder)
      return;

    // The following callback is the crucial part of clangs cleanup process.
    //
    // NOTE:
    // Once the OpenMPIRBuilder is used to create parallel regions (and
    // similar), the cancellation destination (Dest below) is determined via
    // IP. That means if we have variables to finalize we split the block at IP,
    // use the new block (=BB) as destination to build a JumpDest (via
    // getJumpDestInCurrentScope(BB)) which then is fed to
    // EmitBranchThroughCleanup. Furthermore, there will not be the need
    // to push & pop an FinalizationInfo object.
    // The FiniCB will still be needed but at the point where the
    // OpenMPIRBuilder is asked to construct a parallel (or similar) construct.
    auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
      assert(IP.getBlock()->end() == IP.getPoint() &&
             "Clang CG should cause non-terminated block!");
      CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
      CGF.Builder.restoreIP(IP);
      CodeGenFunction::JumpDest Dest =
          CGF.getOMPCancelDestination(OMPD_parallel);
      CGF.EmitBranchThroughCleanup(Dest);
    };

    // TODO: Remove this once we emit parallel regions through the
    //       OpenMPIRBuilder as it can do this setup internally.
    llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
    OMPBuilder->pushFinalizationCB(std::move(FI));
  }
  ~PushAndPopStackRAII() {
    // Pop only if the constructor pushed (i.e. a builder was present).
    if (OMPBuilder)
      OMPBuilder->popFinalizationCB();
  }
  llvm::OpenMPIRBuilder *OMPBuilder;
};
} // namespace
// Shared implementation for outlining 'parallel' and 'teams' regions: builds
// the region info for the captured statement CS and emits the outlined helper
// function named OutlinedHelperName.
static llvm::Function *emitParallelOrTeamsOutlinedFunction(
    CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  assert(ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 *");
  CodeGenFunction CGF(CGM, true);
  // Determine whether this directive may execute a 'cancel' construct. There
  // is no common hasCancel() accessor on the base directive class, hence the
  // dyn_cast chain over every parallel-flavored directive kind.
  bool HasCancel = false;
  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
    HasCancel = OPSD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
  // parallel region to make cancellation barriers work properly.
  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  // PSR must stay alive across GenerateOpenMPCapturedStmtFunction below: its
  // destructor pops the finalization callback pushed in its constructor.
  PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
                                    HasCancel, OutlinedHelperName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
- llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
- const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
- return emitParallelOrTeamsOutlinedFunction(
- CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
- }
- llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
- const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
- return emitParallelOrTeamsOutlinedFunction(
- CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
- }
// Emit the outlined function for a task (or taskloop) region. For untied
// tasks the UntiedTaskActionTy re-enqueues the task through __kmpc_omp_task,
// and the number of generated task parts is reported via NumberOfParts.
llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  // Callback for untied tasks: call __kmpc_omp_task(loc, tid, task_t) to put
  // the task back on the queue.
  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
                                              PrePostActionTy &) {
    llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
    llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
    llvm::Value *TaskArgs[] = {
        UpLoc, ThreadID,
        CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                    TaskTVar->getType()->castAs<PointerType>())
            .getPointer(CGF)};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_omp_task),
                        TaskArgs);
  };
  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
                                                            UntiedCodeGen);
  CodeGen.setAction(Action);
  // Unlike parallel regions, tasks receive the thread id by value.
  assert(!ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 for tasks");
  const OpenMPDirectiveKind Region =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
                                                      : OMPD_task;
  const CapturedStmt *CS = D.getCapturedStmt(Region);
  // Determine whether the task region may execute a 'cancel' construct.
  bool HasCancel = false;
  if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
    HasCancel = TD->hasCancel();
  else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
    HasCancel = TD->hasCancel();
  else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
    HasCancel = TD->hasCancel();
  else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
    HasCancel = TD->hasCancel();
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
                                        InnermostKind, HasCancel, Action);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
  // The part count is only meaningful for untied tasks.
  if (!Tied)
    NumberOfParts = Action.getNumberOfParts();
  return Res;
}
- void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
- bool AtCurrentPoint) {
- auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
- assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
- llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
- if (AtCurrentPoint) {
- Elem.second.ServiceInsertPt = new llvm::BitCastInst(
- Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
- } else {
- Elem.second.ServiceInsertPt =
- new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
- Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
- }
- }
- void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
- auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
- if (Elem.second.ServiceInsertPt) {
- llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
- Elem.second.ServiceInsertPt = nullptr;
- Ptr->eraseFromParent();
- }
- }
- static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
- SourceLocation Loc,
- SmallString<128> &Buffer) {
- llvm::raw_svector_ostream OS(Buffer);
- // Build debug location
- PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
- OS << ";" << PLoc.getFilename() << ";";
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
- OS << FD->getQualifiedNameAsString();
- OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
- return OS.str();
- }
// Build (or reuse) the ident_t* location descriptor for Loc. Without debug
// info (unless EmitLoc forces it) or for an invalid Loc, the default source
// location string is used instead of the real file/function/line/column.
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned Flags, bool EmitLoc) {
  uint32_t SrcLocStrSize;
  llvm::Constant *SrcLocStr;
  if ((!EmitLoc &&
       CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) ||
      Loc.isInvalid()) {
    SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  } else {
    std::string FunctionName;
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
      FunctionName = FD->getQualifiedNameAsString();
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    const char *FileName = PLoc.getFilename();
    unsigned Line = PLoc.getLine();
    unsigned Column = PLoc.getColumn();
    SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
                                                Column, SrcLocStrSize);
  }
  unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
  // The OMPBuilder caches and uniques the resulting ident_t global.
  return OMPBuilder.getOrCreateIdent(
      SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
}
// Return (and cache per-function) the OpenMP global thread id for the current
// function, either by reusing the outlined region's thread-id argument or by
// emitting a call to __kmpc_global_thread_num at the service insert point.
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");
  // If the OpenMPIRBuilder is used we need to use it for all thread id calls as
  // the clang invariants used below might be broken.
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    SmallString<128> Buffer;
    OMPBuilder.updateToLocation(CGF.Builder.saveIP());
    uint32_t SrcLocStrSize;
    auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
        getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
    return OMPBuilder.getOrCreateThreadID(
        OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
  }
  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    if (OMPRegionInfo->getThreadIDVariable()) {
      // Check if this an outlined function with thread id passed as argument.
      LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
      llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
      // Only reuse the argument when no landing pad can intervene between the
      // entry block and the point of use (or the pointer lives in one of
      // those two blocks); otherwise fall through and call the runtime.
      if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
          !CGF.getLangOpts().CXXExceptions ||
          CGF.Builder.GetInsertBlock() == TopBlock ||
          !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
              TopBlock ||
          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
              CGF.Builder.GetInsertBlock()) {
        ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == TopBlock) {
          auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
          Elem.second.ThreadID = ThreadID;
        }
        return ThreadID;
      }
    }
  }
  // This is not an outlined function region - need to call __kmpc_int32
  // kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  if (!Elem.second.ServiceInsertPt)
    setLocThreadIdInsertPt(CGF);
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
  llvm::CallInst *Call = CGF.Builder.CreateCall(
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  Elem.second.ThreadID = Call;
  return Call;
}
- void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
- assert(CGF.CurFn && "No function in current CodeGenFunction.");
- if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
- clearLocThreadIdInsertPt(CGF);
- OpenMPLocThreadIDMap.erase(CGF.CurFn);
- }
- if (FunctionUDRMap.count(CGF.CurFn) > 0) {
- for(const auto *D : FunctionUDRMap[CGF.CurFn])
- UDRMap.erase(D);
- FunctionUDRMap.erase(CGF.CurFn);
- }
- auto I = FunctionUDMMap.find(CGF.CurFn);
- if (I != FunctionUDMMap.end()) {
- for(const auto *D : I->second)
- UDMMap.erase(D);
- FunctionUDMMap.erase(I);
- }
- LastprivateConditionalToTypes.erase(CGF.CurFn);
- FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
- }
// Return the ident_t* type used for location arguments; the type itself is
// owned and cached by the underlying OpenMPIRBuilder.
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
  return OMPBuilder.IdentPtr;
}
- llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
- if (!Kmpc_MicroTy) {
- // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
- llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
- llvm::PointerType::getUnqual(CGM.Int32Ty)};
- Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
- }
- return llvm::PointerType::getUnqual(Kmpc_MicroTy);
- }
- llvm::FunctionCallee
- CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned,
- bool IsGPUDistribute) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name;
- if (IsGPUDistribute)
- Name = IVSize == 32 ? (IVSigned ? "__kmpc_distribute_static_init_4"
- : "__kmpc_distribute_static_init_4u")
- : (IVSigned ? "__kmpc_distribute_static_init_8"
- : "__kmpc_distribute_static_init_8u");
- else
- Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
- : "__kmpc_for_static_init_4u")
- : (IVSigned ? "__kmpc_for_static_init_8"
- : "__kmpc_for_static_init_8u");
- llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- auto *PtrTy = llvm::PointerType::getUnqual(ITy);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- CGM.Int32Ty, // schedtype
- llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
- PtrTy, // p_lower
- PtrTy, // p_upper
- PtrTy, // p_stride
- ITy, // incr
- ITy // chunk
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
- }
- llvm::FunctionCallee
- CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name =
- IVSize == 32
- ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
- : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
- llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- CGM.Int32Ty, // schedtype
- ITy, // lower
- ITy, // upper
- ITy, // stride
- ITy // chunk
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
- }
- llvm::FunctionCallee
- CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name =
- IVSize == 32
- ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
- : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
- }
- llvm::FunctionCallee
- CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name =
- IVSize == 32
- ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
- : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
- llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- auto *PtrTy = llvm::PointerType::getUnqual(ITy);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
- PtrTy, // p_lower
- PtrTy, // p_upper
- PtrTy // p_stride
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
- }
- /// Obtain information that uniquely identifies a target entry. This
- /// consists of the file and device IDs as well as line number associated with
- /// the relevant entry source location.
- static llvm::TargetRegionEntryInfo
- getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
- StringRef ParentName = "") {
- SourceManager &SM = C.getSourceManager();
- // The loc should be always valid and have a file ID (the user cannot use
- // #pragma directives in macros)
- assert(Loc.isValid() && "Source location is expected to be always valid.");
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
- llvm::sys::fs::UniqueID ID;
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
- PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- SM.getDiagnostics().Report(diag::err_cannot_open_file)
- << PLoc.getFilename() << EC.message();
- }
- return llvm::TargetRegionEntryInfo(ParentName, ID.getDevice(), ID.getFile(),
- PLoc.getLine());
- }
// For 'declare target link' variables (and to/enter variables under unified
// shared memory), return the address of the generated "_decl_tgt_ref_ptr"
// indirection pointer; otherwise return an invalid Address.
Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
  // The simd-only mode emits no device code, so no reference pointer exists.
  if (CGM.getLangOpts().OpenMPSimd)
    return Address::invalid();
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
              ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               HasRequiresUnifiedSharedMemory))) {
    // Build the pointer's name: "<mangled-name>[_<fileid>]_decl_tgt_ref_ptr";
    // the file id disambiguates internal-linkage variables across TUs.
    SmallString<64> PtrName;
    {
      llvm::raw_svector_ostream OS(PtrName);
      OS << CGM.getMangledName(GlobalDecl(VD));
      if (!VD->isExternallyVisible()) {
        auto EntryInfo = getTargetEntryUniqueInfo(
            CGM.getContext(), VD->getCanonicalDecl()->getBeginLoc());
        OS << llvm::format("_%x", EntryInfo.FileID);
      }
      OS << "_decl_tgt_ref_ptr";
    }
    llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
    QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
    llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(PtrTy);
    if (!Ptr) {
      // First use: create the global and, on the host, point it at VD.
      Ptr = OMPBuilder.getOrCreateInternalVariable(LlvmPtrTy, PtrName);
      auto *GV = cast<llvm::GlobalVariable>(Ptr);
      GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
      if (!CGM.getLangOpts().OpenMPIsDevice)
        GV->setInitializer(CGM.GetAddrOfGlobal(VD));
      registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
    }
    return Address(Ptr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
  }
  return Address::invalid();
}
- llvm::Constant *
- CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
- assert(!CGM.getLangOpts().OpenMPUseTLS ||
- !CGM.getContext().getTargetInfo().isTLSSupported());
- // Lookup the entry, lazily creating it if necessary.
- std::string Suffix = getName({"cache", ""});
- return OMPBuilder.getOrCreateInternalVariable(
- CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix).str());
- }
- Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- Address VDAddr,
- SourceLocation Loc) {
- if (CGM.getLangOpts().OpenMPUseTLS &&
- CGM.getContext().getTargetInfo().isTLSSupported())
- return VDAddr;
- llvm::Type *VarTy = VDAddr.getElementType();
- llvm::Value *Args[] = {
- emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy),
- CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
- getOrCreateThreadPrivateCache(VD)};
- return Address(
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
- Args),
- CGF.Int8Ty, VDAddr.getAlignment());
- }
- void CGOpenMPRuntime::emitThreadPrivateVarInit(
- CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
- llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
- // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
- // library.
- llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_global_thread_num),
- OMPLoc);
- // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
- // to register constructor/destructor for variable.
- llvm::Value *Args[] = {
- OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
- Ctor, CopyCtor, Dtor};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
- Args);
- }
// Emit (once per variable) the ctor/dtor helper functions for a threadprivate
// definition and register them with the runtime. Returns the synthesized
// initialization function when CGF is null (to be run as a global init),
// otherwise emits the registration inline into *CGF and returns nullptr.
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc,
    bool PerformInit, CodeGenFunction *CGF) {
  // Native TLS needs no runtime registration at all.
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return nullptr;
  VD = VD->getDefinition(CGM.getContext());
  // Only handle each definition once (ThreadPrivateWithDefinition is a set).
  if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
    QualType ASTTy = VD->getType();
    llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
    const Expr *Init = VD->getAnyInitializer();
    if (CGM.getLangOpts().CPlusPlus && PerformInit) {
      // Generate function that re-emits the declaration's initializer into the
      // threadprivate copy of the variable VD
      CodeGenFunction CtorCGF(CGM);
      FunctionArgList Args;
      // The ctor receives the address of the thread's copy as a void*.
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);
      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidPtrTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_ctor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
                            Args, Loc, Loc);
      llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      Address Arg(ArgVal, CtorCGF.Int8Ty, VDAddr.getAlignment());
      Arg = CtorCGF.Builder.CreateElementBitCast(
          Arg, CtorCGF.ConvertTypeForMem(ASTTy));
      CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      // The ctor returns the address it was given.
      ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
      CtorCGF.FinishFunction();
      Ctor = Fn;
    }
    if (VD->getType().isDestructedType() != QualType::DK_none) {
      // Generate function that emits destructor call for the threadprivate copy
      // of the variable VD
      CodeGenFunction DtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);
      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_dtor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
                            Loc, Loc);
      // Create a scope with an artificial location for the body of this function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
          DtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
      DtorCGF.emitDestroy(
          Address(ArgVal, DtorCGF.Int8Ty, VDAddr.getAlignment()), ASTTy,
          DtorCGF.getDestroyer(ASTTy.isDestructedType()),
          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
    }
    // Do not emit init function if it is not required.
    if (!Ctor && !Dtor)
      return nullptr;
    llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
                                               /*isVarArg=*/false)
                           ->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL - reserved by runtime, but currently it requires that this
    // parameter is always NULL. Otherwise it fires assertion.
    CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
    if (Ctor == nullptr) {
      auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Ctor = llvm::Constant::getNullValue(CtorTy);
    }
    if (Dtor == nullptr) {
      auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Dtor = llvm::Constant::getNullValue(DtorTy);
    }
    if (!CGF) {
      // No current function: synthesize a standalone init function that the
      // caller can schedule as a global initializer.
      auto *InitFunctionTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
      std::string Name = getName({"__omp_threadprivate_init_", ""});
      llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
          InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
      CodeGenFunction InitCGF(CGM);
      FunctionArgList ArgList;
      InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
                            CGM.getTypes().arrangeNullaryFunction(), ArgList,
                            Loc, Loc);
      emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
      InitCGF.FinishFunction();
      return InitFunction;
    }
    emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  }
  return nullptr;
}
// Emit (and register as offload entries) the constructor/destructor helpers
// for a 'declare target' variable definition. Returns true when compiling for
// the device (the caller uses this to suppress the host definition).
bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
                                                     llvm::GlobalVariable *Addr,
                                                     bool PerformInit) {
  // Nothing to do without any device to offload to (and not compiling device
  // code ourselves).
  if (CGM.getLangOpts().OMPTargetTriples.empty() &&
      !CGM.getLangOpts().OpenMPIsDevice)
    return false;
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // 'link' variables (and to/enter under unified shared memory) are handled
  // through the indirection pointer instead.
  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
      ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
        *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
       HasRequiresUnifiedSharedMemory))
    return CGM.getLangOpts().OpenMPIsDevice;
  VD = VD->getDefinition(CGM.getContext());
  assert(VD && "Unknown VarDecl");
  // Handle each definition only once.
  if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
    return CGM.getLangOpts().OpenMPIsDevice;
  QualType ASTTy = VD->getType();
  SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
  // Produce the unique prefix to identify the new target regions. We use
  // the source location of the variable declaration which we know to not
  // conflict with any target region.
  auto EntryInfo =
      getTargetEntryUniqueInfo(CGM.getContext(), Loc, VD->getName());
  SmallString<128> Buffer, Out;
  OffloadEntriesInfoManager.getTargetRegionEntryFnName(Buffer, EntryInfo);
  const Expr *Init = VD->getAnyInitializer();
  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
    llvm::Constant *Ctor;
    llvm::Constant *ID;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Generate function that re-emits the declaration's initializer into
      // the threadprivate copy of the variable VD
      CodeGenFunction CtorCGF(CGM);
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
          FTy, Twine(Buffer, "_ctor"), FI, Loc, false,
          llvm::GlobalValue::WeakODRLinkage);
      Fn->setVisibility(llvm::GlobalValue::ProtectedVisibility);
      if (CGM.getTriple().isAMDGCN())
        Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
      auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
                            FunctionArgList(), Loc, Loc);
      auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
      // The initializer is emitted against an address-space-0 view of Addr.
      llvm::Constant *AddrInAS0 = Addr;
      if (Addr->getAddressSpace() != 0)
        AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
            Addr, llvm::PointerType::getWithSamePointeeType(
                      cast<llvm::PointerType>(Addr->getType()), 0));
      CtorCGF.EmitAnyExprToMem(Init,
                               Address(AddrInAS0, Addr->getValueType(),
                                       CGM.getContext().getDeclAlign(VD)),
                               Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      CtorCGF.FinishFunction();
      Ctor = Fn;
      ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
    } else {
      // On the host only a placeholder global is needed to anchor the entry.
      Ctor = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
          llvm::GlobalValue::PrivateLinkage,
          llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
      ID = Ctor;
    }
    // Register the information for the entry associated with the constructor.
    Out.clear();
    auto CtorEntryInfo = EntryInfo;
    CtorEntryInfo.ParentName = Twine(Buffer, "_ctor").toStringRef(Out);
    OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
        CtorEntryInfo, Ctor, ID,
        llvm::OffloadEntriesInfoManager::OMPTargetRegionEntryCtor);
  }
  if (VD->getType().isDestructedType() != QualType::DK_none) {
    llvm::Constant *Dtor;
    llvm::Constant *ID;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Generate function that emits destructor call for the threadprivate
      // copy of the variable VD
      CodeGenFunction DtorCGF(CGM);
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
          FTy, Twine(Buffer, "_dtor"), FI, Loc, false,
          llvm::GlobalValue::WeakODRLinkage);
      Fn->setVisibility(llvm::GlobalValue::ProtectedVisibility);
      if (CGM.getTriple().isAMDGCN())
        Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
                            FunctionArgList(), Loc, Loc);
      // Create a scope with an artificial location for the body of this
      // function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      llvm::Constant *AddrInAS0 = Addr;
      if (Addr->getAddressSpace() != 0)
        AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
            Addr, llvm::PointerType::getWithSamePointeeType(
                      cast<llvm::PointerType>(Addr->getType()), 0));
      DtorCGF.emitDestroy(Address(AddrInAS0, Addr->getValueType(),
                                  CGM.getContext().getDeclAlign(VD)),
                          ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
      ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
    } else {
      // Host-side placeholder, as for the constructor above.
      Dtor = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
          llvm::GlobalValue::PrivateLinkage,
          llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
      ID = Dtor;
    }
    // Register the information for the entry associated with the destructor.
    Out.clear();
    auto DtorEntryInfo = EntryInfo;
    DtorEntryInfo.ParentName = Twine(Buffer, "_dtor").toStringRef(Out);
    OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
        DtorEntryInfo, Dtor, ID,
        llvm::OffloadEntriesInfoManager::OMPTargetRegionEntryDtor);
  }
  return CGM.getLangOpts().OpenMPIsDevice;
}
// Return the address of an "artificial" (compiler-generated) threadprivate
// variable identified by Name: a plain TLS global when TLS is usable,
// otherwise a __kmpc_threadprivate_cached lookup with a dedicated cache.
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                                          QualType VarType,
                                                          StringRef Name) {
  std::string Suffix = getName({"artificial", ""});
  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
  llvm::GlobalVariable *GAddr = OMPBuilder.getOrCreateInternalVariable(
      VarLVType, Twine(Name).concat(Suffix).str());
  if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getTarget().isTLSSupported()) {
    // Fast path: make the global itself thread-local.
    GAddr->setThreadLocal(/*Val=*/true);
    return Address(GAddr, GAddr->getValueType(),
                   CGM.getContext().getTypeAlignInChars(VarType));
  }
  // Runtime path: __kmpc_threadprivate_cached(loc, tid, &var, size, &cache).
  std::string CacheSuffix = getName({"cache", ""});
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, SourceLocation()),
      getThreadID(CGF, SourceLocation()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
      CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
                                /*isSigned=*/false),
      OMPBuilder.getOrCreateInternalVariable(
          CGM.VoidPtrPtrTy,
          Twine(Name).concat(Suffix).concat(CacheSuffix).str())};
  return Address(
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitRuntimeCall(
              OMPBuilder.getOrCreateRuntimeFunction(
                  CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
              Args),
          VarLVType->getPointerTo(/*AddrSpace=*/0)),
      VarLVType, CGM.getContext().getTypeAlignInChars(VarType));
}
/// Emits code for an OpenMP 'if' clause using the \p ThenGen and \p ElseGen
/// code generators: when the condition constant-folds only the live arm is
/// emitted, otherwise a regular conditional branch is generated.
void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
                                   const RegionCodeGenTy &ThenGen,
                                   const RegionCodeGenTy &ElseGen) {
  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
    if (CondConstant)
      ThenGen(CGF);
    else
      ElseGen(CGF);
    return;
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
  llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);

  // Emit the 'then' code.
  CGF.EmitBlock(ThenBlock);
  ThenGen(CGF);
  CGF.EmitBranch(ContBlock);
  // Emit the 'else' code if present.
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBlock(ElseBlock);
  ElseGen(CGF);
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBranch(ContBlock);
  // Emit the continuation block for code after the if.
  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}
/// Emits a call to the outlined parallel region \p OutlinedFn. Without an
/// 'if' clause (or when it is true) this is a direct
/// __kmpc_fork_call(loc, n, microtask, var1, ..., varn); when the 'if'
/// condition may be false, a serialized fallback is emitted via
/// __kmpc_serialized_parallel / direct call / __kmpc_end_serialized_parallel.
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Function *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars,
                                       const Expr *IfCond,
                                       llvm::Value *NumThreads) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  auto &M = CGM.getModule();
  auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
                    this](CodeGenFunction &CGF, PrePostActionTy &) {
    // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *Args[] = {
        RTLoc,
        CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
        CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
    llvm::SmallVector<llvm::Value *, 16> RealArgs;
    RealArgs.append(std::begin(Args), std::end(Args));
    RealArgs.append(CapturedVars.begin(), CapturedVars.end());

    llvm::FunctionCallee RTLFn =
        OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
    CGF.EmitRuntimeCall(RTLFn, RealArgs);
  };
  auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
                    this](CodeGenFunction &CGF, PrePostActionTy &) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
    // Build calls:
    // __kmpc_serialized_parallel(&Loc, GTid);
    llvm::Value *Args[] = {RTLoc, ThreadID};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            M, OMPRTL___kmpc_serialized_parallel),
                        Args);

    // OutlinedFn(&gtid, &zero_bound, CapturedStruct);
    Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
    Address ZeroAddrBound =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound);
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    // ThreadId for serialized parallels is 0.
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());

    // Ensure we do not inline the function. This is trivially true for the ones
    // passed to __kmpc_fork_call but the ones called in serialized regions
    // could be inlined. This is not a perfect but it is closer to the invariant
    // we want, namely, every data environment starts with a new function.
    // TODO: We should pass the if condition to the runtime function and do the
    //       handling there. Much cleaner code.
    OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline);
    OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
    RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);

    // __kmpc_end_serialized_parallel(&Loc, GTid);
    llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            M, OMPRTL___kmpc_end_serialized_parallel),
                        EndArgs);
  };
  if (IfCond) {
    emitIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}
// If we're inside an (outlined) parallel region, use the region info's
// thread-ID variable (it is passed in a first argument of the outlined function
// as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in
// regular serial code region, get thread ID by calling kmp_int32
// kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
// return the address of that temp.
Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
                                             SourceLocation Loc) {
  // Inside an outlined region the gtid is already available by reference.
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    if (OMPRegionInfo->getThreadIDVariable())
      return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);

  // Serial region: materialize the thread ID into a fresh i32 temporary.
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
  Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
  CGF.EmitStoreOfScalar(ThreadID,
                        CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));

  return ThreadIDTemp;
}
- llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
- std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
- std::string Name = getName({Prefix, "var"});
- return OMPBuilder.getOrCreateInternalVariable(KmpCriticalNameTy, Name);
- }
namespace {
/// Common pre(post)-action for different OpenMP constructs.
/// Emits EnterCallee(EnterArgs) before the region and ExitCallee(ExitArgs)
/// after it. When \p Conditional is set, the enter call's result guards the
/// region body: a zero result skips it by branching to a continuation block,
/// which the caller must finish by invoking Done().
class CommonActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  CommonActionTy(llvm::FunctionCallee EnterCallee,
                 ArrayRef<llvm::Value *> EnterArgs,
                 llvm::FunctionCallee ExitCallee,
                 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      // if (<enter-call result> != 0) { <region body> }
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  // NOTE(review): Done() assumes Enter() ran in Conditional mode (ContBlock
  // is nullptr otherwise); callers in this file only invoke it in that mode.
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
} // anonymous namespace
/// Emits a critical region guarded by the named lock; with a 'hint' clause
/// the hint value is appended and __kmpc_critical_with_hint is used instead.
void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
                                         StringRef CriticalName,
                                         const RegionCodeGenTy &CriticalOpGen,
                                         SourceLocation Loc, const Expr *Hint) {
  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
  // CriticalOpGen();
  // __kmpc_end_critical(ident_t *, gtid, Lock);
  // Prepare arguments and build a call to __kmpc_critical
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         getCriticalRegionLock(CriticalName)};
  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
                                                std::end(Args));
  if (Hint) {
    // The hint expression is evaluated here and passed as an unsigned i32.
    EnterArgs.push_back(CGF.Builder.CreateIntCast(
        CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
  }
  CommonActionTy Action(
      OMPBuilder.getOrCreateRuntimeFunction(
          CGM.getModule(),
          Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
      EnterArgs,
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___kmpc_end_critical),
      Args);
  CriticalOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
/// Emits a master region: the body runs only when __kmpc_master returns
/// non-zero, and is closed with __kmpc_end_master.
void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &MasterOpGen,
                                       SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // if(__kmpc_master(ident_t *, gtid)) {
  //   MasterOpGen();
  //   __kmpc_end_master(ident_t *, gtid);
  // }
  // Prepare arguments and build a call to __kmpc_master
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_master),
                        Args,
                        OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_end_master),
                        Args,
                        /*Conditional=*/true);
  MasterOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
  // Close the conditional region opened by the action's Enter().
  Action.Done(CGF);
}
/// Emits a masked region: the body runs only when __kmpc_masked returns
/// non-zero for the (optional) filter thread, closed with __kmpc_end_masked.
void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &MaskedOpGen,
                                       SourceLocation Loc, const Expr *Filter) {
  if (!CGF.HaveInsertPoint())
    return;
  // if(__kmpc_masked(ident_t *, gtid, filter)) {
  //   MaskedOpGen();
  //   __kmpc_end_masked(iden_t *, gtid);
  // }
  // Prepare arguments and build a call to __kmpc_masked
  // NOTE(review): EmitScalarExpr's second parameter is `bool
  // IgnoreResultAssign`; passing CGF.Int32Ty (a non-null pointer) converts to
  // `true`. This looks unintended — confirm against
  // CodeGenFunction::EmitScalarExpr's signature.
  llvm::Value *FilterVal = Filter
                               ? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
                               : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         FilterVal};
  llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
                            getThreadID(CGF, Loc)};
  CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_masked),
                        Args,
                        OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_end_masked),
                        ArgsEnd,
                        /*Conditional=*/true);
  MaskedOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
  // Close the conditional region opened by the action's Enter().
  Action.Done(CGF);
}
/// Emits a taskyield point, either through the OpenMPIRBuilder or via a
/// direct __kmpc_omp_taskyield call, then lets an enclosing untied-task
/// region emit its resume switch.
void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
                                        SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
    OMPBuilder.createTaskyield(CGF.Builder);
  } else {
    // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
    llvm::Value *Args[] = {
        emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
        llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
                        Args);
  }

  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    Region->emitUntiedSwitch(CGF);
}
/// Emits a taskgroup region bracketed by __kmpc_taskgroup and
/// __kmpc_end_taskgroup.
void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
                                          const RegionCodeGenTy &TaskgroupOpGen,
                                          SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // __kmpc_taskgroup(ident_t *, gtid);
  // TaskgroupOpGen();
  // __kmpc_end_taskgroup(ident_t *, gtid);
  // Prepare arguments and build a call to __kmpc_taskgroup
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_taskgroup),
                        Args,
                        OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
                        Args);
  TaskgroupOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
}
- /// Given an array of pointers to variables, project the address of a
- /// given variable.
- static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
- unsigned Index, const VarDecl *Var) {
- // Pull out the pointer to the variable.
- Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
- llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(Var->getType());
- return Address(
- CGF.Builder.CreateBitCast(
- Ptr, ElemTy->getPointerTo(Ptr->getType()->getPointerAddressSpace())),
- ElemTy, CGF.getContext().getDeclAlign(Var));
- }
/// Builds the copy helper passed to __kmpc_copyprivate:
///   void copy_func(void *LHSArg, void *RHSArg)
/// Both arguments point to arrays of void* referring to the copyprivate
/// variables; each destination element is assigned from the matching source
/// element using the precomputed assignment expression.
static llvm::Value *emitCopyprivateCopyFunction(
    CodeGenModule &CGM, llvm::Type *ArgsElemType,
    ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
    ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
    SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  // void copy_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  // Dest = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
                  ArgsElemType->getPointerTo()),
              ArgsElemType, CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
                  ArgsElemType->getPointerTo()),
              ArgsElemType, CGF.getPointerAlign());
  // *(Type0*)Dst[0] = *(Type0*)Src[0];
  // *(Type1*)Dst[1] = *(Type1*)Src[1];
  // ...
  // *(Typen*)Dst[n] = *(Typen*)Src[n];
  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
    const auto *DestVar =
        cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
    Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);

    const auto *SrcVar =
        cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
    Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);

    const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
    QualType Type = VD->getType();
    CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
  }
  CGF.FinishFunction();
  return Fn;
}
/// Emits a single region guarded by __kmpc_single/__kmpc_end_single. When
/// copyprivate variables are present, a did_it flag records whether this
/// thread executed the region and __kmpc_copyprivate broadcasts the values.
void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &SingleOpGen,
                                       SourceLocation Loc,
                                       ArrayRef<const Expr *> CopyprivateVars,
                                       ArrayRef<const Expr *> SrcExprs,
                                       ArrayRef<const Expr *> DstExprs,
                                       ArrayRef<const Expr *> AssignmentOps) {
  if (!CGF.HaveInsertPoint())
    return;
  assert(CopyprivateVars.size() == SrcExprs.size() &&
         CopyprivateVars.size() == DstExprs.size() &&
         CopyprivateVars.size() == AssignmentOps.size());
  ASTContext &C = CGM.getContext();
  // int32 did_it = 0;
  // if(__kmpc_single(ident_t *, gtid)) {
  //   SingleOpGen();
  //   __kmpc_end_single(ident_t *, gtid);
  //   did_it = 1;
  // }
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);
  Address DidIt = Address::invalid();
  if (!CopyprivateVars.empty()) {
    // int32 did_it = 0;
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
    CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
  }
  // Prepare arguments and build a call to __kmpc_single
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_single),
                        Args,
                        OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_end_single),
                        Args,
                        /*Conditional=*/true);
  SingleOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
  if (DidIt.isValid()) {
    // did_it = 1; (still inside the guarded region; Done() closes it below)
    CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
  }
  Action.Done(CGF);
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);
  if (DidIt.isValid()) {
    llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
    QualType CopyprivateArrayTy = C.getConstantArrayType(
        C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    // Create a list of all private variables for copyprivate.
    Address CopyprivateList =
        CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
    for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
      Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
      CGF.Builder.CreateStore(
          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
              CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
              CGF.VoidPtrTy),
          Elem);
    }
    // Build function that copies private values from single region to all other
    // threads in the corresponding parallel region.
    llvm::Value *CpyFn = emitCopyprivateCopyFunction(
        CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy), CopyprivateVars,
        SrcExprs, DstExprs, AssignmentOps, Loc);
    llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
    Address CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        CopyprivateList, CGF.VoidPtrTy, CGF.Int8Ty);
    llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
    llvm::Value *Args[] = {
        emitUpdateLocation(CGF, Loc), // ident_t *<loc>
        getThreadID(CGF, Loc),        // i32 <gtid>
        BufSize,                      // size_t <buf_size>
        CL.getPointer(),              // void *<copyprivate list>
        CpyFn,                        // void (*) (void *, void *) <copy_func>
        DidItVal                      // i32 did_it
    };
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_copyprivate),
                        Args);
  }
}
/// Emits an ordered region. With the threads semantics the body is bracketed
/// by __kmpc_ordered/__kmpc_end_ordered; otherwise it is emitted inline with
/// no runtime calls.
void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
                                        const RegionCodeGenTy &OrderedOpGen,
                                        SourceLocation Loc, bool IsThreads) {
  if (!CGF.HaveInsertPoint())
    return;
  // __kmpc_ordered(ident_t *, gtid);
  // OrderedOpGen();
  // __kmpc_end_ordered(ident_t *, gtid);
  // Prepare arguments and build a call to __kmpc_ordered
  if (IsThreads) {
    llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
    CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_ordered),
                          Args,
                          OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_end_ordered),
                          Args);
    OrderedOpGen.setAction(Action);
    emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
    return;
  }
  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
- unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
- unsigned Flags;
- if (Kind == OMPD_for)
- Flags = OMP_IDENT_BARRIER_IMPL_FOR;
- else if (Kind == OMPD_sections)
- Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
- else if (Kind == OMPD_single)
- Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
- else if (Kind == OMPD_barrier)
- Flags = OMP_IDENT_BARRIER_EXPL;
- else
- Flags = OMP_IDENT_BARRIER_IMPL;
- return Flags;
- }
/// Chooses a default schedule for a loop directive. For doacross loops
/// (an 'ordered' clause with a loop count) forces schedule(static, 1);
/// otherwise leaves \p ScheduleKind and \p ChunkExpr untouched.
void CGOpenMPRuntime::getDefaultScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
  // Check if the loop directive is actually a doacross loop directive. In this
  // case choose static, 1 schedule.
  if (llvm::any_of(
          S.getClausesOfKind<OMPOrderedClause>(),
          [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
    ScheduleKind = OMPC_SCHEDULE_static;
    // Chunk size is 1 in this case.
    llvm::APInt ChunkSize(32, 1);
    ChunkExpr = IntegerLiteral::Create(
        CGF.getContext(), ChunkSize,
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
        SourceLocation());
  }
}
/// Emits a barrier. Delegates to the OpenMPIRBuilder when enabled; otherwise
/// calls __kmpc_cancel_barrier inside cancellable regions (optionally
/// checking its result to exit the construct) or plain __kmpc_barrier.
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                                      OpenMPDirectiveKind Kind, bool EmitChecks,
                                      bool ForceSimpleCall) {
  // Check if we should use the OMPBuilder
  auto *OMPRegionInfo =
      dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
    CGF.Builder.restoreIP(OMPBuilder.createBarrier(
        CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
    return;
  }

  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  // Build call __kmpc_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
  // thread_id);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  if (OMPRegionInfo) {
    if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
      llvm::Value *Result = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                                OMPRTL___kmpc_cancel_barrier),
          Args);
      if (EmitChecks) {
        // if (__kmpc_cancel_barrier()) {
        //   exit from construct;
        // }
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
        llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
        llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
        CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
        CGF.EmitBlock(ExitBB);
        //   exit from construct;
        CodeGenFunction::JumpDest CancelDestination =
            CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
        CGF.EmitBranchThroughCleanup(CancelDestination);
        CGF.EmitBlock(ContBB, /*IsFinished=*/true);
      }
      return;
    }
  }
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_barrier),
                      Args);
}
/// Emits __kmpc_error for the 'error' directive. Severity 2 is fatal, 1 is a
/// warning; \p ME is the optional 'message' clause string (null pointer when
/// absent).
void CGOpenMPRuntime::emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc,
                                    Expr *ME, bool IsFatal) {
  llvm::Value *MVL =
      ME ? CGF.EmitStringLiteralLValue(cast<StringLiteral>(ME)).getPointer(CGF)
         : llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  // Build call void __kmpc_error(ident_t *loc, int severity, const char
  // *message)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc, /*Flags=*/0, /*GenLoc=*/true),
      llvm::ConstantInt::get(CGM.Int32Ty, IsFatal ? 2 : 1),
      CGF.Builder.CreatePointerCast(MVL, CGM.Int8PtrTy)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_error),
                      Args);
}
- /// Map the OpenMP loop schedule to the runtime enumeration.
- static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
- bool Chunked, bool Ordered) {
- switch (ScheduleKind) {
- case OMPC_SCHEDULE_static:
- return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
- : (Ordered ? OMP_ord_static : OMP_sch_static);
- case OMPC_SCHEDULE_dynamic:
- return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
- case OMPC_SCHEDULE_guided:
- return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
- case OMPC_SCHEDULE_runtime:
- return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
- case OMPC_SCHEDULE_auto:
- return Ordered ? OMP_ord_auto : OMP_sch_auto;
- case OMPC_SCHEDULE_unknown:
- assert(!Chunked && "chunk was specified but schedule kind not known");
- return Ordered ? OMP_ord_static : OMP_sch_static;
- }
- llvm_unreachable("Unexpected runtime schedule");
- }
- /// Map the OpenMP distribute schedule to the runtime enumeration.
- static OpenMPSchedType
- getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
- // only static is allowed for dist_schedule
- return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
- }
- bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
- bool Chunked) const {
- OpenMPSchedType Schedule =
- getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
- return Schedule == OMP_sch_static;
- }
- bool CGOpenMPRuntime::isStaticNonchunked(
- OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
- OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
- return Schedule == OMP_dist_sch_static;
- }
- bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
- bool Chunked) const {
- OpenMPSchedType Schedule =
- getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
- return Schedule == OMP_sch_static_chunked;
- }
- bool CGOpenMPRuntime::isStaticChunked(
- OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
- OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
- return Schedule == OMP_dist_sch_static_chunked;
- }
- bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
- OpenMPSchedType Schedule =
- getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
- assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
- return Schedule != OMP_sch_static;
- }
- static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
- OpenMPScheduleClauseModifier M1,
- OpenMPScheduleClauseModifier M2) {
- int Modifier = 0;
- switch (M1) {
- case OMPC_SCHEDULE_MODIFIER_monotonic:
- Modifier = OMP_sch_modifier_monotonic;
- break;
- case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
- Modifier = OMP_sch_modifier_nonmonotonic;
- break;
- case OMPC_SCHEDULE_MODIFIER_simd:
- if (Schedule == OMP_sch_static_chunked)
- Schedule = OMP_sch_static_balanced_chunked;
- break;
- case OMPC_SCHEDULE_MODIFIER_last:
- case OMPC_SCHEDULE_MODIFIER_unknown:
- break;
- }
- switch (M2) {
- case OMPC_SCHEDULE_MODIFIER_monotonic:
- Modifier = OMP_sch_modifier_monotonic;
- break;
- case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
- Modifier = OMP_sch_modifier_nonmonotonic;
- break;
- case OMPC_SCHEDULE_MODIFIER_simd:
- if (Schedule == OMP_sch_static_chunked)
- Schedule = OMP_sch_static_balanced_chunked;
- break;
- case OMPC_SCHEDULE_MODIFIER_last:
- case OMPC_SCHEDULE_MODIFIER_unknown:
- break;
- }
- // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Desription.
- // If the static schedule kind is specified or if the ordered clause is
- // specified, and if the nonmonotonic modifier is not specified, the effect is
- // as if the monotonic modifier is specified. Otherwise, unless the monotonic
- // modifier is specified, the effect is as if the nonmonotonic modifier is
- // specified.
- if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
- if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
- Schedule == OMP_sch_static_balanced_chunked ||
- Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
- Schedule == OMP_dist_sch_static_chunked ||
- Schedule == OMP_dist_sch_static))
- Modifier = OMP_sch_modifier_nonmonotonic;
- }
- return Schedule | Modifier;
- }
/// Emits the dispatch-loop initialization call for dynamic/guided/runtime
/// (and ordered) schedules.
void CGOpenMPRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  if (!CGF.HaveInsertPoint())
    return;
  OpenMPSchedType Schedule = getRuntimeSchedule(
      ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  // Static schedules take the for_static_init path instead.
  assert(Ordered ||
         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
          Schedule != OMP_sch_static_balanced_chunked));
  // Call __kmpc_dispatch_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  //          kmp_int[32|64] lower, kmp_int[32|64] upper,
  //          kmp_int[32|64] stride, kmp_int[32|64] chunk);

  // If the Chunk was not specified in the clause - use default value 1.
  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
                                            : CGF.Builder.getIntN(IVSize, 1);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      CGF.Builder.getInt32(addMonoNonMonoModifier(
          CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
      DispatchValues.LB,                                     // Lower
      DispatchValues.UB,                                     // Upper
      CGF.Builder.getIntN(IVSize, 1),                        // Stride
      Chunk                                                  // Chunk
  };
  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
/// Builds the argument list for, and emits, a __kmpc_for_static_init-style
/// call shared by worksharing-loop and distribute lowering. Only static
/// (chunked or not) schedules are valid here.
static void emitForStaticInitCall(
    CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
    llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  if (!CGF.HaveInsertPoint())
    return;

  assert(!Values.Ordered);
  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
         Schedule == OMP_sch_static_balanced_chunked ||
         Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
         Schedule == OMP_dist_sch_static ||
         Schedule == OMP_dist_sch_static_chunked);

  // Call __kmpc_for_static_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
  llvm::Value *Chunk = Values.Chunk;
  if (Chunk == nullptr) {
    assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
            Schedule == OMP_dist_sch_static) &&
           "expected static non-chunked schedule");
    // If the Chunk was not specified in the clause - use default value 1.
    Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
  } else {
    assert((Schedule == OMP_sch_static_chunked ||
            Schedule == OMP_sch_static_balanced_chunked ||
            Schedule == OMP_ord_static_chunked ||
            Schedule == OMP_dist_sch_static_chunked) &&
           "expected static chunked schedule");
  }
  llvm::Value *Args[] = {
      UpdateLocation,
      ThreadId,
      CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
                                                  M2)), // Schedule type
      Values.IL.getPointer(),                           // &isLastIter
      Values.LB.getPointer(),                           // &LB
      Values.UB.getPointer(),                           // &UB
      Values.ST.getPointer(),                           // &Stride
      CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
}
/// Emits static-schedule initialization for a worksharing (loop or sections)
/// directive, tagging the location with the matching OMP_IDENT_WORK_* flag.
void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
                                        SourceLocation Loc,
                                        OpenMPDirectiveKind DKind,
                                        const OpenMPScheduleTy &ScheduleKind,
                                        const StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
      ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
  assert(isOpenMPWorksharingDirective(DKind) &&
         "Expected loop-based or sections-based directive.");
  llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
                                                    isOpenMPLoopDirective(DKind)
                                                        ? OMP_IDENT_WORK_LOOP
                                                        : OMP_IDENT_WORK_SECTIONS);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::FunctionCallee StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned, false);
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
/// Emits static initialization for a distribute directive; on AMDGCN/NVPTX
/// device compilations the GPU-specific distribute init entry is selected.
void CGOpenMPRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum =
      getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  llvm::Value *UpdatedLocation =
      emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::FunctionCallee StaticInitFunction;
  bool isGPUDistribute =
      CGM.getLangOpts().OpenMPIsDevice &&
      (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX());
  StaticInitFunction = createForStaticInitFunction(
      Values.IVSize, Values.IVSigned, isGPUDistribute);

  // Distribute has no schedule modifiers.
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
                        OMPC_SCHEDULE_MODIFIER_unknown, Values);
}
/// Emits the static-loop finalization call, choosing the GPU distribute
/// variant on AMDGCN/NVPTX device compilations for distribute directives.
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind DKind) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc,
                         isOpenMPDistributeDirective(DKind)
                             ? OMP_IDENT_WORK_DISTRIBUTE
                             : isOpenMPLoopDirective(DKind)
                                   ? OMP_IDENT_WORK_LOOP
                                   : OMP_IDENT_WORK_SECTIONS),
      getThreadID(CGF, Loc)};
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  if (isOpenMPDistributeDirective(DKind) && CGM.getLangOpts().OpenMPIsDevice &&
      (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX()))
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_distribute_static_fini),
        Args);
  else
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_for_static_fini),
                        Args);
}
/// Notify the runtime that the current iteration of an 'ordered' dynamic loop
/// is complete.
void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned IVSize,
                                                 bool IVSigned) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
  // (entry point selected by createDispatchFiniFunction from IVSize/IVSigned).
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
}
/// Emit the dynamic-dispatch "next chunk" query. Returns an i1 that is true
/// while the runtime still hands out work; the out-parameters receive the
/// bounds/stride of the next chunk and the last-iteration flag.
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
                                          SourceLocation Loc, unsigned IVSize,
                                          bool IVSigned, Address IL,
                                          Address LB, Address UB,
                                          Address ST) {
  // Call __kmpc_dispatch_next(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  //          kmp_int[32|64] *p_stride);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      IL.getPointer(), // &isLastIter
      LB.getPointer(), // &Lower
      UB.getPointer(), // &Upper
      ST.getPointer()  // &Stride
  };
  llvm::Value *Call =
      CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
  // The runtime returns kmp_int32; narrow it to a C++ bool for the caller.
  return CGF.EmitScalarConversion(
      Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
      CGF.getContext().BoolTy, Loc);
}
/// Emit code that forwards a num_threads clause value to the runtime before
/// the associated parallel region starts.
void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                           llvm::Value *NumThreads,
                                           SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      // The clause expression may have any integer type; the runtime expects
      // a signed 32-bit value.
      CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_push_num_threads),
                      Args);
}
/// Emit code that forwards a proc_bind clause value to the runtime before the
/// associated parallel region starts.
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                         ProcBindKind ProcBind,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      // proc_bind is passed as its numeric enum value.
      llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
                      Args);
}
/// Emit an OpenMP flush. With the OpenMPIRBuilder enabled the builder emits
/// the flush itself; otherwise a direct __kmpc_flush call is generated.
/// NOTE(review): neither path uses the AtomicOrdering parameter here —
/// presumably the ordering is handled by the caller/atomic lowering; confirm.
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
                                SourceLocation Loc, llvm::AtomicOrdering AO) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
    OMPBuilder.createFlush(CGF.Builder);
  } else {
    if (!CGF.HaveInsertPoint())
      return;
    // Build call void __kmpc_flush(ident_t *loc)
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___kmpc_flush),
                        emitUpdateLocation(CGF, Loc));
  }
}
namespace {
/// Indexes of fields for type kmp_task_t.
/// The order here must match the field order built by
/// createKmpTaskTRecordDecl(); the enumerators are used as offsets with
/// std::next(field_begin(), ...).
enum KmpTaskTFields {
  /// List of shared variables.
  KmpTaskTShareds,
  /// Task routine.
  KmpTaskTRoutine,
  /// Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace
/// Emit the offloading entries and their descriptive metadata via the
/// OpenMPIRBuilder, reporting any malformed entries as frontend diagnostics.
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
  // If we are in simd mode or there are no entries, we don't need to do
  // anything.
  if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
    return;
  // Callback the IR builder invokes for each broken entry; we translate the
  // builder's error kind into a clang diagnostic at the best source location
  // we can reconstruct.
  llvm::OpenMPIRBuilder::EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
      [this](llvm::OpenMPIRBuilder::EmitMetadataErrorKind Kind,
             const llvm::TargetRegionEntryInfo &EntryInfo) -> void {
    SourceLocation Loc;
    if (Kind != llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR) {
      // Recover a SourceLocation by matching the entry's device/file unique
      // IDs against the files known to the SourceManager.
      for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
                E = CGM.getContext().getSourceManager().fileinfo_end();
           I != E; ++I) {
        if (I->getFirst()->getUniqueID().getDevice() == EntryInfo.DeviceID &&
            I->getFirst()->getUniqueID().getFile() == EntryInfo.FileID) {
          Loc = CGM.getContext().getSourceManager().translateFileLineCol(
              I->getFirst(), EntryInfo.Line, 1);
          break;
        }
      }
    }
    switch (Kind) {
    case llvm::OpenMPIRBuilder::EMIT_MD_TARGET_REGION_ERROR: {
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Error, "Offloading entry for target region in "
                                    "%0 is incorrect: either the "
                                    "address or the ID is invalid.");
      CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
    } break;
    case llvm::OpenMPIRBuilder::EMIT_MD_DECLARE_TARGET_ERROR: {
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Error, "Offloading entry for declare target "
                                    "variable %0 is incorrect: the "
                                    "address is invalid.");
      CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
    } break;
    case llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR: {
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Error,
          "Offloading entry for declare target variable is incorrect: the "
          "address is invalid.");
      CGM.getDiags().Report(DiagID);
    } break;
    }
  };
  OMPBuilder.createOffloadEntriesAndInfoMetadata(OffloadEntriesInfoManager,
                                                 ErrorReportFn);
}
/// Loads all the offload entries information from the host IR
/// metadata.
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  // If we are in target mode, load the metadata from the host IR. This code
  // has to match the metadata creation in createOffloadEntriesAndInfoMetadata().
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return;
  if (CGM.getLangOpts().OMPHostIRFile.empty())
    return;
  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
  if (auto EC = Buf.getError()) {
    CGM.getDiags().Report(diag::err_cannot_open_file)
        << CGM.getLangOpts().OMPHostIRFile << EC.message();
    return;
  }
  // Parse the host bitcode into a throwaway context; only its metadata is
  // consumed below.
  llvm::LLVMContext C;
  auto ME = expectedToErrorOrAndEmitErrors(
      C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
  if (auto EC = ME.getError()) {
    unsigned DiagID = CGM.getDiags().getCustomDiagID(
        DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
    CGM.getDiags().Report(DiagID)
        << CGM.getLangOpts().OMPHostIRFile << EC.message();
    return;
  }
  OMPBuilder.loadOffloadInfoMetadata(*ME.get(), OffloadEntriesInfoManager);
}
/// Lazily build the kmp_routine_entry_t function-pointer type used for task
/// entry points. Caches the result in KmpRoutineEntryPtrTy/-QTy, so repeated
/// calls are no-ops.
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
  if (!KmpRoutineEntryPtrTy) {
    // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
    ASTContext &C = CGM.getContext();
    QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
    FunctionProtoType::ExtProtoInfo EPI;
    KmpRoutineEntryPtrQTy = C.getPointerType(
        C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
    KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
  }
}
namespace {
/// Bookkeeping for one privatized variable in a task-based region: the
/// original reference/declaration, its private copy, and (for firstprivate)
/// the element used to initialize the copy.
struct PrivateHelpersTy {
  PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
                   const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
      : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
        PrivateElemInit(PrivateElemInit) {}
  // Constructor for private locals: only the original declaration is tracked.
  PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
  const Expr *OriginalRef = nullptr;
  const VarDecl *Original = nullptr;
  const VarDecl *PrivateCopy = nullptr;
  const VarDecl *PrivateElemInit = nullptr;
  /// True for task-local privates (created via the VarDecl-only constructor),
  /// which carry no original reference and need no copy/initialization.
  bool isLocalPrivate() const {
    return !OriginalRef && !PrivateCopy && !PrivateElemInit;
  }
};
/// A private variable paired with its required alignment.
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
} // anonymous namespace
- static bool isAllocatableDecl(const VarDecl *VD) {
- const VarDecl *CVD = VD->getCanonicalDecl();
- if (!CVD->hasAttr<OMPAllocateDeclAttr>())
- return false;
- const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
- // Use the default allocation.
- return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
- !AA->getAllocator());
- }
- static RecordDecl *
- createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
- if (!Privates.empty()) {
- ASTContext &C = CGM.getContext();
- // Build struct .kmp_privates_t. {
- // /* private vars */
- // };
- RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
- RD->startDefinition();
- for (const auto &Pair : Privates) {
- const VarDecl *VD = Pair.second.Original;
- QualType Type = VD->getType().getNonReferenceType();
- // If the private variable is a local variable with lvalue ref type,
- // allocate the pointer instead of the pointee type.
- if (Pair.second.isLocalPrivate()) {
- if (VD->getType()->isLValueReferenceType())
- Type = C.getPointerType(Type);
- if (isAllocatableDecl(VD))
- Type = C.getPointerType(Type);
- }
- FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
- if (VD->hasAttrs()) {
- for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
- E(VD->getAttrs().end());
- I != E; ++I)
- FD->addAttr(*I);
- }
- }
- RD->completeDefinition();
- return RD;
- }
- return nullptr;
- }
- static RecordDecl *
- createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
- QualType KmpInt32Ty,
- QualType KmpRoutineEntryPointerQTy) {
- ASTContext &C = CGM.getContext();
- // Build struct kmp_task_t {
- // void * shareds;
- // kmp_routine_entry_t routine;
- // kmp_int32 part_id;
- // kmp_cmplrdata_t data1;
- // kmp_cmplrdata_t data2;
- // For taskloops additional fields:
- // kmp_uint64 lb;
- // kmp_uint64 ub;
- // kmp_int64 st;
- // kmp_int32 liter;
- // void * reductions;
- // };
- RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
- UD->startDefinition();
- addFieldToRecordDecl(C, UD, KmpInt32Ty);
- addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
- UD->completeDefinition();
- QualType KmpCmplrdataTy = C.getRecordType(UD);
- RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, C.VoidPtrTy);
- addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
- addFieldToRecordDecl(C, RD, KmpInt32Ty);
- addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
- addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
- if (isOpenMPTaskLoopDirective(Kind)) {
- QualType KmpUInt64Ty =
- CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
- QualType KmpInt64Ty =
- CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- addFieldToRecordDecl(C, RD, KmpUInt64Ty);
- addFieldToRecordDecl(C, RD, KmpUInt64Ty);
- addFieldToRecordDecl(C, RD, KmpInt64Ty);
- addFieldToRecordDecl(C, RD, KmpInt32Ty);
- addFieldToRecordDecl(C, RD, C.VoidPtrTy);
- }
- RD->completeDefinition();
- return RD;
- }
- static RecordDecl *
- createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
- ArrayRef<PrivateDataTy> Privates) {
- ASTContext &C = CGM.getContext();
- // Build struct kmp_task_t_with_privates {
- // kmp_task_t task_data;
- // .kmp_privates_t. privates;
- // };
- RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, KmpTaskTQTy);
- if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
- addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
- RD->completeDefinition();
- return RD;
- }
/// Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
///   For taskloops:
///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
///   tt->reductions, tt->shareds);
///   return 0;
/// }
/// \endcode
static llvm::Function *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                      OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
                      QualType KmpTaskTWithPrivatesPtrQTy,
                      QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
                      QualType SharedsPtrTy, llvm::Function *TaskFunction,
                      llvm::Value *TaskPrivatesMap) {
  ASTContext &C = CGM.getContext();
  // The proxy has the runtime-mandated signature: (gtid, task descriptor).
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &TaskEntryFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *TaskEntryTy =
      CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
  auto *TaskEntry = llvm::Function::Create(
      TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
  TaskEntry->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
                    Loc, Loc);
  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
  // tt,
  // For taskloops:
  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  // tt->task_data.shareds);
  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
      CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  // TDBase points at the whole kmp_task_t_with_privates; Base at the leading
  // kmp_task_t field.
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  LValue Base =
      CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // Field indices below follow the KmpTaskTFields enumeration.
  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.EmitLoadOfScalar(SharedsLVal, Loc),
      CGF.ConvertTypeForMem(SharedsPtrTy));
  // The privates field (index 1 in the wrapper) only exists when there are
  // privatized variables; otherwise pass a null pointer.
  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  llvm::Value *PrivatesParam;
  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
    LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
    PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
  } else {
    PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  llvm::Value *CommonArgs[] = {
      GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
      CGF.Builder
          .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
                                               CGF.VoidPtrTy, CGF.Int8Ty)
          .getPointer()};
  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                          std::end(CommonArgs));
  // Taskloops pass the extra lb/ub/st/liter/reductions fields loaded from the
  // task descriptor.
  if (isOpenMPTaskLoopDirective(Kind)) {
    auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
    LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
    llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
    auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
    LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
    llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
    auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
    LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
    llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
    auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
    LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
    llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
    CallArgs.push_back(LBParam);
    CallArgs.push_back(UBParam);
    CallArgs.push_back(StParam);
    CallArgs.push_back(LIParam);
    CallArgs.push_back(RParam);
  }
  CallArgs.push_back(SharedsParam);
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
                                                  CallArgs);
  // The runtime expects the entry point to return 0.
  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
                             CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  CGF.FinishFunction();
  return TaskEntry;
}
/// Emit the task destructor thunk that runs the destructors of all private
/// fields of the task's privates block. Shares the (gtid, task descriptor)
/// signature the runtime expects for task callbacks.
static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
                                            SourceLocation Loc,
                                            QualType KmpInt32Ty,
                                            QualType KmpTaskTWithPrivatesPtrQTy,
                                            QualType KmpTaskTWithPrivatesQTy) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &DestructorFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *DestructorFnTy =
      CGM.getTypes().GetFunctionType(DestructorFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
  auto *DestructorFn =
      llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
                             Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
                                    DestructorFnInfo);
  DestructorFn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
                    Args, Loc, Loc);
  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  // Skip the leading kmp_task_t field; the privates block is the second field.
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  Base = CGF.EmitLValueForField(Base, *FI);
  // Push a destroy cleanup for every privates field whose type needs
  // destruction.
  for (const auto *Field :
       cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
    if (QualType::DestructionKind DtorKind =
            Field->getType().isDestructedType()) {
      LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
    }
  }
  CGF.FinishFunction();
  return DestructorFn;
}
- /// Emit a privates mapping function for correct handling of private and
- /// firstprivate variables.
- /// \code
- /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
- /// **noalias priv1,..., <tyn> **noalias privn) {
- /// *priv1 = &.privates.priv1;
- /// ...;
- /// *privn = &.privates.privn;
- /// }
- /// \endcode
- static llvm::Value *
- emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
- const OMPTaskDataTy &Data, QualType PrivatesQTy,
- ArrayRef<PrivateDataTy> Privates) {
- ASTContext &C = CGM.getContext();
- FunctionArgList Args;
- ImplicitParamDecl TaskPrivatesArg(
- C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getPointerType(PrivatesQTy).withConst().withRestrict(),
- ImplicitParamDecl::Other);
- Args.push_back(&TaskPrivatesArg);
- llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
- unsigned Counter = 1;
- for (const Expr *E : Data.PrivateVars) {
- Args.push_back(ImplicitParamDecl::Create(
- C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getPointerType(C.getPointerType(E->getType()))
- .withConst()
- .withRestrict(),
- ImplicitParamDecl::Other));
- const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- PrivateVarsPos[VD] = Counter;
- ++Counter;
- }
- for (const Expr *E : Data.FirstprivateVars) {
- Args.push_back(ImplicitParamDecl::Create(
- C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getPointerType(C.getPointerType(E->getType()))
- .withConst()
- .withRestrict(),
- ImplicitParamDecl::Other));
- const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- PrivateVarsPos[VD] = Counter;
- ++Counter;
- }
- for (const Expr *E : Data.LastprivateVars) {
- Args.push_back(ImplicitParamDecl::Create(
- C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getPointerType(C.getPointerType(E->getType()))
- .withConst()
- .withRestrict(),
- ImplicitParamDecl::Other));
- const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- PrivateVarsPos[VD] = Counter;
- ++Counter;
- }
- for (const VarDecl *VD : Data.PrivateLocals) {
- QualType Ty = VD->getType().getNonReferenceType();
- if (VD->getType()->isLValueReferenceType())
- Ty = C.getPointerType(Ty);
- if (isAllocatableDecl(VD))
- Ty = C.getPointerType(Ty);
- Args.push_back(ImplicitParamDecl::Create(
- C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
- ImplicitParamDecl::Other));
- PrivateVarsPos[VD] = Counter;
- ++Counter;
- }
- const auto &TaskPrivatesMapFnInfo =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- llvm::FunctionType *TaskPrivatesMapTy =
- CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
- std::string Name =
- CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
- auto *TaskPrivatesMap = llvm::Function::Create(
- TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
- &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
- TaskPrivatesMapFnInfo);
- if (CGM.getLangOpts().Optimize) {
- TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
- TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
- TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
- }
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
- TaskPrivatesMapFnInfo, Args, Loc, Loc);
- // *privi = &.privates.privi;
- LValue Base = CGF.EmitLoadOfPointerLValue(
- CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
- TaskPrivatesArg.getType()->castAs<PointerType>());
- const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
- Counter = 0;
- for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
- LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
- const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
- LValue RefLVal =
- CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
- LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
- RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
- CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
- ++Counter;
- }
- CGF.FinishFunction();
- return TaskPrivatesMap;
- }
/// Emit initialization for private variables in task-based directives.
/// \param KmpTaskSharedsPtr Pointer to the captured shareds block (may be
///        invalid when no firstprivate copy-in from shareds is needed).
/// \param TDBase LValue of the kmp_task_t_with_privates object.
/// \param ForDup True when emitting the body of the task 'dup' callback
///        (taskloop), which only re-runs non-trivial constructor inits.
static void emitPrivatesInit(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             Address KmpTaskSharedsPtr, LValue TDBase,
                             const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                             QualType SharedsTy, QualType SharedsPtrTy,
                             const OMPTaskDataTy &Data,
                             ArrayRef<PrivateDataTy> Privates, bool ForDup) {
  ASTContext &C = CGF.getContext();
  // The privates block is the second field of the wrapper record.
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
                                 ? OMPD_taskloop
                                 : OMPD_task;
  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  LValue SrcBase;
  bool IsTargetTask =
      isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  // For target-based directives skip 4 firstprivate arrays BasePointersArray,
  // PointersArray, SizesArray, and MappersArray. The original variables for
  // these arrays are not captured and we get their addresses explicitly.
  if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
      (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
    SrcBase = CGF.MakeAddrLValue(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy),
            CGF.ConvertTypeForMem(SharedsTy)),
        SharedsTy);
  }
  // Re-point FI at the first field inside the privates record; it advances in
  // lock step with the Privates array below.
  FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
  for (const PrivateDataTy &Pair : Privates) {
    // Do not initialize private locals.
    if (Pair.second.isLocalPrivate()) {
      ++FI;
      continue;
    }
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    // In the dup function (ForDup) only non-trivial constructor inits need to
    // be re-emitted; everything else was already handled on task creation.
    if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
                             !CGF.isTrivialInitializer(Init)))) {
      LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
      if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
        // Firstprivate: the copy is initialized from the shared original.
        const VarDecl *OriginalVD = Pair.second.Original;
        // Check if the variable is the target-based BasePointersArray,
        // PointersArray, SizesArray, or MappersArray.
        LValue SharedRefLValue;
        QualType Type = PrivateLValue.getType();
        const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
        if (IsTargetTask && !SharedField) {
          assert(isa<ImplicitParamDecl>(OriginalVD) &&
                 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
                 cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getNumParams() == 0 &&
                 isa<TranslationUnitDecl>(
                     cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getDeclContext()) &&
                 "Expected artificial target data variable.");
          SharedRefLValue =
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
        } else if (ForDup) {
          // Read the original from the source task's shareds block, restoring
          // the declared alignment (the field may be under-aligned).
          SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
          SharedRefLValue = CGF.MakeAddrLValue(
              SharedRefLValue.getAddress(CGF).withAlignment(
                  C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
        } else if (CGF.LambdaCaptureFields.count(
                       Pair.second.Original->getCanonicalDecl()) > 0 ||
                   isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl)) {
          SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
        } else {
          // Processing for implicitly captured variables.
          InlinedOpenMPRegionRAII Region(
              CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
              /*HasCancel=*/false, /*NoInheritance=*/true);
          SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
        }
        if (Type->isArrayType()) {
          // Initialize firstprivate array.
          if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
          } else {
            // Initialize firstprivate array using element-by-element
            // initialization.
            CGF.EmitOMPAggregateAssign(
                PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
                Type,
                [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                  Address SrcElement) {
                  // Clean up any temporaries needed by the initialization.
                  CodeGenFunction::OMPPrivateScope InitScope(CGF);
                  InitScope.addPrivate(Elem, SrcElement);
                  (void)InitScope.Privatize();
                  // Emit initialization for single element.
                  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
                      CGF, &CapturesInfo);
                  CGF.EmitAnyExprToMem(Init, DestElement,
                                       Init->getType().getQualifiers(),
                                       /*IsInitializer=*/false);
                });
          }
        } else {
          // Scalar/class firstprivate: bind the init element to the shared
          // value and run the initializer expression.
          CodeGenFunction::OMPPrivateScope InitScope(CGF);
          InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
          (void)InitScope.Privatize();
          CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
          CGF.EmitExprAsInit(Init, VD, PrivateLValue,
                             /*capturedByInit=*/false);
        }
      } else {
        // Plain private: just run the default initializer.
        CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
      }
    }
    ++FI;
  }
}
- /// Check if duplication function is required for taskloops.
- static bool checkInitIsRequired(CodeGenFunction &CGF,
- ArrayRef<PrivateDataTy> Privates) {
- bool InitRequired = false;
- for (const PrivateDataTy &Pair : Privates) {
- if (Pair.second.isLocalPrivate())
- continue;
- const VarDecl *VD = Pair.second.PrivateCopy;
- const Expr *Init = VD->getAnyInitializer();
- InitRequired = InitRequired || (isa_and_nonnull<CXXConstructExpr>(Init) &&
- !CGF.isTrivialInitializer(Init));
- if (InitRequired)
- break;
- }
- return InitRequired;
- }
/// Emit task_dup function (for initialization of
/// private/firstprivate/lastprivate vars and last_iter flag)
/// \code
/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
/// lastpriv) {
/// // setup lastprivate flag
///    task_dst->last = lastpriv;
/// // could be constructor calls here...
/// }
/// \endcode
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    QualType KmpTaskTWithPrivatesPtrQTy,
                    const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                    const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
                    QualType SharedsPtrTy, const OMPTaskDataTy &Data,
                    ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
  ASTContext &C = CGM.getContext();
  // Signature: (dst task, src task, lastprivate flag).
  FunctionArgList Args;
  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
                                ImplicitParamDecl::Other);
  Args.push_back(&DstArg);
  Args.push_back(&SrcArg);
  Args.push_back(&LastprivArg);
  const auto &TaskDupFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
  auto *TaskDup = llvm::Function::Create(
      TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
  TaskDup->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
                    Loc);
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&DstArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  // task_dst->liter = lastpriv;
  if (WithLastIter) {
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
        CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
    CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  }
  // Emit initial values for private copies (if any).
  assert(!Privates.empty());
  Address KmpTaskSharedsPtr = Address::invalid();
  // Firstprivates are copied from the *source* task's shareds block.
  if (!Data.FirstprivateVars.empty()) {
    LValue TDBase = CGF.EmitLoadOfPointerLValue(
        CGF.GetAddrOfLocalVar(&SrcArg),
        KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
                                 Base, *std::next(KmpTaskTQTyRD->field_begin(),
                                                  KmpTaskTShareds)),
                             Loc),
        CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
  }
  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
                   SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  CGF.FinishFunction();
  return TaskDup;
}
- /// Checks if destructor function is required to be generated.
- /// \return true if cleanups are required, false otherwise.
- static bool
- checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
- ArrayRef<PrivateDataTy> Privates) {
- for (const PrivateDataTy &P : Privates) {
- if (P.second.isLocalPrivate())
- continue;
- QualType Ty = P.second.Original->getType().getNonReferenceType();
- if (Ty.isDestructedType())
- return true;
- }
- return false;
- }
namespace {
/// Loop generator for OpenMP iterator expression.
///
/// RAII helper: the constructor emits the loop headers ("iter.cont" blocks
/// with bounds checks) for every iterator declared by the expression and
/// privatizes the iterator and counter variables; the destructor emits the
/// matching counter increments, back-edges and "iter.exit" blocks in reverse
/// order. Any code emitted while the scope is alive thus ends up inside the
/// generated loop nest.
class OMPIteratorGeneratorScope final
    : public CodeGenFunction::OMPPrivateScope {
  CodeGenFunction &CGF;
  const OMPIteratorExpr *E = nullptr;
  // Per-iterator jump destinations: ContDests[I] is the loop header of
  // iterator I, ExitDests[I] its exit block.
  SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
  SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
  OMPIteratorGeneratorScope() = delete;
  OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;

public:
  /// Emits the prologue of the loop nest. A null \p E makes the scope a
  /// no-op, so callers may pass cast_or_null results unconditionally.
  OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
      : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
    if (!E)
      return;
    SmallVector<llvm::Value *, 4> Uppers;
    for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
      // Evaluate upper bounds before privatizing: they refer to the
      // enclosing (non-private) context.
      Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
      const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
      addPrivate(VD, CGF.CreateMemTemp(VD->getType(), VD->getName()));
      const OMPIteratorHelperData &HelperData = E->getHelper(I);
      addPrivate(
          HelperData.CounterVD,
          CGF.CreateMemTemp(HelperData.CounterVD->getType(), "counter.addr"));
    }
    Privatize();
    for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
      const OMPIteratorHelperData &HelperData = E->getHelper(I);
      LValue CLVal =
          CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
                             HelperData.CounterVD->getType());
      // Counter = 0;
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
          CLVal);
      CodeGenFunction::JumpDest &ContDest =
          ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
      CodeGenFunction::JumpDest &ExitDest =
          ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
      // N = <number-of_iterations>;
      llvm::Value *N = Uppers[I];
      // cont:
      // if (Counter < N) goto body; else goto exit;
      CGF.EmitBlock(ContDest.getBlock());
      auto *CVal =
          CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
      // The comparison signedness must follow the counter's declared type.
      llvm::Value *Cmp =
          HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
              ? CGF.Builder.CreateICmpSLT(CVal, N)
              : CGF.Builder.CreateICmpULT(CVal, N);
      llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
      CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
      // body:
      CGF.EmitBlock(BodyBB);
      // Iteri = Begini + Counter * Stepi;
      CGF.EmitIgnoredExpr(HelperData.Update);
    }
  }
  /// Emits the epilogue: counter increments, branches back to the loop
  /// headers and the exit blocks, innermost iterator first.
  ~OMPIteratorGeneratorScope() {
    if (!E)
      return;
    for (unsigned I = E->numOfIterators(); I > 0; --I) {
      // Counter = Counter + 1;
      const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
      CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
      // goto cont;
      CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
      // exit:
      CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
    }
  }
};
} // namespace
/// Computes the (address, size-in-bytes) pair describing the storage named by
/// expression \p E. Three forms are handled:
///  - OMPArrayShapingExpr: base pointer; size = pointee size times the
///    product of all dimension extents;
///  - array sections: section start address; size = (one past the upper
///    bound) - start, in bytes;
///  - anything else: the expression's lvalue address and sizeof(its type).
static std::pair<llvm::Value *, llvm::Value *>
getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
  const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
  llvm::Value *Addr;
  if (OASE) {
    const Expr *Base = OASE->getBase();
    Addr = CGF.EmitScalarExpr(Base);
  } else {
    Addr = CGF.EmitLValue(E).getPointer(CGF);
  }
  llvm::Value *SizeVal;
  QualType Ty = E->getType();
  if (OASE) {
    // Multiply the pointee size by each dimension extent, converting every
    // extent to size_t first.
    SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
    for (const Expr *SE : OASE->getDimensions()) {
      llvm::Value *Sz = CGF.EmitScalarExpr(SE);
      Sz = CGF.EmitScalarConversion(
          Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
      SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
    }
  } else if (const auto *ASE =
                 dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
    // For a section, compute the byte distance from the first element to one
    // past the last element (upper bound address + 1).
    LValue UpAddrLVal =
        CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
    Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
    llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
        UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1);
    llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
    llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
    SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
  } else {
    SizeVal = CGF.getTypeSize(Ty);
  }
  return std::make_pair(Addr, SizeVal);
}
/// Builds the kmp_task_affinity_info_t record type, if it is not built yet.
/// (The previous comment said "kmp_depend_info" — that was a copy-paste from
/// getDependTypes; this function builds the affinity record.)
/// Layout: { intptr_t base_addr; size_t len; uint32 flags; }.
static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
  // Flags field: 32-bit unsigned integer.
  QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
  if (KmpTaskAffinityInfoTy.isNull()) {
    RecordDecl *KmpAffinityInfoRD =
        C.buildImplicitRecord("kmp_task_affinity_info_t");
    KmpAffinityInfoRD->startDefinition();
    addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
    addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
    addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
    KmpAffinityInfoRD->completeDefinition();
    KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
  }
}
/// Emits the runtime initialization for a task-generating construct:
/// aggregates private copies into a kmp_task_t-with-privates record,
/// allocates the task via __kmpc_omp_task_alloc (or the target variant when
/// 'nowait' is present), wires up detach/affinity clauses, copies the
/// captured shareds into the task, initializes privates, and fills the
/// destructor/priority union fields.
/// \param TaskFunction The outlined function that runs the task body.
/// \param SharedsTy Record type capturing the shared variables.
/// \param Shareds Address of the captured shareds.
/// \param Data Collected clause data for the directive.
/// \return Handles (new task, proxy entry, typed task pointer, ...) used by
/// the caller to emit the actual task launch.
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPExecutableDirective &D,
                              llvm::Function *TaskFunction, QualType SharedsTy,
                              Address Shareds, const OMPTaskDataTy &Data) {
  ASTContext &C = CGM.getContext();
  llvm::SmallVector<PrivateDataTy, 4> Privates;
  // Aggregate privates and sort them by the alignment.
  const auto *I = Data.PrivateCopies.begin();
  for (const Expr *E : Data.PrivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  // Firstprivates additionally carry the initializer variable.
  I = Data.FirstprivateCopies.begin();
  const auto *IElemInitRef = Data.FirstprivateInits.begin();
  for (const Expr *E : Data.FirstprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(
            E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
    ++I;
    ++IElemInitRef;
  }
  I = Data.LastprivateCopies.begin();
  for (const Expr *E : Data.LastprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  for (const VarDecl *VD : Data.PrivateLocals) {
    if (isAllocatableDecl(VD))
      Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
    else
      Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
  }
  // Stable sort in descending alignment order: higher-aligned privates are
  // laid out first in the privates record.
  llvm::stable_sort(Privates,
                    [](const PrivateDataTy &L, const PrivateDataTy &R) {
                      return L.first > R.first;
                    });
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  // Build type kmp_routine_entry_t (if not built yet).
  emitKmpRoutineEntryT(KmpInt32Ty);
  // Build type kmp_task_t (if not built yet). Taskloop directives get their
  // own cached record type (it has extra fields) separate from plain tasks.
  if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
    if (SavedKmpTaskloopTQTy.isNull()) {
      SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskloopTQTy;
  } else {
    assert((D.getDirectiveKind() == OMPD_task ||
            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
           "Expected taskloop, task or target directive");
    if (SavedKmpTaskTQTy.isNull()) {
      SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskTQTy;
  }
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // Build particular struct kmp_task_t for the given task.
  const RecordDecl *KmpTaskTWithPrivatesQTyRD =
      createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  QualType KmpTaskTWithPrivatesPtrQTy =
      C.getPointerType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesPtrTy =
      KmpTaskTWithPrivatesTy->getPointerTo();
  llvm::Value *KmpTaskTWithPrivatesTySize =
      CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  QualType SharedsPtrTy = C.getPointerType(SharedsTy);
  // Emit initial values for private copies (if any).
  llvm::Value *TaskPrivatesMap = nullptr;
  // The privates-map type is taken from the 4th parameter of the outlined
  // task function.
  llvm::Type *TaskPrivatesMapTy =
      std::next(TaskFunction->arg_begin(), 3)->getType();
  if (!Privates.empty()) {
    auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
    TaskPrivatesMap =
        emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
    TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TaskPrivatesMap, TaskPrivatesMapTy);
  } else {
    TaskPrivatesMap = llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(TaskPrivatesMapTy));
  }
  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  // kmp_task_t *tt);
  llvm::Function *TaskEntry = emitProxyTaskFunction(
      CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
      KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
      TaskPrivatesMap);
  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  // Task flags. Format is taken from
  // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
  // description of kmp_tasking_flags struct.
  enum {
    TiedFlag = 0x1,
    FinalFlag = 0x2,
    DestructorsFlag = 0x8,
    PriorityFlag = 0x20,
    DetachableFlag = 0x40,
  };
  unsigned Flags = Data.Tied ? TiedFlag : 0;
  bool NeedsCleanup = false;
  if (!Privates.empty()) {
    NeedsCleanup =
        checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
    if (NeedsCleanup)
      Flags = Flags | DestructorsFlag;
  }
  if (Data.Priority.getInt())
    Flags = Flags | PriorityFlag;
  if (D.hasClausesOfKind<OMPDetachClause>())
    Flags = Flags | DetachableFlag;
  // The 'final' clause may be a runtime condition (emit a select) or a
  // compile-time constant (emit the flag directly).
  llvm::Value *TaskFlags =
      Data.Final.getPointer()
          ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
                                     CGF.Builder.getInt32(FinalFlag),
                                     CGF.Builder.getInt32(/*C=*/0))
          : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  SmallVector<llvm::Value *, 8> AllocArgs = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), TaskFlags,
      KmpTaskTWithPrivatesTySize, SharedsSize,
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskEntry,
                                                      KmpRoutineEntryPtrTy)};
  llvm::Value *NewTask;
  if (D.hasClausesOfKind<OMPNowaitClause>()) {
    // Check if we have any device clause associated with the directive.
    const Expr *Device = nullptr;
    if (auto *C = D.getSingleClause<OMPDeviceClause>())
      Device = C->getDevice();
    // Emit device ID if any otherwise use default value.
    llvm::Value *DeviceID;
    if (Device)
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    else
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    AllocArgs.push_back(DeviceID);
    NewTask = CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
        AllocArgs);
  } else {
    NewTask =
        CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
                            AllocArgs);
  }
  // Emit detach clause initialization.
  // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
  // task_descriptor);
  if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
    const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
    LValue EvtLVal = CGF.EmitLValue(Evt);
    // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
    // int gtid, kmp_task_t *task);
    llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
    llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
    Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
    llvm::Value *EvtVal = CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
        {Loc, Tid, NewTask});
    EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
                                      Evt->getExprLoc());
    CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
  }
  // Process affinity clauses.
  if (D.hasClausesOfKind<OMPAffinityClause>()) {
    // Process list of affinity data.
    ASTContext &C = CGM.getContext();
    Address AffinitiesArray = Address::invalid();
    // Calculate number of elements to form the array of affinity data.
    llvm::Value *NumOfElements = nullptr;
    unsigned NumAffinities = 0;
    for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
      if (const Expr *Modifier = C->getModifier()) {
        const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
        for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
          llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
          Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
          NumOfElements =
              NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
        }
      } else {
        NumAffinities += C->varlist_size();
      }
    }
    getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
    // Fields ids in kmp_task_affinity_info record.
    enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
    QualType KmpTaskAffinityInfoArrayTy;
    if (NumOfElements) {
      // Iterator modifiers make the total element count a runtime value:
      // emit a VLA of affinity records.
      NumOfElements = CGF.Builder.CreateNUWAdd(
          llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
      auto *OVE = new (C) OpaqueValueExpr(
          Loc,
          C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
          VK_PRValue);
      CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
                                                    RValue::get(NumOfElements));
      KmpTaskAffinityInfoArrayTy =
          C.getVariableArrayType(KmpTaskAffinityInfoTy, OVE, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
      // Properly emit variable-sized array.
      auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
                                           ImplicitParamDecl::Other);
      CGF.EmitVarDecl(*PD);
      AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
      NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
                                                /*isSigned=*/false);
    } else {
      // No iterators: the count is a compile-time constant array.
      KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
          KmpTaskAffinityInfoTy,
          llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
          ArrayType::Normal, /*IndexTypeQuals=*/0);
      AffinitiesArray =
          CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
      AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
      NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
                                             /*isSigned=*/false);
    }
    const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
    // Fill array by elements without iterators.
    unsigned Pos = 0;
    bool HasIterator = false;
    for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
      if (C->getModifier()) {
        HasIterator = true;
        continue;
      }
      for (const Expr *E : C->varlists()) {
        llvm::Value *Addr;
        llvm::Value *Size;
        std::tie(Addr, Size) = getPointerAndSize(CGF, E);
        LValue Base =
            CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
                               KmpTaskAffinityInfoTy);
        // affs[i].base_addr = &<Affinities[i].second>;
        LValue BaseAddrLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
        CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
                              BaseAddrLVal);
        // affs[i].len = sizeof(<Affinities[i].second>);
        LValue LenLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
        CGF.EmitStoreOfScalar(Size, LenLVal);
        ++Pos;
      }
    }
    LValue PosLVal;
    if (HasIterator) {
      // Iterator-expanded entries need a runtime position counter, seeded
      // with the number of entries already written.
      PosLVal = CGF.MakeAddrLValue(
          CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
          C.getSizeType());
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
    }
    // Process elements with iterators.
    for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
      const Expr *Modifier = C->getModifier();
      if (!Modifier)
        continue;
      OMPIteratorGeneratorScope IteratorScope(
          CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
      for (const Expr *E : C->varlists()) {
        llvm::Value *Addr;
        llvm::Value *Size;
        std::tie(Addr, Size) = getPointerAndSize(CGF, E);
        llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
        LValue Base = CGF.MakeAddrLValue(
            CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
        // affs[i].base_addr = &<Affinities[i].second>;
        LValue BaseAddrLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
        CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
                              BaseAddrLVal);
        // affs[i].len = sizeof(<Affinities[i].second>);
        LValue LenLVal = CGF.EmitLValueForField(
            Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
        CGF.EmitStoreOfScalar(Size, LenLVal);
        Idx = CGF.Builder.CreateNUWAdd(
            Idx, llvm::ConstantInt::get(Idx->getType(), 1));
        CGF.EmitStoreOfScalar(Idx, PosLVal);
      }
    }
    // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
    // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
    // naffins, kmp_task_affinity_info_t *affin_list);
    llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
    llvm::Value *GTid = getThreadID(CGF, Loc);
    llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        AffinitiesArray.getPointer(), CGM.VoidPtrTy);
    // FIXME: Emit the function and ignore its result for now unless the
    // runtime function is properly implemented.
    (void)CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
        {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
  }
  llvm::Value *NewTaskNewTaskTTy =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          NewTask, KmpTaskTWithPrivatesPtrTy);
  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
                                               KmpTaskTWithPrivatesQTy);
  LValue TDBase =
      CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  // Fill the data in the resulting kmp_task_t record.
  // Copy shareds if there are any.
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(
            CGF.EmitLValueForField(
                TDBase,
                *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
            Loc),
        CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
    LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
    LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
    CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
  }
  // Emit initial values for private copies (if any).
  TaskResultTy Result;
  if (!Privates.empty()) {
    emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
                     SharedsTy, SharedsPtrTy, Data, Privates,
                     /*ForDup=*/false);
    // Taskloops additionally get a task-duplication callback when there are
    // lastprivates or privates that require initialization.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
      Result.TaskDupFn = emitTaskDupFunction(
          CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
          KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
          /*WithLastIter=*/!Data.LastprivateVars.empty());
    }
  }
  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  enum { Priority = 0, Destructors = 1 };
  // Provide pointer to function with destructors for privates.
  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  const RecordDecl *KmpCmplrdataUD =
      (*FI)->getType()->getAsUnionType()->getDecl();
  if (NeedsCleanup) {
    llvm::Value *DestructorFn = emitDestructorsFunction(
        CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
        KmpTaskTWithPrivatesQTy);
    LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
    LValue DestructorsLV = CGF.EmitLValueForField(
        Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                              DestructorFn, KmpRoutineEntryPtrTy),
                          DestructorsLV);
  }
  // Set priority.
  if (Data.Priority.getInt()) {
    LValue Data2LV = CGF.EmitLValueForField(
        TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
    LValue PriorityLV = CGF.EmitLValueForField(
        Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
    CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  }
  Result.NewTask = NewTask;
  Result.TaskEntry = TaskEntry;
  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  Result.TDBase = TDBase;
  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  return Result;
}
- /// Translates internal dependency kind into the runtime kind.
- static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
- RTLDependenceKindTy DepKind;
- switch (K) {
- case OMPC_DEPEND_in:
- DepKind = RTLDependenceKindTy::DepIn;
- break;
- // Out and InOut dependencies must use the same code.
- case OMPC_DEPEND_out:
- case OMPC_DEPEND_inout:
- DepKind = RTLDependenceKindTy::DepInOut;
- break;
- case OMPC_DEPEND_mutexinoutset:
- DepKind = RTLDependenceKindTy::DepMutexInOutSet;
- break;
- case OMPC_DEPEND_inoutset:
- DepKind = RTLDependenceKindTy::DepInOutSet;
- break;
- case OMPC_DEPEND_outallmemory:
- DepKind = RTLDependenceKindTy::DepOmpAllMem;
- break;
- case OMPC_DEPEND_source:
- case OMPC_DEPEND_sink:
- case OMPC_DEPEND_depobj:
- case OMPC_DEPEND_inoutallmemory:
- case OMPC_DEPEND_unknown:
- llvm_unreachable("Unknown task dependence type");
- }
- return DepKind;
- }
- /// Builds kmp_depend_info, if it is not built yet, and builds flags type.
- static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
- QualType &FlagsTy) {
- FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
- if (KmpDependInfoTy.isNull()) {
- RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
- KmpDependInfoRD->startDefinition();
- addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
- addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
- addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
- KmpDependInfoRD->completeDefinition();
- KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
- }
- }
/// Returns the number of dependency records stored in a depobj and an lvalue
/// for the first record. The count is read from the base_addr field of the
/// element one position *before* the pointer stored in the depobj variable
/// (hence the GEP with index -1).
std::pair<llvm::Value *, LValue>
CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
                                   SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  // Load the kmp_depend_info* stored in the depobj variable.
  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.Builder.CreateElementBitCast(
          DepobjLVal.getAddress(CGF),
          CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
      KmpDependInfoPtrTy->castAs<PointerType>());
  // Step back one element to reach the record holding the element count.
  Address DepObjAddr = CGF.Builder.CreateGEP(
      Base.getAddress(CGF),
      llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  LValue NumDepsBase = CGF.MakeAddrLValue(
      DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
  // NumDeps = deps[i].base_addr;
  LValue BaseAddrLVal = CGF.EmitLValueForField(
      NumDepsBase,
      *std::next(KmpDependInfoRD->field_begin(),
                 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
  llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
  return std::make_pair(NumDeps, Base);
}
/// Emits kmp_depend_info records for every dependency expression of clause
/// data \p Data into \p DependenciesArray.
/// \param Pos Either a compile-time counter (unsigned*) when the entry count
/// is statically known, or an lvalue of a runtime counter when the clause has
/// an iterator modifier; it is advanced past the emitted entries.
static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
                           llvm::PointerUnion<unsigned *, LValue *> Pos,
                           const OMPTaskDataTy::DependData &Data,
                           Address DependenciesArray) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
  // Wrap the emission in the iterator loop nest if the clause has an
  // iterator modifier; a null expression makes the scope a no-op.
  OMPIteratorGeneratorScope IteratorScope(
      CGF, cast_or_null<OMPIteratorExpr>(
               Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
                                 : nullptr));
  for (const Expr *E : Data.DepExprs) {
    llvm::Value *Addr;
    llvm::Value *Size;
    // The expression will be a nullptr in the 'omp_all_memory' case.
    if (E) {
      std::tie(Addr, Size) = getPointerAndSize(CGF, E);
      Addr = CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy);
    } else {
      Addr = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
      Size = llvm::ConstantInt::get(CGF.SizeTy, 0);
    }
    LValue Base;
    if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
      // Statically known slot.
      Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
    } else {
      // Runtime slot: index with the current counter value.
      assert(E && "Expected a non-null expression");
      LValue &PosLVal = *Pos.get<LValue *>();
      llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
      Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
    }
    // deps[i].base_addr = &<Dependencies[i].second>;
    LValue BaseAddrLVal = CGF.EmitLValueForField(
        Base,
        *std::next(KmpDependInfoRD->field_begin(),
                   static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
    CGF.EmitStoreOfScalar(Addr, BaseAddrLVal);
    // deps[i].len = sizeof(<Dependencies[i].second>);
    LValue LenLVal = CGF.EmitLValueForField(
        Base, *std::next(KmpDependInfoRD->field_begin(),
                         static_cast<unsigned int>(RTLDependInfoFields::Len)));
    CGF.EmitStoreOfScalar(Size, LenLVal);
    // deps[i].flags = <Dependencies[i].first>;
    RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
    LValue FlagsLVal = CGF.EmitLValueForField(
        Base,
        *std::next(KmpDependInfoRD->field_begin(),
                   static_cast<unsigned int>(RTLDependInfoFields::Flags)));
    CGF.EmitStoreOfScalar(
        llvm::ConstantInt::get(LLVMFlagsTy, static_cast<unsigned int>(DepKind)),
        FlagsLVal);
    // Advance the position counter (compile-time or runtime form).
    if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
      ++(*P);
    } else {
      LValue &PosLVal = *Pos.get<LValue *>();
      llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
      Idx = CGF.Builder.CreateNUWAdd(Idx,
                                     llvm::ConstantInt::get(Idx->getType(), 1));
      CGF.EmitStoreOfScalar(Idx, PosLVal);
    }
  }
}
/// Computes, for a depobj dependency clause, the number of kmp_depend_info
/// entries contributed by each depobj expression in \p Data.DepExprs and
/// returns one size value per expression.
SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
    CodeGenFunction &CGF, QualType &KmpDependInfoTy,
    const OMPTaskDataTy::DependData &Data) {
  assert(Data.DepKind == OMPC_DEPEND_depobj &&
         "Expected depobj dependency kind.");
  SmallVector<llvm::Value *, 4> Sizes;
  SmallVector<LValue, 4> SizeLVals;
  ASTContext &C = CGF.getContext();
  {
    OMPIteratorGeneratorScope IteratorScope(
        CGF, cast_or_null<OMPIteratorExpr>(
                 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
                                   : nullptr));
    for (const Expr *E : Data.DepExprs) {
      llvm::Value *NumDeps;
      LValue Base;
      LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
      std::tie(NumDeps, Base) =
          getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
      // The size lives in an alloca so it can be read back after the
      // iterator loop nest closes.
      LValue NumLVal = CGF.MakeAddrLValue(
          CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
          C.getUIntPtrType());
      // NOTE(review): both the zero store and the add are emitted inside the
      // iterator loop body, so with an iterator modifier the slot is re-zeroed
      // on every iteration and ends up holding only the last iteration's
      // count — confirm this matches the intended accumulation semantics.
      CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
                              NumLVal.getAddress(CGF));
      llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
      llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
      CGF.EmitStoreOfScalar(Add, NumLVal);
      SizeLVals.push_back(NumLVal);
    }
  }
  // Read the computed sizes back outside of the iterator scope.
  for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
    llvm::Value *Size =
        CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
    Sizes.push_back(Size);
  }
  return Sizes;
}
/// Copies the kmp_depend_info entries of every depobj in \p Data into
/// \p DependenciesArray at the runtime position held by \p PosLVal, and
/// advances the position by the number of entries copied.
void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
                                         QualType &KmpDependInfoTy,
                                         LValue PosLVal,
                                         const OMPTaskDataTy::DependData &Data,
                                         Address DependenciesArray) {
  assert(Data.DepKind == OMPC_DEPEND_depobj &&
         "Expected depobj dependency kind.");
  llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
  {
    OMPIteratorGeneratorScope IteratorScope(
        CGF, cast_or_null<OMPIteratorExpr>(
                 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
                                   : nullptr));
    for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
      const Expr *E = Data.DepExprs[I];
      llvm::Value *NumDeps;
      LValue Base;
      LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
      std::tie(NumDeps, Base) =
          getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
      // memcopy dependency data: byte count = element size * element count.
      llvm::Value *Size = CGF.Builder.CreateNUWMul(
          ElSize,
          CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
      llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
      Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
      CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
      // Increase pos.
      // pos += size;
      llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
      CGF.EmitStoreOfScalar(Add, PosLVal);
    }
  }
}
- std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
- CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
- SourceLocation Loc) {
- if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
- return D.DepExprs.empty();
- }))
- return std::make_pair(nullptr, Address::invalid());
- // Process list of dependencies.
- ASTContext &C = CGM.getContext();
- Address DependenciesArray = Address::invalid();
- llvm::Value *NumOfElements = nullptr;
- unsigned NumDependencies = std::accumulate(
- Dependencies.begin(), Dependencies.end(), 0,
- [](unsigned V, const OMPTaskDataTy::DependData &D) {
- return D.DepKind == OMPC_DEPEND_depobj
- ? V
- : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
- });
- QualType FlagsTy;
- getDependTypes(C, KmpDependInfoTy, FlagsTy);
- bool HasDepobjDeps = false;
- bool HasRegularWithIterators = false;
- llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
- llvm::Value *NumOfRegularWithIterators =
- llvm::ConstantInt::get(CGF.IntPtrTy, 0);
- // Calculate number of depobj dependencies and regular deps with the
- // iterators.
- for (const OMPTaskDataTy::DependData &D : Dependencies) {
- if (D.DepKind == OMPC_DEPEND_depobj) {
- SmallVector<llvm::Value *, 4> Sizes =
- emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
- for (llvm::Value *Size : Sizes) {
- NumOfDepobjElements =
- CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
- }
- HasDepobjDeps = true;
- continue;
- }
- // Include number of iterations, if any.
- if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
- for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
- llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
- Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
- llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
- Sz, llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
- NumOfRegularWithIterators =
- CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
- }
- HasRegularWithIterators = true;
- continue;
- }
- }
- QualType KmpDependInfoArrayTy;
- if (HasDepobjDeps || HasRegularWithIterators) {
- NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
- /*isSigned=*/false);
- if (HasDepobjDeps) {
- NumOfElements =
- CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
- }
- if (HasRegularWithIterators) {
- NumOfElements =
- CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
- }
- auto *OVE = new (C) OpaqueValueExpr(
- Loc, C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
- VK_PRValue);
- CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
- RValue::get(NumOfElements));
- KmpDependInfoArrayTy =
- C.getVariableArrayType(KmpDependInfoTy, OVE, ArrayType::Normal,
- /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
- // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
- // Properly emit variable-sized array.
- auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
- ImplicitParamDecl::Other);
- CGF.EmitVarDecl(*PD);
- DependenciesArray = CGF.GetAddrOfLocalVar(PD);
- NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
- /*isSigned=*/false);
- } else {
- KmpDependInfoArrayTy = C.getConstantArrayType(
- KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
- ArrayType::Normal, /*IndexTypeQuals=*/0);
- DependenciesArray =
- CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
- DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
- NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
- /*isSigned=*/false);
- }
- unsigned Pos = 0;
- for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
- if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
- Dependencies[I].IteratorExpr)
- continue;
- emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
- DependenciesArray);
- }
- // Copy regular dependencies with iterators.
- LValue PosLVal = CGF.MakeAddrLValue(
- CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
- CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
- for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
- if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
- !Dependencies[I].IteratorExpr)
- continue;
- emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
- DependenciesArray);
- }
- // Copy final depobj arrays without iterators.
- if (HasDepobjDeps) {
- for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
- if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
- continue;
- emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
- DependenciesArray);
- }
- }
- DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DependenciesArray, CGF.VoidPtrTy, CGF.Int8Ty);
- return std::make_pair(NumOfElements, DependenciesArray);
- }
/// Emits the kmp_depend_info array for a standalone 'omp depobj' construct.
/// The array is heap-allocated via __kmpc_alloc (so the depobj can outlive
/// the current stack frame) and carries one extra leading element whose
/// base_addr field stores the number of dependencies; the returned address
/// points past that header element, cast to void*.
Address CGOpenMPRuntime::emitDepobjDependClause(
    CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
    SourceLocation Loc) {
  if (Dependencies.DepExprs.empty())
    return Address::invalid();
  // Process list of dependencies.
  ASTContext &C = CGM.getContext();
  Address DependenciesArray = Address::invalid();
  unsigned NumDependencies = Dependencies.DepExprs.size();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  llvm::Value *Size;
  // Define type kmp_depend_info[<Dependencies.size()>];
  // For depobj reserve one extra element to store the number of elements.
  // It is required to handle depobj(x) update(in) construct.
  // kmp_depend_info[<Dependencies.size()>] deps;
  llvm::Value *NumDepsVal;
  CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
  if (const auto *IE =
          cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
    // Iterator-driven dependencies: the element count is the product of the
    // iterator trip counts and is only known at runtime.
    NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
    for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
      llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
      Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
      NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
    }
    // +1 accounts for the leading element that stores the count.
    Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
                                    NumDepsVal);
    CharUnits SizeInBytes =
        C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
    llvm::Value *RecSize = CGM.getSize(SizeInBytes);
    Size = CGF.Builder.CreateNUWMul(Size, RecSize);
    NumDepsVal =
        CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
  } else {
    // Constant dependency count: size the allocation statically (+1 header).
    QualType KmpDependInfoArrayTy = C.getConstantArrayType(
        KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
        nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
    CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
    Size = CGM.getSize(Sz.alignTo(Align));
    NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
  }
  // Need to allocate on the dynamic memory.
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  // Use default allocator.
  llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  llvm::Value *Args[] = {ThreadID, Size, Allocator};
  llvm::Value *Addr =
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_alloc),
                          Args, ".dep.arr.addr");
  llvm::Type *KmpDependInfoLlvmTy = CGF.ConvertTypeForMem(KmpDependInfoTy);
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr, KmpDependInfoLlvmTy->getPointerTo());
  DependenciesArray = Address(Addr, KmpDependInfoLlvmTy, Align);
  // Write number of elements in the first element of array for depobj.
  LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
  // deps[i].base_addr = NumDependencies;
  LValue BaseAddrLVal = CGF.EmitLValueForField(
      Base,
      *std::next(KmpDependInfoRD->field_begin(),
                 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
  CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
  llvm::PointerUnion<unsigned *, LValue *> Pos;
  unsigned Idx = 1; // Start writing right after the count header.
  LValue PosLVal;
  if (Dependencies.IteratorExpr) {
    // Runtime-variable count: the write cursor must live in memory so
    // emitDependData can advance it inside the generated iterator loops.
    PosLVal = CGF.MakeAddrLValue(
        CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
        C.getSizeType());
    CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
                          /*IsInit=*/true);
    Pos = &PosLVal;
  } else {
    Pos = &Idx;
  }
  emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
  // Skip the header element and hand the payload back as void*.
  DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy,
      CGF.Int8Ty);
  return DependenciesArray;
}
/// Emits code for the 'destroy' clause of 'omp depobj': frees the
/// heap-allocated kmp_depend_info array produced by emitDepobjDependClause
/// via __kmpc_free.
void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                                        SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  LValue Base = CGF.EmitLoadOfPointerLValue(
      DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
  QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
      CGF.ConvertTypeForMem(KmpDependInfoTy));
  // The depobj pointer points one element past the count header, so step
  // back one kmp_depend_info element to recover the address that was
  // originally returned by __kmpc_alloc.
  llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
      Addr.getElementType(), Addr.getPointer(),
      llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
                                                               CGF.VoidPtrTy);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  // Use default allocator.
  llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
  // _kmpc_free(gtid, addr, nullptr);
  (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_free),
                            Args);
}
/// Emits code for the 'update' clause of 'omp depobj': walks every
/// kmp_depend_info record in the depobj array and overwrites its flags
/// field with the runtime encoding of NewDepKind.
void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                                       OpenMPDependClauseKind NewDepKind,
                                       SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
  llvm::Value *NumDeps;
  LValue Base;
  // Load the element count and the start of the dependency array.
  std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
  Address Begin = Base.getAddress(CGF);
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *End = CGF.Builder.CreateGEP(Begin.getElementType(),
                                           Begin.getPointer(), NumDeps);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);
  // PHI over the current element: Begin on entry, the advanced pointer on
  // the loop back-edge (second incoming added below).
  llvm::PHINode *ElementPHI =
      CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
  ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
  Begin = Begin.withPointer(ElementPHI);
  Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
                            Base.getTBAAInfo());
  // deps[i].flags = NewDepKind;
  RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
  LValue FlagsLVal = CGF.EmitLValueForField(
      Base, *std::next(KmpDependInfoRD->field_begin(),
                       static_cast<unsigned int>(RTLDependInfoFields::Flags)));
  CGF.EmitStoreOfScalar(
      llvm::ConstantInt::get(LLVMFlagsTy, static_cast<unsigned int>(DepKind)),
      FlagsLVal);
  // Shift the address forward by one element.
  Address ElementNext =
      CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
  ElementPHI->addIncoming(ElementNext.getPointer(),
                          CGF.Builder.GetInsertBlock());
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  // Done.
  // NOTE(review): the loop body always executes at least once (no emptiness
  // pre-check) — presumably a depobj holds >= 1 dependency; confirm.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
/// Emits the task-spawning code for '#pragma omp task'.  After building the
/// task object (emitTaskInit), the task is either enqueued through
/// __kmpc_omp_task_with_deps / __kmpc_omp_task, or — when the if-clause
/// evaluates to false — executed immediately and undeferred between
/// __kmpc_omp_task_begin_if0 / __kmpc_omp_task_complete_if0, first waiting
/// on any dependences via __kmpc_omp_taskwait_deps_51.
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OMPExecutableDirective &D,
                                   llvm::Function *TaskFunction,
                                   QualType SharedsTy, Address Shareds,
                                   const Expr *IfCond,
                                   const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  llvm::Value *NewTask = Result.NewTask;
  llvm::Function *TaskEntry = Result.TaskEntry;
  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  LValue TDBase = Result.TDBase;
  const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  // Process list of dependences.
  Address DependenciesArray = Address::invalid();
  llvm::Value *NumOfElements;
  std::tie(NumOfElements, DependenciesArray) =
      emitDependClause(CGF, Data.Dependences, Loc);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  // list is not empty
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  // DepTaskArgs is only populated (and only read by ThenCodeGen) when there
  // are dependences; the lambdas capture it by reference.
  llvm::Value *DepTaskArgs[7];
  if (!Data.Dependences.empty()) {
    DepTaskArgs[0] = UpLoc;
    DepTaskArgs[1] = ThreadID;
    DepTaskArgs[2] = NewTask;
    DepTaskArgs[3] = NumOfElements;
    DepTaskArgs[4] = DependenciesArray.getPointer();
    DepTaskArgs[5] = CGF.Builder.getInt32(0);
    DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
                        &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
    if (!Data.Tied) {
      // Untied tasks restart from part id 0 on each resume.
      auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
      LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
      CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
    }
    if (!Data.Dependences.empty()) {
      CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
          DepTaskArgs);
    } else {
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_omp_task),
                          TaskArgs);
    }
    // Check if parent region is untied and build return for untied task;
    if (auto *Region =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
      Region->emitUntiedSwitch(CGF);
  };
  llvm::Value *DepWaitTaskArgs[7];
  if (!Data.Dependences.empty()) {
    DepWaitTaskArgs[0] = UpLoc;
    DepWaitTaskArgs[1] = ThreadID;
    DepWaitTaskArgs[2] = NumOfElements;
    DepWaitTaskArgs[3] = DependenciesArray.getPointer();
    DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
    DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
    DepWaitTaskArgs[6] =
        llvm::ConstantInt::get(CGF.Int32Ty, Data.HasNowaitClause);
  }
  auto &M = CGM.getModule();
  auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
                        TaskEntry, &Data, &DepWaitTaskArgs,
                        Loc](CodeGenFunction &CGF, PrePostActionTy &) {
    CodeGenFunction::RunCleanupsScope LocalScope(CGF);
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
    // is specified.
    if (!Data.Dependences.empty())
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              M, OMPRTL___kmpc_omp_taskwait_deps_51),
                          DepWaitTaskArgs);
    // Call proxy_task_entry(gtid, new_task);
    auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
                      Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
      Action.Enter(CGF);
      llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
                                                          OutlinedFnArgs);
    };
    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    RegionCodeGenTy RCG(CodeGen);
    CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                              M, OMPRTL___kmpc_omp_task_begin_if0),
                          TaskArgs,
                          OMPBuilder.getOrCreateRuntimeFunction(
                              M, OMPRTL___kmpc_omp_task_complete_if0),
                          TaskArgs);
    RCG.setAction(Action);
    RCG(CGF);
  };
  if (IfCond) {
    emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  } else {
    // No if-clause: always take the deferred-task path.
    RegionCodeGenTy ThenRCG(ThenCodeGen);
    ThenRCG(CGF);
  }
}
/// Emits the code for '#pragma omp taskloop': initializes the task object,
/// fills in its lower-bound/upper-bound/stride and reductions fields, and
/// issues the __kmpc_taskloop runtime call with the if/nogroup/sched/
/// grainsize arguments derived from the directive's clauses.
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPLoopDirective &D,
                                       llvm::Function *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *IfVal;
  if (IfCond) {
    IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
                                      /*isSigned=*/true);
  } else {
    IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
  }
  // Initialize the task's lb/ub/st fields from the directive's bound
  // variables' initializers.
  LValue LBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  const auto *LBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
                       LBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue UBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  const auto *UBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
                       UBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue StLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  const auto *StVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
                       StLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Store reductions address.
  LValue RedLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  if (Data.Reductions) {
    CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  } else {
    CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
                               CGF.getContext().VoidPtrTy);
  }
  // Encoding of the 'sched' argument of __kmpc_taskloop.
  enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  llvm::Value *TaskArgs[] = {
      UpLoc,
      ThreadID,
      Result.NewTask,
      IfVal,
      LBLVal.getPointer(CGF),
      UBLVal.getPointer(CGF),
      CGF.EmitLoadOfScalar(StLVal, Loc),
      llvm::ConstantInt::getSigned(
          CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler
      // Data.Schedule: pointer = grainsize/num_tasks expr, int flag selects
      // which of the two the clause specified.
      llvm::ConstantInt::getSigned(
          CGF.IntTy, Data.Schedule.getPointer()
                         ? Data.Schedule.getInt() ? NumTasks : Grainsize
                         : NoSchedule),
      Data.Schedule.getPointer()
          ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
                                      /*isSigned=*/false)
          : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
      Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             Result.TaskDupFn, CGF.VoidPtrTy)
                       : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_taskloop),
                      TaskArgs);
}
/// Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
/// (references element of array in original variable).
/// \param RHSVar Variable on the right side of the reduction operation
/// (references element of array in original variable).
/// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// RHSVar.
/// \param XExpr,EExpr,UpExpr Optional expressions forwarded verbatim to
/// RedOpGen (used by the atomic-reduction path).
static void EmitOMPAggregateReduction(
    CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
    const VarDecl *RHSVar,
    const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
                                  const Expr *, const Expr *)> &RedOpGen,
    const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
    const Expr *UpExpr = nullptr) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
  llvm::Value *RHSBegin = RHSAddr.getPointer();
  llvm::Value *LHSBegin = LHSAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *LHSEnd =
      CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  // Skip the loop entirely for a zero-length array.
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);
  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
  // PHIs track the current source/destination element across iterations.
  llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
      RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  Address RHSElementCurrent(
      RHSElementPHI, RHSAddr.getElementType(),
      RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
      LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  Address LHSElementCurrent(
      LHSElementPHI, LHSAddr.getElementType(),
      LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  // Emit copy.
  // Remap LHSVar/RHSVar onto the current elements so RedOpGen's expressions
  // (which reference the variables) operate per element.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Scope.addPrivate(LHSVar, LHSElementCurrent);
  Scope.addPrivate(RHSVar, RHSElementCurrent);
  Scope.Privatize();
  RedOpGen(CGF, XExpr, EExpr, UpExpr);
  Scope.ForceCleanup();
  // Shift the address forward by one element.
  llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
      LHSAddr.getElementType(), LHSElementPHI, /*Idx0=*/1,
      "omp.arraycpy.dest.element");
  llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
      RHSAddr.getElementType(), RHSElementPHI, /*Idx0=*/1,
      "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
- /// Emit reduction combiner. If the combiner is a simple expression emit it as
- /// is, otherwise consider it as combiner of UDR decl and emit it as a call of
- /// UDR combiner function.
- static void emitReductionCombiner(CodeGenFunction &CGF,
- const Expr *ReductionOp) {
- if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
- if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
- if (const auto *DRE =
- dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
- if (const auto *DRD =
- dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
- std::pair<llvm::Function *, llvm::Function *> Reduction =
- CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
- RValue Func = RValue::get(Reduction.first);
- CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
- CGF.EmitIgnoredExpr(ReductionOp);
- return;
- }
- CGF.EmitIgnoredExpr(ReductionOp);
- }
/// Emits the internal function
///   void reduction_func(void *LHSArg, void *RHSArg);
/// used by __kmpc_reduce.  Both arguments are arrays of void* slots pointing
/// at the reduction variables (with extra slots holding VLA sizes for
/// variably-modified types); the body applies ReductionOps[i] to the i-th
/// LHS/RHS pair in place and returns the new function.
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
    SourceLocation Loc, llvm::Type *ArgsElemType,
    ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
  ASTContext &C = CGM.getContext();
  // void reduction_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name = getName({"omp", "reduction", "reduction_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  // Dst = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
                  ArgsElemType->getPointerTo()),
              ArgsElemType, CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
                  ArgsElemType->getPointerTo()),
              ArgsElemType, CGF.getPointerAlign());
  // ...
  // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  // ...
  // Remap each LHS/RHS variable onto its slot in the argument arrays.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  const auto *IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
    const auto *RHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
    Scope.addPrivate(RHSVar, emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar));
    const auto *LHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
    Scope.addPrivate(LHSVar, emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar));
    QualType PrivTy = (*IPriv)->getType();
    if (PrivTy->isVariablyModifiedType()) {
      // Get array size and emit VLA type.
      // VLA privates consume an extra slot that carries the element count.
      ++Idx;
      Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
      llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
      const VariableArrayType *VLA =
          CGF.getContext().getAsVariableArrayType(PrivTy);
      const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
      CodeGenFunction::OpaqueValueMapping OpaqueMap(
          CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
      CGF.EmitVariablyModifiedType(PrivTy);
    }
  }
  Scope.Privatize();
  IPriv = Privates.begin();
  const auto *ILHS = LHSExprs.begin();
  const auto *IRHS = RHSExprs.begin();
  for (const Expr *E : ReductionOps) {
    if ((*IPriv)->getType()->isArrayType()) {
      // Emit reduction for array section.
      const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      EmitOMPAggregateReduction(
          CGF, (*IPriv)->getType(), LHSVar, RHSVar,
          [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
            emitReductionCombiner(CGF, E);
          });
    } else {
      // Emit reduction for array subscript or single variable.
      emitReductionCombiner(CGF, E);
    }
    ++IPriv;
    ++ILHS;
    ++IRHS;
  }
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
- void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
- const Expr *ReductionOp,
- const Expr *PrivateRef,
- const DeclRefExpr *LHS,
- const DeclRefExpr *RHS) {
- if (PrivateRef->getType()->isArrayType()) {
- // Emit reduction for array section.
- const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
- const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
- EmitOMPAggregateReduction(
- CGF, PrivateRef->getType(), LHSVar, RHSVar,
- [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
- emitReductionCombiner(CGF, ReductionOp);
- });
- } else {
- // Emit reduction for array subscript or single variable.
- emitReductionCombiner(CGF, ReductionOp);
- }
- }
- void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps,
- ReductionOptionsTy Options) {
- if (!CGF.HaveInsertPoint())
- return;
- bool WithNowait = Options.WithNowait;
- bool SimpleReduction = Options.SimpleReduction;
- // Next code should be emitted for reduction:
- //
- // static kmp_critical_name lock = { 0 };
- //
- // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
- // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
- // ...
- // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
- // *(Type<n>-1*)rhs[<n>-1]);
- // }
- //
- // ...
- // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
- // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
- // RedList, reduce_func, &<lock>)) {
- // case 1:
- // ...
- // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
- // ...
- // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
- // break;
- // case 2:
- // ...
- // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
- // ...
- // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
- // break;
- // default:;
- // }
- //
- // if SimpleReduction is true, only the next code is generated:
- // ...
- // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
- // ...
- ASTContext &C = CGM.getContext();
- if (SimpleReduction) {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- const auto *IPriv = Privates.begin();
- const auto *ILHS = LHSExprs.begin();
- const auto *IRHS = RHSExprs.begin();
- for (const Expr *E : ReductionOps) {
- emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
- cast<DeclRefExpr>(*IRHS));
- ++IPriv;
- ++ILHS;
- ++IRHS;
- }
- return;
- }
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- auto Size = RHSExprs.size();
- for (const Expr *E : Privates) {
- if (E->getType()->isVariablyModifiedType())
- // Reserve place for array size.
- ++Size;
- }
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- const auto *IPriv = Privates.begin();
- unsigned Idx = 0;
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
- Elem);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
- // 2. Emit reduce_func().
- llvm::Function *ReductionFn =
- emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
- Privates, LHSExprs, RHSExprs, ReductionOps);
- // 3. Create static kmp_critical_name lock = { 0 };
- std::string Name = getName({"reduction"});
- llvm::Value *Lock = getCriticalRegionLock(Name);
- // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
- // RedList, reduce_func, &<lock>);
- llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
- llvm::Value *ThreadId = getThreadID(CGF, Loc);
- llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
- llvm::Value *Args[] = {
- IdentTLoc, // ident_t *<loc>
- ThreadId, // i32 <gtid>
- CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
- ReductionArrayTySize, // size_type sizeof(RedList)
- RL, // void *RedList
- ReductionFn, // void (*) (void *, void *) <reduce_func>
- Lock // kmp_critical_name *&<lock>
- };
- llvm::Value *Res = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(),
- WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
- Args);
- // 5. Build switch(res)
- llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
- llvm::SwitchInst *SwInst =
- CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
- // 6. Build case 1:
- // ...
- // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
- // ...
- // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
- // break;
- llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
- SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
- CGF.EmitBlock(Case1BB);
- // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
- llvm::Value *EndArgs[] = {
- IdentTLoc, // ident_t *<loc>
- ThreadId, // i32 <gtid>
- Lock // kmp_critical_name *&<lock>
- };
- auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
- const auto *IPriv = Privates.begin();
- const auto *ILHS = LHSExprs.begin();
- const auto *IRHS = RHSExprs.begin();
- for (const Expr *E : ReductionOps) {
- RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
- cast<DeclRefExpr>(*IRHS));
- ++IPriv;
- ++ILHS;
- ++IRHS;
- }
- };
- RegionCodeGenTy RCG(CodeGen);
- CommonActionTy Action(
- nullptr, std::nullopt,
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
- : OMPRTL___kmpc_end_reduce),
- EndArgs);
- RCG.setAction(Action);
- RCG(CGF);
- CGF.EmitBranch(DefaultBB);
- // 7. Build case 2:
- // ...
- // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
- // ...
- // break;
- llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
- SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
- CGF.EmitBlock(Case2BB);
- auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- const auto *ILHS = LHSExprs.begin();
- const auto *IRHS = RHSExprs.begin();
- const auto *IPriv = Privates.begin();
- for (const Expr *E : ReductionOps) {
- const Expr *XExpr = nullptr;
- const Expr *EExpr = nullptr;
- const Expr *UpExpr = nullptr;
- BinaryOperatorKind BO = BO_Comma;
- if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
- if (BO->getOpcode() == BO_Assign) {
- XExpr = BO->getLHS();
- UpExpr = BO->getRHS();
- }
- }
- // Try to emit update expression as a simple atomic.
- const Expr *RHSExpr = UpExpr;
- if (RHSExpr) {
- // Analyze RHS part of the whole expression.
- if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
- RHSExpr->IgnoreParenImpCasts())) {
- // If this is a conditional operator, analyze its condition for
- // min/max reduction operator.
- RHSExpr = ACO->getCond();
- }
- if (const auto *BORHS =
- dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
- EExpr = BORHS->getRHS();
- BO = BORHS->getOpcode();
- }
- }
- if (XExpr) {
- const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- auto &&AtomicRedGen = [BO, VD,
- Loc](CodeGenFunction &CGF, const Expr *XExpr,
- const Expr *EExpr, const Expr *UpExpr) {
- LValue X = CGF.EmitLValue(XExpr);
- RValue E;
- if (EExpr)
- E = CGF.EmitAnyExpr(EExpr);
- CGF.EmitOMPAtomicSimpleUpdateExpr(
- X, E, BO, /*IsXLHSInRHSPart=*/true,
- llvm::AtomicOrdering::Monotonic, Loc,
- [&CGF, UpExpr, VD, Loc](RValue XRValue) {
- CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- Address LHSTemp = CGF.CreateMemTemp(VD->getType());
- CGF.emitOMPSimpleStore(
- CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
- VD->getType().getNonReferenceType(), Loc);
- PrivateScope.addPrivate(VD, LHSTemp);
- (void)PrivateScope.Privatize();
- return CGF.EmitAnyExpr(UpExpr);
- });
- };
- if ((*IPriv)->getType()->isArrayType()) {
- // Emit atomic reduction for array section.
- const auto *RHSVar =
- cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
- EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
- AtomicRedGen, XExpr, EExpr, UpExpr);
- } else {
- // Emit atomic reduction for array subscript or single variable.
- AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
- }
- } else {
- // Emit as a critical region.
- auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
- const Expr *, const Expr *) {
- CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
- std::string Name = RT.getName({"atomic_reduction"});
- RT.emitCriticalRegion(
- CGF, Name,
- [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
- Action.Enter(CGF);
- emitReductionCombiner(CGF, E);
- },
- Loc);
- };
- if ((*IPriv)->getType()->isArrayType()) {
- const auto *LHSVar =
- cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- const auto *RHSVar =
- cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
- EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
- CritRedGen);
- } else {
- CritRedGen(CGF, nullptr, nullptr, nullptr);
- }
- }
- ++ILHS;
- ++IRHS;
- ++IPriv;
- }
- };
- RegionCodeGenTy AtomicRCG(AtomicCodeGen);
- if (!WithNowait) {
- // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
- llvm::Value *EndArgs[] = {
- IdentTLoc, // ident_t *<loc>
- ThreadId, // i32 <gtid>
- Lock // kmp_critical_name *&<lock>
- };
- CommonActionTy Action(nullptr, std::nullopt,
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_end_reduce),
- EndArgs);
- AtomicRCG.setAction(Action);
- AtomicRCG(CGF);
- } else {
- AtomicRCG(CGF);
- }
- CGF.EmitBranch(DefaultBB);
- CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
- }
/// Generates unique name for artificial threadprivate variables.
/// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
                                      const Expr *Ref) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  // Try to resolve the base variable declaration of \p Ref (handles more than
  // plain DeclRefExprs); if that fails, assume Ref itself is a DeclRefExpr.
  const clang::DeclRefExpr *DE;
  const VarDecl *D = ::getBaseDecl(Ref, DE);
  if (!D)
    D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
  D = D->getCanonicalDecl();
  // Locals and parameters are identified by their source name, everything
  // else by its mangled name.
  std::string Name = CGM.getOpenMPRuntime().getName(
      {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
  // Append the raw begin-location encoding so that same-named declarations
  // from different source positions get distinct unique names.
  Out << Prefix << Name << "_"
      << D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
  return std::string(Out.str());
}
/// Emits reduction initializer function:
/// \code
/// void @.red_init(void* %arg, void* %orig) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
/// }
/// \endcode
/// \param Loc Source location attributed to the emitted function.
/// \param RCG Reduction codegen helper describing the reduction items.
/// \param N Index of the reduction item this initializer is emitted for.
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  ASTContext &C = CGM.getContext();
  // Both parameters are void* restrict.
  QualType VoidPtrTy = C.VoidPtrTy;
  VoidPtrTy.addRestrict();
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
                          ImplicitParamDecl::Other);
  ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
                              ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  Args.emplace_back(&ParamOrig);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  // Load the address of the private copy from the first (void*) argument.
  QualType PrivateType = RCG.getPrivateType(N);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.Builder.CreateElementBitCast(
          CGF.GetAddrOfLocalVar(&Param),
          CGF.ConvertTypeForMem(PrivateType)->getPointerTo()),
      C.getPointerType(PrivateType)->castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  Address OrigAddr = Address::invalid();
  // If initializer uses initializer from declare reduction construct, emit a
  // pointer to the address of the original reduction item (required by
  // reduction initializer).
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
    OrigAddr = CGF.EmitLoadOfPointer(
        SharedAddr,
        CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
  }
  // Emit the initializer:
  // %0 = bitcast void* %arg to <type>*
  // store <type> <init>, <type>* %0
  RCG.emitInitialization(CGF, N, PrivateAddr, OrigAddr,
                         [](CodeGenFunction &) { return false; });
  CGF.FinishFunction();
  return Fn;
}
/// Emits reduction combiner function:
/// \code
/// void @.red_comb(void* %arg0, void* %arg1) {
/// %lhs = bitcast void* %arg0 to <type>*
/// %rhs = bitcast void* %arg1 to <type>*
/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
/// store <type> %2, <type>* %lhs
/// ret void
/// }
/// \endcode
/// \param ReductionOp Combiner expression from the reduction clause.
/// \param LHS DeclRefExpr for the in/out operand of the combiner.
/// \param RHS DeclRefExpr for the input-only operand of the combiner.
/// \param PrivateRef Reference expression for the private reduction copy.
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N,
                                           const Expr *ReductionOp,
                                           const Expr *LHS, const Expr *RHS,
                                           const Expr *PrivateRef) {
  ASTContext &C = CGM.getContext();
  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  FunctionArgList Args;
  ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.emplace_back(&ParamInOut);
  Args.emplace_back(&ParamIn);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Remap lhs and rhs variables to the addresses of the function arguments.
  // %lhs = bitcast void* %arg0 to <type>*
  // %rhs = bitcast void* %arg1 to <type>*
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  PrivateScope.addPrivate(
      LHSVD,
      // Pull out the pointer to the variable.
      CGF.EmitLoadOfPointer(
          CGF.Builder.CreateElementBitCast(
              CGF.GetAddrOfLocalVar(&ParamInOut),
              CGF.ConvertTypeForMem(LHSVD->getType())->getPointerTo()),
          C.getPointerType(LHSVD->getType())->castAs<PointerType>()));
  PrivateScope.addPrivate(
      RHSVD,
      // Pull out the pointer to the variable.
      CGF.EmitLoadOfPointer(
          CGF.Builder.CreateElementBitCast(
              CGF.GetAddrOfLocalVar(&ParamIn),
              CGF.ConvertTypeForMem(RHSVD->getType())->getPointerTo()),
          C.getPointerType(RHSVD->getType())->castAs<PointerType>()));
  PrivateScope.Privatize();
  // Emit the combiner body:
  // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  // store <type> %2, <type>* %lhs
  CGM.getOpenMPRuntime().emitSingleReductionCombiner(
      CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
      cast<DeclRefExpr>(RHS));
  CGF.FinishFunction();
  return Fn;
}
/// Emits reduction finalizer function:
/// \code
/// void @.red_fini(void* %arg) {
/// %0 = bitcast void* %arg to <type>*
/// <destroy>(<type>* %0)
/// ret void
/// }
/// \endcode
/// Returns nullptr if the reduction item \p N needs no cleanups, so callers
/// must be prepared to store a null pointer instead of a finalizer.
static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  // No destructor-like cleanups -> no finalizer function at all.
  if (!RCG.needCleanups(N))
    return nullptr;
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  // Load the address of the private copy from the void* argument.
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param), C.VoidPtrTy.castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Emit the finalizer body:
  // <destroy>(<type>* %0)
  RCG.emitCleanups(CGF, N, PrivateAddr);
  CGF.FinishFunction(Loc);
  return Fn;
}
/// Emits initialization code for task reductions: builds an on-stack array of
/// kmp_taskred_input_t descriptors (one per reduction item, each carrying the
/// shared/original addresses, size, and init/fini/comb helper routines) and
/// passes it to __kmpc_taskred_modifier_init or __kmpc_taskred_init.
/// Returns the taskgroup-data pointer produced by the runtime, or nullptr if
/// there is nothing to do.
llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
    return nullptr;
  // Build typedef struct:
  // kmp_taskred_input {
  // void *reduce_shar; // shared reduction item
  // void *reduce_orig; // original reduction item used for initialization
  // size_t reduce_size; // size of data item
  // void *reduce_init; // data initialization routine
  // void *reduce_fini; // data finalization routine
  // void *reduce_comb; // data combiner routine
  // kmp_task_red_flags_t flags; // flags for additional info from compiler
  // } kmp_taskred_input_t;
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
  RD->startDefinition();
  const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FlagsFD = addFieldToRecordDecl(
      C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  RD->completeDefinition();
  QualType RDType = C.getRecordType(RD);
  unsigned Size = Data.ReductionVars.size();
  llvm::APInt ArraySize(/*numBits=*/64, Size);
  QualType ArrayRDType = C.getConstantArrayType(
      RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
  // kmp_taskred_input_t .rd_input.[Size];
  Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
                       Data.ReductionCopies, Data.ReductionOps);
  // Fill one descriptor per reduction item.
  for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
    // kmp_taskred_input_t &ElemLVal = .rd_input.[Cnt];
    llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
                           llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
    llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
        TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
        /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
        ".rd_input.gep.");
    LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
    // ElemLVal.reduce_shar = &Shareds[Cnt];
    LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
    RCG.emitSharedOrigLValue(CGF, Cnt);
    llvm::Value *CastedShared =
        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
    CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
    // ElemLVal.reduce_orig = &Origs[Cnt];
    LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
    llvm::Value *CastedOrig =
        CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
    CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
    RCG.emitAggregateType(CGF, Cnt);
    llvm::Value *SizeValInChars;
    llvm::Value *SizeVal;
    std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
    // We use delayed creation/initialization for VLAs and array sections. It is
    // required because runtime does not provide the way to pass the sizes of
    // VLAs/array sections to initializer/combiner/finalizer functions. Instead
    // threadprivate global variables are used to store these values and use
    // them in the functions.
    bool DelayedCreation = !!SizeVal;
    SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
                                               /*isSigned=*/false);
    // ElemLVal.reduce_size = size;
    LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
    CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
    // ElemLVal.reduce_init = init;
    LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
    llvm::Value *InitAddr =
        CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
    CGF.EmitStoreOfScalar(InitAddr, InitLVal);
    // ElemLVal.reduce_fini = fini; (may be null if no cleanups are needed).
    LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
    llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
    llvm::Value *FiniAddr = Fini
                                ? CGF.EmitCastToVoidPtr(Fini)
                                : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
    // ElemLVal.reduce_comb = comb;
    LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
    llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
        CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
        RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
    CGF.EmitStoreOfScalar(CombAddr, CombLVal);
    // ElemLVal.flags = 0; (flag 1 marks items needing delayed creation).
    LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
    if (DelayedCreation) {
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
          FlagsLVal);
    } else
      CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
                                 FlagsLVal.getType());
  }
  if (Data.IsReductionWithTaskMod) {
    // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
    // is_ws, int num, void *data);
    llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
                                                  CGM.IntTy, /*isSigned=*/true);
    llvm::Value *Args[] = {
        IdentTLoc, GTid,
        llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
                               /*isSigned=*/true),
        llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            TaskRedInput.getPointer(), CGM.VoidPtrTy)};
    return CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
        Args);
  }
  // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
                                                      CGM.VoidPtrTy)};
  return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                 CGM.getModule(), OMPRTL___kmpc_taskred_init),
                             Args);
}
/// Emits the finalization call for a task reduction with a modifier.
void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
                                            SourceLocation Loc,
                                            bool IsWorksharingReduction) {
  // Build call void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid,
  // int is_ws);
  llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
                                                CGM.IntTy, /*isSigned=*/true);
  llvm::Value *Args[] = {IdentTLoc, GTid,
                         llvm::ConstantInt::get(CGM.IntTy,
                                                IsWorksharingReduction ? 1 : 0,
                                                /*isSigned=*/true)};
  (void)CGF.EmitRuntimeCall(
      OMPBuilder.getOrCreateRuntimeFunction(
          CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
      Args);
}
/// Stores the dynamically computed size of reduction item \p N into an
/// artificial threadprivate variable so that the separately emitted
/// init/comb/fini helper functions can read it at run time.
void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              ReductionCodeGen &RCG,
                                              unsigned N) {
  auto Sizes = RCG.getSizes(N);
  // Emit threadprivate global variable if the size is non-constant
  // (Sizes.second != nullptr); constant-sized items need no fixup.
  if (Sizes.second) {
    llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
                                                     /*isSigned=*/false);
    // The unique name here must match the one used by the helper emitters
    // (see emitReduceInitFunction and friends) when they load the size back.
    Address SizeAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
  }
}
/// Returns the address of the thread-specific copy of a task reduction item,
/// obtained from the runtime for the taskgroup data \p ReductionsPtr and the
/// shared item \p SharedLVal.
Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              llvm::Value *ReductionsPtr,
                                              LValue SharedLVal) {
  // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  // *d);
  llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
                                                   CGM.IntTy,
                                                   /*isSigned=*/true),
                         ReductionsPtr,
                         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
  // Reuse the shared item's alignment for the returned thread-local address.
  return Address(
      CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
          Args),
      CGF.Int8Ty, SharedLVal.getAlignment());
}
/// Emits code for the 'taskwait' directive: either via the OpenMPIRBuilder
/// (no-dependence case only) or via direct runtime calls, choosing
/// __kmpc_omp_taskwait_deps_51 when dependences are present.
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder && Data.Dependences.empty()) {
    // TODO: Need to support taskwait with dependences in the OpenMPIRBuilder.
    OMPBuilder.createTaskwait(CGF.Builder);
  } else {
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
    auto &M = CGM.getModule();
    Address DependenciesArray = Address::invalid();
    llvm::Value *NumOfElements;
    std::tie(NumOfElements, DependenciesArray) =
        emitDependClause(CGF, Data.Dependences, Loc);
    if (!Data.Dependences.empty()) {
      llvm::Value *DepWaitTaskArgs[7];
      DepWaitTaskArgs[0] = UpLoc;
      DepWaitTaskArgs[1] = ThreadID;
      DepWaitTaskArgs[2] = NumOfElements;
      DepWaitTaskArgs[3] = DependenciesArray.getPointer();
      DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
      DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
      DepWaitTaskArgs[6] =
          llvm::ConstantInt::get(CGF.Int32Ty, Data.HasNowaitClause);
      CodeGenFunction::RunCleanupsScope LocalScope(CGF);
      // Build void __kmpc_omp_taskwait_deps_51(ident_t *, kmp_int32 gtid,
      // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
      // ndeps_noalias, kmp_depend_info_t *noalias_dep_list,
      // kmp_int32 has_no_wait); if dependence info is specified.
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              M, OMPRTL___kmpc_omp_taskwait_deps_51),
                          DepWaitTaskArgs);
    } else {
      // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
      // global_tid);
      llvm::Value *Args[] = {UpLoc, ThreadID};
      // Ignore return result until untied tasks are supported.
      CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_taskwait),
          Args);
    }
  }
  // Emit the untied-task resume point, if inside an untied task region.
  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    Region->emitUntiedSwitch(CGF);
}
/// Emits the body of an OpenMP directive inlined into the current function,
/// inside a temporary inlined-region RAII scope.
void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
                                           OpenMPDirectiveKind InnerKind,
                                           const RegionCodeGenTy &CodeGen,
                                           bool HasCancel) {
  if (!CGF.HaveInsertPoint())
    return;
  // critical/master/masked regions are not treated as "no-inherit" regions
  // (last RAII argument), all other kinds are.
  InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel,
                                 InnerKind != OMPD_critical &&
                                     InnerKind != OMPD_master &&
                                     InnerKind != OMPD_masked);
  CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}
namespace {
/// Cancellation kinds passed as the kmp_int32 cncl_kind argument to the
/// __kmpc_cancel / __kmpc_cancellationpoint runtime calls (see the call sites
/// below); the numeric values are part of the runtime ABI.
enum RTCancelKind {
  CancelNoreq = 0,     // no cancellation requested
  CancelParallel = 1,  // cancel parallel region
  CancelLoop = 2,      // cancel worksharing loop
  CancelSections = 3,  // cancel sections construct
  CancelTaskgroup = 4  // cancel taskgroup
};
} // anonymous namespace
- static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
- RTCancelKind CancelKind = CancelNoreq;
- if (CancelRegion == OMPD_parallel)
- CancelKind = CancelParallel;
- else if (CancelRegion == OMPD_for)
- CancelKind = CancelLoop;
- else if (CancelRegion == OMPD_sections)
- CancelKind = CancelSections;
- else {
- assert(CancelRegion == OMPD_taskgroup);
- CancelKind = CancelTaskgroup;
- }
- return CancelKind;
- }
/// Emits code for the 'cancellation point' directive: calls the runtime and,
/// if cancellation was activated, branches out of the construct (after a
/// cancel barrier for parallel regions).
void CGOpenMPRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel. This may instead happen in another adjacent task.
    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
      llvm::Value *Args[] = {
          emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // The result is non-zero if cancellation has been activated.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
          Args);
      // if (__kmpc_cancellationpoint()) {
      // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
      // exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      if (CancelRegion == OMPD_parallel)
        emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
      // exit from construct;
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    }
  }
}
/// Emits code for the 'cancel' directive, optionally guarded by an 'if'
/// clause: calls the runtime and, if cancellation was activated, branches out
/// of the construct (after a cancel barrier for parallel regions).
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                     const Expr *IfCond,
                                     OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  auto &M = CGM.getModule();
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    auto &&ThenGen = [this, &M, Loc, CancelRegion,
                      OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
      CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
      llvm::Value *Args[] = {
          RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // The result is non-zero if cancellation has been activated.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
      // if (__kmpc_cancel()) {
      // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
      // exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      if (CancelRegion == OMPD_parallel)
        RT.emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
      // exit from construct;
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    };
    if (IfCond) {
      // With an 'if' clause, cancel only when the condition is true.
      emitIfClause(CGF, IfCond, ThenGen,
                   [](CodeGenFunction &, PrePostActionTy &) {});
    } else {
      RegionCodeGenTy ThenRCG(ThenGen);
      ThenRCG(CGF);
    }
  }
}
namespace {
/// Cleanup action for uses_allocators support: initializes each
/// (allocator, traits) pair on region entry and destroys the allocators on
/// region exit.
class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
  // Pairs of (allocator expr, allocator-traits expr) to manage.
  ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
public:
  OMPUsesAllocatorsActionTy(
      ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
      : Allocators(Allocators) {}
  /// Emits __kmpc_init_allocator-based initialization for every allocator.
  void Enter(CodeGenFunction &CGF) override {
    if (!CGF.HaveInsertPoint())
      return;
    for (const auto &AllocatorData : Allocators) {
      CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
          CGF, AllocatorData.first, AllocatorData.second);
    }
  }
  /// Emits __kmpc_destroy_allocator-based finalization for every allocator.
  void Exit(CodeGenFunction &CGF) override {
    if (!CGF.HaveInsertPoint())
      return;
    for (const auto &AllocatorData : Allocators) {
      CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
                                                        AllocatorData.first);
    }
  }
};
} // namespace
- void CGOpenMPRuntime::emitTargetOutlinedFunction(
- const OMPExecutableDirective &D, StringRef ParentName,
- llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
- bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
- assert(!ParentName.empty() && "Invalid target entry parent name!");
- HasEmittedTargetRegion = true;
- SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
- for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
- for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
- const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
- if (!D.AllocatorTraits)
- continue;
- Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
- }
- }
- OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
- CodeGen.setAction(UsesAllocatorAction);
- emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
- IsOffloadEntry, CodeGen);
- }
/// Emits initialization of a user-defined allocator from a uses_allocators
/// clause: calls __kmpc_init_allocator with the traits array and stores the
/// resulting handle into the allocator variable.
void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
                                             const Expr *Allocator,
                                             const Expr *AllocatorTraits) {
  llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
  ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
  // Use default memspace handle.
  llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  // Number of traits is the constant array length of the traits expression.
  llvm::Value *NumTraits = llvm::ConstantInt::get(
      CGF.IntTy, cast<ConstantArrayType>(
                     AllocatorTraits->getType()->getAsArrayTypeUnsafe())
                     ->getSize()
                     .getLimitedValue());
  // Reinterpret the traits lvalue as void* to pass it to the runtime.
  LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
  Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
  AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
                                           AllocatorTraitsLVal.getBaseInfo(),
                                           AllocatorTraitsLVal.getTBAAInfo());
  llvm::Value *Traits =
      CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
  llvm::Value *AllocatorVal =
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_init_allocator),
                          {ThreadId, MemSpaceHandle, NumTraits, Traits});
  // Store to allocator.
  CGF.EmitVarDecl(*cast<VarDecl>(
      cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
  LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
  // Convert the runtime's void* handle to the allocator variable's type.
  AllocatorVal =
      CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
                               Allocator->getType(), Allocator->getExprLoc());
  CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
}
/// Emits destruction of a user-defined allocator from a uses_allocators
/// clause: loads the allocator handle and calls __kmpc_destroy_allocator.
void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
                                             const Expr *Allocator) {
  llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
  ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
  LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
  llvm::Value *AllocatorVal =
      CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
  // Convert the allocator variable's value back to the runtime's void* handle.
  AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
                                          CGF.getContext().VoidPtrTy,
                                          Allocator->getExprLoc());
  (void)CGF.EmitRuntimeCall(
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___kmpc_destroy_allocator),
      {ThreadId, AllocatorVal});
}
/// Shared helper that creates the outlined target-region function through the
/// OpenMPIRBuilder, wiring in the deferred CodeGen callback and the
/// NumTeams/ThreadLimit defaults derived from the directive.
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  auto EntryInfo =
      getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), ParentName);
  CodeGenFunction CGF(CGM, true);
  // Invoked by the IRBuilder when (and if) the region body must be generated.
  llvm::OpenMPIRBuilder::FunctionGenCallback &&GenerateOutlinedFunction =
      [&CGF, &D, &CodeGen](StringRef EntryFnName) {
        const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
        CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        return CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
      };
  // Get NumTeams and ThreadLimit attributes (-1 means "not specified").
  int32_t DefaultValTeams = -1;
  int32_t DefaultValThreads = -1;
  getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
  getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
  OMPBuilder.emitTargetRegionFunction(OffloadEntriesInfoManager, EntryInfo,
                                      GenerateOutlinedFunction, DefaultValTeams,
                                      DefaultValThreads, IsOffloadEntry,
                                      OutlinedFn, OutlinedFnID);
  if (OutlinedFn != nullptr)
    CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
}
- /// Checks if the expression is constant or does not have non-trivial function
- /// calls.
- /// Used to decide whether a statement can be ignored when searching for the
- /// single meaningful child of a target region body.
- static bool isTrivial(ASTContext &Ctx, const Expr * E) {
- // We can skip constant expressions.
- // We can skip expressions with trivial calls or simple expressions.
- // Side effects always disqualify, even for otherwise-constant expressions.
- return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
- !E->hasNonTrivialCall(Ctx)) &&
- !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
- }
- /// Peels compound statements and ignorable statements (trivial expressions,
- /// no-op directives, unused/global declarations) to find the unique
- /// "interesting" child of \p Body. Returns null when there is no child or
- /// more than one non-ignorable child at some nesting level.
- const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
- const Stmt *Body) {
- const Stmt *Child = Body->IgnoreContainers();
- while (const auto *C = dyn_cast_or_null<CompoundStmt>(Child)) {
- Child = nullptr;
- for (const Stmt *S : C->body()) {
- if (const auto *E = dyn_cast<Expr>(S)) {
- if (isTrivial(Ctx, E))
- continue;
- }
- // Some of the statements can be ignored.
- if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
- isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
- continue;
- // Analyze declarations.
- if (const auto *DS = dyn_cast<DeclStmt>(S)) {
- // A DeclStmt is ignorable only if every declaration in it is.
- if (llvm::all_of(DS->decls(), [](const Decl *D) {
- if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
- isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
- isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
- isa<UsingDirectiveDecl>(D) ||
- isa<OMPDeclareReductionDecl>(D) ||
- isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
- return true;
- const auto *VD = dyn_cast<VarDecl>(D);
- if (!VD)
- return false;
- // Globals and never-used locals do not affect the region body.
- return VD->hasGlobalStorage() || !VD->isUsed();
- }))
- continue;
- }
- // Found multiple children - cannot get the one child only.
- if (Child)
- return nullptr;
- Child = S;
- }
- if (Child)
- Child = Child->IgnoreContainers();
- }
- return Child;
- }
- /// Returns the num_teams expression governing a target-based directive, or
- /// null when the team count is implied by the directive form. \p DefaultVal
- /// receives the constant-folded value when the expression is an ICE, or an
- /// implied default: 0 = runtime-chosen, 1 = single team, -1 = no teams
- /// region needed.
- const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- int32_t &DefaultVal) {
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
- "Expected target-based executable directive.");
- switch (DirectiveKind) {
- case OMPD_target: {
- // A bare 'target' may enclose a nested teams/parallel directive whose
- // clauses determine the team count; look through the captured body.
- const auto *CS = D.getInnermostCapturedStmt();
- const auto *Body =
- CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt =
- CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body);
- if (const auto *NestedDir =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
- if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
- const Expr *NumTeams =
- NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
- if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
- if (auto Constant =
- NumTeams->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
- return NumTeams;
- }
- DefaultVal = 0;
- return nullptr;
- }
- if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
- isOpenMPSimdDirective(NestedDir->getDirectiveKind())) {
- DefaultVal = 1;
- return nullptr;
- }
- DefaultVal = 1;
- return nullptr;
- }
- // A value of -1 is used to check if we need to emit no teams region
- DefaultVal = -1;
- return nullptr;
- }
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- // Combined target+teams forms carry the clause directly.
- if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
- const Expr *NumTeams =
- D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
- if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
- if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
- return NumTeams;
- }
- DefaultVal = 0;
- return nullptr;
- }
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_simd:
- DefaultVal = 1;
- return nullptr;
- // All remaining directive kinds are not target execution directives and
- // are unreachable here (guarded by the assert above).
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- case OMPD_unknown:
- break;
- default:
- break;
- }
- llvm_unreachable("Unexpected directive kind.");
- }
- /// Emits the host-side i32 value for the number of teams of a target
- /// directive: the evaluated num_teams clause when present, otherwise the
- /// directive-implied default from getNumTeamsExprForTargetDirective.
- llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
- "Clauses associated with the teams directive expected to be emitted "
- "only for the host!");
- CGBuilderTy &Bld = CGF.Builder;
- int32_t DefaultNT = -1;
- const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT);
- if (NumTeams != nullptr) {
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- switch (DirectiveKind) {
- case OMPD_target: {
- // The clause came from a nested teams directive, so evaluate it in the
- // captured-statement context of the target region.
- const auto *CS = D.getInnermostCapturedStmt();
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
- /*IgnoreResultAssign*/ true);
- return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
- /*isSigned=*/true);
- }
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- // Clause belongs to the directive itself; evaluate in the current scope.
- CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
- llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
- /*IgnoreResultAssign*/ true);
- return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
- /*isSigned=*/true);
- }
- default:
- break;
- }
- }
- // No clause expression: fall back to the implied default (may be -1).
- return llvm::ConstantInt::get(CGF.Int32Ty, DefaultNT);
- }
- /// Computes the thread count implied by a parallel/simd directive nested
- /// directly inside \p CS, clamped by \p DefaultThreadLimitVal when given.
- /// Returns null (i.e. DefaultThreadLimitVal) when no nested directive
- /// constrains the thread count.
- static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
- llvm::Value *DefaultThreadLimitVal) {
- const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
- CGF.getContext(), CS->getCapturedStmt());
- if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
- if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
- llvm::Value *NumThreads = nullptr;
- llvm::Value *CondVal = nullptr;
- // Handle if clause. If if clause present, the number of threads is
- // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
- if (Dir->hasClausesOfKind<OMPIfClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- const OMPIfClause *IfClause = nullptr;
- // Only an unmodified or parallel-modified 'if' applies here.
- for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
- if (C->getNameModifier() == OMPD_unknown ||
- C->getNameModifier() == OMPD_parallel) {
- IfClause = C;
- break;
- }
- }
- if (IfClause) {
- const Expr *Cond = IfClause->getCondition();
- bool Result;
- if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
- // Statically-false 'if' means a serialized parallel: one thread.
- if (!Result)
- return CGF.Builder.getInt32(1);
- } else {
- CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
- // Emit any clause pre-init declarations before evaluating.
- if (const auto *PreInit =
- cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
- for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- } else {
- CodeGenFunction::AutoVarEmission Emission =
- CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
- CGF.EmitAutoVarCleanups(Emission);
- }
- }
- }
- CondVal = CGF.EvaluateExprAsBool(Cond);
- }
- }
- }
- // Check the value of num_threads clause iff if clause was not specified
- // or is not evaluated to false.
- if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- const auto *NumThreadsClause =
- Dir->getSingleClause<OMPNumThreadsClause>();
- CodeGenFunction::LexicalScope Scope(
- CGF, NumThreadsClause->getNumThreads()->getSourceRange());
- if (const auto *PreInit =
- cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
- for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- } else {
- CodeGenFunction::AutoVarEmission Emission =
- CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
- CGF.EmitAutoVarCleanups(Emission);
- }
- }
- }
- NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
- NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty,
- /*isSigned=*/false);
- // Clamp to the surrounding thread_limit via unsigned min.
- if (DefaultThreadLimitVal)
- NumThreads = CGF.Builder.CreateSelect(
- CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
- DefaultThreadLimitVal, NumThreads);
- } else {
- NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
- : CGF.Builder.getInt32(0);
- }
- // Process condition of the if clause.
- if (CondVal) {
- NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
- CGF.Builder.getInt32(1));
- }
- return NumThreads;
- }
- // A simd region executes with a single thread.
- if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
- return CGF.Builder.getInt32(1);
- }
- return DefaultThreadLimitVal;
- }
- /// Returns the expression bounding the per-team thread count of a
- /// target-based directive (thread_limit, possibly tightened by a constant
- /// num_threads), or null when no such clause applies. \p DefaultVal receives
- /// the constant-folded bound when it is an ICE.
- const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- int32_t &DefaultVal) {
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
- "Expected target-based executable directive.");
- switch (DirectiveKind) {
- case OMPD_target:
- // Teams have no clause thread_limit
- return nullptr;
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit();
- if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
- if (auto Constant =
- ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
- return ThreadLimit;
- }
- return nullptr;
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- Expr *ThreadLimit = nullptr;
- Expr *NumThreads = nullptr;
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- ThreadLimit = ThreadLimitClause->getThreadLimit();
- if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
- if (auto Constant =
- ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
- }
- // A constant num_threads smaller than thread_limit tightens the bound.
- if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
- const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
- NumThreads = NumThreadsClause->getNumThreads();
- if (NumThreads->isIntegerConstantExpr(CGF.getContext())) {
- if (auto Constant =
- NumThreads->getIntegerConstantExpr(CGF.getContext())) {
- if (Constant->getExtValue() < DefaultVal) {
- DefaultVal = Constant->getExtValue();
- ThreadLimit = NumThreads;
- }
- }
- }
- }
- return ThreadLimit;
- }
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_simd:
- DefaultVal = 1;
- return nullptr;
- // All remaining directive kinds are not target execution directives and
- // are unreachable here (guarded by the assert above).
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- break;
- default:
- break;
- }
- llvm_unreachable("Unsupported directive kind.");
- }
- /// Emits the host-side i32 thread-limit value for a target directive,
- /// combining thread_limit / num_threads / 'if' clauses (including those on
- /// directives nested inside a bare 'target'). A result of 0 means "let the
- /// runtime decide".
- llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
- "Clauses associated with the teams directive expected to be emitted "
- "only for the host!");
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
- "Expected target-based executable directive.");
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Value *ThreadLimitVal = nullptr;
- llvm::Value *NumThreadsVal = nullptr;
- switch (DirectiveKind) {
- case OMPD_target: {
- // Clauses live on directives nested inside the target region, if any.
- const CapturedStmt *CS = D.getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
- const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
- CGF.getContext(), CS->getCapturedStmt());
- // TODO: The standard is not clear how to resolve two thread limit clauses,
- // let's pick the teams one if it's present, otherwise the target one.
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
- if (const auto *TLC = Dir->getSingleClause<OMPThreadLimitClause>()) {
- ThreadLimitClause = TLC;
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- CodeGenFunction::LexicalScope Scope(
- CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
- // Emit the clause's pre-init declarations before evaluating it.
- if (const auto *PreInit =
- cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
- for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- } else {
- CodeGenFunction::AutoVarEmission Emission =
- CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
- CGF.EmitAutoVarCleanups(Emission);
- }
- }
- }
- }
- }
- if (ThreadLimitClause) {
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
- }
- if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
- // Descend through a pure teams directive to the distribute below it.
- if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
- !isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
- CS = Dir->getInnermostCapturedStmt();
- const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
- CGF.getContext(), CS->getCapturedStmt());
- Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
- }
- if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
- !isOpenMPSimdDirective(Dir->getDirectiveKind())) {
- CS = Dir->getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
- }
- if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
- return Bld.getInt32(1);
- }
- return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
- }
- case OMPD_target_teams: {
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
- }
- const CapturedStmt *CS = D.getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
- const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
- CGF.getContext(), CS->getCapturedStmt());
- if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
- // Look one level further through a nested 'distribute'.
- if (Dir->getDirectiveKind() == OMPD_distribute) {
- CS = Dir->getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
- }
- }
- return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
- }
- case OMPD_target_teams_distribute:
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
- }
- if (llvm::Value *NumThreads =
- getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal))
- return NumThreads;
- return Bld.getInt32(0);
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- llvm::Value *CondVal = nullptr;
- // Handle if clause. If if clause present, the number of threads is
- // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
- if (D.hasClausesOfKind<OMPIfClause>()) {
- const OMPIfClause *IfClause = nullptr;
- // Only an unmodified or parallel-modified 'if' applies here.
- for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
- if (C->getNameModifier() == OMPD_unknown ||
- C->getNameModifier() == OMPD_parallel) {
- IfClause = C;
- break;
- }
- }
- if (IfClause) {
- const Expr *Cond = IfClause->getCondition();
- bool Result;
- if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
- if (!Result)
- return Bld.getInt32(1);
- } else {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- CondVal = CGF.EvaluateExprAsBool(Cond);
- }
- }
- }
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
- }
- if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
- CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
- llvm::Value *NumThreads = CGF.EmitScalarExpr(
- NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true);
- NumThreadsVal =
- Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
- // Result is min(num_threads, thread_limit) via unsigned compare.
- ThreadLimitVal = ThreadLimitVal
- ? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
- ThreadLimitVal),
- NumThreadsVal, ThreadLimitVal)
- : NumThreadsVal;
- }
- if (!ThreadLimitVal)
- ThreadLimitVal = Bld.getInt32(0);
- if (CondVal)
- return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
- return ThreadLimitVal;
- }
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_simd:
- return Bld.getInt32(1);
- // All remaining directive kinds are not target execution directives and
- // are unreachable here (guarded by the assert above).
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- case OMPD_unknown:
- break;
- default:
- break;
- }
- llvm_unreachable("Unsupported directive kind.");
- }
- namespace {
- LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
- // Utility to handle information from clauses associated with a given
- // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
- // It provides a convenient interface to obtain the information and generate
- // code for that information.
- class MappableExprsHandler {
- public:
- /// Get the offset of the OMP_MAP_MEMBER_OF field.
- /// Get the offset of the OMP_MAP_MEMBER_OF field.
- /// Counts the trailing zero bits of the OMP_MAP_MEMBER_OF mask, i.e. the
- /// bit position where the member-of index is stored in the map-type flags.
- static unsigned getFlagMemberOffset() {
- unsigned Offset = 0;
- for (uint64_t Remain =
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
- !(Remain & 1); Remain = Remain >> 1)
- Offset++;
- return Offset;
- }
- /// Class that holds debugging information for a data mapping to be passed to
- /// the runtime library.
- class MappingExprInfo {
- /// The variable declaration used for the data mapping.
- const ValueDecl *MapDecl = nullptr;
- /// The original expression used in the map clause, or null if there is
- /// none.
- const Expr *MapExpr = nullptr;
- public:
- MappingExprInfo(const ValueDecl *MapDecl, const Expr *MapExpr = nullptr)
- : MapDecl(MapDecl), MapExpr(MapExpr) {}
- const ValueDecl *getMapDecl() const { return MapDecl; }
- const Expr *getMapExpr() const { return MapExpr; }
- };
- /// Class that associates information with a base pointer to be passed to the
- /// runtime library.
- class BasePointerInfo {
- /// The base pointer.
- llvm::Value *Ptr = nullptr;
- /// The base declaration that refers to this device pointer, or null if
- /// there is none.
- const ValueDecl *DevPtrDecl = nullptr;
- public:
- BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
- : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
- llvm::Value *operator*() const { return Ptr; }
- const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
- void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
- };
- using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
- using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
- using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
- using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
- using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
- using MapDimArrayTy = SmallVector<uint64_t, 4>;
- using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
- /// This structure contains combined information generated for mappable
- /// clauses, including base pointers, pointers, sizes, map types, user-defined
- /// mappers, and non-contiguous information.
- struct MapCombinedInfoTy {
- struct StructNonContiguousInfo {
- bool IsNonContiguous = false;
- MapDimArrayTy Dims;
- MapNonContiguousArrayTy Offsets;
- MapNonContiguousArrayTy Counts;
- MapNonContiguousArrayTy Strides;
- };
- MapExprsArrayTy Exprs;
- MapBaseValuesArrayTy BasePointers;
- MapValuesArrayTy Pointers;
- MapValuesArrayTy Sizes;
- MapFlagsArrayTy Types;
- MapMappersArrayTy Mappers;
- StructNonContiguousInfo NonContigInfo;
- /// Append arrays in \a CurInfo.
- void append(MapCombinedInfoTy &CurInfo) {
- Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
- BasePointers.append(CurInfo.BasePointers.begin(),
- CurInfo.BasePointers.end());
- Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
- Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
- Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
- Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
- NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
- CurInfo.NonContigInfo.Dims.end());
- NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
- CurInfo.NonContigInfo.Offsets.end());
- NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
- CurInfo.NonContigInfo.Counts.end());
- NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
- CurInfo.NonContigInfo.Strides.end());
- }
- };
- /// Map between a struct and the its lowest & highest elements which have been
- /// mapped.
- /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
- /// HE(FieldIndex, Pointer)}
- struct StructRangeInfoTy {
- MapCombinedInfoTy PreliminaryMapData;
- std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
- 0, Address::invalid()};
- std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
- 0, Address::invalid()};
- Address Base = Address::invalid();
- Address LB = Address::invalid();
- bool IsArraySection = false;
- bool HasCompleteRecord = false;
- };
- private:
- /// Kind that defines how a device pointer has to be returned.
- struct MapInfo {
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
- OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
- ArrayRef<OpenMPMapModifierKind> MapModifiers;
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
- bool ReturnDevicePointer = false;
- bool IsImplicit = false;
- const ValueDecl *Mapper = nullptr;
- const Expr *VarRef = nullptr;
- bool ForDeviceAddr = false;
- MapInfo() = default;
- MapInfo(
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
- bool ReturnDevicePointer, bool IsImplicit,
- const ValueDecl *Mapper = nullptr, const Expr *VarRef = nullptr,
- bool ForDeviceAddr = false)
- : Components(Components), MapType(MapType), MapModifiers(MapModifiers),
- MotionModifiers(MotionModifiers),
- ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
- Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {}
- };
- /// If use_device_ptr or use_device_addr is used on a decl which is a struct
- /// member and there is no map information about it, then emission of that
- /// entry is deferred until the whole struct has been processed.
- struct DeferredDevicePtrEntryTy {
- const Expr *IE = nullptr;
- const ValueDecl *VD = nullptr;
- bool ForDeviceAddr = false;
- DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
- bool ForDeviceAddr)
- : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
- };
- /// The target directive from where the mappable clauses were extracted. It
- /// is either a executable directive or a user-defined mapper directive.
- llvm::PointerUnion<const OMPExecutableDirective *,
- const OMPDeclareMapperDecl *>
- CurDir;
- /// Function the directive is being generated for.
- CodeGenFunction &CGF;
- /// Set of all first private variables in the current directive.
- /// bool data is set to true if the variable is implicitly marked as
- /// firstprivate, false otherwise.
- llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, bool> FirstPrivateDecls;
- /// Map between device pointer declarations and their expression components.
- /// The key value for declarations in 'this' is null.
- llvm::DenseMap<
- const ValueDecl *,
- SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
- DevPointersMap;
- /// Map between device addr declarations and their expression components.
- /// The key value for declarations in 'this' is null.
- llvm::DenseMap<
- const ValueDecl *,
- SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
- HasDevAddrsMap;
- /// Map between lambda declarations and their map type.
- llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
- /// Computes the in-bytes size (as an llvm::Value of size_t type) of the
- /// storage referenced by a mappable expression, handling array shaping
- /// expressions and array sections specially; plain expressions fall back
- /// to the size of their (non-reference) type.
- llvm::Value *getExprTypeSize(const Expr *E) const {
- QualType ExprTy = E->getType().getCanonicalType();
- // Calculate the size for array shaping expression.
- if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
- // size = element size * product of all dimension extents.
- llvm::Value *Size =
- CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
- for (const Expr *SE : OAE->getDimensions()) {
- llvm::Value *Sz = CGF.EmitScalarExpr(SE);
- Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
- CGF.getContext().getSizeType(),
- SE->getExprLoc());
- Size = CGF.Builder.CreateNUWMul(Size, Sz);
- }
- return Size;
- }
- // Reference types are ignored for mapping purposes.
- if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
- ExprTy = RefTy->getPointeeType().getCanonicalType();
- // Given that an array section is considered a built-in type, we need to
- // do the calculation based on the length of the section instead of relying
- // on CGF.getTypeSize(E->getType()).
- if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
- QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
- OAE->getBase()->IgnoreParenImpCasts())
- .getCanonicalType();
- // If there is no length associated with the expression and lower bound is
- // not specified too, that means we are using the whole length of the
- // base.
- if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
- !OAE->getLowerBound())
- return CGF.getTypeSize(BaseTy);
- llvm::Value *ElemSize;
- if (const auto *PTy = BaseTy->getAs<PointerType>()) {
- ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
- } else {
- const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
- assert(ATy && "Expecting array type if not a pointer type.");
- ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
- }
- // If we don't have a length at this point, that is because we have an
- // array section with a single element.
- if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
- return ElemSize;
- if (const Expr *LenExpr = OAE->getLength()) {
- llvm::Value *LengthVal = CGF.EmitScalarExpr(LenExpr);
- LengthVal = CGF.EmitScalarConversion(LengthVal, LenExpr->getType(),
- CGF.getContext().getSizeType(),
- LenExpr->getExprLoc());
- return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
- }
- assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
- OAE->getLowerBound() && "expected array_section[lb:].");
- // Size = sizetype - lb * elemtype;
- llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
- llvm::Value *LBVal = CGF.EmitScalarExpr(OAE->getLowerBound());
- LBVal = CGF.EmitScalarConversion(LBVal, OAE->getLowerBound()->getType(),
- CGF.getContext().getSizeType(),
- OAE->getLowerBound()->getExprLoc());
- LBVal = CGF.Builder.CreateNUWMul(LBVal, ElemSize);
- // Guard against lb past the end: clamp negative sizes to zero.
- llvm::Value *Cmp = CGF.Builder.CreateICmpUGT(LengthVal, LBVal);
- llvm::Value *TrueVal = CGF.Builder.CreateNUWSub(LengthVal, LBVal);
- LengthVal = CGF.Builder.CreateSelect(
- Cmp, TrueVal, llvm::ConstantInt::get(CGF.SizeTy, 0));
- return LengthVal;
- }
- return CGF.getTypeSize(ExprTy);
- }
- /// Return the corresponding bits for a given map clause modifier. Add
- /// a flag marking the map as a pointer if requested. Add a flag marking the
- /// map as the first one of a series of maps that relate to the same map
- /// expression.
- OpenMPOffloadMappingFlags getMapTypeBits(
- OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
- bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const {
- OpenMPOffloadMappingFlags Bits =
- IsImplicit ? OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT
- : OpenMPOffloadMappingFlags::OMP_MAP_NONE;
- switch (MapType) {
- case OMPC_MAP_alloc:
- case OMPC_MAP_release:
- // alloc and release is the default behavior in the runtime library, i.e.
- // if we don't pass any bits alloc/release that is what the runtime is
- // going to do. Therefore, we don't need to signal anything for these two
- // type modifiers.
- break;
- case OMPC_MAP_to:
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TO;
- break;
- case OMPC_MAP_from:
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_FROM;
- break;
- case OMPC_MAP_tofrom:
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM;
- break;
- case OMPC_MAP_delete:
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_DELETE;
- break;
- case OMPC_MAP_unknown:
- llvm_unreachable("Unexpected map type!");
- }
- if (AddPtrFlag)
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
- if (AddIsTargetParamFlag)
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
- if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_always))
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS;
- if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_close))
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_CLOSE;
- if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_present) ||
- llvm::is_contained(MotionModifiers, OMPC_MOTION_MODIFIER_present))
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_PRESENT;
- if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_ompx_hold))
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
- if (IsNonContiguous)
- Bits |= OpenMPOffloadMappingFlags::OMP_MAP_NON_CONTIG;
- return Bits;
- }
- /// Return true if the provided expression is a final array section. A
- /// final array section, is one whose length can't be proved to be one.
- bool isFinalArraySectionExpression(const Expr *E) const {
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
- // It is not an array section and therefore not a unity-size one.
- if (!OASE)
- return false;
- // An array section with no colon always refer to a single element.
- if (OASE->getColonLocFirst().isInvalid())
- return false;
- const Expr *Length = OASE->getLength();
- // If we don't have a length we have to check if the array has size 1
- // for this dimension. Also, we should always expect a length if the
- // base type is pointer.
- if (!Length) {
- QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
- OASE->getBase()->IgnoreParenImpCasts())
- .getCanonicalType();
- if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
- // If we don't have a constant dimension length, we have to consider
- // the current section as having any size, so it is not necessarily
- // unitary. If it happen to be unity size, that's user fault.
- return true;
- }
- // Check if the length evaluates to 1.
- Expr::EvalResult Result;
- if (!Length->EvaluateAsInt(Result, CGF.getContext()))
- return true; // Can have more that size 1.
- llvm::APSInt ConstLength = Result.Val.getInt();
- return ConstLength.getSExtValue() != 1;
- }
- /// Generate the base pointers, section pointers, sizes, map type bits, and
- /// user-defined mappers (all included in \a CombinedInfo) for the provided
- /// map type, map or motion modifiers, and expression components.
- /// \a IsFirstComponent should be set to true if the provided set of
- /// components is the first associated with a capture.
- void generateInfoForComponentList(
- OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
- bool IsFirstComponentList, bool IsImplicit,
- const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
- const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
- ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
- OverlappedElements = std::nullopt) const {
- // The following summarizes what has to be generated for each map and the
- // types below. The generated information is expressed in this order:
- // base pointer, section pointer, size, flags
- // (to add to the ones that come from the map type and modifier).
- //
- // double d;
- // int i[100];
- // float *p;
- //
- // struct S1 {
- // int i;
- // float f[50];
- // }
- // struct S2 {
- // int i;
- // float f[50];
- // S1 s;
- // double *p;
- // struct S2 *ps;
- // int &ref;
- // }
- // S2 s;
- // S2 *ps;
- //
- // map(d)
- // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
- //
- // map(i)
- // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
- //
- // map(i[1:23])
- // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
- //
- // map(p)
- // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
- //
- // map(p[1:24])
- // &p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM | PTR_AND_OBJ
- // in unified shared memory mode or for local pointers
- // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
- //
- // map(s)
- // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
- //
- // map(s.i)
- // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
- //
- // map(s.s.f)
- // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
- //
- // map(s.p)
- // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
- //
- // map(to: s.p[:22])
- // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
- // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
- // &(s.p), &(s.p[0]), 22*sizeof(double),
- // MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
- // (*) alloc space for struct members, only this is a target parameter
- // (**) map the pointer (nothing to be mapped in this example) (the compiler
- // optimizes this entry out, same in the examples below)
- // (***) map the pointee (map: to)
- //
- // map(to: s.ref)
- // &s, &(s.ref), sizeof(int*), TARGET_PARAM (*)
- // &s, &(s.ref), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
- // (*) alloc space for struct members, only this is a target parameter
- // (**) map the pointer (nothing to be mapped in this example) (the compiler
- // optimizes this entry out, same in the examples below)
- // (***) map the pointee (map: to)
- //
- // map(s.ps)
- // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
- //
- // map(from: s.ps->s.i)
- // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
- // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
- // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
- //
- // map(to: s.ps->ps)
- // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
- // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
- // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
- //
- // map(s.ps->ps->ps)
- // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
- // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
- // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
- // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
- //
- // map(to: s.ps->ps->s.f[:22])
- // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
- // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
- // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
- // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
- //
- // map(ps)
- // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
- //
- // map(ps->i)
- // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
- //
- // map(ps->s.f)
- // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
- //
- // map(from: ps->p)
- // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
- //
- // map(to: ps->p[:22])
- // ps, &(ps->p), sizeof(double*), TARGET_PARAM
- // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
- // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
- //
- // map(ps->ps)
- // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
- //
- // map(from: ps->ps->s.i)
- // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
- // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
- // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
- //
- // map(from: ps->ps->ps)
- // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
- // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
- // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
- //
- // map(ps->ps->ps->ps)
- // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
- // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
- // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
- // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
- //
- // map(to: ps->ps->ps->s.f[:22])
- // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
- // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
- // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
- // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
- //
- // map(to: s.f[:22]) map(from: s.p[:33])
- // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
- // sizeof(double*) (**), TARGET_PARAM
- // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
- // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
- // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
- // (*) allocate contiguous space needed to fit all mapped members even if
- // we allocate space for members not mapped (in this example,
- // s.f[22..49] and s.s are not mapped, yet we must allocate space for
- // them as well because they fall between &s.f[0] and &s.p)
- //
- // map(from: s.f[:22]) map(to: ps->p[:33])
- // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
- // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
- // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
- // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
- // (*) the struct this entry pertains to is the 2nd element in the list of
- // arguments, hence MEMBER_OF(2)
- //
- // map(from: s.f[:22], s.s) map(to: ps->p[:33])
- // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
- // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
- // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
- // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
- // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
- // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
- // (*) the struct this entry pertains to is the 4th element in the list
- // of arguments, hence MEMBER_OF(4)
- // Track if the map information being generated is the first for a capture.
- bool IsCaptureFirstInfo = IsFirstComponentList;
- // When the variable is on a declare target link or in a to clause with
- // unified memory, a reference is needed to hold the host/device address
- // of the variable.
- bool RequiresReference = false;
- // Scan the components from the base to the complete expression.
- auto CI = Components.rbegin();
- auto CE = Components.rend();
- auto I = CI;
- // Track if the map information being generated is the first for a list of
- // components.
- bool IsExpressionFirstInfo = true;
- bool FirstPointerInComplexData = false;
- Address BP = Address::invalid();
- const Expr *AssocExpr = I->getAssociatedExpression();
- const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
- const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
- if (isa<MemberExpr>(AssocExpr)) {
- // The base is the 'this' pointer. The content of the pointer is going
- // to be the base of the field being mapped.
- BP = CGF.LoadCXXThisAddress();
- } else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
- (OASE &&
- isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
- } else if (OAShE &&
- isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
- BP = Address(
- CGF.EmitScalarExpr(OAShE->getBase()),
- CGF.ConvertTypeForMem(OAShE->getBase()->getType()->getPointeeType()),
- CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
- } else {
- // The base is the reference to the variable.
- // BP = &Var.
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
- if (const auto *VD =
- dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
- if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
- if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
- ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
- *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
- CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
- RequiresReference = true;
- BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
- }
- }
- }
- // If the variable is a pointer and is being dereferenced (i.e. is not
- // the last component), the base has to be the pointer itself, not its
- // reference. References are ignored for mapping purposes.
- QualType Ty =
- I->getAssociatedDeclaration()->getType().getNonReferenceType();
- if (Ty->isAnyPointerType() && std::next(I) != CE) {
- // No need to generate individual map information for the pointer, it
- // can be associated with the combined storage if shared memory mode is
- // active or the base declaration is not global variable.
- const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
- if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
- !VD || VD->hasLocalStorage())
- BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
- else
- FirstPointerInComplexData = true;
- ++I;
- }
- }
- // Track whether a component of the list should be marked as MEMBER_OF some
- // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
- // in a component list should be marked as MEMBER_OF, all subsequent entries
- // do not belong to the base struct. E.g.
- // struct S2 s;
- // s.ps->ps->ps->f[:]
- // (1) (2) (3) (4)
- // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
- // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
- // is the pointee of ps(2) which is not member of struct s, so it should not
- // be marked as such (it is still PTR_AND_OBJ).
- // The variable is initialized to false so that PTR_AND_OBJ entries which
- // are not struct members are not considered (e.g. array of pointers to
- // data).
- bool ShouldBeMemberOf = false;
- // Variable keeping track of whether or not we have encountered a component
- // in the component list which is a member expression. Useful when we have a
- // pointer or a final array section, in which case it is the previous
- // component in the list which tells us whether we have a member expression.
- // E.g. X.f[:]
- // While processing the final array section "[:]" it is "f" which tells us
- // whether we are dealing with a member of a declared struct.
- const MemberExpr *EncounteredME = nullptr;
- // Track for the total number of dimension. Start from one for the dummy
- // dimension.
- uint64_t DimSize = 1;
- bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
- bool IsPrevMemberReference = false;
- for (; I != CE; ++I) {
- // If the current component is member of a struct (parent struct) mark it.
- if (!EncounteredME) {
- EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
- // If we encounter a PTR_AND_OBJ entry from now on it should be marked
- // as MEMBER_OF the parent struct.
- if (EncounteredME) {
- ShouldBeMemberOf = true;
- // Do not emit as complex pointer if this is actually not array-like
- // expression.
- if (FirstPointerInComplexData) {
- QualType Ty = std::prev(I)
- ->getAssociatedDeclaration()
- ->getType()
- .getNonReferenceType();
- BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
- FirstPointerInComplexData = false;
- }
- }
- }
- auto Next = std::next(I);
- // We need to generate the addresses and sizes if this is the last
- // component, if the component is a pointer or if it is an array section
- // whose length can't be proved to be one. If this is a pointer, it
- // becomes the base address for the following components.
- // A final array section, is one whose length can't be proved to be one.
- // If the map item is non-contiguous then we don't treat any array section
- // as final array section.
- bool IsFinalArraySection =
- !IsNonContiguous &&
- isFinalArraySectionExpression(I->getAssociatedExpression());
- // If we have a declaration for the mapping use that, otherwise use
- // the base declaration of the map clause.
- const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
- ? I->getAssociatedDeclaration()
- : BaseDecl;
- MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression()
- : MapExpr;
- // Get information on whether the element is a pointer. Have to do a
- // special treatment for array sections given that they are built-in
- // types.
- const auto *OASE =
- dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
- const auto *OAShE =
- dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
- const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
- const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
- bool IsPointer =
- OAShE ||
- (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
- .getCanonicalType()
- ->isAnyPointerType()) ||
- I->getAssociatedExpression()->getType()->isAnyPointerType();
- bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
- MapDecl &&
- MapDecl->getType()->isLValueReferenceType();
- bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
- if (OASE)
- ++DimSize;
- if (Next == CE || IsMemberReference || IsNonDerefPointer ||
- IsFinalArraySection) {
- // If this is not the last component, we expect the pointer to be
- // associated with an array expression or member expression.
- assert((Next == CE ||
- isa<MemberExpr>(Next->getAssociatedExpression()) ||
- isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
- isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||
- isa<UnaryOperator>(Next->getAssociatedExpression()) ||
- isa<BinaryOperator>(Next->getAssociatedExpression())) &&
- "Unexpected expression");
- Address LB = Address::invalid();
- Address LowestElem = Address::invalid();
- auto &&EmitMemberExprBase = [](CodeGenFunction &CGF,
- const MemberExpr *E) {
- const Expr *BaseExpr = E->getBase();
- // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a
- // scalar.
- LValue BaseLV;
- if (E->isArrow()) {
- LValueBaseInfo BaseInfo;
- TBAAAccessInfo TBAAInfo;
- Address Addr =
- CGF.EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
- QualType PtrTy = BaseExpr->getType()->getPointeeType();
- BaseLV = CGF.MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
- } else {
- BaseLV = CGF.EmitOMPSharedLValue(BaseExpr);
- }
- return BaseLV;
- };
- if (OAShE) {
- LowestElem = LB =
- Address(CGF.EmitScalarExpr(OAShE->getBase()),
- CGF.ConvertTypeForMem(
- OAShE->getBase()->getType()->getPointeeType()),
- CGF.getContext().getTypeAlignInChars(
- OAShE->getBase()->getType()));
- } else if (IsMemberReference) {
- const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
- LValue BaseLVal = EmitMemberExprBase(CGF, ME);
- LowestElem = CGF.EmitLValueForFieldInitialization(
- BaseLVal, cast<FieldDecl>(MapDecl))
- .getAddress(CGF);
- LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
- .getAddress(CGF);
- } else {
- LowestElem = LB =
- CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
- .getAddress(CGF);
- }
- // If this component is a pointer inside the base struct then we don't
- // need to create any entry for it - it will be combined with the object
- // it is pointing to into a single PTR_AND_OBJ entry.
- bool IsMemberPointerOrAddr =
- EncounteredME &&
- (((IsPointer || ForDeviceAddr) &&
- I->getAssociatedExpression() == EncounteredME) ||
- (IsPrevMemberReference && !IsPointer) ||
- (IsMemberReference && Next != CE &&
- !Next->getAssociatedExpression()->getType()->isPointerType()));
- if (!OverlappedElements.empty() && Next == CE) {
- // Handle base element with the info for overlapped elements.
- assert(!PartialStruct.Base.isValid() && "The base element is set.");
- assert(!IsPointer &&
- "Unexpected base element with the pointer type.");
- // Mark the whole struct as the struct that requires allocation on the
- // device.
- PartialStruct.LowestElem = {0, LowestElem};
- CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
- I->getAssociatedExpression()->getType());
- Address HB = CGF.Builder.CreateConstGEP(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- LowestElem, CGF.VoidPtrTy, CGF.Int8Ty),
- TypeSize.getQuantity() - 1);
- PartialStruct.HighestElem = {
- std::numeric_limits<decltype(
- PartialStruct.HighestElem.first)>::max(),
- HB};
- PartialStruct.Base = BP;
- PartialStruct.LB = LB;
- assert(
- PartialStruct.PreliminaryMapData.BasePointers.empty() &&
- "Overlapped elements must be used only once for the variable.");
- std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
- // Emit data for non-overlapped data.
- OpenMPOffloadMappingFlags Flags =
- OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
- getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
- /*AddPtrFlag=*/false,
- /*AddIsTargetParamFlag=*/false, IsNonContiguous);
- llvm::Value *Size = nullptr;
- // Do bitcopy of all non-overlapped structure elements.
- for (OMPClauseMappableExprCommon::MappableExprComponentListRef
- Component : OverlappedElements) {
- Address ComponentLB = Address::invalid();
- for (const OMPClauseMappableExprCommon::MappableComponent &MC :
- Component) {
- if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
- const auto *FD = dyn_cast<FieldDecl>(VD);
- if (FD && FD->getType()->isLValueReferenceType()) {
- const auto *ME =
- cast<MemberExpr>(MC.getAssociatedExpression());
- LValue BaseLVal = EmitMemberExprBase(CGF, ME);
- ComponentLB =
- CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
- .getAddress(CGF);
- } else {
- ComponentLB =
- CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress(CGF);
- }
- Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
- CGF.EmitCastToVoidPtr(LB.getPointer()));
- break;
- }
- }
- assert(Size && "Failed to determine structure size");
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
- CombinedInfo.Pointers.push_back(LB.getPointer());
- CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(Flags);
- CombinedInfo.Mappers.push_back(nullptr);
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
- LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
- }
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
- CombinedInfo.Pointers.push_back(LB.getPointer());
- Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
- CGF.EmitCastToVoidPtr(LB.getPointer()));
- CombinedInfo.Sizes.push_back(
- CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(Flags);
- CombinedInfo.Mappers.push_back(nullptr);
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
- break;
- }
- llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
- if (!IsMemberPointerOrAddr ||
- (Next == CE && MapType != OMPC_MAP_unknown)) {
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
- CombinedInfo.Pointers.push_back(LB.getPointer());
- CombinedInfo.Sizes.push_back(
- CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
- // If Mapper is valid, the last component inherits the mapper.
- bool HasMapper = Mapper && Next == CE;
- CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
- // We need to add a pointer flag for each map that comes from the
- // same expression except for the first one. We also need to signal
- // this map is the first one that relates with the current capture
- // (there is a set of entries for each capture).
- OpenMPOffloadMappingFlags Flags = getMapTypeBits(
- MapType, MapModifiers, MotionModifiers, IsImplicit,
- !IsExpressionFirstInfo || RequiresReference ||
- FirstPointerInComplexData || IsMemberReference,
- IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
- if (!IsExpressionFirstInfo || IsMemberReference) {
- // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
- // then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags.
- if (IsPointer || (IsMemberReference && Next != CE))
- Flags &= ~(OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM |
- OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS |
- OpenMPOffloadMappingFlags::OMP_MAP_DELETE |
- OpenMPOffloadMappingFlags::OMP_MAP_CLOSE);
- if (ShouldBeMemberOf) {
- // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
- // should be later updated with the correct value of MEMBER_OF.
- Flags |= OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF;
- // From now on, all subsequent PTR_AND_OBJ entries should not be
- // marked as MEMBER_OF.
- ShouldBeMemberOf = false;
- }
- }
- CombinedInfo.Types.push_back(Flags);
- }
- // If we have encountered a member expression so far, keep track of the
- // mapped member. If the parent is "*this", then the value declaration
- // is nullptr.
- if (EncounteredME) {
- const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
- unsigned FieldIndex = FD->getFieldIndex();
- // Update info about the lowest and highest elements for this struct
- if (!PartialStruct.Base.isValid()) {
- PartialStruct.LowestElem = {FieldIndex, LowestElem};
- if (IsFinalArraySection) {
- Address HB =
- CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
- PartialStruct.HighestElem = {FieldIndex, HB};
- } else {
- PartialStruct.HighestElem = {FieldIndex, LowestElem};
- }
- PartialStruct.Base = BP;
- PartialStruct.LB = BP;
- } else if (FieldIndex < PartialStruct.LowestElem.first) {
- PartialStruct.LowestElem = {FieldIndex, LowestElem};
- } else if (FieldIndex > PartialStruct.HighestElem.first) {
- PartialStruct.HighestElem = {FieldIndex, LowestElem};
- }
- }
- // Need to emit combined struct for array sections.
- if (IsFinalArraySection || IsNonContiguous)
- PartialStruct.IsArraySection = true;
- // If we have a final array section, we are done with this expression.
- if (IsFinalArraySection)
- break;
- // The pointer becomes the base for the next element.
- if (Next != CE)
- BP = IsMemberReference ? LowestElem : LB;
- IsExpressionFirstInfo = false;
- IsCaptureFirstInfo = false;
- FirstPointerInComplexData = false;
- IsPrevMemberReference = IsMemberReference;
- } else if (FirstPointerInComplexData) {
- QualType Ty = Components.rbegin()
- ->getAssociatedDeclaration()
- ->getType()
- .getNonReferenceType();
- BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
- FirstPointerInComplexData = false;
- }
- }
- // If ran into the whole component - allocate the space for the whole
- // record.
- if (!EncounteredME)
- PartialStruct.HasCompleteRecord = true;
- if (!IsNonContiguous)
- return;
- const ASTContext &Context = CGF.getContext();
- // For supporting stride in array section, we need to initialize the first
- // dimension size as 1, first offset as 0, and first count as 1
- MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 0)};
- MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
- MapValuesArrayTy CurStrides;
- MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
- uint64_t ElementTypeSize;
- // Collect Size information for each dimension and get the element size as
- // the first Stride. For example, for `int arr[10][10]`, the DimSizes
- // should be [10, 10] and the first stride is 4 btyes.
- for (const OMPClauseMappableExprCommon::MappableComponent &Component :
- Components) {
- const Expr *AssocExpr = Component.getAssociatedExpression();
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
- if (!OASE)
- continue;
- QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
- auto *CAT = Context.getAsConstantArrayType(Ty);
- auto *VAT = Context.getAsVariableArrayType(Ty);
- // We need all the dimension size except for the last dimension.
- assert((VAT || CAT || &Component == &*Components.begin()) &&
- "Should be either ConstantArray or VariableArray if not the "
- "first Component");
- // Get element size if CurStrides is empty.
- if (CurStrides.empty()) {
- const Type *ElementType = nullptr;
- if (CAT)
- ElementType = CAT->getElementType().getTypePtr();
- else if (VAT)
- ElementType = VAT->getElementType().getTypePtr();
- else
- assert(&Component == &*Components.begin() &&
- "Only expect pointer (non CAT or VAT) when this is the "
- "first Component");
- // If ElementType is null, then it means the base is a pointer
- // (neither CAT nor VAT) and we'll attempt to get ElementType again
- // for next iteration.
- if (ElementType) {
- // For the case that having pointer as base, we need to remove one
- // level of indirection.
- if (&Component != &*Components.begin())
- ElementType = ElementType->getPointeeOrArrayElementType();
- ElementTypeSize =
- Context.getTypeSizeInChars(ElementType).getQuantity();
- CurStrides.push_back(
- llvm::ConstantInt::get(CGF.Int64Ty, ElementTypeSize));
- }
- }
- // Get dimension value except for the last dimension since we don't need
- // it.
- if (DimSizes.size() < Components.size() - 1) {
- if (CAT)
- DimSizes.push_back(llvm::ConstantInt::get(
- CGF.Int64Ty, CAT->getSize().getZExtValue()));
- else if (VAT)
- DimSizes.push_back(CGF.Builder.CreateIntCast(
- CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
- /*IsSigned=*/false));
- }
- }
- // Skip the dummy dimension since we have already have its information.
- auto *DI = DimSizes.begin() + 1;
- // Product of dimension.
- llvm::Value *DimProd =
- llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
- // Collect info for non-contiguous. Notice that offset, count, and stride
- // are only meaningful for array-section, so we insert a null for anything
- // other than array-section.
- // Also, the size of offset, count, and stride are not the same as
- // pointers, base_pointers, sizes, or dims. Instead, the size of offset,
- // count, and stride are the same as the number of non-contiguous
- // declaration in target update to/from clause.
- for (const OMPClauseMappableExprCommon::MappableComponent &Component :
- Components) {
- const Expr *AssocExpr = Component.getAssociatedExpression();
- if (const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr)) {
- llvm::Value *Offset = CGF.Builder.CreateIntCast(
- CGF.EmitScalarExpr(AE->getIdx()), CGF.Int64Ty,
- /*isSigned=*/false);
- CurOffsets.push_back(Offset);
- CurCounts.push_back(llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/1));
- CurStrides.push_back(CurStrides.back());
- continue;
- }
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
- if (!OASE)
- continue;
- // Offset
- const Expr *OffsetExpr = OASE->getLowerBound();
- llvm::Value *Offset = nullptr;
- if (!OffsetExpr) {
- // If offset is absent, then we just set it to zero.
- Offset = llvm::ConstantInt::get(CGF.Int64Ty, 0);
- } else {
- Offset = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(OffsetExpr),
- CGF.Int64Ty,
- /*isSigned=*/false);
- }
- CurOffsets.push_back(Offset);
- // Count
- const Expr *CountExpr = OASE->getLength();
- llvm::Value *Count = nullptr;
- if (!CountExpr) {
- // In Clang, once a high dimension is an array section, we construct all
- // the lower dimension as array section, however, for case like
- // arr[0:2][2], Clang construct the inner dimension as an array section
- // but it actually is not in an array section form according to spec.
- if (!OASE->getColonLocFirst().isValid() &&
- !OASE->getColonLocSecond().isValid()) {
- Count = llvm::ConstantInt::get(CGF.Int64Ty, 1);
- } else {
- // OpenMP 5.0, 2.1.5 Array Sections, Description.
- // When the length is absent it defaults to ⌈(size −
- // lower-bound)/stride⌉, where size is the size of the array
- // dimension.
- const Expr *StrideExpr = OASE->getStride();
- llvm::Value *Stride =
- StrideExpr
- ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
- CGF.Int64Ty, /*isSigned=*/false)
- : nullptr;
- if (Stride)
- Count = CGF.Builder.CreateUDiv(
- CGF.Builder.CreateNUWSub(*DI, Offset), Stride);
- else
- Count = CGF.Builder.CreateNUWSub(*DI, Offset);
- }
- } else {
- Count = CGF.EmitScalarExpr(CountExpr);
- }
- Count = CGF.Builder.CreateIntCast(Count, CGF.Int64Ty, /*isSigned=*/false);
- CurCounts.push_back(Count);
- // Stride_n' = Stride_n * (D_0 * D_1 ... * D_n-1) * Unit size
- // Take `int arr[5][5][5]` and `arr[0:2:2][1:2:1][0:2:2]` as an example:
- // Offset Count Stride
- // D0 0 1 4 (int) <- dummy dimension
- // D1 0 2 8 (2 * (1) * 4)
- // D2 1 2 20 (1 * (1 * 5) * 4)
- // D3 0 2 200 (2 * (1 * 5 * 4) * 4)
- const Expr *StrideExpr = OASE->getStride();
- llvm::Value *Stride =
- StrideExpr
- ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
- CGF.Int64Ty, /*isSigned=*/false)
- : nullptr;
- DimProd = CGF.Builder.CreateNUWMul(DimProd, *(DI - 1));
- if (Stride)
- CurStrides.push_back(CGF.Builder.CreateNUWMul(DimProd, Stride));
- else
- CurStrides.push_back(DimProd);
- if (DI != DimSizes.end())
- ++DI;
- }
- CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets);
- CombinedInfo.NonContigInfo.Counts.push_back(CurCounts);
- CombinedInfo.NonContigInfo.Strides.push_back(CurStrides);
- }
- /// Return the adjusted map modifiers if the declaration a capture refers to
- /// appears in a first-private clause. This is expected to be used only with
- /// directives that start with 'target'.
- OpenMPOffloadMappingFlags
- getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
- assert(Cap.capturesVariable() && "Expected capture by reference only!");
- // A first private variable captured by reference will use only the
- // 'private ptr' and 'map to' flag. Return the right flags if the captured
- // declaration is known as first-private in this handler.
- if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
- if (Cap.getCapturedVar()->getType()->isAnyPointerType())
- return OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
- return OpenMPOffloadMappingFlags::OMP_MAP_PRIVATE |
- OpenMPOffloadMappingFlags::OMP_MAP_TO;
- }
- auto I = LambdasMap.find(Cap.getCapturedVar()->getCanonicalDecl());
- if (I != LambdasMap.end())
- // for map(to: lambda): using user specified map type.
- return getMapTypeBits(
- I->getSecond()->getMapType(), I->getSecond()->getMapTypeModifiers(),
- /*MotionModifiers=*/std::nullopt, I->getSecond()->isImplicit(),
- /*AddPtrFlag=*/false,
- /*AddIsTargetParamFlag=*/false,
- /*isNonContiguous=*/false);
- return OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM;
- }
- static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
- // Rotate by getFlagMemberOffset() bits.
- return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
- << getFlagMemberOffset());
- }
- static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
- OpenMPOffloadMappingFlags MemberOfFlag) {
- // If the entry is PTR_AND_OBJ but has not been marked with the special
- // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
- // marked as MEMBER_OF.
- if (static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- Flags & OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ) &&
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- (Flags & OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) !=
- OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF))
- return;
- // Reset the placeholder value to prepare the flag for the assignment of the
- // proper MEMBER_OF value.
- Flags &= ~OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF;
- Flags |= MemberOfFlag;
- }
/// Flatten the layout of \p RD into \p Layout: the FieldDecls of \p RD and
/// of all its non-empty (virtual and non-virtual) bases, in the order the
/// corresponding slots appear in the LLVM struct type. \p AsBase selects
/// the base-subobject LLVM type (the record laid out as a base class).
void getPlainLayout(const CXXRecordDecl *RD,
                    llvm::SmallVectorImpl<const FieldDecl *> &Layout,
                    bool AsBase) const {
  const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
  llvm::StructType *St =
      AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();
  unsigned NumElements = St->getNumElements();
  // One slot per LLVM struct element; each slot records the base class or
  // field occupying it. Slots left null (e.g. padding) are skipped below.
  llvm::SmallVector<
      llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
      RecordLayout(NumElements);
  // Fill bases.
  for (const auto &I : RD->bases()) {
    if (I.isVirtual())
      continue;
    const auto *Base = I.getType()->getAsCXXRecordDecl();
    // Ignore empty bases.
    if (Base->isEmpty() || CGF.getContext()
                               .getASTRecordLayout(Base)
                               .getNonVirtualSize()
                               .isZero())
      continue;
    unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
    RecordLayout[FieldIndex] = Base;
  }
  // Fill in virtual bases.
  for (const auto &I : RD->vbases()) {
    const auto *Base = I.getType()->getAsCXXRecordDecl();
    // Ignore empty bases.
    if (Base->isEmpty())
      continue;
    unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
    // A slot already taken by a non-virtual base keeps its original owner.
    if (RecordLayout[FieldIndex])
      continue;
    RecordLayout[FieldIndex] = Base;
  }
  // Fill in all the fields.
  assert(!RD->isUnion() && "Unexpected union.");
  for (const auto *Field : RD->fields()) {
    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
      unsigned FieldIndex = RL.getLLVMFieldNo(Field);
      RecordLayout[FieldIndex] = Field;
    }
  }
  // Emit the collected slots in order: recurse into bases (as base
  // subobjects) and append fields directly.
  for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
           &Data : RecordLayout) {
    if (Data.isNull())
      continue;
    if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
      getPlainLayout(Base, Layout, /*AsBase=*/true);
    else
      Layout.push_back(Data.get<const FieldDecl *>());
  }
}
/// Generate all the base pointers, section pointers, sizes, map types, and
/// mappers for the extracted mappable expressions (all included in \a
/// CombinedInfo). Also, for each item that relates with a device pointer, a
/// pair of the relevant declaration and index where it occurs is appended to
/// the device pointers info array. Declarations listed in \p SkipVarSet are
/// ignored entirely.
void generateAllInfoForClauses(
    ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
    const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
        llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
  // We have to process the component lists that relate with the same
  // declaration in a single chunk so that we can generate the map flags
  // correctly. Therefore, we organize all lists in a map.
  // The per-declaration buckets are visited in enum order when emitting, so
  // lists with the 'present' modifier are handled first, then 'alloc' maps,
  // then everything else.
  enum MapKind { Present, Allocs, Other, Total };
  llvm::MapVector<CanonicalDeclPtr<const Decl>,
                  SmallVector<SmallVector<MapInfo, 8>, 4>>
      Info;
  // Helper function to fill the information map for the different supported
  // clauses.
  auto &&InfoGen =
      [&Info, &SkipVarSet](
          const ValueDecl *D, MapKind Kind,
          OMPClauseMappableExprCommon::MappableExprComponentListRef L,
          OpenMPMapClauseKind MapType,
          ArrayRef<OpenMPMapModifierKind> MapModifiers,
          ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
          bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
          const Expr *VarRef = nullptr, bool ForDeviceAddr = false) {
        if (SkipVarSet.contains(D))
          return;
        auto It = Info.find(D);
        if (It == Info.end())
          It = Info
                   .insert(std::make_pair(
                       D, SmallVector<SmallVector<MapInfo, 8>, 4>(Total)))
                   .first;
        It->second[Kind].emplace_back(
            L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer,
            IsImplicit, Mapper, VarRef, ForDeviceAddr);
      };
  // Fill the information map with the 'map' clauses.
  for (const auto *Cl : Clauses) {
    const auto *C = dyn_cast<OMPMapClause>(Cl);
    if (!C)
      continue;
    MapKind Kind = Other;
    if (llvm::is_contained(C->getMapTypeModifiers(),
                           OMPC_MAP_MODIFIER_present))
      Kind = Present;
    else if (C->getMapType() == OMPC_MAP_alloc)
      Kind = Allocs;
    const auto *EI = C->getVarRefs().begin();
    for (const auto L : C->component_lists()) {
      const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
      InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(),
              C->getMapTypeModifiers(), std::nullopt,
              /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
              E);
      ++EI;
    }
  }
  // Fill the information map with the 'to' clauses (recorded as map type
  // 'to').
  for (const auto *Cl : Clauses) {
    const auto *C = dyn_cast<OMPToClause>(Cl);
    if (!C)
      continue;
    MapKind Kind = Other;
    if (llvm::is_contained(C->getMotionModifiers(),
                           OMPC_MOTION_MODIFIER_present))
      Kind = Present;
    const auto *EI = C->getVarRefs().begin();
    for (const auto L : C->component_lists()) {
      InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, std::nullopt,
              C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
              C->isImplicit(), std::get<2>(L), *EI);
      ++EI;
    }
  }
  // Fill the information map with the 'from' clauses (recorded as map type
  // 'from').
  for (const auto *Cl : Clauses) {
    const auto *C = dyn_cast<OMPFromClause>(Cl);
    if (!C)
      continue;
    MapKind Kind = Other;
    if (llvm::is_contained(C->getMotionModifiers(),
                           OMPC_MOTION_MODIFIER_present))
      Kind = Present;
    const auto *EI = C->getVarRefs().begin();
    for (const auto L : C->component_lists()) {
      InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from,
              std::nullopt, C->getMotionModifiers(),
              /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
              *EI);
      ++EI;
    }
  }
  // Look at the use_device_ptr and use_device_addr clauses information and
  // mark the existing map entries as such. If there is no map information for
  // an entry in the use_device_ptr and use_device_addr list, we create one
  // with map type 'alloc' and zero size section. It is the user fault if that
  // was not mapped before. If there is no map information and the pointer is
  // a struct member, then we defer the emission of that entry until the whole
  // struct has been processed.
  llvm::MapVector<CanonicalDeclPtr<const Decl>,
                  SmallVector<DeferredDevicePtrEntryTy, 4>>
      DeferredInfo;
  MapCombinedInfoTy UseDeviceDataCombinedInfo;
  // Appends a RETURN_PARAM entry (zero size) for a use_device_* pointer.
  auto &&UseDeviceDataCombinedInfoGen =
      [&UseDeviceDataCombinedInfo](const ValueDecl *VD, llvm::Value *Ptr,
                                   CodeGenFunction &CGF) {
        UseDeviceDataCombinedInfo.Exprs.push_back(VD);
        UseDeviceDataCombinedInfo.BasePointers.emplace_back(Ptr, VD);
        UseDeviceDataCombinedInfo.Pointers.push_back(Ptr);
        UseDeviceDataCombinedInfo.Sizes.push_back(
            llvm::Constant::getNullValue(CGF.Int64Ty));
        UseDeviceDataCombinedInfo.Types.push_back(
            OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM);
        UseDeviceDataCombinedInfo.Mappers.push_back(nullptr);
      };
  auto &&MapInfoGen =
      [&DeferredInfo, &UseDeviceDataCombinedInfoGen,
       &InfoGen](CodeGenFunction &CGF, const Expr *IE, const ValueDecl *VD,
                 OMPClauseMappableExprCommon::MappableExprComponentListRef
                     Components,
                 bool IsImplicit, bool IsDevAddr) {
        // We didn't find any match in our map information - generate a zero
        // size array section - if the pointer is a struct member we defer
        // this action until the whole struct has been processed.
        if (isa<MemberExpr>(IE)) {
          // Insert the pointer into Info to be processed by
          // generateInfoForComponentList. Because it is a member pointer
          // without a pointee, no entry will be generated for it, therefore
          // we need to generate one after the whole struct has been
          // processed. Nonetheless, generateInfoForComponentList must be
          // called to take the pointer into account for the calculation of
          // the range of the partial struct.
          InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, std::nullopt,
                  std::nullopt, /*ReturnDevicePointer=*/false, IsImplicit,
                  nullptr, nullptr, IsDevAddr);
          DeferredInfo[nullptr].emplace_back(IE, VD, IsDevAddr);
        } else {
          llvm::Value *Ptr;
          if (IsDevAddr) {
            if (IE->isGLValue())
              Ptr = CGF.EmitLValue(IE).getPointer(CGF);
            else
              Ptr = CGF.EmitScalarExpr(IE);
          } else {
            Ptr = CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
          }
          UseDeviceDataCombinedInfoGen(VD, Ptr, CGF);
        }
      };
  auto &&IsMapInfoExist = [&Info](CodeGenFunction &CGF, const ValueDecl *VD,
                                  const Expr *IE, bool IsDevAddr) -> bool {
    // We potentially have map information for this declaration already.
    // Look for the first set of components that refer to it. If found,
    // return true.
    // If the first component is a member expression, we have to look into
    // 'this', which maps to null in the map of map information. Otherwise
    // look directly for the information.
    auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
    if (It != Info.end()) {
      bool Found = false;
      for (auto &Data : It->second) {
        auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
          return MI.Components.back().getAssociatedDeclaration() == VD;
        });
        // If we found a map entry, signal that the pointer has to be
        // returned and move on to the next declaration. Exclude cases where
        // the base pointer is mapped as array subscript, array section or
        // array shaping. The base address is passed as a pointer to base in
        // this case and cannot be used as a base for use_device_ptr list
        // item.
        if (CI != Data.end()) {
          if (IsDevAddr) {
            CI->ReturnDevicePointer = true;
            Found = true;
            break;
          } else {
            auto PrevCI = std::next(CI->Components.rbegin());
            const auto *VarD = dyn_cast<VarDecl>(VD);
            if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
                isa<MemberExpr>(IE) ||
                !VD->getType().getNonReferenceType()->isPointerType() ||
                PrevCI == CI->Components.rend() ||
                isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
                VarD->hasLocalStorage()) {
              CI->ReturnDevicePointer = true;
              Found = true;
              break;
            }
          }
        }
      }
      return Found;
    }
    return false;
  };
  // Look at the use_device_ptr clause information and mark the existing map
  // entries as such. If there is no map information for an entry in the
  // use_device_ptr list, we create one with map type 'alloc' and zero size
  // section. It is the user fault if that was not mapped before. If there is
  // no map information and the pointer is a struct member, then we defer the
  // emission of that entry until the whole struct has been processed.
  for (const auto *Cl : Clauses) {
    const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
    if (!C)
      continue;
    for (const auto L : C->component_lists()) {
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
          std::get<1>(L);
      assert(!Components.empty() &&
             "Not expecting empty list of components!");
      const ValueDecl *VD = Components.back().getAssociatedDeclaration();
      VD = cast<ValueDecl>(VD->getCanonicalDecl());
      const Expr *IE = Components.back().getAssociatedExpression();
      if (IsMapInfoExist(CGF, VD, IE, /*IsDevAddr=*/false))
        continue;
      MapInfoGen(CGF, IE, VD, Components, C->isImplicit(),
                 /*IsDevAddr=*/false);
    }
  }
  // Same handling for use_device_addr; each declaration is processed only
  // once (tracked via Processed).
  llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
  for (const auto *Cl : Clauses) {
    const auto *C = dyn_cast<OMPUseDeviceAddrClause>(Cl);
    if (!C)
      continue;
    for (const auto L : C->component_lists()) {
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
          std::get<1>(L);
      assert(!std::get<1>(L).empty() &&
             "Not expecting empty list of components!");
      const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
      if (!Processed.insert(VD).second)
        continue;
      VD = cast<ValueDecl>(VD->getCanonicalDecl());
      const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
      if (IsMapInfoExist(CGF, VD, IE, /*IsDevAddr=*/true))
        continue;
      MapInfoGen(CGF, IE, VD, Components, C->isImplicit(),
                 /*IsDevAddr=*/true);
    }
  }
  // Emit the collected map information, one declaration at a time.
  for (const auto &Data : Info) {
    StructRangeInfoTy PartialStruct;
    // Temporary generated information.
    MapCombinedInfoTy CurInfo;
    const Decl *D = Data.first;
    const ValueDecl *VD = cast_or_null<ValueDecl>(D);
    for (const auto &M : Data.second) {
      for (const MapInfo &L : M) {
        assert(!L.Components.empty() &&
               "Not expecting declaration with no component lists.");
        // Remember the current base pointer index.
        unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
        CurInfo.NonContigInfo.IsNonContiguous =
            L.Components.back().isNonContiguous();
        generateInfoForComponentList(
            L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
            CurInfo, PartialStruct, /*IsFirstComponentList=*/false,
            L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
        // If this entry relates with a device pointer, set the relevant
        // declaration and add the 'return pointer' flag.
        if (L.ReturnDevicePointer) {
          assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
                 "Unexpected number of mapped base pointers.");
          const ValueDecl *RelevantVD =
              L.Components.back().getAssociatedDeclaration();
          assert(RelevantVD &&
                 "No relevant declaration related with device pointer??");
          CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
              RelevantVD);
          CurInfo.Types[CurrentBasePointersIdx] |=
              OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
        }
      }
    }
    // Append any pending zero-length pointers which are struct members and
    // used with use_device_ptr or use_device_addr.
    auto CI = DeferredInfo.find(Data.first);
    if (CI != DeferredInfo.end()) {
      for (const DeferredDevicePtrEntryTy &L : CI->second) {
        llvm::Value *BasePtr;
        llvm::Value *Ptr;
        if (L.ForDeviceAddr) {
          if (L.IE->isGLValue())
            Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
          else
            Ptr = this->CGF.EmitScalarExpr(L.IE);
          BasePtr = Ptr;
          // Entry is RETURN_PARAM. Also, set the placeholder value
          // MEMBER_OF=FFFF so that the entry is later updated with the
          // correct value of MEMBER_OF.
          CurInfo.Types.push_back(
              OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM |
              OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
        } else {
          BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
          Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
                                           L.IE->getExprLoc());
          // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the
          // placeholder value MEMBER_OF=FFFF so that the entry is later
          // updated with the correct value of MEMBER_OF.
          CurInfo.Types.push_back(
              OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
              OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM |
              OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
        }
        CurInfo.Exprs.push_back(L.VD);
        CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
        CurInfo.Pointers.push_back(Ptr);
        CurInfo.Sizes.push_back(
            llvm::Constant::getNullValue(this->CGF.Int64Ty));
        CurInfo.Mappers.push_back(nullptr);
      }
    }
    // If there is an entry in PartialStruct it means we have a struct with
    // individual members mapped. Emit an extra combined entry.
    if (PartialStruct.Base.isValid()) {
      CurInfo.NonContigInfo.Dims.push_back(0);
      emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
    }
    // We need to append the results of this capture to what we already
    // have.
    CombinedInfo.append(CurInfo);
  }
  // Append data for use_device_ptr clauses.
  CombinedInfo.append(UseDeviceDataCombinedInfo);
}
- public:
- MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
- : CurDir(&Dir), CGF(CGF) {
- // Extract firstprivate clause information.
- for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
- for (const auto *D : C->varlists())
- FirstPrivateDecls.try_emplace(
- cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
- // Extract implicit firstprivates from uses_allocators clauses.
- for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
- for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
- OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
- if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
- FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
- /*Implicit=*/true);
- else if (const auto *VD = dyn_cast<VarDecl>(
- cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
- ->getDecl()))
- FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
- }
- }
- // Extract device pointer clause information.
- for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
- for (auto L : C->component_lists())
- DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
- // Extract device addr clause information.
- for (const auto *C : Dir.getClausesOfKind<OMPHasDeviceAddrClause>())
- for (auto L : C->component_lists())
- HasDevAddrsMap[std::get<0>(L)].push_back(std::get<1>(L));
- // Extract map information.
- for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
- if (C->getMapType() != OMPC_MAP_to)
- continue;
- for (auto L : C->component_lists()) {
- const ValueDecl *VD = std::get<0>(L);
- const auto *RD = VD ? VD->getType()
- .getCanonicalType()
- .getNonReferenceType()
- ->getAsCXXRecordDecl()
- : nullptr;
- if (RD && RD->isLambda())
- LambdasMap.try_emplace(std::get<0>(L), C);
- }
- }
- }
/// Constructor for the declare mapper directive. No clause prescan is done
/// here; map information is produced later from the mapper's clauses via
/// generateAllInfoForMapper().
MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
    : CurDir(&Dir), CGF(CGF) {}
/// Generate code for the combined entry if we have a partially mapped struct
/// and take care of the mapping flags of the arguments corresponding to
/// individual struct members.
/// \param CombinedInfo receives the new combined (parent) entry.
/// \param CurTypes map-type flags of the member entries; updated in place so
///        each member becomes MEMBER_OF the combined entry.
/// \param PartialStruct range information for the mapped members.
/// \param VD the mapped declaration, if any.
/// \param NotTargetParams if false, the combined entry is also a kernel
///        argument and gets the TARGET_PARAM flag.
void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
                       MapFlagsArrayTy &CurTypes,
                       const StructRangeInfoTy &PartialStruct,
                       const ValueDecl *VD = nullptr,
                       bool NotTargetParams = true) const {
  // A single entry that is not MEMBER_OF anything and not an array section
  // does not need a combined parent entry.
  if (CurTypes.size() == 1 &&
      ((CurTypes.back() & OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) !=
       OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) &&
      !PartialStruct.IsArraySection)
    return;
  Address LBAddr = PartialStruct.LowestElem.second;
  Address HBAddr = PartialStruct.HighestElem.second;
  // When the whole record is mapped, the range is the record itself.
  if (PartialStruct.HasCompleteRecord) {
    LBAddr = PartialStruct.LB;
    HBAddr = PartialStruct.LB;
  }
  CombinedInfo.Exprs.push_back(VD);
  // Base is the base of the struct
  CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
  // Pointer is the address of the lowest element
  llvm::Value *LB = LBAddr.getPointer();
  const CXXMethodDecl *MD =
      CGF.CurFuncDecl ? dyn_cast<CXXMethodDecl>(CGF.CurFuncDecl) : nullptr;
  const CXXRecordDecl *RD = MD ? MD->getParent() : nullptr;
  bool HasBaseClass = RD ? RD->getNumBases() > 0 : false;
  // There should not be a mapper for a combined entry.
  if (HasBaseClass) {
    // OpenMP 5.2 148:21:
    // If the target construct is within a class non-static member function,
    // and a variable is an accessible data member of the object for which the
    // non-static data member function is invoked, the variable is treated as
    // if the this[:1] expression had appeared in a map clause with a map-type
    // of tofrom.
    // Emit this[:1]
    CombinedInfo.Pointers.push_back(PartialStruct.Base.getPointer());
    QualType Ty = MD->getThisType()->getPointeeType();
    llvm::Value *Size =
        CGF.Builder.CreateIntCast(CGF.getTypeSize(Ty), CGF.Int64Ty,
                                  /*isSigned=*/true);
    CombinedInfo.Sizes.push_back(Size);
  } else {
    CombinedInfo.Pointers.push_back(LB);
    // Size is (addr of {highest+1} element) - (addr of lowest element)
    llvm::Value *HB = HBAddr.getPointer();
    llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(
        HBAddr.getElementType(), HB, /*Idx0=*/1);
    llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
    llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
    llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, CHAddr, CLAddr);
    llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
                                                  /*isSigned=*/false);
    CombinedInfo.Sizes.push_back(Size);
  }
  CombinedInfo.Mappers.push_back(nullptr);
  // Map type is always TARGET_PARAM, if generate info for captures.
  CombinedInfo.Types.push_back(
      NotTargetParams ? OpenMPOffloadMappingFlags::OMP_MAP_NONE
                      : OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
  // If any element has the present modifier, then make sure the runtime
  // doesn't attempt to allocate the struct.
  if (CurTypes.end() !=
      llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
        return static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
            Type & OpenMPOffloadMappingFlags::OMP_MAP_PRESENT);
      }))
    CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_PRESENT;
  // Remove TARGET_PARAM flag from the first element
  (*CurTypes.begin()) &= ~OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
  // If any element has the ompx_hold modifier, then make sure the runtime
  // uses the hold reference count for the struct as a whole so that it won't
  // be unmapped by an extra dynamic reference count decrement. Add it to all
  // elements as well so the runtime knows which reference count to check
  // when determining whether it's time for device-to-host transfers of
  // individual elements.
  if (CurTypes.end() !=
      llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
        return static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
            Type & OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD);
      })) {
    CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
    for (auto &M : CurTypes)
      M |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
  }
  // All other current entries will be MEMBER_OF the combined entry
  // (except for PTR_AND_OBJ entries which do not have a placeholder value
  // 0xFFFF in the MEMBER_OF field).
  OpenMPOffloadMappingFlags MemberOfFlag =
      getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
  for (auto &M : CurTypes)
    setCorrectMemberOfFlag(M, MemberOfFlag);
}
- /// Generate all the base pointers, section pointers, sizes, map types, and
- /// mappers for the extracted mappable expressions (all included in \a
- /// CombinedInfo). Also, for each item that relates with a device pointer, a
- /// pair of the relevant declaration and index where it occurs is appended to
- /// the device pointers info array.
- void generateAllInfo(
- MapCombinedInfoTy &CombinedInfo,
- const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
- llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
- assert(CurDir.is<const OMPExecutableDirective *>() &&
- "Expect a executable directive");
- const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
- generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet);
- }
- /// Generate all the base pointers, section pointers, sizes, map types, and
- /// mappers for the extracted map clauses of user-defined mapper (all included
- /// in \a CombinedInfo).
- void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
- assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
- "Expect a declare mapper directive");
- const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
- generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo);
- }
/// Emit capture info for lambdas for variables captured by reference.
/// \param VD the declaration whose type may be a lambda class.
/// \param Arg the value holding the lambda object address.
/// \param CombinedInfo receives one PTR_AND_OBJ entry per capture.
/// \param LambdaPointers maps each emitted capture address to the lambda
///        object address, for later MEMBER_OF fix-up (see
///        adjustMemberOfForLambdaCaptures).
void generateInfoForLambdaCaptures(
    const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
    llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
  QualType VDType = VD->getType().getCanonicalType().getNonReferenceType();
  const auto *RD = VDType->getAsCXXRecordDecl();
  // Only lambda classes are handled here.
  if (!RD || !RD->isLambda())
    return;
  Address VDAddr(Arg, CGF.ConvertTypeForMem(VDType),
                 CGF.getContext().getDeclAlign(VD));
  LValue VDLVal = CGF.MakeAddrLValue(VDAddr, VDType);
  llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
  FieldDecl *ThisCapture = nullptr;
  RD->getCaptureFields(Captures, ThisCapture);
  // Emit an entry for the captured 'this' pointer, if any.
  if (ThisCapture) {
    LValue ThisLVal =
        CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
    LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
    LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
                               VDLVal.getPointer(CGF));
    CombinedInfo.Exprs.push_back(VD);
    CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
    CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
    CombinedInfo.Sizes.push_back(
        CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
                                  CGF.Int64Ty, /*isSigned=*/true));
    CombinedInfo.Types.push_back(
        OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
        OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
        OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
        OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
    CombinedInfo.Mappers.push_back(nullptr);
  }
  // Emit one entry per by-reference (or pointer-typed) variable capture.
  for (const LambdaCapture &LC : RD->captures()) {
    if (!LC.capturesVariable())
      continue;
    const VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
    if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
      continue;
    auto It = Captures.find(VD);
    assert(It != Captures.end() && "Found lambda capture without field.");
    LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
    if (LC.getCaptureKind() == LCK_ByRef) {
      // By-reference capture: the entry covers the referenced object.
      LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
      LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
                                 VDLVal.getPointer(CGF));
      CombinedInfo.Exprs.push_back(VD);
      CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
      CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
      CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
          CGF.getTypeSize(
              VD->getType().getCanonicalType().getNonReferenceType()),
          CGF.Int64Ty, /*isSigned=*/true));
    } else {
      // Pointer capture: the loaded pointer value is mapped with zero size.
      RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
      LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
                                 VDLVal.getPointer(CGF));
      CombinedInfo.Exprs.push_back(VD);
      CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
      CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
      CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
    }
    CombinedInfo.Types.push_back(
        OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
        OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
        OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
        OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
    CombinedInfo.Mappers.push_back(nullptr);
  }
}
- /// Set correct indices for lambdas captures.
- /// \param LambdaPointers Map from a capture's address to the address of the
- /// enclosing lambda object, filled when the capture entries were generated.
- /// Rewrites the MEMBER_OF placeholder of each implicit lambda-capture entry
- /// so it points at the map entry of the lambda object itself.
- void adjustMemberOfForLambdaCaptures(
- const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
- MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
- MapFlagsArrayTy &Types) const {
- for (unsigned I = 0, E = Types.size(); I < E; ++I) {
- // Set correct member_of idx for all implicit lambda captures.
- // Only entries carrying exactly this flag combination were emitted for
- // implicit lambda captures; everything else is left untouched.
- if (Types[I] != (OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
- OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
- OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
- OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT))
- continue;
- llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
- assert(BasePtr && "Unable to find base lambda address.");
- int TgtIdx = -1;
- // Scan backwards from the capture entry to find the preceding entry whose
- // pointer is the lambda object itself; captures are always emitted after
- // their parent lambda.
- for (unsigned J = I; J > 0; --J) {
- unsigned Idx = J - 1;
- if (Pointers[Idx] != BasePtr)
- continue;
- TgtIdx = Idx;
- break;
- }
- assert(TgtIdx != -1 && "Unable to find parent lambda.");
- // All other current entries will be MEMBER_OF the combined entry
- // (except for PTR_AND_OBJ entries which do not have a placeholder value
- // 0xFFFF in the MEMBER_OF field).
- OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
- setCorrectMemberOfFlag(Types[I], MemberOfFlag);
- }
- }
- /// Generate the base pointers, section pointers, sizes, map types, and
- /// mappers associated to a given capture (all included in \a CombinedInfo).
- /// \param Cap The capture being mapped (may be a `this` capture).
- /// \param Arg The kernel argument value corresponding to the capture.
- /// \param PartialStruct Filled with the range info when only parts of an
- /// aggregate are mapped and a combined entry must be synthesized later.
- void generateInfoForCapture(const CapturedStmt::Capture *Cap,
- llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
- StructRangeInfoTy &PartialStruct) const {
- assert(!Cap->capturesVariableArrayType() &&
- "Not expecting to generate map info for a variable array type!");
- // We need to know when we generating information for the first component
- const ValueDecl *VD = Cap->capturesThis()
- ? nullptr
- : Cap->getCapturedVar()->getCanonicalDecl();
- // for map(to: lambda): skip here, processing it in
- // generateDefaultMapInfo
- if (LambdasMap.count(VD))
- return;
- // If this declaration appears in a is_device_ptr clause we just have to
- // pass the pointer by value. If it is a reference to a declaration, we just
- // pass its value.
- if (VD && (DevPointersMap.count(VD) || HasDevAddrsMap.count(VD))) {
- CombinedInfo.Exprs.push_back(VD);
- CombinedInfo.BasePointers.emplace_back(Arg, VD);
- CombinedInfo.Pointers.push_back(Arg);
- CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
- /*isSigned=*/true));
- CombinedInfo.Types.push_back(
- (Cap->capturesVariable()
- ? OpenMPOffloadMappingFlags::OMP_MAP_TO
- : OpenMPOffloadMappingFlags::OMP_MAP_LITERAL) |
- OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
- CombinedInfo.Mappers.push_back(nullptr);
- return;
- }
- // One tuple per mapping request for VD: component list, map type and
- // modifiers, implicit flag, user-defined mapper, and the map-clause
- // expression (null for implicit mappings).
- using MapData =
- std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
- OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
- const ValueDecl *, const Expr *>;
- SmallVector<MapData, 4> DeclComponentLists;
- // For member fields list in is_device_ptr, store it in
- // DeclComponentLists for generating components info.
- static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
- auto It = DevPointersMap.find(VD);
- if (It != DevPointersMap.end())
- for (const auto &MCL : It->second)
- DeclComponentLists.emplace_back(MCL, OMPC_MAP_to, Unknown,
- /*IsImpicit = */ true, nullptr,
- nullptr);
- auto I = HasDevAddrsMap.find(VD);
- if (I != HasDevAddrsMap.end())
- for (const auto &MCL : I->second)
- DeclComponentLists.emplace_back(MCL, OMPC_MAP_tofrom, Unknown,
- /*IsImpicit = */ true, nullptr,
- nullptr);
- assert(CurDir.is<const OMPExecutableDirective *>() &&
- "Expect a executable directive");
- const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
- // Collect every component list from the directive's map clauses that
- // refers to this declaration.
- for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
- const auto *EI = C->getVarRefs().begin();
- for (const auto L : C->decl_component_lists(VD)) {
- const ValueDecl *VDecl, *Mapper;
- // The Expression is not correct if the mapping is implicit
- const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
- std::tie(VDecl, Components, Mapper) = L;
- assert(VDecl == VD && "We got information for the wrong declaration??");
- assert(!Components.empty() &&
- "Not expecting declaration with no component lists.");
- DeclComponentLists.emplace_back(Components, C->getMapType(),
- C->getMapTypeModifiers(),
- C->isImplicit(), Mapper, E);
- ++EI;
- }
- }
- // Order entries so that mappings with the 'present' modifier and 'alloc'
- // mappings come first; the relative order of equal entries is preserved.
- llvm::stable_sort(DeclComponentLists, [](const MapData &LHS,
- const MapData &RHS) {
- ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
- OpenMPMapClauseKind MapType = std::get<1>(RHS);
- bool HasPresent =
- llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
- bool HasAllocs = MapType == OMPC_MAP_alloc;
- MapModifiers = std::get<2>(RHS);
- MapType = std::get<1>(LHS);
- bool HasPresentR =
- llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
- bool HasAllocsR = MapType == OMPC_MAP_alloc;
- return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
- });
- // Find overlapping elements (including the offset from the base element).
- llvm::SmallDenseMap<
- const MapData *,
- llvm::SmallVector<
- OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
- 4>
- OverlappedData;
- size_t Count = 0;
- // Pairwise comparison: each list is compared against every later list by
- // walking both component chains from the base outwards.
- for (const MapData &L : DeclComponentLists) {
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
- OpenMPMapClauseKind MapType;
- ArrayRef<OpenMPMapModifierKind> MapModifiers;
- bool IsImplicit;
- const ValueDecl *Mapper;
- const Expr *VarRef;
- std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
- L;
- ++Count;
- for (const MapData &L1 : ArrayRef(DeclComponentLists).slice(Count)) {
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
- std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
- VarRef) = L1;
- auto CI = Components.rbegin();
- auto CE = Components.rend();
- auto SI = Components1.rbegin();
- auto SE = Components1.rend();
- for (; CI != CE && SI != SE; ++CI, ++SI) {
- if (CI->getAssociatedExpression()->getStmtClass() !=
- SI->getAssociatedExpression()->getStmtClass())
- break;
- // Are we dealing with different variables/fields?
- if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
- break;
- }
- // Found overlapping if, at least for one component, reached the head
- // of the components list.
- if (CI == CE || SI == SE) {
- // Ignore it if it is the same component.
- if (CI == CE && SI == SE)
- continue;
- const auto It = (SI == SE) ? CI : SI;
- // If one component is a pointer and another one is a kind of
- // dereference of this pointer (array subscript, section, dereference,
- // etc.), it is not an overlapping.
- // Same, if one component is a base and another component is a
- // dereferenced pointer memberexpr with the same base.
- if (!isa<MemberExpr>(It->getAssociatedExpression()) ||
- (std::prev(It)->getAssociatedDeclaration() &&
- std::prev(It)
- ->getAssociatedDeclaration()
- ->getType()
- ->isPointerType()) ||
- (It->getAssociatedDeclaration() &&
- It->getAssociatedDeclaration()->getType()->isPointerType() &&
- std::next(It) != CE && std::next(It) != SE))
- continue;
- // The shorter list is the base; the longer one is recorded as an
- // overlapped sub-element of it.
- const MapData &BaseData = CI == CE ? L : L1;
- OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
- SI == SE ? Components : Components1;
- auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
- OverlappedElements.getSecond().push_back(SubData);
- }
- }
- }
- // Sort the overlapped elements for each item.
- llvm::SmallVector<const FieldDecl *, 4> Layout;
- if (!OverlappedData.empty()) {
- // Strip pointer/array layers to reach the underlying record type, then
- // capture its field layout so overlapped members can be ordered by
- // their position in memory.
- const Type *BaseType = VD->getType().getCanonicalType().getTypePtr();
- const Type *OrigType = BaseType->getPointeeOrArrayElementType();
- while (BaseType != OrigType) {
- BaseType = OrigType->getCanonicalTypeInternal().getTypePtr();
- OrigType = BaseType->getPointeeOrArrayElementType();
- }
- if (const auto *CRD = BaseType->getAsCXXRecordDecl())
- getPlainLayout(CRD, Layout, /*AsBase=*/false);
- else {
- const auto *RD = BaseType->getAsRecordDecl();
- Layout.append(RD->field_begin(), RD->field_end());
- }
- }
- for (auto &Pair : OverlappedData) {
- llvm::stable_sort(
- Pair.getSecond(),
- [&Layout](
- OMPClauseMappableExprCommon::MappableExprComponentListRef First,
- OMPClauseMappableExprCommon::MappableExprComponentListRef
- Second) {
- auto CI = First.rbegin();
- auto CE = First.rend();
- auto SI = Second.rbegin();
- auto SE = Second.rend();
- for (; CI != CE && SI != SE; ++CI, ++SI) {
- if (CI->getAssociatedExpression()->getStmtClass() !=
- SI->getAssociatedExpression()->getStmtClass())
- break;
- // Are we dealing with different variables/fields?
- if (CI->getAssociatedDeclaration() !=
- SI->getAssociatedDeclaration())
- break;
- }
- // Lists contain the same elements.
- if (CI == CE && SI == SE)
- return false;
- // List with less elements is less than list with more elements.
- if (CI == CE || SI == SE)
- return CI == CE;
- // Diverging fields: order them by field index within a common
- // parent, otherwise by position in the flattened record layout.
- const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
- const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
- if (FD1->getParent() == FD2->getParent())
- return FD1->getFieldIndex() < FD2->getFieldIndex();
- const auto *It =
- llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
- return FD == FD1 || FD == FD2;
- });
- return *It == FD1;
- });
- }
- // Associated with a capture, because the mapping flags depend on it.
- // Go through all of the elements with the overlapped elements.
- bool IsFirstComponentList = true;
- for (const auto &Pair : OverlappedData) {
- const MapData &L = *Pair.getFirst();
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
- OpenMPMapClauseKind MapType;
- ArrayRef<OpenMPMapModifierKind> MapModifiers;
- bool IsImplicit;
- const ValueDecl *Mapper;
- const Expr *VarRef;
- std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
- L;
- ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
- OverlappedComponents = Pair.getSecond();
- generateInfoForComponentList(
- MapType, MapModifiers, std::nullopt, Components, CombinedInfo,
- PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
- /*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
- IsFirstComponentList = false;
- }
- // Go through other elements without overlapped elements.
- for (const MapData &L : DeclComponentLists) {
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
- OpenMPMapClauseKind MapType;
- ArrayRef<OpenMPMapModifierKind> MapModifiers;
- bool IsImplicit;
- const ValueDecl *Mapper;
- const Expr *VarRef;
- std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
- L;
- auto It = OverlappedData.find(&L);
- if (It == OverlappedData.end())
- generateInfoForComponentList(MapType, MapModifiers, std::nullopt,
- Components, CombinedInfo, PartialStruct,
- IsFirstComponentList, IsImplicit, Mapper,
- /*ForDeviceAddr=*/false, VD, VarRef);
- IsFirstComponentList = false;
- }
- }
- /// Generate the default map information for a given capture \a CI,
- /// record field declaration \a RI and captured value \a CV.
- /// Appends exactly one entry to each CombinedInfo array; the entry is
- /// always flagged as a target parameter and, unless overridden by a
- /// firstprivate clause, as implicit.
- void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
- const FieldDecl &RI, llvm::Value *CV,
- MapCombinedInfoTy &CombinedInfo) const {
- bool IsImplicit = true;
- // Do the default mapping.
- if (CI.capturesThis()) {
- // Map the object `this` points to, tofrom, with the pointee's size.
- CombinedInfo.Exprs.push_back(nullptr);
- CombinedInfo.BasePointers.push_back(CV);
- CombinedInfo.Pointers.push_back(CV);
- const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
- CombinedInfo.Sizes.push_back(
- CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
- CGF.Int64Ty, /*isSigned=*/true));
- // Default map type.
- CombinedInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM);
- } else if (CI.capturesVariableByCopy()) {
- const VarDecl *VD = CI.getCapturedVar();
- CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
- CombinedInfo.BasePointers.push_back(CV);
- CombinedInfo.Pointers.push_back(CV);
- if (!RI.getType()->isAnyPointerType()) {
- // We have to signal to the runtime captures passed by value that are
- // not pointers.
- CombinedInfo.Types.push_back(
- OpenMPOffloadMappingFlags::OMP_MAP_LITERAL);
- CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
- } else {
- // Pointers are implicitly mapped with a zero size and no flags
- // (other than first map that is added for all implicit maps).
- CombinedInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_NONE);
- CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
- }
- auto I = FirstPrivateDecls.find(VD);
- if (I != FirstPrivateDecls.end())
- IsImplicit = I->getSecond();
- } else {
- assert(CI.capturesVariable() && "Expected captured reference.");
- const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
- QualType ElementType = PtrTy->getPointeeType();
- CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
- // The default map type for a scalar/complex type is 'to' because by
- // default the value doesn't have to be retrieved. For an aggregate
- // type, the default is 'tofrom'.
- CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
- const VarDecl *VD = CI.getCapturedVar();
- auto I = FirstPrivateDecls.find(VD);
- CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
- CombinedInfo.BasePointers.push_back(CV);
- // For firstprivate pointers, pass the pointee address rather than the
- // address of the reference.
- if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
- Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
- CV, ElementType, CGF.getContext().getDeclAlign(VD),
- AlignmentSource::Decl));
- CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
- } else {
- CombinedInfo.Pointers.push_back(CV);
- }
- if (I != FirstPrivateDecls.end())
- IsImplicit = I->getSecond();
- }
- // Every default map produces a single argument which is a target parameter.
- CombinedInfo.Types.back() |=
- OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
- // Add flag stating this is an implicit map.
- if (IsImplicit)
- CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
- // No user-defined mapper for default mapping.
- CombinedInfo.Mappers.push_back(nullptr);
- }
- };
- } // anonymous namespace
- /// Emit descriptor_dim arrays for non-contiguous target updates and store
- /// a pointer to each array into the corresponding slot of the offloading
- /// pointers array (Info.RTArgs.PointersArray).
- static void emitNonContiguousDescriptor(
- CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
- CGOpenMPRuntime::TargetDataInfo &Info) {
- CodeGenModule &CGM = CGF.CGM;
- MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
- &NonContigInfo = CombinedInfo.NonContigInfo;
- // Build an array of struct descriptor_dim and then assign it to
- // offload_args.
- //
- // struct descriptor_dim {
- // uint64_t offset;
- // uint64_t count;
- // uint64_t stride
- // };
- ASTContext &C = CGF.getContext();
- QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
- RecordDecl *RD;
- RD = C.buildImplicitRecord("descriptor_dim");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, Int64Ty);
- addFieldToRecordDecl(C, RD, Int64Ty);
- addFieldToRecordDecl(C, RD, Int64Ty);
- RD->completeDefinition();
- QualType DimTy = C.getRecordType(RD);
- enum { OffsetFD = 0, CountFD, StrideFD };
- // We need two index variable here since the size of "Dims" is the same as the
- // size of Components, however, the size of offset, count, and stride is equal
- // to the size of base declaration that is non-contiguous.
- for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
- // Skip emitting ir if dimension size is 1 since it cannot be
- // non-contiguous.
- if (NonContigInfo.Dims[I] == 1)
- continue;
- llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
- QualType ArrayTy =
- C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
- Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
- // Fill the array in reverse: dimension info was collected innermost
- // first, but the runtime expects the outermost dimension at index 0.
- for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
- unsigned RevIdx = EE - II - 1;
- LValue DimsLVal = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
- // Offset
- LValue OffsetLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), OffsetFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
- // Count
- LValue CountLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), CountFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
- // Stride
- LValue StrideLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), StrideFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
- }
- // args[I] = &dims
- Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DimsAddr, CGM.Int8PtrTy, CGM.Int8Ty);
- llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.RTArgs.PointersArray, 0, I);
- Address PAddr(P, CGM.VoidPtrTy, CGF.getPointerAlign());
- CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
- ++L;
- }
- }
- // Try to extract the base declaration from a `this->x` expression if possible.
- // Returns the member declaration when E is an OpenMP array section whose
- // base is a member expression; otherwise returns nullptr.
- static ValueDecl *getDeclFromThisExpr(const Expr *E) {
- if (E) {
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenCasts());
- if (OASE) {
- const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- if (const auto *ME = dyn_cast<MemberExpr>(Base))
- return ME->getMemberDecl();
- }
- }
- return nullptr;
- }
- /// Emit a string constant containing the names of the values mapped to the
- /// offloading runtime library.
- /// Falls back to the default source-location string when the map entry has
- /// neither a declaration nor an expression attached.
- llvm::Constant *
- emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
- MappableExprsHandler::MappingExprInfo &MapExprs) {
- // SrcLocStrSize is an out-parameter filled by the OMPBuilder calls below.
- uint32_t SrcLocStrSize;
- if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
- return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
- SourceLocation Loc;
- if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
- // Prefer the location of the member declaration for `this->x` sections.
- if (const ValueDecl *VD = getDeclFromThisExpr(MapExprs.getMapExpr()))
- Loc = VD->getLocation();
- else
- Loc = MapExprs.getMapExpr()->getExprLoc();
- } else {
- Loc = MapExprs.getMapDecl()->getLocation();
- }
- // The entry name is the pretty-printed expression when available, else the
- // declaration's name.
- std::string ExprName;
- if (MapExprs.getMapExpr()) {
- PrintingPolicy P(CGF.getContext().getLangOpts());
- llvm::raw_string_ostream OS(ExprName);
- MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
- OS.flush();
- } else {
- ExprName = MapExprs.getMapDecl()->getNameAsString();
- }
- PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
- return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName,
- PLoc.getLine(), PLoc.getColumn(),
- SrcLocStrSize);
- }
- /// Emit the arrays used to pass the captures and map information to the
- /// offloading runtime library. If there is no map or capture information,
- /// return nullptr by reference.
- /// Populates Info.RTArgs with the base-pointer, pointer, size, map-type,
- /// map-name and mapper arrays built from \a CombinedInfo.
- static void emitOffloadingArrays(
- CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
- CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
- bool IsNonContiguous = false) {
- CodeGenModule &CGM = CGF.CGM;
- ASTContext &Ctx = CGF.getContext();
- // Reset the array information.
- Info.clearArrayInfo();
- Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
- if (Info.NumberOfPtrs) {
- // Detect if we have any capture size requiring runtime evaluation of the
- // size so that a constant array could be eventually used.
- llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
- QualType PointerArrayType = Ctx.getConstantArrayType(
- Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Info.RTArgs.BasePointersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
- Info.RTArgs.PointersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
- Address MappersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
- Info.RTArgs.MappersArray = MappersArray.getPointer();
- // If we don't have any VLA types or other types that require runtime
- // evaluation, we can use a constant array for the map sizes, otherwise we
- // need to fill up the arrays as we do for the pointers.
- QualType Int64Ty =
- Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- SmallVector<llvm::Constant *> ConstSizes(
- CombinedInfo.Sizes.size(), llvm::ConstantInt::get(CGF.Int64Ty, 0));
- // RuntimeSizes marks entries whose size must be stored at run time.
- llvm::SmallBitVector RuntimeSizes(CombinedInfo.Sizes.size());
- for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
- if (auto *CI = dyn_cast<llvm::Constant>(CombinedInfo.Sizes[I])) {
- if (!isa<llvm::ConstantExpr>(CI) && !isa<llvm::GlobalValue>(CI)) {
- // Non-contiguous entries store the dimension count in the size slot
- // instead of the byte size.
- if (IsNonContiguous &&
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- CombinedInfo.Types[I] &
- OpenMPOffloadMappingFlags::OMP_MAP_NON_CONTIG))
- ConstSizes[I] = llvm::ConstantInt::get(
- CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]);
- else
- ConstSizes[I] = CI;
- continue;
- }
- }
- RuntimeSizes.set(I);
- }
- if (RuntimeSizes.all()) {
- // Every size is dynamic: use a plain stack array filled below.
- QualType SizeArrayType = Ctx.getConstantArrayType(
- Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Info.RTArgs.SizesArray =
- CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
- } else {
- // At least some sizes are constant: emit a private constant global.
- auto *SizesArrayInit = llvm::ConstantArray::get(
- llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
- std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
- auto *SizesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), SizesArrayInit->getType(), /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage, SizesArrayInit, Name);
- SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- if (RuntimeSizes.any()) {
- // Mixed case: copy the constant global into a stack buffer so the
- // runtime slots can be overwritten below.
- QualType SizeArrayType = Ctx.getConstantArrayType(
- Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address Buffer = CGF.CreateMemTemp(SizeArrayType, ".offload_sizes");
- llvm::Value *GblConstPtr =
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- SizesArrayGbl, CGM.Int64Ty->getPointerTo());
- CGF.Builder.CreateMemCpy(
- Buffer,
- Address(GblConstPtr, CGM.Int64Ty,
- CGM.getNaturalTypeAlignment(Ctx.getIntTypeForBitwidth(
- /*DestWidth=*/64, /*Signed=*/false))),
- CGF.getTypeSize(SizeArrayType));
- Info.RTArgs.SizesArray = Buffer.getPointer();
- } else {
- Info.RTArgs.SizesArray = SizesArrayGbl;
- }
- }
- // The map types are always constant so we don't need to generate code to
- // fill arrays. Instead, we create an array constant.
- SmallVector<uint64_t, 4> Mapping;
- for (auto mapFlag : CombinedInfo.Types)
- Mapping.push_back(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- mapFlag));
- std::string MaptypesName =
- CGM.getOpenMPRuntime().getName({"offload_maptypes"});
- auto *MapTypesArrayGbl =
- OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
- Info.RTArgs.MapTypesArray = MapTypesArrayGbl;
- // The information types are only built if there is debug information
- // requested.
- if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
- Info.RTArgs.MapNamesArray = llvm::Constant::getNullValue(
- llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
- } else {
- auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
- return emitMappingInformation(CGF, OMPBuilder, MapExpr);
- };
- SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
- llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
- std::string MapnamesName =
- CGM.getOpenMPRuntime().getName({"offload_mapnames"});
- auto *MapNamesArrayGbl =
- OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
- Info.RTArgs.MapNamesArray = MapNamesArrayGbl;
- }
- // If there's a present map type modifier, it must not be applied to the end
- // of a region, so generate a separate map type array in that case.
- if (Info.separateBeginEndCalls()) {
- bool EndMapTypesDiffer = false;
- for (uint64_t &Type : Mapping) {
- if (Type &
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_PRESENT)) {
- Type &=
- ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_PRESENT);
- EndMapTypesDiffer = true;
- }
- }
- if (EndMapTypesDiffer) {
- MapTypesArrayGbl =
- OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
- Info.RTArgs.MapTypesArrayEnd = MapTypesArrayGbl;
- }
- }
- // Store each base pointer, pointer, runtime size and mapper into its slot.
- for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
- llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
- llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.RTArgs.BasePointersArray, 0, I);
- BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address BPAddr(BP, BPVal->getType(),
- Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
- CGF.Builder.CreateStore(BPVal, BPAddr);
- if (Info.requiresDevicePointerInfo())
- if (const ValueDecl *DevVD =
- CombinedInfo.BasePointers[I].getDevicePtrDecl())
- Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
- llvm::Value *PVal = CombinedInfo.Pointers[I];
- llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.RTArgs.PointersArray, 0, I);
- P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address PAddr(P, PVal->getType(), Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
- CGF.Builder.CreateStore(PVal, PAddr);
- if (RuntimeSizes.test(I)) {
- llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
- Info.RTArgs.SizesArray,
- /*Idx0=*/0,
- /*Idx1=*/I);
- Address SAddr(S, CGM.Int64Ty, Ctx.getTypeAlignInChars(Int64Ty));
- CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
- CGM.Int64Ty,
- /*isSigned=*/true),
- SAddr);
- }
- // Fill up the mapper array.
- llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
- if (CombinedInfo.Mappers[I]) {
- MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
- cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
- MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
- Info.HasMapper = true;
- }
- Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
- CGF.Builder.CreateStore(MFunc, MAddr);
- }
- }
- if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
- Info.NumberOfPtrs == 0)
- return;
- emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
- }
- /// Check for inner distribute directive.
- /// Returns the nested distribute directive of \a D if it is directly nested
- /// in a `target` or `target teams` region (possibly below one `teams`
- /// level); returns nullptr otherwise.
- static const OMPExecutableDirective *
- getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
- const auto *CS = D.getInnermostCapturedStmt();
- const auto *Body =
- CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt =
- CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NestedDir =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
- switch (D.getDirectiveKind()) {
- case OMPD_target:
- if (isOpenMPDistributeDirective(DKind))
- return NestedDir;
- // `target teams` split over two directives: look one level deeper for
- // the distribute directive inside the nested teams region.
- if (DKind == OMPD_teams) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return nullptr;
- ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPDistributeDirective(DKind))
- return NND;
- }
- }
- return nullptr;
- case OMPD_target_teams:
- if (isOpenMPDistributeDirective(DKind))
- return NestedDir;
- return nullptr;
- case OMPD_target_parallel:
- case OMPD_target_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- return nullptr;
- // All remaining directive kinds can never reach this function; they are
- // either not target directives or already combined with distribute.
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- case OMPD_unknown:
- default:
- llvm_unreachable("Unexpected directive.");
- }
- }
- return nullptr;
- }
- /// Emit the user-defined mapper function. The code generation follows the
- /// pattern in the example below.
- /// \code
- /// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
- /// void *base, void *begin,
- /// int64_t size, int64_t type,
- /// void *name = nullptr) {
- /// // Allocate space for an array section first or add a base/begin for
- /// // pointer dereference.
- /// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
- /// !maptype.IsDelete)
- /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
- /// size*sizeof(Ty), clearToFromMember(type));
- /// // Map members.
- /// for (unsigned i = 0; i < size; i++) {
- /// // For each component specified by this mapper:
- /// for (auto c : begin[i]->all_components) {
- /// if (c.hasMapper())
- /// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
- /// c.arg_type, c.arg_name);
- /// else
- /// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
- /// c.arg_begin, c.arg_size, c.arg_type,
- /// c.arg_name);
- /// }
- /// }
- /// // Delete the array section.
- /// if (size > 1 && maptype.IsDelete)
- /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
- /// size*sizeof(Ty), clearToFromMember(type));
- /// }
- /// \endcode
- void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
- CodeGenFunction *CGF) {
- if (UDMMap.count(D) > 0)
- return;
- ASTContext &C = CGM.getContext();
- QualType Ty = D->getType();
- QualType PtrTy = C.getPointerType(Ty).withRestrict();
- QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
- auto *MapperVarDecl =
- cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
- SourceLocation Loc = D->getLocation();
- CharUnits ElementSize = C.getTypeSizeInChars(Ty);
- llvm::Type *ElemTy = CGM.getTypes().ConvertTypeForMem(Ty);
- // Prepare mapper function arguments and attributes.
- ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- ImplicitParamDecl BaseArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
- ImplicitParamDecl BeginArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- ImplicitParamDecl SizeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
- ImplicitParamDecl::Other);
- ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
- ImplicitParamDecl::Other);
- ImplicitParamDecl NameArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
- FunctionArgList Args;
- Args.push_back(&HandleArg);
- Args.push_back(&BaseArg);
- Args.push_back(&BeginArg);
- Args.push_back(&SizeArg);
- Args.push_back(&TypeArg);
- Args.push_back(&NameArg);
- const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
- SmallString<64> TyStr;
- llvm::raw_svector_ostream Out(TyStr);
- CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
- std::string Name = getName({"omp_mapper", TyStr, D->getName()});
- auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
- Name, &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
- Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
- // Start the mapper function code generation.
- CodeGenFunction MapperCGF(CGM);
- MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
- // Compute the starting and end addresses of array elements.
- llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
- C.getPointerType(Int64Ty), Loc);
- // Prepare common arguments for array initiation and deletion.
- llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&HandleArg),
- /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
- llvm::Value *BaseIn = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&BaseArg),
- /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
- llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&BeginArg),
- /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
- // Convert the size in bytes into the number of array elements.
- Size = MapperCGF.Builder.CreateExactUDiv(
- Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
- llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
- BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
- llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(ElemTy, PtrBegin, Size);
- llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
- C.getPointerType(Int64Ty), Loc);
- llvm::Value *MapName = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&NameArg),
- /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
- // Emit array initiation if this is an array section and \p MapType indicates
- // that memory allocation is required.
- llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
- emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
- MapName, ElementSize, HeadBB, /*IsInit=*/true);
- // Emit a for loop to iterate through SizeArg of elements and map all of them.
- // Emit the loop header block.
- MapperCGF.EmitBlock(HeadBB);
- llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.arraymap.body");
- llvm::BasicBlock *DoneBB = MapperCGF.createBasicBlock("omp.done");
- // Evaluate whether the initial condition is satisfied.
- llvm::Value *IsEmpty =
- MapperCGF.Builder.CreateICmpEQ(PtrBegin, PtrEnd, "omp.arraymap.isempty");
- MapperCGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
- llvm::BasicBlock *EntryBB = MapperCGF.Builder.GetInsertBlock();
- // Emit the loop body block.
- MapperCGF.EmitBlock(BodyBB);
- llvm::BasicBlock *LastBB = BodyBB;
- llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
- PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
- PtrPHI->addIncoming(PtrBegin, EntryBB);
- Address PtrCurrent(PtrPHI, ElemTy,
- MapperCGF.GetAddrOfLocalVar(&BeginArg)
- .getAlignment()
- .alignmentOfArrayElement(ElementSize));
- // Privatize the declared variable of mapper to be the current array element.
- CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
- Scope.addPrivate(MapperVarDecl, PtrCurrent);
- (void)Scope.Privatize();
- // Get map clause information. Fill up the arrays with all mapped variables.
- MappableExprsHandler::MapCombinedInfoTy Info;
- MappableExprsHandler MEHandler(*D, MapperCGF);
- MEHandler.generateAllInfoForMapper(Info);
- // Call the runtime API __tgt_mapper_num_components to get the number of
- // pre-existing components.
- llvm::Value *OffloadingArgs[] = {Handle};
- llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
- OMPRTL___tgt_mapper_num_components),
- OffloadingArgs);
- llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
- PreviousSize,
- MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
- // Fill up the runtime mapper handle for all components.
- for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
- llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
- *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
- llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
- Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
- llvm::Value *CurSizeArg = Info.Sizes[I];
- llvm::Value *CurNameArg =
- (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
- ? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
- : emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
- // Extract the MEMBER_OF field from the map type.
- llvm::Value *OriMapType = MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- Info.Types[I]));
- llvm::Value *MemberMapType =
- MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
- // Combine the map type inherited from user-defined mapper with that
- // specified in the program. According to the OMP_MAP_TO and OMP_MAP_FROM
- // bits of the \a MapType, which is the input argument of the mapper
- // function, the following code will set the OMP_MAP_TO and OMP_MAP_FROM
- // bits of MemberMapType.
- // [OpenMP 5.0], 1.2.6. map-type decay.
- // | alloc | to | from | tofrom | release | delete
- // ----------------------------------------------------------
- // alloc | alloc | alloc | alloc | alloc | release | delete
- // to | alloc | to | alloc | to | release | delete
- // from | alloc | alloc | from | from | release | delete
- // tofrom | alloc | to | from | tofrom | release | delete
- llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
- MapType,
- MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
- llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
- llvm::BasicBlock *AllocElseBB =
- MapperCGF.createBasicBlock("omp.type.alloc.else");
- llvm::BasicBlock *ToBB = MapperCGF.createBasicBlock("omp.type.to");
- llvm::BasicBlock *ToElseBB = MapperCGF.createBasicBlock("omp.type.to.else");
- llvm::BasicBlock *FromBB = MapperCGF.createBasicBlock("omp.type.from");
- llvm::BasicBlock *EndBB = MapperCGF.createBasicBlock("omp.type.end");
- llvm::Value *IsAlloc = MapperCGF.Builder.CreateIsNull(LeftToFrom);
- MapperCGF.Builder.CreateCondBr(IsAlloc, AllocBB, AllocElseBB);
- // In case of alloc, clear OMP_MAP_TO and OMP_MAP_FROM.
- MapperCGF.EmitBlock(AllocBB);
- llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
- MemberMapType,
- MapperCGF.Builder.getInt64(
- ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
- MapperCGF.Builder.CreateBr(EndBB);
- MapperCGF.EmitBlock(AllocElseBB);
- llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
- LeftToFrom,
- MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_TO)));
- MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
- // In case of to, clear OMP_MAP_FROM.
- MapperCGF.EmitBlock(ToBB);
- llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
- MemberMapType,
- MapperCGF.Builder.getInt64(
- ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
- MapperCGF.Builder.CreateBr(EndBB);
- MapperCGF.EmitBlock(ToElseBB);
- llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
- LeftToFrom,
- MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
- MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
- // In case of from, clear OMP_MAP_TO.
- MapperCGF.EmitBlock(FromBB);
- llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
- MemberMapType,
- MapperCGF.Builder.getInt64(
- ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_TO)));
- // In case of tofrom, do nothing.
- MapperCGF.EmitBlock(EndBB);
- LastBB = EndBB;
- llvm::PHINode *CurMapType =
- MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
- CurMapType->addIncoming(AllocMapType, AllocBB);
- CurMapType->addIncoming(ToMapType, ToBB);
- CurMapType->addIncoming(FromMapType, FromBB);
- CurMapType->addIncoming(MemberMapType, ToElseBB);
- llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
- CurSizeArg, CurMapType, CurNameArg};
- if (Info.Mappers[I]) {
- // Call the corresponding mapper function.
- llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
- cast<OMPDeclareMapperDecl>(Info.Mappers[I]));
- assert(MapperFunc && "Expect a valid mapper function is available.");
- MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
- } else {
- // Call the runtime API __tgt_push_mapper_component to fill up the runtime
- // data structure.
- MapperCGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_push_mapper_component),
- OffloadingArgs);
- }
- }
- // Update the pointer to point to the next element that needs to be mapped,
- // and check whether we have mapped all elements.
- llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
- ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
- PtrPHI->addIncoming(PtrNext, LastBB);
- llvm::Value *IsDone =
- MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
- llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
- MapperCGF.Builder.CreateCondBr(IsDone, ExitBB, BodyBB);
- MapperCGF.EmitBlock(ExitBB);
- // Emit array deletion if this is an array section and \p MapType indicates
- // that deletion is required.
- emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
- MapName, ElementSize, DoneBB, /*IsInit=*/false);
- // Emit the function exit block.
- MapperCGF.EmitBlock(DoneBB, /*IsFinished=*/true);
- MapperCGF.FinishFunction();
- UDMMap.try_emplace(D, Fn);
- if (CGF) {
- auto &Decls = FunctionUDMMap.FindAndConstruct(CGF->CurFn);
- Decls.second.push_back(D);
- }
- }
- /// Emit the array initialization or deletion portion for user-defined mapper
- /// code generation. First, it evaluates whether an array section is mapped and
- /// whether the \a MapType instructs to delete this section. If \a IsInit is
- /// true, and \a MapType indicates to not delete this array, array
- /// initialization code is generated. If \a IsInit is false, and \a MapType
- indicates to delete this array section, array deletion code is generated.
- void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
- CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
- llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
- llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB,
- bool IsInit) {
- // The suffix distinguishes the allocation path from the deletion path in
- // the names of the emitted basic blocks and values.
- StringRef Prefix = IsInit ? ".init" : ".del";
- // Evaluate if this is an array section.
- llvm::BasicBlock *BodyBB =
- MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
- // \p Size is an element count here; more than one element means an array
- // section is being mapped.
- llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGT(
- Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
- // Extract the OMP_MAP_DELETE bit from the incoming map type.
- llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
- MapType,
- MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_DELETE)));
- llvm::Value *DeleteCond;
- llvm::Value *Cond;
- if (IsInit) {
- // base != begin?
- llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateICmpNE(Base, Begin);
- // IsPtrAndObj?
- llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
- MapType,
- MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ)));
- PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
- BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
- Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
- // The init path only runs when the delete bit is NOT set.
- DeleteCond = MapperCGF.Builder.CreateIsNull(
- DeleteBit, getName({"omp.array", Prefix, ".delete"}));
- } else {
- Cond = IsArray;
- // The deletion path only runs when the delete bit IS set.
- DeleteCond = MapperCGF.Builder.CreateIsNotNull(
- DeleteBit, getName({"omp.array", Prefix, ".delete"}));
- }
- Cond = MapperCGF.Builder.CreateAnd(Cond, DeleteCond);
- MapperCGF.Builder.CreateCondBr(Cond, BodyBB, ExitBB);
- MapperCGF.EmitBlock(BodyBB);
- // Get the array size by multiplying element size and element number (i.e., \p
- // Size).
- llvm::Value *ArraySize = MapperCGF.Builder.CreateNUWMul(
- Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
- // Remove OMP_MAP_TO and OMP_MAP_FROM from the map type, so that it achieves
- // memory allocation/deletion purpose only.
- llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
- MapType,
- MapperCGF.Builder.getInt64(
- ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_TO |
- OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
- MapTypeArg = MapperCGF.Builder.CreateOr(
- MapTypeArg,
- MapperCGF.Builder.getInt64(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT)));
- // Call the runtime API __tgt_push_mapper_component to fill up the runtime
- // data structure.
- llvm::Value *OffloadingArgs[] = {Handle, Base, Begin,
- ArraySize, MapTypeArg, MapName};
- MapperCGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
- OMPRTL___tgt_push_mapper_component),
- OffloadingArgs);
- }
- // Return the function implementing the user-defined mapper \p D, emitting
- // it on first request and serving it from the UDMMap cache afterwards.
- llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
- const OMPDeclareMapperDecl *D) {
- auto I = UDMMap.find(D);
- if (I != UDMMap.end())
- return I->second;
- // Not emitted yet; emitUserDefinedMapper records the new function in
- // UDMMap, so the lookup below retrieves it.
- emitUserDefinedMapper(D);
- return UDMMap.lookup(D);
- }
- // Emit the trip count of the loop associated with a target directive via
- // \p SizeEmitter. Returns an i64 value; the constant 0 is returned when no
- // suitable loop directive is found or \p SizeEmitter produces nothing.
- llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter) {
- OpenMPDirectiveKind Kind = D.getDirectiveKind();
- const OMPExecutableDirective *TD = &D;
- // Get nested teams distribute kind directive, if any.
- if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
- TD = getNestedDistributeDirective(CGM.getContext(), D);
- if (!TD)
- return llvm::ConstantInt::get(CGF.Int64Ty, 0);
- const auto *LD = cast<OMPLoopDirective>(TD);
- if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD))
- return NumIterations;
- return llvm::ConstantInt::get(CGF.Int64Ty, 0);
- }
- // Emit a target region launch: capture the region's variables, build the
- // offloading argument arrays, call the device kernel through the
- // OpenMPIRBuilder, and emit a host fallback that runs when offloading is
- // unavailable or the launch fails (or unreachable if offloading is
- // mandatory).
- void CGOpenMPRuntime::emitTargetCall(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
- llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter) {
- if (!CGF.HaveInsertPoint())
- return;
- const bool OffloadingMandatory = !CGM.getLangOpts().OpenMPIsDevice &&
- CGM.getLangOpts().OpenMPOffloadMandatory;
- assert((OffloadingMandatory || OutlinedFn) && "Invalid outlined function!");
- // depend/nowait/in_reduction clauses force the launch to be wrapped in an
- // outer task (see EmitOMPTargetTaskBasedDirective below).
- const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
- D.hasClausesOfKind<OMPNowaitClause>() ||
- D.hasClausesOfKind<OMPInReductionClause>();
- llvm::SmallVector<llvm::Value *, 16> CapturedVars;
- const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
- auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
- PrePostActionTy &) {
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- };
- emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
- CodeGenFunction::OMPTargetDataInfo InputInfo;
- llvm::Value *MapTypesArray = nullptr;
- llvm::Value *MapNamesArray = nullptr;
- // Generate code for the host fallback function.
- auto &&FallbackGen = [this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask,
- &CS, OffloadingMandatory](CodeGenFunction &CGF) {
- if (OffloadingMandatory) {
- // No host version exists when offloading is mandatory.
- CGF.Builder.CreateUnreachable();
- } else {
- if (RequiresOuterTask) {
- // Re-capture the variables inside the task body.
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
- }
- };
- // Fill up the pointer arrays and transfer execution to the device.
- auto &&ThenGen = [this, Device, OutlinedFnID, &D, &InputInfo, &MapTypesArray,
- &MapNamesArray, SizeEmitter,
- FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
- if (Device.getInt() == OMPC_DEVICE_ancestor) {
- // Reverse offloading is not supported, so just execute on the host.
- FallbackGen(CGF);
- return;
- }
- // On top of the arrays that were filled up, the target offloading call
- // takes as arguments the device id as well as the host pointer. The host
- // pointer is used by the runtime library to identify the current target
- // region, so it only has to be unique and not necessarily point to
- // anything. It could be the pointer to the outlined function that
- // implements the target region, but we aren't using that so that the
- // compiler doesn't need to keep that, and could therefore inline the host
- // function if proven worthwhile during optimization.
- // From this point on, we need to have an ID of the target region defined.
- assert(OutlinedFnID && "Invalid outlined function ID!");
- (void)OutlinedFnID;
- // Emit device ID if any.
- llvm::Value *DeviceID;
- if (Device.getPointer()) {
- assert((Device.getInt() == OMPC_DEVICE_unknown ||
- Device.getInt() == OMPC_DEVICE_device_num) &&
- "Expected device_num modifier.");
- llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
- DeviceID =
- CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
- } else {
- // No device clause: let the runtime pick the default device.
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
- }
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum =
- CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
- // Return value of the runtime offloading call.
- llvm::Value *Return;
- llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
- llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- // Get tripcount for the target loop-based directive.
- llvm::Value *NumIterations =
- emitTargetNumIterationsCall(CGF, D, SizeEmitter);
- // Dynamic cgroup memory requested via ompx_dyn_cgroup_mem; defaults to 0.
- llvm::Value *DynCGroupMem = CGF.Builder.getInt32(0);
- if (auto *DynMemClause = D.getSingleClause<OMPXDynCGroupMemClause>()) {
- CodeGenFunction::RunCleanupsScope DynCGroupMemScope(CGF);
- llvm::Value *DynCGroupMemVal = CGF.EmitScalarExpr(
- DynMemClause->getSize(), /*IgnoreResultAssign=*/true);
- DynCGroupMem = CGF.Builder.CreateIntCast(DynCGroupMemVal, CGF.Int32Ty,
- /*isSigned=*/false);
- }
- // num_teams/num_threads are passed as 3-element i32 arrays; only
- // dimension 0 is populated here, the rest stay zero.
- llvm::Value *ZeroArray =
- llvm::Constant::getNullValue(llvm::ArrayType::get(CGF.CGM.Int32Ty, 3));
- bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
- llvm::Value *Flags = CGF.Builder.getInt64(HasNoWait);
- llvm::Value *NumTeams3D =
- CGF.Builder.CreateInsertValue(ZeroArray, NumTeams, {0});
- llvm::Value *NumThreads3D =
- CGF.Builder.CreateInsertValue(ZeroArray, NumThreads, {0});
- // Arguments for the target kernel.
- SmallVector<llvm::Value *> KernelArgs{
- CGF.Builder.getInt32(/* Version */ 2),
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer(),
- NumIterations,
- Flags,
- NumTeams3D,
- NumThreads3D,
- DynCGroupMem,
- };
- // The target region is an outlined function launched by the runtime
- // via calls to __tgt_target_kernel().
- //
- // Note that on the host and CPU targets, the runtime implementation of
- // these calls simply call the outlined function without forking threads.
- // The outlined functions themselves have runtime calls to
- // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
- // the compiler in emitTeamsCall() and emitParallelCall().
- //
- // In contrast, on the NVPTX target, the implementation of
- // __tgt_target_teams() launches a GPU kernel with the requested number
- // of teams and threads so no additional calls to the runtime are required.
- // Check the error code and execute the host version if required.
- CGF.Builder.restoreIP(OMPBuilder.emitTargetKernel(
- CGF.Builder, Return, RTLoc, DeviceID, NumTeams, NumThreads,
- OutlinedFnID, KernelArgs));
- llvm::BasicBlock *OffloadFailedBlock =
- CGF.createBasicBlock("omp_offload.failed");
- llvm::BasicBlock *OffloadContBlock =
- CGF.createBasicBlock("omp_offload.cont");
- // A non-zero return value from the launch means offloading failed; run
- // the host fallback in that case.
- llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
- CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
- CGF.EmitBlock(OffloadFailedBlock);
- FallbackGen(CGF);
- CGF.EmitBranch(OffloadContBlock);
- CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
- };
- // Notify that the host version must be executed.
- auto &&ElseGen = [FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
- FallbackGen(CGF);
- };
- auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
- &MapNamesArray, &CapturedVars, RequiresOuterTask,
- &CS](CodeGenFunction &CGF, PrePostActionTy &) {
- // Fill up the arrays with all the captured variables.
- MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
- // Get mappable expression information.
- MappableExprsHandler MEHandler(D, CGF);
- llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
- llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
- // Walk captures, captured-record fields, and captured values in lockstep.
- auto RI = CS.getCapturedRecordDecl()->field_begin();
- auto *CV = CapturedVars.begin();
- for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
- CE = CS.capture_end();
- CI != CE; ++CI, ++RI, ++CV) {
- MappableExprsHandler::MapCombinedInfoTy CurInfo;
- MappableExprsHandler::StructRangeInfoTy PartialStruct;
- // VLA sizes are passed to the outlined region by copy and do not have map
- // information associated.
- if (CI->capturesVariableArrayType()) {
- CurInfo.Exprs.push_back(nullptr);
- CurInfo.BasePointers.push_back(*CV);
- CurInfo.Pointers.push_back(*CV);
- CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
- // Copy to the device as an argument. No need to retrieve it.
- CurInfo.Types.push_back(
- OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
- OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM |
- OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
- CurInfo.Mappers.push_back(nullptr);
- } else {
- // If we have any information in the map clause, we use it, otherwise we
- // just do a default mapping.
- MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
- if (!CI->capturesThis())
- MappedVarSet.insert(CI->getCapturedVar());
- else
- MappedVarSet.insert(nullptr);
- if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
- MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
- // Generate correct mapping for variables captured by reference in
- // lambdas.
- if (CI->capturesVariable())
- MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
- CurInfo, LambdaPointers);
- }
- // We expect to have at least an element of information for this capture.
- assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
- "Non-existing map pointer for capture!");
- assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
- CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
- CurInfo.BasePointers.size() == CurInfo.Types.size() &&
- CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
- "Inconsistent map information sizes!");
- // If there is an entry in PartialStruct it means we have a struct with
- // individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid()) {
- CombinedInfo.append(PartialStruct.PreliminaryMapData);
- MEHandler.emitCombinedEntry(
- CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
- !PartialStruct.PreliminaryMapData.BasePointers.empty());
- }
- // We need to append the results of this capture to what we already have.
- CombinedInfo.append(CurInfo);
- }
- // Adjust MEMBER_OF flags for the lambdas captures.
- MEHandler.adjustMemberOfForLambdaCaptures(
- LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
- CombinedInfo.Types);
- // Map any list items in a map clause that were not captures because they
- // weren't referenced within the construct.
- MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
- CGOpenMPRuntime::TargetDataInfo Info;
- // Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
- bool EmitDebug =
- CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
- OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
- EmitDebug,
- /*ForEndCall=*/false);
- // Publish the generated arrays so ThenGen (captured by reference above)
- // can pass them to the kernel launch.
- InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
- InputInfo.BasePointersArray = Address(Info.RTArgs.BasePointersArray,
- CGF.VoidPtrTy, CGM.getPointerAlign());
- InputInfo.PointersArray = Address(Info.RTArgs.PointersArray, CGF.VoidPtrTy,
- CGM.getPointerAlign());
- InputInfo.SizesArray =
- Address(Info.RTArgs.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
- InputInfo.MappersArray =
- Address(Info.RTArgs.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
- MapTypesArray = Info.RTArgs.MapTypesArray;
- MapNamesArray = Info.RTArgs.MapNamesArray;
- if (RequiresOuterTask)
- CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
- else
- emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
- };
- auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
- CodeGenFunction &CGF, PrePostActionTy &) {
- if (RequiresOuterTask) {
- CodeGenFunction::OMPTargetDataInfo InputInfo;
- CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
- } else {
- emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
- }
- };
- // If we have a target function ID it means that we need to support
- // offloading, otherwise, just execute on the host. We need to execute on host
- // regardless of the conditional in the if clause if, e.g., the user do not
- // specify target triples.
- if (OutlinedFnID) {
- if (IfCond) {
- emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
- } else {
- RegionCodeGenTy ThenRCG(TargetThenGen);
- ThenRCG(CGF);
- }
- } else {
- RegionCodeGenTy ElseRCG(TargetElseGen);
- ElseRCG(CGF);
- }
- }
- // Recursively scan \p S for OpenMP target-execution directives and emit the
- // corresponding device functions, using \p ParentName to build the kernel
- // entry names. Non-target directives, lambdas, and plain statements are
- // traversed for nested target regions.
- void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
- StringRef ParentName) {
- if (!S)
- return;
- // Codegen OMP target directives that offload compute to the device.
- bool RequiresDeviceCodegen =
- isa<OMPExecutableDirective>(S) &&
- isOpenMPTargetExecutionDirective(
- cast<OMPExecutableDirective>(S)->getDirectiveKind());
- if (RequiresDeviceCodegen) {
- const auto &E = *cast<OMPExecutableDirective>(S);
- auto EntryInfo =
- getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), ParentName);
- // Is this a target region that should not be emitted as an entry point? If
- // so just signal we are done with this target region.
- if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(EntryInfo))
- return;
- // Dispatch to the dedicated device-function emitter for each supported
- // target directive kind.
- switch (E.getDirectiveKind()) {
- case OMPD_target:
- CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
- cast<OMPTargetDirective>(E));
- break;
- case OMPD_target_parallel:
- CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
- CGM, ParentName, cast<OMPTargetParallelDirective>(E));
- break;
- case OMPD_target_teams:
- CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
- CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
- break;
- case OMPD_target_teams_distribute:
- CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
- CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
- break;
- case OMPD_target_teams_distribute_simd:
- CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
- CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
- break;
- case OMPD_target_parallel_for:
- CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
- CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
- break;
- case OMPD_target_parallel_for_simd:
- CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
- CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
- break;
- case OMPD_target_simd:
- CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
- CGM, ParentName, cast<OMPTargetSimdDirective>(E));
- break;
- case OMPD_target_teams_distribute_parallel_for:
- CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
- CGM, ParentName,
- cast<OMPTargetTeamsDistributeParallelForDirective>(E));
- break;
- case OMPD_target_teams_distribute_parallel_for_simd:
- CodeGenFunction::
- EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
- CGM, ParentName,
- cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
- break;
- // All remaining directive kinds are not target-execution directives and
- // cannot reach here (RequiresDeviceCodegen filtered them out above).
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown target directive for OpenMP device codegen.");
- }
- return;
- }
- // Non-target executable directives: scan their associated statement for
- // nested target regions.
- if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
- if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
- return;
- scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
- return;
- }
- // If this is a lambda function, look into its body.
- if (const auto *L = dyn_cast<LambdaExpr>(S))
- S = L->getBody();
- // Keep looking for target regions recursively.
- for (const Stmt *II : S->children())
- scanForTargetRegionsFunctions(II, ParentName);
- }
- // Return true if \p VD carries a declare-target device_type that excludes
- // the current compilation side (host vs. device), i.e. it should not be
- // emitted here.
- static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
- std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(VD);
- if (!DevTy)
- return false;
- // Do not emit device_type(nohost) functions for the host.
- if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
- return true;
- // Do not emit device_type(host) functions for the device.
- if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
- return true;
- return false;
- }
- // Decide whether code generation of \p GD should be skipped by the normal
- // path. Returns true to skip emission; as a side effect on the device side,
- // scans function bodies for target regions so their kernels get emitted.
- bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
- // If emitting code for the host, we do not process FD here. Instead we do
- // the normal code generation.
- if (!CGM.getLangOpts().OpenMPIsDevice) {
- if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
- if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
- CGM.getLangOpts().OpenMPIsDevice))
- return true;
- return false;
- }
- const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
- // Try to detect target regions in the function.
- if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
- StringRef Name = CGM.getMangledName(GD);
- scanForTargetRegionsFunctions(FD->getBody(), Name);
- if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
- CGM.getLangOpts().OpenMPIsDevice))
- return true;
- }
- // Do not to emit function if it is not marked as declare target.
- return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
- AlreadyEmittedTargetDecls.count(VD) == 0;
- }
/// Decide whether the global variable \p GD is handled by the OpenMP
/// declare-target machinery.
/// \return true when the caller must NOT emit the variable now.
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  // Skip variables excluded for this side by device_type(host/nohost).
  if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
                              CGM.getLangOpts().OpenMPIsDevice))
    return true;
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return false;
  // Check if there are Ctors/Dtors in this declaration and look for target
  // regions in it. We use the complete variant to produce the kernel name
  // mangling.
  QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
  if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
    for (const CXXConstructorDecl *Ctor : RD->ctors()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
      scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
    }
    if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
      scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
    }
  }
  // Do not to emit variable if it is not marked as declare target.
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
          cast<VarDecl>(GD.getDecl()));
  // 'link' variables, and 'to'/'enter' variables under unified shared
  // memory, are deferred to the end of the TU (see emitDeferredTargetDecls).
  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
      ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
        *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
       HasRequiresUnifiedSharedMemory)) {
    DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
    return true;
  }
  return false;
}
/// Record \p VD (emitted at \p Addr) in the offload entries table so the
/// runtime can associate the host and device copies of the variable.
/// Host/nohost device_type variables are never registered.
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
                                                   llvm::Constant *Addr) {
  // Nothing to register when compiling the host with no target devices.
  if (CGM.getLangOpts().OMPTargetTriples.empty() &&
      !CGM.getLangOpts().OpenMPIsDevice)
    return;
  // If we have host/nohost variables, they do not need to be registered.
  std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
      OMPDeclareTargetDeclAttr::getDeviceType(VD);
  if (DevTy && *DevTy != OMPDeclareTargetDeclAttr::DT_Any)
    return;
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (!Res) {
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Register non-target variables being emitted in device code (debug info
      // may cause this).
      StringRef VarName = CGM.getMangledName(VD);
      EmittedNonTargetVariables.try_emplace(VarName, Addr);
    }
    return;
  }
  // Register declare target variables.
  llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind Flags;
  StringRef VarName;
  int64_t VarSize;
  llvm::GlobalValue::LinkageTypes Linkage;
  if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
       *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
      !HasRequiresUnifiedSharedMemory) {
    Flags = llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
    VarName = CGM.getMangledName(VD);
    if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
      VarSize =
          CGM.getContext().getTypeSizeInChars(VD->getType()).getQuantity();
      assert(VarSize != 0 && "Expected non-zero size of the variable");
    } else {
      // Declaration-only entries carry size 0.
      VarSize = 0;
    }
    Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
    // Temp solution to prevent optimizations of the internal variables.
    if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
      // Do not create a "ref-variable" if the original is not also available
      // on the host.
      if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
        return;
      std::string RefName = getName({VarName, "ref"});
      if (!CGM.GetGlobalValue(RefName)) {
        // Create an internal constant "<name>_ref" holding the address and
        // mark it compiler-used so optimizers keep the original alive.
        llvm::Constant *AddrRef =
            OMPBuilder.getOrCreateInternalVariable(Addr->getType(), RefName);
        auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
        GVAddrRef->setConstant(/*Val=*/true);
        GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
        GVAddrRef->setInitializer(Addr);
        CGM.addCompilerUsedGlobal(GVAddrRef);
      }
    }
  } else {
    assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
            ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
              *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
             HasRequiresUnifiedSharedMemory)) &&
           "Declare target attribute must link or to with unified memory.");
    if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
      Flags = llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink;
    else
      Flags = llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // On the device the entry is resolved by name only; no address needed.
      VarName = Addr->getName();
      Addr = nullptr;
    } else {
      VarName = getAddrOfDeclareTargetVar(VD).getName();
      Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
    }
    // Link/USM entries register a pointer-sized reference, not the data.
    VarSize = CGM.getPointerSize().getQuantity();
    Linkage = llvm::GlobalValue::WeakAnyLinkage;
  }
  OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
      VarName, Addr, VarSize, Flags, Linkage);
}
- bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
- if (isa<FunctionDecl>(GD.getDecl()) ||
- isa<OMPDeclareReductionDecl>(GD.getDecl()))
- return emitTargetFunctions(GD);
- return emitTargetGlobalVariable(GD);
- }
/// Emit declare-target variables that were deferred by
/// emitTargetGlobalVariable (link clause, or to/enter with unified memory).
void CGOpenMPRuntime::emitDeferredTargetDecls() const {
  for (const VarDecl *VD : DeferredGlobalVariables) {
    std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
    if (!Res)
      continue;
    if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
         *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
        !HasRequiresUnifiedSharedMemory) {
      // Plain to/enter: emit the variable itself.
      CGM.EmitGlobal(VD);
    } else {
      assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
              ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               HasRequiresUnifiedSharedMemory)) &&
             "Expected link clause or to clause with unified memory.");
      // Link / unified-shared-memory: only materialize the reference pointer.
      (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
    }
  }
}
- void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
- CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
- assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
- " Expected target-based directive.");
- }
/// Process '#pragma omp requires': latch the unified_shared_memory flag and
/// translate atomic_default_mem_order into an llvm::AtomicOrdering.
void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
  for (const OMPClause *Clause : D->clauselists()) {
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
      HasRequiresUnifiedSharedMemory = true;
      OMPBuilder.Config.setHasRequiresUnifiedSharedMemory(true);
    } else if (const auto *AC =
                   dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
      // Map the OpenMP memory-order kind onto the matching LLVM ordering.
      switch (AC->getAtomicDefaultMemOrderKind()) {
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
        RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
        break;
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
        RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
        RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
        break;
      case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
        // Invalid/missing kind: keep the current default ordering.
        break;
      }
    }
  }
}
/// Atomic ordering selected by 'requires atomic_default_mem_order' (or the
/// default when no such clause was seen; see processRequiresDirective).
llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
  return RequiresAtomicOrdering;
}
/// If \p VD carries an OMPAllocateDeclAttr with a predefined allocator,
/// report via \p AS the language address space to allocate it in.
/// \return true when the variable is allocator-managed.
bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
                                                       LangAS &AS) {
  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
  switch(A->getAllocatorType()) {
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
  // Not supported, fallback to the default mem space.
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
    // All predefined allocators map to the default address space here;
    // target runtimes may override this method for special memory spaces.
    AS = LangAS::Default;
    return true;
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
    llvm_unreachable("Expected predefined allocator for the variables with the "
                     "static storage.");
  }
  return false;
}
/// True when a 'requires unified_shared_memory' directive has been seen in
/// this translation unit (set by processRequiresDirective).
bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
  return HasRequiresUnifiedSharedMemory;
}
/// RAII: while alive, suppress the automatic "mark as global target"
/// handling during device code generation; the destructor restores it.
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
    CodeGenModule &CGM)
    : CGM(CGM) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    // Save the current flag so the destructor can restore it, then disable.
    SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
    CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
  }
}
/// Restore the ShouldMarkAsGlobal flag saved by the constructor.
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
  if (CGM.getLangOpts().OpenMPIsDevice)
    CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
}
/// Decide whether the function \p GD still needs to be emitted as a device
/// global, recording it as emitted on first use.
/// \return true when the function must NOT be emitted (already emitted, or
/// auto-marking is disabled / not compiling for a device).
bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
  if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
    return true;
  const auto *D = cast<FunctionDecl>(GD.getDecl());
  // Do not to emit function if it is marked as declare target as it was already
  // emitted.
  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
    if (D->hasBody() && AlreadyEmittedTargetDecls.count(D) == 0) {
      // If an llvm::Function already exists with a body, treat it as emitted.
      if (auto *F = dyn_cast_or_null<llvm::Function>(
              CGM.GetGlobalValue(CGM.getMangledName(GD))))
        return !F->isDeclaration();
      return false;
    }
    return true;
  }
  // insert() returns {it, true} only the first time -> emit once, then skip.
  return !AlreadyEmittedTargetDecls.insert(D).second;
}
/// Create the host-side constructor-like function that forwards the OpenMP
/// 'requires' flags to the runtime via __tgt_register_requires.
/// \return the registration function, or nullptr when none is needed (device
/// compilation, simd-only mode, no target triples, or no target entries).
llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OMPTargetTriples.empty() ||
      CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
      (OffloadEntriesInfoManager.empty() &&
       !HasEmittedDeclareTargetRegion &&
       !HasEmittedTargetRegion))
    return nullptr;
  // Create and register the function that handles the requires directives.
  ASTContext &C = CGM.getContext();
  llvm::Function *RequiresRegFn;
  {
    CodeGenFunction CGF(CGM);
    const auto &FI = CGM.getTypes().arrangeNullaryFunction();
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string ReqName = getName({"omp_offloading", "requires_reg"});
    RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
    OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
    // TODO: check for other requires clauses.
    // The requires directive takes effect only when a target region is
    // present in the compilation unit. Otherwise it is ignored and not
    // passed to the runtime. This avoids the runtime from throwing an error
    // for mismatching requires clauses across compilation units that don't
    // contain at least 1 target region.
    assert((HasEmittedTargetRegion ||
            HasEmittedDeclareTargetRegion ||
            !OffloadEntriesInfoManager.empty()) &&
           "Target or declare target region expected.");
    if (HasRequiresUnifiedSharedMemory)
      Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
    CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                            CGM.getModule(), OMPRTL___tgt_register_requires),
                        llvm::ConstantInt::get(CGM.Int64Ty, Flags));
    CGF.FinishFunction();
  }
  return RequiresRegFn;
}
/// Emit a call to __kmpc_fork_teams launching \p OutlinedFn for a 'teams'
/// construct, forwarding \p CapturedVars as trailing arguments.
void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    SourceLocation Loc,
                                    llvm::Function *OutlinedFn,
                                    ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  // Run cleanups for the captured arguments before the fork call returns.
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
  llvm::Value *Args[] = {
      RTLoc,
      CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
      CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
  llvm::SmallVector<llvm::Value *, 16> RealArgs;
  RealArgs.append(std::begin(Args), std::end(Args));
  RealArgs.append(CapturedVars.begin(), CapturedVars.end());
  llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
      CGM.getModule(), OMPRTL___kmpc_fork_teams);
  CGF.EmitRuntimeCall(RTLFn, RealArgs);
}
/// Emit __kmpc_push_num_teams with the values of the num_teams and
/// thread_limit clauses; a null expression maps to 0 ("no limit").
void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                         const Expr *NumTeams,
                                         const Expr *ThreadLimit,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *NumTeamsVal =
      NumTeams
          ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
                                      CGF.CGM.Int32Ty, /* isSigned = */ true)
          : CGF.Builder.getInt32(0);
  llvm::Value *ThreadLimitVal =
      ThreadLimit
          ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
                                      CGF.CGM.Int32Ty, /* isSigned = */ true)
          : CGF.Builder.getInt32(0);
  // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
  llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
                                     ThreadLimitVal};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_push_num_teams),
                      PushNumTeamsArgs);
}
/// Emit the begin/end runtime calls for a 'target data' region:
/// __tgt_target_data_begin_mapper, the region body, then
/// __tgt_target_data_end_mapper, honoring 'if' and 'device' clauses.
/// When device-pointer privatization is needed, the body is emitted twice
/// (with and without privatization) inside/outside the data environment.
void CGOpenMPRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen,
    CGOpenMPRuntime::TargetDataInfo &Info) {
  if (!CGF.HaveInsertPoint())
    return;
  // Action used to replace the default codegen action and turn privatization
  // off.
  PrePostActionTy NoPrivAction;
  // Generate the code for the opening of the data environment. Capture all the
  // arguments of the runtime call by reference because they are used in the
  // closing of the region.
  auto &&BeginThenGen = [this, &D, Device, &Info,
                         &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
    // Get map clause information.
    MappableExprsHandler MEHandler(D, CGF);
    MEHandler.generateAllInfo(CombinedInfo);
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
                         /*IsNonContiguous=*/true);
    llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs;
    bool EmitDebug =
        CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
    OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, RTArgs, Info,
                                            EmitDebug);
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }
    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
    //
    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
    llvm::Value *OffloadingArgs[] = {RTLoc,
                                     DeviceID,
                                     PointerNum,
                                     RTArgs.BasePointersArray,
                                     RTArgs.PointersArray,
                                     RTArgs.SizesArray,
                                     RTArgs.MapTypesArray,
                                     RTArgs.MapNamesArray,
                                     RTArgs.MappersArray};
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
        OffloadingArgs);
    // If device pointer privatization is required, emit the body of the region
    // here. It will have to be duplicated: with and without privatization.
    if (!Info.CaptureDeviceAddrMap.empty())
      CodeGen(CGF);
  };
  // Generate code for the closing of the data region.
  auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
                                                PrePostActionTy &) {
    assert(Info.isValid() && "Invalid data environment closing arguments.");
    llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs;
    bool EmitDebug =
        CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
    // ForEndCall=true selects the end-call variants of the argument arrays.
    OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, RTArgs, Info,
                                            EmitDebug,
                                            /*ForEndCall=*/true);
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }
    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
    llvm::Value *OffloadingArgs[] = {RTLoc,
                                     DeviceID,
                                     PointerNum,
                                     RTArgs.BasePointersArray,
                                     RTArgs.PointersArray,
                                     RTArgs.SizesArray,
                                     RTArgs.MapTypesArray,
                                     RTArgs.MapNamesArray,
                                     RTArgs.MappersArray};
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
        OffloadingArgs);
  };
  // If we need device pointer privatization, we need to emit the body of the
  // region with no privatization in the 'else' branch of the conditional.
  // Otherwise, we don't have to do anything.
  auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
                                                         PrePostActionTy &) {
    if (!Info.CaptureDeviceAddrMap.empty()) {
      CodeGen.setAction(NoPrivAction);
      CodeGen(CGF);
    }
  };
  // We don't have to do anything to close the region if the if clause evaluates
  // to false.
  auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
  if (IfCond) {
    emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
  } else {
    RegionCodeGenTy RCG(BeginThenGen);
    RCG(CGF);
  }
  // If we don't require privatization of device pointers, we emit the body in
  // between the runtime calls. This avoids duplicating the body code.
  if (Info.CaptureDeviceAddrMap.empty()) {
    CodeGen.setAction(NoPrivAction);
    CodeGen(CGF);
  }
  if (IfCond) {
    emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
  } else {
    RegionCodeGenTy RCG(EndThenGen);
    RCG(CGF);
  }
}
/// Emit the runtime call for a standalone data-movement directive
/// ('target enter data', 'target exit data', 'target update'), honoring the
/// 'if', 'device', 'nowait' and 'depend' clauses. With nowait/depend the call
/// is wrapped in a target task; otherwise it is emitted inline.
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;
  assert((isa<OMPTargetEnterDataDirective>(D) ||
          isa<OMPTargetExitDataDirective>(D) ||
          isa<OMPTargetUpdateDirective>(D)) &&
         "Expecting either target enter, exit data, or update directives.");
  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  llvm::Value *MapNamesArray = nullptr;
  // Generate the code for the opening of the data environment.
  // ThenGen emits the actual runtime call; it reads InputInfo and the two
  // arrays by reference because TargetThenGen below fills them in first.
  auto &&ThenGen = [this, &D, Device, &InputInfo, &MapTypesArray,
                    &MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) {
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }
    // Emit the number of elements in the offloading arrays.
    llvm::Constant *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
    // Source location for the ident struct
    llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
    llvm::Value *OffloadingArgs[] = {RTLoc,
                                     DeviceID,
                                     PointerNum,
                                     InputInfo.BasePointersArray.getPointer(),
                                     InputInfo.PointersArray.getPointer(),
                                     InputInfo.SizesArray.getPointer(),
                                     MapTypesArray,
                                     MapNamesArray,
                                     InputInfo.MappersArray.getPointer()};
    // Select the right runtime function call for each standalone
    // directive.
    const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    RuntimeFunction RTLFn;
    switch (D.getDirectiveKind()) {
    case OMPD_target_enter_data:
      RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
                        : OMPRTL___tgt_target_data_begin_mapper;
      break;
    case OMPD_target_exit_data:
      RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
                        : OMPRTL___tgt_target_data_end_mapper;
      break;
    case OMPD_target_update:
      RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
                        : OMPRTL___tgt_target_data_update_mapper;
      break;
    // All remaining directive kinds are rejected by the assert above; the
    // exhaustive list keeps -Wswitch verifying new kinds are considered.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_tile:
    case OMPD_unroll:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_target:
    case OMPD_target_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_requires:
    case OMPD_metadirective:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected standalone target data directive.");
      break;
    }
    CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
        OffloadingArgs);
  };
  // TargetThenGen builds the offloading arrays, publishes them through
  // InputInfo/MapTypesArray/MapNamesArray, then dispatches ThenGen either
  // inline or via a target task (nowait/depend).
  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
                          &MapNamesArray](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
    // Get map clause information.
    MappableExprsHandler MEHandler(D, CGF);
    MEHandler.generateAllInfo(CombinedInfo);
    CGOpenMPRuntime::TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
                         /*IsNonContiguous=*/true);
    bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
                             D.hasClausesOfKind<OMPNowaitClause>();
    bool EmitDebug =
        CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
    OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
                                            EmitDebug,
                                            /*ForEndCall=*/false);
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray = Address(Info.RTArgs.BasePointersArray,
                                          CGF.VoidPtrTy, CGM.getPointerAlign());
    InputInfo.PointersArray = Address(Info.RTArgs.PointersArray, CGF.VoidPtrTy,
                                      CGM.getPointerAlign());
    InputInfo.SizesArray =
        Address(Info.RTArgs.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
    InputInfo.MappersArray =
        Address(Info.RTArgs.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
    MapTypesArray = Info.RTArgs.MapTypesArray;
    MapNamesArray = Info.RTArgs.MapNamesArray;
    if (RequiresOuterTask)
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };
  if (IfCond) {
    emitIfClause(CGF, IfCond, TargetThenGen,
                 [](CodeGenFunction &CGF, PrePostActionTy &) {});
  } else {
    RegionCodeGenTy ThenRCG(TargetThenGen);
    ThenRCG(CGF);
  }
}
namespace {
/// Kind of parameter in a function with 'declare simd' directive.
/// Mangled as 'l'/'R'/'U'/'L'/'u'/'v' respectively (see
/// mangleVectorParameters).
enum ParamKindTy {
  Linear,
  LinearRef,
  LinearUVal,
  LinearVal,
  Uniform,
  Vector,
};
/// Attribute set of the parameter.
struct ParamAttrTy {
  // Classification of the parameter; defaults to varying-per-lane.
  ParamKindTy Kind = Vector;
  // Linear step (or referenced argument position when HasVarStride).
  llvm::APSInt StrideOrArg;
  // Alignment from the 'aligned' modifier; emitted as the 'a<N>' suffix.
  llvm::APSInt Alignment;
  // True when the stride is given as a uniform argument, mangled 's<N>'.
  bool HasVarStride = false;
};
} // namespace
- static unsigned evaluateCDTSize(const FunctionDecl *FD,
- ArrayRef<ParamAttrTy> ParamAttrs) {
- // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
- // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument
- // of that clause. The VLEN value must be power of 2.
- // In other case the notion of the function`s "characteristic data type" (CDT)
- // is used to compute the vector length.
- // CDT is defined in the following order:
- // a) For non-void function, the CDT is the return type.
- // b) If the function has any non-uniform, non-linear parameters, then the
- // CDT is the type of the first such parameter.
- // c) If the CDT determined by a) or b) above is struct, union, or class
- // type which is pass-by-value (except for the type that maps to the
- // built-in complex data type), the characteristic data type is int.
- // d) If none of the above three cases is applicable, the CDT is int.
- // The VLEN is then determined based on the CDT and the size of vector
- // register of that ISA for which current vector version is generated. The
- // VLEN is computed using the formula below:
- // VLEN = sizeof(vector_register) / sizeof(CDT),
- // where vector register size specified in section 3.2.1 Registers and the
- // Stack Frame of original AMD64 ABI document.
- QualType RetType = FD->getReturnType();
- if (RetType.isNull())
- return 0;
- ASTContext &C = FD->getASTContext();
- QualType CDT;
- if (!RetType.isNull() && !RetType->isVoidType()) {
- CDT = RetType;
- } else {
- unsigned Offset = 0;
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (ParamAttrs[Offset].Kind == Vector)
- CDT = C.getPointerType(C.getRecordType(MD->getParent()));
- ++Offset;
- }
- if (CDT.isNull()) {
- for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
- if (ParamAttrs[I + Offset].Kind == Vector) {
- CDT = FD->getParamDecl(I)->getType();
- break;
- }
- }
- }
- }
- if (CDT.isNull())
- CDT = C.IntTy;
- CDT = CDT->getCanonicalTypeUnqualified();
- if (CDT->isRecordType() || CDT->isUnionType())
- CDT = C.IntTy;
- return C.getTypeSize(CDT);
- }
- /// Mangle the parameter part of the vector function name according to
- /// their OpenMP classification. The mangling function is defined in
- /// section 4.5 of the AAVFABI(2021Q1).
- static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- for (const auto &ParamAttr : ParamAttrs) {
- switch (ParamAttr.Kind) {
- case Linear:
- Out << 'l';
- break;
- case LinearRef:
- Out << 'R';
- break;
- case LinearUVal:
- Out << 'U';
- break;
- case LinearVal:
- Out << 'L';
- break;
- case Uniform:
- Out << 'u';
- break;
- case Vector:
- Out << 'v';
- break;
- }
- if (ParamAttr.HasVarStride)
- Out << "s" << ParamAttr.StrideOrArg;
- else if (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef ||
- ParamAttr.Kind == LinearUVal || ParamAttr.Kind == LinearVal) {
- // Don't print the step value if it is not present or if it is
- // equal to 1.
- if (ParamAttr.StrideOrArg < 0)
- Out << 'n' << -ParamAttr.StrideOrArg;
- else if (ParamAttr.StrideOrArg != 1)
- Out << ParamAttr.StrideOrArg;
- }
- if (!!ParamAttr.Alignment)
- Out << 'a' << ParamAttr.Alignment;
- }
- return std::string(Out.str());
- }
/// Add the x86 "_ZGV<isa><mask><vlen><params>_<name>" vector-variant
/// attributes for a 'declare simd' function, one per ISA (SSE/AVX/AVX2/
/// AVX512) and per masked/unmasked variant selected by \p State.
/// \p VLENVal is the simdlen value; when zero, the VLEN is derived from the
/// characteristic data type (evaluateCDTSize) and the ISA register width.
static void
emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
                           const llvm::APSInt &VLENVal,
                           ArrayRef<ParamAttrTy> ParamAttrs,
                           OMPDeclareSimdDeclAttr::BranchStateTy State) {
  // ISA mangling letter and its vector register width in bits.
  struct ISADataTy {
    char ISA;
    unsigned VecRegSize;
  };
  ISADataTy ISAData[] = {
      {
          'b', 128
      }, // SSE
      {
          'c', 256
      }, // AVX
      {
          'd', 256
      }, // AVX2
      {
          'e', 512
      }, // AVX512
  };
  llvm::SmallVector<char, 2> Masked;
  switch (State) {
  case OMPDeclareSimdDeclAttr::BS_Undefined:
    // No branch-state clause: emit both masked and unmasked variants.
    Masked.push_back('N');
    Masked.push_back('M');
    break;
  case OMPDeclareSimdDeclAttr::BS_Notinbranch:
    Masked.push_back('N');
    break;
  case OMPDeclareSimdDeclAttr::BS_Inbranch:
    Masked.push_back('M');
    break;
  }
  for (char Mask : Masked) {
    for (const ISADataTy &Data : ISAData) {
      SmallString<256> Buffer;
      llvm::raw_svector_ostream Out(Buffer);
      Out << "_ZGV" << Data.ISA << Mask;
      if (!VLENVal) {
        unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
        assert(NumElts && "Non-zero simdlen/cdtsize expected");
        Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);
      } else {
        Out << VLENVal;
      }
      Out << mangleVectorParameters(ParamAttrs);
      Out << '_' << Fn->getName();
      Fn->addFnAttr(Out.str());
    }
  }
}
// These are the functions needed to mangle the names of the vector
// functions generated by the compiler, according to the rules defined in
// the "Vector Function ABI specifications for AArch64", available at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
- /// Maps To Vector (MTV), as defined in 4.1.1 of the AAVFABI (2021Q1).
- static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
- QT = QT.getCanonicalType();
- if (QT->isVoidType())
- return false;
- if (Kind == ParamKindTy::Uniform)
- return false;
- if (Kind == ParamKindTy::LinearUVal || ParamKindTy::LinearRef)
- return false;
- if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) &&
- !QT->isReferenceType())
- return false;
- return true;
- }
- /// Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI.
- static bool getAArch64PBV(QualType QT, ASTContext &C) {
- QT = QT.getCanonicalType();
- unsigned Size = C.getTypeSize(QT);
- // Only scalars and complex within 16 bytes wide set PVB to true.
- if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
- return false;
- if (QT->isFloatingType())
- return true;
- if (QT->isIntegerType())
- return true;
- if (QT->isPointerType())
- return true;
- // TODO: Add support for complex types (section 3.1.2, item 2).
- return false;
- }
/// Computes the lane size (LS) of a return type or of an input parameter,
/// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
/// TODO: Add support for references, section 3.2.1, item 1.
static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
  // Non-MTV pointers to PBV data use the pointee's size.
  if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
    QualType PTy = QT.getCanonicalType()->getPointeeType();
    if (getAArch64PBV(PTy, C))
      return C.getTypeSize(PTy);
  }
  // Pass-by-value types use their own size.
  if (getAArch64PBV(QT, C))
    return C.getTypeSize(QT);
  // Everything else is handled through a pointer-sized slot.
  return C.getTypeSize(C.getUIntPtrType());
}
// Get Narrowest Data Size (NDS) and Widest Data Size (WDS) from the
// signature of the scalar function, as defined in 3.2.2 of the
// AAVFABI.
// Returns (NDS, WDS, OutputBecomesInput): the third element is true when a
// non-PBV, maps-to-vector return value must be treated as an extra input.
static std::tuple<unsigned, unsigned, bool>
getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
  QualType RetType = FD->getReturnType().getCanonicalType();
  ASTContext &C = FD->getASTContext();
  bool OutputBecomesInput = false;
  llvm::SmallVector<unsigned, 8> Sizes;
  if (!RetType->isVoidType()) {
    Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C));
    if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {}))
      OutputBecomesInput = true;
  }
  for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
    QualType QT = FD->getParamDecl(I)->getType().getCanonicalType();
    Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C));
  }
  assert(!Sizes.empty() && "Unable to determine NDS and WDS.");
  // The LS of a function parameter / return value can only be a power
  // of 2, starting from 8 bits, up to 128.
  assert(llvm::all_of(Sizes,
                      [](unsigned Size) {
                        return Size == 8 || Size == 16 || Size == 32 ||
                               Size == 64 || Size == 128;
                      }) &&
         "Invalid size");
  return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
                         *std::max_element(std::begin(Sizes), std::end(Sizes)),
                         OutputBecomesInput);
}
- // Function used to add the attribute. The parameter `VLEN` is
- // templated to allow the use of "x" when targeting scalable functions
- // for SVE.
- template <typename T>
- static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix,
- char ISA, StringRef ParSeq,
- StringRef MangledName, bool OutputBecomesInput,
- llvm::Function *Fn) {
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- Out << Prefix << ISA << LMask << VLEN;
- if (OutputBecomesInput)
- Out << "v";
- Out << ParSeq << "_" << MangledName;
- Fn->addFnAttr(Out.str());
- }
- // Helper function to generate the Advanced SIMD names depending on
- // the value of the NDS when simdlen is not present.
- static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask,
- StringRef Prefix, char ISA,
- StringRef ParSeq, StringRef MangledName,
- bool OutputBecomesInput,
- llvm::Function *Fn) {
- switch (NDS) {
- case 8:
- addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- break;
- case 16:
- addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- break;
- case 32:
- addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- break;
- case 64:
- case 128:
- addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
- OutputBecomesInput, Fn);
- break;
- default:
- llvm_unreachable("Scalar type is too wide.");
- }
- }
/// Emit vector function attributes for AArch64, as defined in the AAVFABI.
///
/// \param UserVLEN the value of the `simdlen` clause, or 0 when absent.
/// \param State the `[not]inbranch` clause state, selecting masked ("M")
///        and/or unmasked ("N") variants.
/// \param ISA 's' for SVE, 'n' for Advanced SIMD.
static void emitAArch64DeclareSimdFunction(
    CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN,
    ArrayRef<ParamAttrTy> ParamAttrs,
    OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
    char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) {
  // Get basic data for building the vector signature.
  const auto Data = getNDSWDS(FD, ParamAttrs);
  const unsigned NDS = std::get<0>(Data);
  const unsigned WDS = std::get<1>(Data);
  const bool OutputBecomesInput = std::get<2>(Data);

  // Check the values provided via `simdlen` by the user.
  // 1. A `simdlen(1)` doesn't produce vector signatures,
  if (UserVLEN == 1) {
    unsigned DiagID = CGM.getDiags().getCustomDiagID(
        DiagnosticsEngine::Warning,
        "The clause simdlen(1) has no effect when targeting aarch64.");
    CGM.getDiags().Report(SLoc, DiagID);
    return;
  }

  // 2. Section 3.3.1, item 1: user input must be a power of 2 for
  // Advanced SIMD output.
  if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
    unsigned DiagID = CGM.getDiags().getCustomDiagID(
        DiagnosticsEngine::Warning, "The value specified in simdlen must be a "
                                    "power of 2 when targeting Advanced SIMD.");
    CGM.getDiags().Report(SLoc, DiagID);
    return;
  }

  // 3. Section 3.4.1. SVE fixed length must obey the architectural
  // limits.
  if (ISA == 's' && UserVLEN != 0) {
    if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit "
                                      "lanes in the architectural constraints "
                                      "for SVE (min is 128-bit, max is "
                                      "2048-bit, by steps of 128-bit)");
      CGM.getDiags().Report(SLoc, DiagID) << WDS;
      return;
    }
  }

  // Sort out parameter sequence.
  const std::string ParSeq = mangleVectorParameters(ParamAttrs);
  StringRef Prefix = "_ZGV";
  // Generate simdlen from user input (if any).
  if (UserVLEN) {
    if (ISA == 's') {
      // SVE generates only a masked function.
      addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
                           OutputBecomesInput, Fn);
    } else {
      assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
      // Advanced SIMD generates one or two functions, depending on
      // the `[not]inbranch` clause.
      switch (State) {
      case OMPDeclareSimdDeclAttr::BS_Undefined:
        addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
                             OutputBecomesInput, Fn);
        addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
                             OutputBecomesInput, Fn);
        break;
      case OMPDeclareSimdDeclAttr::BS_Notinbranch:
        addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
                             OutputBecomesInput, Fn);
        break;
      case OMPDeclareSimdDeclAttr::BS_Inbranch:
        addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
                             OutputBecomesInput, Fn);
        break;
      }
    }
  } else {
    // If no user simdlen is provided, follow the AAVFABI rules for
    // generating the vector length.
    if (ISA == 's') {
      // SVE, section 3.4.1, item 1.
      addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName,
                           OutputBecomesInput, Fn);
    } else {
      assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
      // Advanced SIMD, Section 3.3.1 of the AAVFABI, generates one or
      // two vector names depending on the use of the clause
      // `[not]inbranch`.
      switch (State) {
      case OMPDeclareSimdDeclAttr::BS_Undefined:
        addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
                                  OutputBecomesInput, Fn);
        addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
                                  OutputBecomesInput, Fn);
        break;
      case OMPDeclareSimdDeclAttr::BS_Notinbranch:
        addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
                                  OutputBecomesInput, Fn);
        break;
      case OMPDeclareSimdDeclAttr::BS_Inbranch:
        addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
                                  OutputBecomesInput, Fn);
        break;
      }
    }
  }
}
- void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
- llvm::Function *Fn) {
- ASTContext &C = CGM.getContext();
- FD = FD->getMostRecentDecl();
- while (FD) {
- // Map params to their positions in function decl.
- llvm::DenseMap<const Decl *, unsigned> ParamPositions;
- if (isa<CXXMethodDecl>(FD))
- ParamPositions.try_emplace(FD, 0);
- unsigned ParamPos = ParamPositions.size();
- for (const ParmVarDecl *P : FD->parameters()) {
- ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
- ++ParamPos;
- }
- for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
- llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
- // Mark uniform parameters.
- for (const Expr *E : Attr->uniforms()) {
- E = E->IgnoreParenImpCasts();
- unsigned Pos;
- if (isa<CXXThisExpr>(E)) {
- Pos = ParamPositions[FD];
- } else {
- const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
- ->getCanonicalDecl();
- auto It = ParamPositions.find(PVD);
- assert(It != ParamPositions.end() && "Function parameter not found");
- Pos = It->second;
- }
- ParamAttrs[Pos].Kind = Uniform;
- }
- // Get alignment info.
- auto *NI = Attr->alignments_begin();
- for (const Expr *E : Attr->aligneds()) {
- E = E->IgnoreParenImpCasts();
- unsigned Pos;
- QualType ParmTy;
- if (isa<CXXThisExpr>(E)) {
- Pos = ParamPositions[FD];
- ParmTy = E->getType();
- } else {
- const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
- ->getCanonicalDecl();
- auto It = ParamPositions.find(PVD);
- assert(It != ParamPositions.end() && "Function parameter not found");
- Pos = It->second;
- ParmTy = PVD->getType();
- }
- ParamAttrs[Pos].Alignment =
- (*NI)
- ? (*NI)->EvaluateKnownConstInt(C)
- : llvm::APSInt::getUnsigned(
- C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
- .getQuantity());
- ++NI;
- }
- // Mark linear parameters.
- auto *SI = Attr->steps_begin();
- auto *MI = Attr->modifiers_begin();
- for (const Expr *E : Attr->linears()) {
- E = E->IgnoreParenImpCasts();
- unsigned Pos;
- bool IsReferenceType = false;
- // Rescaling factor needed to compute the linear parameter
- // value in the mangled name.
- unsigned PtrRescalingFactor = 1;
- if (isa<CXXThisExpr>(E)) {
- Pos = ParamPositions[FD];
- auto *P = cast<PointerType>(E->getType());
- PtrRescalingFactor = CGM.getContext()
- .getTypeSizeInChars(P->getPointeeType())
- .getQuantity();
- } else {
- const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
- ->getCanonicalDecl();
- auto It = ParamPositions.find(PVD);
- assert(It != ParamPositions.end() && "Function parameter not found");
- Pos = It->second;
- if (auto *P = dyn_cast<PointerType>(PVD->getType()))
- PtrRescalingFactor = CGM.getContext()
- .getTypeSizeInChars(P->getPointeeType())
- .getQuantity();
- else if (PVD->getType()->isReferenceType()) {
- IsReferenceType = true;
- PtrRescalingFactor =
- CGM.getContext()
- .getTypeSizeInChars(PVD->getType().getNonReferenceType())
- .getQuantity();
- }
- }
- ParamAttrTy &ParamAttr = ParamAttrs[Pos];
- if (*MI == OMPC_LINEAR_ref)
- ParamAttr.Kind = LinearRef;
- else if (*MI == OMPC_LINEAR_uval)
- ParamAttr.Kind = LinearUVal;
- else if (IsReferenceType)
- ParamAttr.Kind = LinearVal;
- else
- ParamAttr.Kind = Linear;
- // Assuming a stride of 1, for `linear` without modifiers.
- ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
- if (*SI) {
- Expr::EvalResult Result;
- if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
- if (const auto *DRE =
- cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
- if (const auto *StridePVD =
- dyn_cast<ParmVarDecl>(DRE->getDecl())) {
- ParamAttr.HasVarStride = true;
- auto It = ParamPositions.find(StridePVD->getCanonicalDecl());
- assert(It != ParamPositions.end() &&
- "Function parameter not found");
- ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(It->second);
- }
- }
- } else {
- ParamAttr.StrideOrArg = Result.Val.getInt();
- }
- }
- // If we are using a linear clause on a pointer, we need to
- // rescale the value of linear_step with the byte size of the
- // pointee type.
- if (!ParamAttr.HasVarStride &&
- (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef))
- ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
- ++SI;
- ++MI;
- }
- llvm::APSInt VLENVal;
- SourceLocation ExprLoc;
- const Expr *VLENExpr = Attr->getSimdlen();
- if (VLENExpr) {
- VLENVal = VLENExpr->EvaluateKnownConstInt(C);
- ExprLoc = VLENExpr->getExprLoc();
- }
- OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
- if (CGM.getTriple().isX86()) {
- emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
- } else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) {
- unsigned VLEN = VLENVal.getExtValue();
- StringRef MangledName = Fn->getName();
- if (CGM.getTarget().hasFeature("sve"))
- emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
- MangledName, 's', 128, Fn, ExprLoc);
- else if (CGM.getTarget().hasFeature("neon"))
- emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
- MangledName, 'n', 128, Fn, ExprLoc);
- }
- }
- FD = FD->getPreviousDecl();
- }
- }
- namespace {
- /// Cleanup action for doacross support.
- class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
- public:
- static const int DoacrossFinArgs = 2;
- private:
- llvm::FunctionCallee RTLFn;
- llvm::Value *Args[DoacrossFinArgs];
- public:
- DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
- ArrayRef<llvm::Value *> CallArgs)
- : RTLFn(RTLFn) {
- assert(CallArgs.size() == DoacrossFinArgs);
- std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
- }
- void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
- if (!CGF.HaveInsertPoint())
- return;
- CGF.EmitRuntimeCall(RTLFn, Args);
- }
- };
- } // namespace
/// Emit the doacross initialization for loop directive \p D: builds a local
/// array of `kmp_dim` descriptors (one per \p NumIterations entry), calls
/// __kmpc_doacross_init, and registers a cleanup that calls
/// __kmpc_doacross_fini on region exit.
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D,
                                       ArrayRef<Expr *> NumIterations) {
  if (!CGF.HaveInsertPoint())
    return;
  ASTContext &C = CGM.getContext();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  RecordDecl *RD;
  if (KmpDimTy.isNull()) {
    // Build struct kmp_dim { // loop bounds info casted to kmp_int64
    // kmp_int64 lo; // lower
    // kmp_int64 up; // upper
    // kmp_int64 st; // stride
    // };
    RD = C.buildImplicitRecord("kmp_dim");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    RD->completeDefinition();
    KmpDimTy = C.getRecordType(RD);
  } else {
    // Record type was built on a previous call; reuse it.
    RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
  }
  llvm::APInt Size(/*numBits=*/32, NumIterations.size());
  QualType ArrayTy =
      C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0);
  // Local `kmp_dim dims[ndims]`, zero-initialized so every `lo` is 0.
  Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
  CGF.EmitNullInitialization(DimsAddr, ArrayTy);
  enum { LowerFD = 0, UpperFD, StrideFD };
  // Fill dims with data.
  for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
    LValue DimsLVal = CGF.MakeAddrLValue(
        CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy);
    // dims.upper = num_iterations;
    LValue UpperLVal = CGF.EmitLValueForField(
        DimsLVal, *std::next(RD->field_begin(), UpperFD));
    llvm::Value *NumIterVal = CGF.EmitScalarConversion(
        CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
        Int64Ty, NumIterations[I]->getExprLoc());
    CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
    // dims.stride = 1;
    LValue StrideLVal = CGF.EmitLValueForField(
        DimsLVal, *std::next(RD->field_begin(), StrideFD));
    CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
                          StrideLVal);
  }
  // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
  // kmp_int32 num_dims, struct kmp_dim * dims);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, D.getBeginLoc()),
      getThreadID(CGF, D.getBeginLoc()),
      llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
          CGM.VoidPtrTy)};
  llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
      CGM.getModule(), OMPRTL___kmpc_doacross_init);
  CGF.EmitRuntimeCall(RTLFn, Args);
  // Register the finalization call so it runs on both normal and EH exits.
  llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
      emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
  llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
      CGM.getModule(), OMPRTL___kmpc_doacross_fini);
  CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
                                             llvm::ArrayRef(FiniArgs));
}
/// Emit an `ordered depend(source)`/`depend(sink)` doacross call: stores the
/// clause's loop-iteration values into a local kmp_int64 array and passes it
/// to __kmpc_doacross_post (source) or __kmpc_doacross_wait (sink).
void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                          const OMPDependClause *C) {
  QualType Int64Ty =
      CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
  QualType ArrayTy = CGM.getContext().getConstantArrayType(
      Int64Ty, Size, nullptr, ArrayType::Normal, 0);
  // Materialize the iteration vector in a local array.
  Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
  for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
    const Expr *CounterVal = C->getLoopData(I);
    assert(CounterVal);
    llvm::Value *CntVal = CGF.EmitScalarConversion(
        CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
        CounterVal->getExprLoc());
    CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I),
                          /*Volatile=*/false, Int64Ty);
  }
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, C->getBeginLoc()),
      getThreadID(CGF, C->getBeginLoc()),
      CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
  llvm::FunctionCallee RTLFn;
  // `source` posts the completed iteration; `sink` waits for it.
  if (C->getDependencyKind() == OMPC_DEPEND_source) {
    RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                                  OMPRTL___kmpc_doacross_post);
  } else {
    assert(C->getDependencyKind() == OMPC_DEPEND_sink);
    RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                                  OMPRTL___kmpc_doacross_wait);
  }
  CGF.EmitRuntimeCall(RTLFn, Args);
}
- void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::FunctionCallee Callee,
- ArrayRef<llvm::Value *> Args) const {
- assert(Loc.isValid() && "Outlined function call location must be valid.");
- auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
- if (auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
- if (Fn->doesNotThrow()) {
- CGF.EmitNounwindRuntimeCall(Fn, Args);
- return;
- }
- }
- CGF.EmitRuntimeCall(Callee, Args);
- }
- void CGOpenMPRuntime::emitOutlinedFunctionCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
- ArrayRef<llvm::Value *> Args) const {
- emitCall(CGF, Loc, OutlinedFn, Args);
- }
- void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
- HasEmittedDeclareTargetRegion = true;
- }
- Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
- const VarDecl *NativeParam,
- const VarDecl *TargetParam) const {
- return CGF.GetAddrOfLocalVar(NativeParam);
- }
- /// Return allocator value from expression, or return a null allocator (default
- /// when no allocator specified).
- static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
- const Expr *Allocator) {
- llvm::Value *AllocVal;
- if (Allocator) {
- AllocVal = CGF.EmitScalarExpr(Allocator);
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
- CGF.getContext().VoidPtrTy,
- Allocator->getExprLoc());
- } else {
- // If no allocator specified, it defaults to the null allocator.
- AllocVal = llvm::Constant::getNullValue(
- CGF.CGM.getTypes().ConvertType(CGF.getContext().VoidPtrTy));
- }
- return AllocVal;
- }
- /// Return the alignment from an allocate directive if present.
- static llvm::Value *getAlignmentValue(CodeGenModule &CGM, const VarDecl *VD) {
- std::optional<CharUnits> AllocateAlignment = CGM.getOMPAllocateAlignment(VD);
- if (!AllocateAlignment)
- return nullptr;
- return llvm::ConstantInt::get(CGM.SizeTy, AllocateAlignment->getQuantity());
- }
/// Return the address to use for local variable \p VD, taking into account
/// untied-task local storage and the OMPAllocateDecl attribute. For
/// allocatable decls, emits a call to __kmpc_alloc/__kmpc_aligned_alloc and
/// registers a matching __kmpc_free cleanup.
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                   const VarDecl *VD) {
  if (!VD)
    return Address::invalid();
  Address UntiedAddr = Address::invalid();
  Address UntiedRealAddr = Address::invalid();
  // If the current function is an untied task body, the variable may have
  // task-local storage recorded on the untied-locals stack.
  auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
  if (It != FunctionToUntiedTaskStackMap.end()) {
    const UntiedLocalVarsAddressesMap &UntiedData =
        UntiedLocalVarsStack[It->second];
    auto I = UntiedData.find(VD);
    if (I != UntiedData.end()) {
      UntiedAddr = I->second.first;
      UntiedRealAddr = I->second.second;
    }
  }
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (CVD->hasAttr<OMPAllocateDeclAttr>()) {
    // Use the default allocation.
    if (!isAllocatableDecl(VD))
      return UntiedAddr;
    llvm::Value *Size;
    CharUnits Align = CGM.getContext().getDeclAlign(CVD);
    if (CVD->getType()->isVariablyModifiedType()) {
      // VLA: size only known at runtime.
      Size = CGF.getTypeSize(CVD->getType());
      // Align the size: ((size + align - 1) / align) * align
      Size = CGF.Builder.CreateNUWAdd(
          Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
      Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
      Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
    } else {
      CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
      Size = CGM.getSize(Sz.alignTo(Align));
    }
    llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
    const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
    const Expr *Allocator = AA->getAllocator();
    llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
    llvm::Value *Alignment = getAlignmentValue(CGM, CVD);
    // __kmpc_aligned_alloc takes an extra alignment argument; otherwise the
    // argument lists are identical.
    SmallVector<llvm::Value *, 4> Args;
    Args.push_back(ThreadID);
    if (Alignment)
      Args.push_back(Alignment);
    Args.push_back(Size);
    Args.push_back(AllocVal);
    llvm::omp::RuntimeFunction FnID =
        Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
    llvm::Value *Addr = CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args,
        getName({CVD->getName(), ".void.addr"}));
    llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
        CGM.getModule(), OMPRTL___kmpc_free);
    QualType Ty = CGM.getContext().getPointerType(CVD->getType());
    Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"}));
    if (UntiedAddr.isValid())
      CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty);
    // Cleanup action for allocate support.
    class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
      llvm::FunctionCallee RTLFn;
      SourceLocation::UIntTy LocEncoding;
      Address Addr;
      const Expr *AllocExpr;

    public:
      OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
                           SourceLocation::UIntTy LocEncoding, Address Addr,
                           const Expr *AllocExpr)
          : RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
            AllocExpr(AllocExpr) {}
      void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
        if (!CGF.HaveInsertPoint())
          return;
        // __kmpc_free(gtid, ptr, allocator)
        llvm::Value *Args[3];
        Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
            CGF, SourceLocation::getFromRawEncoding(LocEncoding));
        Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            Addr.getPointer(), CGF.VoidPtrTy);
        llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
        Args[2] = AllocVal;
        CGF.EmitRuntimeCall(RTLFn, Args);
      }
    };
    Address VDAddr =
        UntiedRealAddr.isValid()
            ? UntiedRealAddr
            : Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
    CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
        NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
        VDAddr, Allocator);
    if (UntiedRealAddr.isValid())
      if (auto *Region =
              dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
        Region->emitUntiedSwitch(CGF);
    return VDAddr;
  }
  return UntiedAddr;
}
- bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF,
- const VarDecl *VD) const {
- auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
- if (It == FunctionToUntiedTaskStackMap.end())
- return false;
- return UntiedLocalVarsStack[It->second].count(VD) > 0;
- }
- CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
- CodeGenModule &CGM, const OMPLoopDirective &S)
- : CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
- assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
- if (!NeedToPush)
- return;
- NontemporalDeclsSet &DS =
- CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back();
- for (const auto *C : S.getClausesOfKind<OMPNontemporalClause>()) {
- for (const Stmt *Ref : C->private_refs()) {
- const auto *SimpleRefExpr = cast<Expr>(Ref)->IgnoreParenImpCasts();
- const ValueDecl *VD;
- if (const auto *DRE = dyn_cast<DeclRefExpr>(SimpleRefExpr)) {
- VD = DRE->getDecl();
- } else {
- const auto *ME = cast<MemberExpr>(SimpleRefExpr);
- assert((ME->isImplicitCXXThis() ||
- isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) &&
- "Expected member of current class.");
- VD = ME->getMemberDecl();
- }
- DS.insert(VD);
- }
- }
- }
- CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
- if (!NeedToPush)
- return;
- CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
- }
- CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
- CodeGenFunction &CGF,
- const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
- std::pair<Address, Address>> &LocalVars)
- : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
- if (!NeedToPush)
- return;
- CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace(
- CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
- CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
- }
- CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() {
- if (!NeedToPush)
- return;
- CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
- }
- bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
- assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
- return llvm::any_of(
- CGM.getOpenMPRuntime().NontemporalDeclsStack,
- [VD](const NontemporalDeclsSet &Set) { return Set.contains(VD); });
- }
- void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
- const OMPExecutableDirective &S,
- llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
- const {
- llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
- // Vars in target/task regions must be excluded completely.
- if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
- isOpenMPTaskingDirective(S.getDirectiveKind())) {
- SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
- getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
- const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
- for (const CapturedStmt::Capture &Cap : CS->captures()) {
- if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
- NeedToCheckForLPCs.insert(Cap.getCapturedVar());
- }
- }
- // Exclude vars in private clauses.
- for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
- for (const Expr *Ref : C->varlists()) {
- if (!Ref->getType()->isScalarType())
- continue;
- const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
- if (!DRE)
- continue;
- NeedToCheckForLPCs.insert(DRE->getDecl());
- }
- }
- for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
- for (const Expr *Ref : C->varlists()) {
- if (!Ref->getType()->isScalarType())
- continue;
- const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
- if (!DRE)
- continue;
- NeedToCheckForLPCs.insert(DRE->getDecl());
- }
- }
- for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
- for (const Expr *Ref : C->varlists()) {
- if (!Ref->getType()->isScalarType())
- continue;
- const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
- if (!DRE)
- continue;
- NeedToCheckForLPCs.insert(DRE->getDecl());
- }
- }
- for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
- for (const Expr *Ref : C->varlists()) {
- if (!Ref->getType()->isScalarType())
- continue;
- const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
- if (!DRE)
- continue;
- NeedToCheckForLPCs.insert(DRE->getDecl());
- }
- }
- for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
- for (const Expr *Ref : C->varlists()) {
- if (!Ref->getType()->isScalarType())
- continue;
- const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
- if (!DRE)
- continue;
- NeedToCheckForLPCs.insert(DRE->getDecl());
- }
- }
- for (const Decl *VD : NeedToCheckForLPCs) {
- for (const LastprivateConditionalData &Data :
- llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
- if (Data.DeclToUniqueName.count(VD) > 0) {
- if (!Data.Disabled)
- NeedToAddForLPCsAsDisabled.insert(VD);
- break;
- }
- }
- }
- }
/// RAII that, for OpenMP >= 5.0 directives carrying at least one
/// `lastprivate(conditional:)` clause, pushes a tracking entry with a unique
/// name per conditional variable plus the loop IV lvalue.
CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
    CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
    : CGM(CGF.CGM),
      // Push only when the version allows it and some lastprivate clause is
      // marked `conditional`.
      Action((CGM.getLangOpts().OpenMP >= 50 &&
              llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
                           [](const OMPLastprivateClause *C) {
                             return C->getKind() ==
                                    OMPC_LASTPRIVATE_conditional;
                           }))
                 ? ActionToDo::PushAsLastprivateConditional
                 : ActionToDo::DoNotPush) {
  assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
  if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
    return;
  assert(Action == ActionToDo::PushAsLastprivateConditional &&
         "Expected a push action.");
  // Record a unique global-variable name for every conditional lastprivate
  // variable of this directive.
  LastprivateConditionalData &Data =
      CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    if (C->getKind() != OMPC_LASTPRIVATE_conditional)
      continue;
    for (const Expr *Ref : C->varlists()) {
      Data.DeclToUniqueName.insert(std::make_pair(
          cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
          SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
    }
  }
  Data.IVLVal = IVLVal;
  Data.Fn = CGF.CurFn;
}
- CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
- CodeGenFunction &CGF, const OMPExecutableDirective &S)
- : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
- assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
- if (CGM.getLangOpts().OpenMP < 50)
- return;
- llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
- tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
- if (!NeedToAddForLPCsAsDisabled.empty()) {
- Action = ActionToDo::DisableLastprivateConditional;
- LastprivateConditionalData &Data =
- CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
- for (const Decl *VD : NeedToAddForLPCsAsDisabled)
- Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
- Data.Fn = CGF.CurFn;
- Data.Disabled = true;
- }
- }
- CGOpenMPRuntime::LastprivateConditionalRAII
- CGOpenMPRuntime::LastprivateConditionalRAII::disable(
- CodeGenFunction &CGF, const OMPExecutableDirective &S) {
- return LastprivateConditionalRAII(CGF, S);
- }
- CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
- if (CGM.getLangOpts().OpenMP < 50)
- return;
- if (Action == ActionToDo::DisableLastprivateConditional) {
- assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
- "Expected list of disabled private vars.");
- CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
- }
- if (Action == ActionToDo::PushAsLastprivateConditional) {
- assert(
- !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
- "Expected list of lastprivate conditional vars.");
- CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
- }
- }
/// Create (or reuse) per-function storage for the lastprivate conditional
/// variable \p VD: a record `{ <value>, char fired }`. Resets the `fired`
/// flag to 0 and returns the address of the value field.
Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
                                                        const VarDecl *VD) {
  ASTContext &C = CGM.getContext();
  auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
  if (I == LastprivateConditionalToTypes.end())
    I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
  QualType NewType;
  const FieldDecl *VDField;
  const FieldDecl *FiredField;
  LValue BaseLVal;
  auto VI = I->getSecond().find(VD);
  if (VI == I->getSecond().end()) {
    // First time VD is seen in this function: build the implicit record and
    // allocate a local temporary of that type.
    // NOTE(review): "lasprivate" (sic) is a long-standing typo in the
    // internal record tag; kept as-is since it is a runtime-visible name.
    RecordDecl *RD = C.buildImplicitRecord("lasprivate.conditional");
    RD->startDefinition();
    VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
    FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
    RD->completeDefinition();
    NewType = C.getRecordType(RD);
    Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
    BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
    I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
  } else {
    // Cached: unpack the (type, value-field, fired-field, base) tuple.
    NewType = std::get<0>(VI->getSecond());
    VDField = std::get<1>(VI->getSecond());
    FiredField = std::get<2>(VI->getSecond());
    BaseLVal = std::get<3>(VI->getSecond());
  }
  // Clear the "fired" flag: no conditional update observed yet.
  LValue FiredLVal =
      CGF.EmitLValueForField(BaseLVal, FiredField);
  CGF.EmitStoreOfScalar(
      llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
      FiredLVal);
  return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
}
- namespace {
- /// Checks if the lastprivate conditional variable is referenced in LHS.
- class LastprivateConditionalRefChecker final
- : public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
- ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
- const Expr *FoundE = nullptr;
- const Decl *FoundD = nullptr;
- StringRef UniqueDeclName;
- LValue IVLVal;
- llvm::Function *FoundFn = nullptr;
- SourceLocation Loc;
- public:
- bool VisitDeclRefExpr(const DeclRefExpr *E) {
- for (const CGOpenMPRuntime::LastprivateConditionalData &D :
- llvm::reverse(LPM)) {
- auto It = D.DeclToUniqueName.find(E->getDecl());
- if (It == D.DeclToUniqueName.end())
- continue;
- if (D.Disabled)
- return false;
- FoundE = E;
- FoundD = E->getDecl()->getCanonicalDecl();
- UniqueDeclName = It->second;
- IVLVal = D.IVLVal;
- FoundFn = D.Fn;
- break;
- }
- return FoundE == E;
- }
- bool VisitMemberExpr(const MemberExpr *E) {
- if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
- return false;
- for (const CGOpenMPRuntime::LastprivateConditionalData &D :
- llvm::reverse(LPM)) {
- auto It = D.DeclToUniqueName.find(E->getMemberDecl());
- if (It == D.DeclToUniqueName.end())
- continue;
- if (D.Disabled)
- return false;
- FoundE = E;
- FoundD = E->getMemberDecl()->getCanonicalDecl();
- UniqueDeclName = It->second;
- IVLVal = D.IVLVal;
- FoundFn = D.Fn;
- break;
- }
- return FoundE == E;
- }
- bool VisitStmt(const Stmt *S) {
- for (const Stmt *Child : S->children()) {
- if (!Child)
- continue;
- if (const auto *E = dyn_cast<Expr>(Child))
- if (!E->isGLValue())
- continue;
- if (Visit(Child))
- return true;
- }
- return false;
- }
- explicit LastprivateConditionalRefChecker(
- ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
- : LPM(LPM) {}
- std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
- getFoundData() const {
- return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
- }
- };
- } // namespace
/// Emits the update of the global copy of a lastprivate conditional variable:
/// compares the stored iteration counter ("last_iv") with the current one and,
/// if not greater, stores the current counter and the private value into
/// internal globals named after \p UniqueDeclName. Runs under a critical
/// region unless compiling in OpenMP SIMD-only mode.
void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                                       LValue IVLVal,
                                                       StringRef UniqueDeclName,
                                                       LValue LVal,
                                                       SourceLocation Loc) {
  // Last updated loop counter for the lastprivate conditional var.
  // int<xx> last_iv = 0;
  llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
  llvm::Constant *LastIV = OMPBuilder.getOrCreateInternalVariable(
      LLIVTy, getName({UniqueDeclName, "iv"}));
  cast<llvm::GlobalVariable>(LastIV)->setAlignment(
      IVLVal.getAlignment().getAsAlign());
  LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
  // Last value of the lastprivate conditional.
  // decltype(priv_a) last_a;
  llvm::GlobalVariable *Last = OMPBuilder.getOrCreateInternalVariable(
      CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
  Last->setAlignment(LVal.getAlignment().getAsAlign());
  LValue LastLVal = CGF.MakeAddrLValue(
      Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
  // Global loop counter. Required to handle inner parallel-for regions.
  // iv
  llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
  // #pragma omp critical(a)
  // if (last_iv <= iv) {
  //   last_iv = iv;
  //   last_a = priv_a;
  // }
  // Note: IVVal is captured by value — it was loaded in the enclosing scope.
  auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
                    Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
    // (last_iv <= iv) ? Check if the variable is updated and store new
    // value in global var. Signedness of the comparison follows the type of
    // the iteration variable.
    llvm::Value *CmpRes;
    if (IVLVal.getType()->isSignedIntegerType()) {
      CmpRes = CGF.Builder.CreateICmpSLE(LastIVVal, IVVal);
    } else {
      assert(IVLVal.getType()->isUnsignedIntegerType() &&
             "Loop iteration variable must be integer.");
      CmpRes = CGF.Builder.CreateICmpULE(LastIVVal, IVVal);
    }
    llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lp_cond_then");
    llvm::BasicBlock *ExitBB = CGF.createBasicBlock("lp_cond_exit");
    CGF.Builder.CreateCondBr(CmpRes, ThenBB, ExitBB);
    // {
    CGF.EmitBlock(ThenBB);
    // last_iv = iv;
    CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
    // last_a = priv_a;
    switch (CGF.getEvaluationKind(LVal.getType())) {
    case TEK_Scalar: {
      llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
      CGF.EmitStoreOfScalar(PrivVal, LastLVal);
      break;
    }
    case TEK_Complex: {
      CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
      CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
      break;
    }
    case TEK_Aggregate:
      llvm_unreachable(
          "Aggregates are not supported in lastprivate conditional.");
    }
    // }
    CGF.EmitBranch(ExitBB);
    // There is no need to emit line number for unconditional branch.
    (void)ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
  };
  if (CGM.getLangOpts().OpenMPSimd) {
    // Do not emit as a critical region as no parallel region could be emitted.
    RegionCodeGenTy ThenRCG(CodeGen);
    ThenRCG(CGF);
  } else {
    emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
  }
}
/// If \p LHS refers to a lastprivate conditional variable, emits the tracking
/// code for its update. In the function that owns the variable the global copy
/// is refreshed via emitLastprivateConditionalUpdate; inside an inner region
/// (different function) only the 'Fired' flag of the private struct copy is
/// set, via an atomic volatile store.
void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
                                                         const Expr *LHS) {
  // Lastprivate conditionals require OpenMP 5.0 and an active region.
  if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
    return;
  LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
  if (!Checker.Visit(LHS))
    return;
  const Expr *FoundE;
  const Decl *FoundD;
  StringRef UniqueDeclName;
  LValue IVLVal;
  llvm::Function *FoundFn;
  std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
      Checker.getFoundData();
  if (FoundFn != CGF.CurFn) {
    // Special codegen for inner parallel regions.
    // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1;
    auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
    assert(It != LastprivateConditionalToTypes[FoundFn].end() &&
           "Lastprivate conditional is not found in outer region.");
    QualType StructTy = std::get<0>(It->getSecond());
    const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
    LValue PrivLVal = CGF.EmitLValue(FoundE);
    // Reinterpret the private copy as the wrapper struct to reach 'Fired'.
    Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivLVal.getAddress(CGF),
        CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
        CGF.ConvertTypeForMem(StructTy));
    LValue BaseLVal =
        CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
    LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
    // Atomic (unordered), volatile store of 1 into the flag.
    CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
                            CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
                        FiredLVal, llvm::AtomicOrdering::Unordered,
                        /*IsVolatile=*/true, /*isInit=*/false);
    return;
  }
  // Private address of the lastprivate conditional in the current context.
  // priv_a
  LValue LVal = CGF.EmitLValue(FoundE);
  emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
                                   FoundE->getExprLoc());
}
- void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
- if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
- return;
- auto Range = llvm::reverse(LastprivateConditionalStack);
- auto It = llvm::find_if(
- Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
- if (It == Range.end() || It->Fn != CGF.CurFn)
- return;
- auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
- assert(LPCI != LastprivateConditionalToTypes.end() &&
- "Lastprivates must be registered already.");
- SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
- getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
- const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
- for (const auto &Pair : It->DeclToUniqueName) {
- const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
- if (!CS->capturesVariable(VD) || IgnoredDecls.contains(VD))
- continue;
- auto I = LPCI->getSecond().find(Pair.first);
- assert(I != LPCI->getSecond().end() &&
- "Lastprivate must be rehistered already.");
- // bool Cmp = priv_a.Fired != 0;
- LValue BaseLVal = std::get<3>(I->getSecond());
- LValue FiredLVal =
- CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
- llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
- llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
- llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
- // if (Cmp) {
- CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
- CGF.EmitBlock(ThenBB);
- Address Addr = CGF.GetAddrOfLocalVar(VD);
- LValue LVal;
- if (VD->getType()->isReferenceType())
- LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
- AlignmentSource::Decl);
- else
- LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
- AlignmentSource::Decl);
- emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
- D.getBeginLoc());
- auto AL = ApplyDebugLocation::CreateArtificial(CGF);
- CGF.EmitBlock(DoneBB, /*IsFinal=*/true);
- // }
- }
- }
- void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
- CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD,
- SourceLocation Loc) {
- if (CGF.getLangOpts().OpenMP < 50)
- return;
- auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
- assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&
- "Unknown lastprivate conditional variable.");
- StringRef UniqueName = It->second;
- llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
- // The variable was not updated in the region - exit.
- if (!GV)
- return;
- LValue LPLVal = CGF.MakeAddrLValue(
- Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
- PrivLVal.getType().getNonReferenceType());
- llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
- CGF.EmitStoreOfScalar(Res, PrivLVal);
- }
// CGOpenMPSIMDRuntime overrides: in SIMD-only OpenMP mode no runtime calls or
// outlined functions may ever be requested, so each of these entry points
// aborts with llvm_unreachable.

llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           llvm::Function *OutlinedFn,
                                           ArrayRef<llvm::Value *> CapturedVars,
                                           const Expr *IfCond,
                                           llvm::Value *NumThreads) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
// Synchronization/worksharing region entry points — unreachable in SIMD-only
// mode.

void CGOpenMPSIMDRuntime::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                           const RegionCodeGenTy &MasterOpGen,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitMaskedRegion(CodeGenFunction &CGF,
                                           const RegionCodeGenTy &MasterOpGen,
                                           SourceLocation Loc,
                                           const Expr *Filter) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
                                            SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitSingleRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
    SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
    ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
    ArrayRef<const Expr *> AssignmentOps) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
// Loop worksharing (ordered/barrier/static/dispatch) entry points —
// unreachable in SIMD-only mode.

void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
                                            const RegionCodeGenTy &OrderedOpGen,
                                            SourceLocation Loc,
                                            bool IsThreads) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind Kind,
                                          bool EmitChecks,
                                          bool ForceSimpleCall) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
    const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                     SourceLocation Loc,
                                                     unsigned IVSize,
                                                     bool IVSigned) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              OpenMPDirectiveKind DKind) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              unsigned IVSize, bool IVSigned,
                                              Address IL, Address LB,
                                              Address UB, Address ST) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
// Clause handling, threadprivate storage, flush and task entry points —
// unreachable in SIMD-only mode.

void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                               llvm::Value *NumThreads,
                                               SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                             ProcBindKind ProcBind,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                                    const VarDecl *VD,
                                                    Address VDAddr,
                                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
    CodeGenFunction *CGF) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
    CodeGenFunction &CGF, QualType VarType, StringRef Name) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
                                    ArrayRef<const Expr *> Vars,
                                    SourceLocation Loc,
                                    llvm::AtomicOrdering AO) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPExecutableDirective &D,
                                       llvm::Function *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskLoopCall(
    CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
    llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds,
    const Expr *IfCond, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
void CGOpenMPSIMDRuntime::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  // Only "simple" (runtime-free) reductions are legal here; delegate those to
  // the base implementation.
  assert(Options.SimpleReduction && "Only simple reduction is expected.");
  CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                 ReductionOps, Options);
}
// Task reduction, taskwait and cancellation entry points — unreachable in
// SIMD-only mode.

llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
                                                SourceLocation Loc,
                                                bool IsWorksharingReduction) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  ReductionCodeGen &RCG,
                                                  unsigned N) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  llvm::Value *ReductionsPtr,
                                                  LValue SharedLVal) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
                                         SourceLocation Loc, const Expr *IfCond,
                                         OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
// Target (offloading) entry points — unreachable in SIMD-only mode, except
// for the emitTargetGlobal query below.

void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
    llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
    llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                     const OMPLoopDirective &D)>
        SizeEmitter) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  // Unlike the hooks above, this one is a reachable query: returning false
  // reports that this runtime never emits the global itself.
  return false;
}
// Teams, target-data, doacross and parameter-translation entry points —
// unreachable in SIMD-only mode.

void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &D,
                                        SourceLocation Loc,
                                        llvm::Function *OutlinedFn,
                                        ArrayRef<llvm::Value *> CapturedVars) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                             const Expr *NumTeams,
                                             const Expr *ThreadLimit,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen,
    CGOpenMPRuntime::TargetDataInfo &Info) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                           const OMPLoopDirective &D,
                                           ArrayRef<Expr *> NumIterations) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                              const OMPDependClause *C) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
                                        const VarDecl *NativeParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address
CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
                                         const VarDecl *NativeParam,
                                         const VarDecl *TargetParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}
|