//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceManager.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <numeric>

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
  81. virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
  82. CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
  83. OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
  84. bool hasCancel() const { return HasCancel; }
  85. static bool classof(const CGCapturedStmtInfo *Info) {
  86. return Info->getKind() == CR_OpenMP;
  87. }
  88. ~CGOpenMPRegionInfo() override = default;
  89. protected:
  90. CGOpenMPRegionKind RegionKind;
  91. RegionCodeGenTy CodeGen;
  92. OpenMPDirectiveKind Kind;
  93. bool HasCancel;
  94. };
  95. /// API for captured statement code generation in OpenMP constructs.
  96. class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
  97. public:
  98. CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
  99. const RegionCodeGenTy &CodeGen,
  100. OpenMPDirectiveKind Kind, bool HasCancel,
  101. StringRef HelperName)
  102. : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
  103. HasCancel),
  104. ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
  105. assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  106. }
  107. /// Get a variable or parameter for storing global thread id
  108. /// inside OpenMP construct.
  109. const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
  110. /// Get the name of the capture helper.
  111. StringRef getHelperName() const override { return HelperName; }
  112. static bool classof(const CGCapturedStmtInfo *Info) {
  113. return CGOpenMPRegionInfo::classof(Info) &&
  114. cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
  115. ParallelOutlinedRegion;
  116. }
  117. private:
  118. /// A variable or parameter storing global thread id for OpenMP
  119. /// constructs.
  120. const VarDecl *ThreadIDVar;
  121. StringRef HelperName;
  122. };
  123. /// API for captured statement code generation in OpenMP constructs.
  124. class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
  125. public:
  126. class UntiedTaskActionTy final : public PrePostActionTy {
  127. bool Untied;
  128. const VarDecl *PartIDVar;
  129. const RegionCodeGenTy UntiedCodeGen;
  130. llvm::SwitchInst *UntiedSwitch = nullptr;
  131. public:
  132. UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
  133. const RegionCodeGenTy &UntiedCodeGen)
  134. : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
  135. void Enter(CodeGenFunction &CGF) override {
  136. if (Untied) {
  137. // Emit task switching point.
  138. LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
  139. CGF.GetAddrOfLocalVar(PartIDVar),
  140. PartIDVar->getType()->castAs<PointerType>());
  141. llvm::Value *Res =
  142. CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
  143. llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
  144. UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
  145. CGF.EmitBlock(DoneBB);
  146. CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
  147. CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
  148. UntiedSwitch->addCase(CGF.Builder.getInt32(0),
  149. CGF.Builder.GetInsertBlock());
  150. emitUntiedSwitch(CGF);
  151. }
  152. }
  153. void emitUntiedSwitch(CodeGenFunction &CGF) const {
  154. if (Untied) {
  155. LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
  156. CGF.GetAddrOfLocalVar(PartIDVar),
  157. PartIDVar->getType()->castAs<PointerType>());
  158. CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
  159. PartIdLVal);
  160. UntiedCodeGen(CGF);
  161. CodeGenFunction::JumpDest CurPoint =
  162. CGF.getJumpDestInCurrentScope(".untied.next.");
  163. CGF.EmitBranch(CGF.ReturnBlock.getBlock());
  164. CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
  165. UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
  166. CGF.Builder.GetInsertBlock());
  167. CGF.EmitBranchThroughCleanup(CurPoint);
  168. CGF.EmitBlock(CurPoint.getBlock());
  169. }
  170. }
  171. unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  172. };
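  // Illustrative sketch (not emitted verbatim): for an untied task, Enter()
  // above builds a dispatcher over the part id loaded from the task data, and
  // every later scheduling point (emitUntiedSwitch) stores the next case
  // number, runs the UntiedCodeGen callback (which typically re-enqueues the
  // task via __kmpc_omp_task), and adds one more case to the switch:
  //
  //   switch (*part_id) {
  //   default: goto .untied.done.;   // finished; branch through cleanups
  //   case 0:  goto .untied.jmp.0;   // initial entry into the task body
  //   case 1:  goto .untied.jmp.1;   // resume after the first scheduling point
  //   ...                            // one case per part, see getNumberOfParts()
  //   }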
  173. CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
  174. const VarDecl *ThreadIDVar,
  175. const RegionCodeGenTy &CodeGen,
  176. OpenMPDirectiveKind Kind, bool HasCancel,
  177. const UntiedTaskActionTy &Action)
  178. : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
  179. ThreadIDVar(ThreadIDVar), Action(Action) {
  180. assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  181. }
  182. /// Get a variable or parameter for storing global thread id
  183. /// inside OpenMP construct.
  184. const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
  185. /// Get an LValue for the current ThreadID variable.
  186. LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
  187. /// Get the name of the capture helper.
  188. StringRef getHelperName() const override { return ".omp_outlined."; }
  189. void emitUntiedSwitch(CodeGenFunction &CGF) override {
  190. Action.emitUntiedSwitch(CGF);
  191. }
  192. static bool classof(const CGCapturedStmtInfo *Info) {
  193. return CGOpenMPRegionInfo::classof(Info) &&
  194. cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
  195. TaskOutlinedRegion;
  196. }
  197. private:
  198. /// A variable or parameter storing global thread id for OpenMP
  199. /// constructs.
  200. const VarDecl *ThreadIDVar;
  201. /// Action for emitting code for untied tasks.
  202. const UntiedTaskActionTy &Action;
  203. };
  204. /// API for inlined captured statement code generation in OpenMP
  205. /// constructs.
  206. class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
  207. public:
  208. CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
  209. const RegionCodeGenTy &CodeGen,
  210. OpenMPDirectiveKind Kind, bool HasCancel)
  211. : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
  212. OldCSI(OldCSI),
  213. OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
  214. // Retrieve the value of the context parameter.
  215. llvm::Value *getContextValue() const override {
  216. if (OuterRegionInfo)
  217. return OuterRegionInfo->getContextValue();
  218. llvm_unreachable("No context value for inlined OpenMP region");
  219. }
  220. void setContextValue(llvm::Value *V) override {
  221. if (OuterRegionInfo) {
  222. OuterRegionInfo->setContextValue(V);
  223. return;
  224. }
  225. llvm_unreachable("No context value for inlined OpenMP region");
  226. }
  227. /// Lookup the captured field decl for a variable.
  228. const FieldDecl *lookup(const VarDecl *VD) const override {
  229. if (OuterRegionInfo)
  230. return OuterRegionInfo->lookup(VD);
231. // If there is no outer outlined region, there is no need to look the variable
232. // up in a list of captured variables; the original one can be used directly.
  233. return nullptr;
  234. }
  235. FieldDecl *getThisFieldDecl() const override {
  236. if (OuterRegionInfo)
  237. return OuterRegionInfo->getThisFieldDecl();
  238. return nullptr;
  239. }
  240. /// Get a variable or parameter for storing global thread id
  241. /// inside OpenMP construct.
  242. const VarDecl *getThreadIDVariable() const override {
  243. if (OuterRegionInfo)
  244. return OuterRegionInfo->getThreadIDVariable();
  245. return nullptr;
  246. }
  247. /// Get an LValue for the current ThreadID variable.
  248. LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
  249. if (OuterRegionInfo)
  250. return OuterRegionInfo->getThreadIDVariableLValue(CGF);
  251. llvm_unreachable("No LValue for inlined OpenMP construct");
  252. }
  253. /// Get the name of the capture helper.
  254. StringRef getHelperName() const override {
  255. if (auto *OuterRegionInfo = getOldCSI())
  256. return OuterRegionInfo->getHelperName();
  257. llvm_unreachable("No helper name for inlined OpenMP construct");
  258. }
  259. void emitUntiedSwitch(CodeGenFunction &CGF) override {
  260. if (OuterRegionInfo)
  261. OuterRegionInfo->emitUntiedSwitch(CGF);
  262. }
  263. CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
  264. static bool classof(const CGCapturedStmtInfo *Info) {
  265. return CGOpenMPRegionInfo::classof(Info) &&
  266. cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
  267. }
  268. ~CGOpenMPInlinedRegionInfo() override = default;
  269. private:
  270. /// CodeGen info about outer OpenMP region.
  271. CodeGenFunction::CGCapturedStmtInfo *OldCSI;
  272. CGOpenMPRegionInfo *OuterRegionInfo;
  273. };
  274. /// API for captured statement code generation in OpenMP target
275. /// constructs. For these captures, implicit parameters are used instead of the
276. /// captured fields. The name of the target region has to be unique in a given
277. /// application, so it is provided by the client, because only the client has
278. /// the information needed to generate it.
  279. class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
  280. public:
  281. CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
  282. const RegionCodeGenTy &CodeGen, StringRef HelperName)
  283. : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
  284. /*HasCancel=*/false),
  285. HelperName(HelperName) {}
  286. /// This is unused for target regions because each starts executing
  287. /// with a single thread.
  288. const VarDecl *getThreadIDVariable() const override { return nullptr; }
  289. /// Get the name of the capture helper.
  290. StringRef getHelperName() const override { return HelperName; }
  291. static bool classof(const CGCapturedStmtInfo *Info) {
  292. return CGOpenMPRegionInfo::classof(Info) &&
  293. cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
  294. }
  295. private:
  296. StringRef HelperName;
  297. };
  298. static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  299. llvm_unreachable("No codegen for expressions");
  300. }
301. /// API for generation of expressions captured in an innermost OpenMP
  302. /// region.
  303. class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
  304. public:
  305. CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
  306. : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
  307. OMPD_unknown,
  308. /*HasCancel=*/false),
  309. PrivScope(CGF) {
  310. // Make sure the globals captured in the provided statement are local by
  311. // using the privatization logic. We assume the same variable is not
  312. // captured more than once.
  313. for (const auto &C : CS.captures()) {
  314. if (!C.capturesVariable() && !C.capturesVariableByCopy())
  315. continue;
  316. const VarDecl *VD = C.getCapturedVar();
  317. if (VD->isLocalVarDeclOrParm())
  318. continue;
  319. DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
  320. /*RefersToEnclosingVariableOrCapture=*/false,
  321. VD->getType().getNonReferenceType(), VK_LValue,
  322. C.getLocation());
  323. PrivScope.addPrivate(
  324. VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); });
  325. }
  326. (void)PrivScope.Privatize();
  327. }
  328. /// Lookup the captured field decl for a variable.
  329. const FieldDecl *lookup(const VarDecl *VD) const override {
  330. if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
  331. return FD;
  332. return nullptr;
  333. }
  334. /// Emit the captured statement body.
  335. void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
  336. llvm_unreachable("No body for expressions");
  337. }
  338. /// Get a variable or parameter for storing global thread id
  339. /// inside OpenMP construct.
  340. const VarDecl *getThreadIDVariable() const override {
  341. llvm_unreachable("No thread id for expressions");
  342. }
  343. /// Get the name of the capture helper.
  344. StringRef getHelperName() const override {
  345. llvm_unreachable("No helper name for expressions");
  346. }
  347. static bool classof(const CGCapturedStmtInfo *Info) { return false; }
  348. private:
  349. /// Private scope to capture global variables.
  350. CodeGenFunction::OMPPrivateScope PrivScope;
  351. };
  352. /// RAII for emitting code of OpenMP constructs.
  353. class InlinedOpenMPRegionRAII {
  354. CodeGenFunction &CGF;
  355. llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  356. FieldDecl *LambdaThisCaptureField = nullptr;
  357. const CodeGen::CGBlockInfo *BlockInfo = nullptr;
  358. bool NoInheritance = false;
  359. public:
360. /// Constructs a region for combined constructs.
  361. /// \param CodeGen Code generation sequence for combined directives. Includes
  362. /// a list of functions used for code generation of implicitly inlined
  363. /// regions.
  364. InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
  365. OpenMPDirectiveKind Kind, bool HasCancel,
  366. bool NoInheritance = true)
  367. : CGF(CGF), NoInheritance(NoInheritance) {
  368. // Start emission for the construct.
  369. CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
  370. CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
  371. if (NoInheritance) {
  372. std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
  373. LambdaThisCaptureField = CGF.LambdaThisCaptureField;
  374. CGF.LambdaThisCaptureField = nullptr;
  375. BlockInfo = CGF.BlockInfo;
  376. CGF.BlockInfo = nullptr;
  377. }
  378. }
  379. ~InlinedOpenMPRegionRAII() {
  380. // Restore original CapturedStmtInfo only if we're done with code emission.
  381. auto *OldCSI =
  382. cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
  383. delete CGF.CapturedStmtInfo;
  384. CGF.CapturedStmtInfo = OldCSI;
  385. if (NoInheritance) {
  386. std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
  387. CGF.LambdaThisCaptureField = LambdaThisCaptureField;
  388. CGF.BlockInfo = BlockInfo;
  389. }
  390. }
  391. };
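// Typical (hypothetical) use: wrap the emission of an inlined construct so
// that CGF.CapturedStmtInfo temporarily points at a CGOpenMPInlinedRegionInfo
// and is restored on scope exit, e.g.:
//
//   {
//     InlinedOpenMPRegionRAII Region(CGF, CodeGen, OMPD_critical,
//                                    /*HasCancel=*/false);
//     CGF.EmitStmt(Body);   // emitted with the inlined region info active
//   } // original CapturedStmtInfo (and lambda/block info) restored here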
  392. /// Values for bit flags used in the ident_t to describe the fields.
393. /// All enumerated elements are named and described in accordance with the code
  394. /// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
  395. enum OpenMPLocationFlags : unsigned {
  396. /// Use trampoline for internal microtask.
  397. OMP_IDENT_IMD = 0x01,
  398. /// Use c-style ident structure.
  399. OMP_IDENT_KMPC = 0x02,
  400. /// Atomic reduction option for kmpc_reduce.
  401. OMP_ATOMIC_REDUCE = 0x10,
  402. /// Explicit 'barrier' directive.
  403. OMP_IDENT_BARRIER_EXPL = 0x20,
  404. /// Implicit barrier in code.
  405. OMP_IDENT_BARRIER_IMPL = 0x40,
  406. /// Implicit barrier in 'for' directive.
  407. OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  408. /// Implicit barrier in 'sections' directive.
  409. OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  410. /// Implicit barrier in 'single' directive.
  411. OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  412. /// Call of __kmp_for_static_init for static loop.
  413. OMP_IDENT_WORK_LOOP = 0x200,
  414. /// Call of __kmp_for_static_init for sections.
  415. OMP_IDENT_WORK_SECTIONS = 0x400,
  416. /// Call of __kmp_for_static_init for distribute.
  417. OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  418. LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
  419. };
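// Roughly, the implicit barrier emitted at the end of a worksharing 'for' is
// tagged with OMP_IDENT_BARRIER_IMPL_FOR, an explicit '#pragma omp barrier'
// with OMP_IDENT_BARRIER_EXPL, and the atomic path of kmpc_reduce with
// OMP_ATOMIC_REDUCE (see the flag selection in emitBarrierCall and
// emitReduction later in this file).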
  420. namespace {
  421. LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
  422. /// Values for bit flags for marking which requires clauses have been used.
  423. enum OpenMPOffloadingRequiresDirFlags : int64_t {
  424. /// flag undefined.
  425. OMP_REQ_UNDEFINED = 0x000,
  426. /// no requires clause present.
  427. OMP_REQ_NONE = 0x001,
  428. /// reverse_offload clause.
  429. OMP_REQ_REVERSE_OFFLOAD = 0x002,
  430. /// unified_address clause.
  431. OMP_REQ_UNIFIED_ADDRESS = 0x004,
  432. /// unified_shared_memory clause.
  433. OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
  434. /// dynamic_allocators clause.
  435. OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
  436. LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
  437. };
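// For instance, a translation unit containing
//   #pragma omp requires unified_shared_memory, dynamic_allocators
// would be described by OMP_REQ_UNIFIED_SHARED_MEMORY |
// OMP_REQ_DYNAMIC_ALLOCATORS, while a TU with no 'requires' directive at all
// is marked OMP_REQ_NONE.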
  438. enum OpenMPOffloadingReservedDeviceIDs {
439. /// Device ID used when no device was specified; per the spec, the runtime
440. /// should obtain it from environment variables.
  441. OMP_DEVICEID_UNDEF = -1,
  442. };
  443. } // anonymous namespace
444. /// Describes the ident_t structure that encodes a source location.
  445. /// All descriptions are taken from
  446. /// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
  447. /// Original structure:
  448. /// typedef struct ident {
  449. /// kmp_int32 reserved_1; /**< might be used in Fortran;
  450. /// see above */
  451. /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
  452. /// KMP_IDENT_KMPC identifies this union
  453. /// member */
  454. /// kmp_int32 reserved_2; /**< not really used in Fortran any more;
  455. /// see above */
  456. ///#if USE_ITT_BUILD
  457. /// /* but currently used for storing
  458. /// region-specific ITT */
  459. /// /* contextual information. */
  460. ///#endif /* USE_ITT_BUILD */
  461. /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
  462. /// C++ */
  463. /// char const *psource; /**< String describing the source location.
  464. /// The string is composed of semi-colon separated
465. /// fields which describe the source file,
  466. /// the function and a pair of line numbers that
  467. /// delimit the construct.
  468. /// */
  469. /// } ident_t;
  470. enum IdentFieldIndex {
  471. /// might be used in Fortran
  472. IdentField_Reserved_1,
  473. /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  474. IdentField_Flags,
  475. /// Not really used in Fortran any more
  476. IdentField_Reserved_2,
  477. /// Source[4] in Fortran, do not use for C++
  478. IdentField_Reserved_3,
  479. /// String describing the source location. The string is composed of
  480. /// semi-colon separated fields which describe the source file, the function
  481. /// and a pair of line numbers that delimit the construct.
  482. IdentField_PSource
  483. };
  484. /// Schedule types for 'omp for' loops (these enumerators are taken from
  485. /// the enum sched_type in kmp.h).
  486. enum OpenMPSchedType {
  487. /// Lower bound for default (unordered) versions.
  488. OMP_sch_lower = 32,
  489. OMP_sch_static_chunked = 33,
  490. OMP_sch_static = 34,
  491. OMP_sch_dynamic_chunked = 35,
  492. OMP_sch_guided_chunked = 36,
  493. OMP_sch_runtime = 37,
  494. OMP_sch_auto = 38,
  495. /// static with chunk adjustment (e.g., simd)
  496. OMP_sch_static_balanced_chunked = 45,
  497. /// Lower bound for 'ordered' versions.
  498. OMP_ord_lower = 64,
  499. OMP_ord_static_chunked = 65,
  500. OMP_ord_static = 66,
  501. OMP_ord_dynamic_chunked = 67,
  502. OMP_ord_guided_chunked = 68,
  503. OMP_ord_runtime = 69,
  504. OMP_ord_auto = 70,
  505. OMP_sch_default = OMP_sch_static,
  506. /// dist_schedule types
  507. OMP_dist_sch_static_chunked = 91,
  508. OMP_dist_sch_static = 92,
  509. /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  510. /// Set if the monotonic schedule modifier was present.
  511. OMP_sch_modifier_monotonic = (1 << 29),
  512. /// Set if the nonmonotonic schedule modifier was present.
  513. OMP_sch_modifier_nonmonotonic = (1 << 30),
  514. };
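// Rough mapping examples (the exact selection logic lives in
// getRuntimeSchedule() later in this file):
//   schedule(static)            -> OMP_sch_static
//   schedule(static, 4)         -> OMP_sch_static_chunked
//   schedule(dynamic, 4)        -> OMP_sch_dynamic_chunked
//   ordered + schedule(dynamic) -> OMP_ord_dynamic_chunked
//   schedule(nonmonotonic: dynamic) additionally ORs in
//                                  OMP_sch_modifier_nonmonotonic
//   dist_schedule(static, 8)    -> OMP_dist_sch_static_chunked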
515. /// A basic class for a pre/post-action used in an advanced codegen sequence
516. /// for an OpenMP region.
  517. class CleanupTy final : public EHScopeStack::Cleanup {
  518. PrePostActionTy *Action;
  519. public:
  520. explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
  521. void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
  522. if (!CGF.HaveInsertPoint())
  523. return;
  524. Action->Exit(CGF);
  525. }
  526. };
  527. } // anonymous namespace
  528. void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
  529. CodeGenFunction::RunCleanupsScope Scope(CGF);
  530. if (PrePostAction) {
  531. CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
  532. Callback(CodeGen, CGF, *PrePostAction);
  533. } else {
  534. PrePostActionTy Action;
  535. Callback(CodeGen, CGF, Action);
  536. }
  537. }
538. /// Check whether the combiner is a call to a UDR combiner and, if so, return
539. /// the UDR decl used for the reduction.
  540. static const OMPDeclareReductionDecl *
  541. getReductionInit(const Expr *ReductionOp) {
  542. if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
  543. if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
  544. if (const auto *DRE =
  545. dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
  546. if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
  547. return DRD;
  548. return nullptr;
  549. }
  550. static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
  551. const OMPDeclareReductionDecl *DRD,
  552. const Expr *InitOp,
  553. Address Private, Address Original,
  554. QualType Ty) {
  555. if (DRD->getInitializer()) {
  556. std::pair<llvm::Function *, llvm::Function *> Reduction =
  557. CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
  558. const auto *CE = cast<CallExpr>(InitOp);
  559. const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
  560. const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
  561. const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
  562. const auto *LHSDRE =
  563. cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
  564. const auto *RHSDRE =
  565. cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
  566. CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  567. PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
  568. [=]() { return Private; });
  569. PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
  570. [=]() { return Original; });
  571. (void)PrivateScope.Privatize();
  572. RValue Func = RValue::get(Reduction.second);
  573. CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
  574. CGF.EmitIgnoredExpr(InitOp);
  575. } else {
  576. llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
  577. std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
  578. auto *GV = new llvm::GlobalVariable(
  579. CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
  580. llvm::GlobalValue::PrivateLinkage, Init, Name);
  581. LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
  582. RValue InitRVal;
  583. switch (CGF.getEvaluationKind(Ty)) {
  584. case TEK_Scalar:
  585. InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
  586. break;
  587. case TEK_Complex:
  588. InitRVal =
  589. RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
  590. break;
  591. case TEK_Aggregate: {
  592. OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
  593. CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
  594. CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
  595. /*IsInitializer=*/false);
  596. return;
  597. }
  598. }
  599. OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
  600. CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
  601. CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
  602. /*IsInitializer=*/false);
  603. }
  604. }
  605. /// Emit initialization of arrays of complex types.
  606. /// \param DestAddr Address of the array.
  607. /// \param Type Type of array.
  608. /// \param Init Initial expression of array.
  609. /// \param SrcAddr Address of the original array.
  610. static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
  611. QualType Type, bool EmitDeclareReductionInit,
  612. const Expr *Init,
  613. const OMPDeclareReductionDecl *DRD,
  614. Address SrcAddr = Address::invalid()) {
  615. // Perform element-by-element initialization.
  616. QualType ElementTy;
  617. // Drill down to the base element type on both arrays.
  618. const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  619. llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  620. if (DRD)
  621. SrcAddr =
  622. CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
  623. llvm::Value *SrcBegin = nullptr;
  624. if (DRD)
  625. SrcBegin = SrcAddr.getPointer();
  626. llvm::Value *DestBegin = DestAddr.getPointer();
  627. // Cast from pointer to array type to pointer to single element.
  628. llvm::Value *DestEnd =
  629. CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  630. // The basic structure here is a while-do loop.
  631. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  632. llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  633. llvm::Value *IsEmpty =
  634. CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  635. CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  636. // Enter the loop body, making that address the current address.
  637. llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  638. CGF.EmitBlock(BodyBB);
  639. CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
  640. llvm::PHINode *SrcElementPHI = nullptr;
  641. Address SrcElementCurrent = Address::invalid();
  642. if (DRD) {
  643. SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
  644. "omp.arraycpy.srcElementPast");
  645. SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  646. SrcElementCurrent =
  647. Address(SrcElementPHI,
  648. SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  649. }
  650. llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
  651. DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  652. DestElementPHI->addIncoming(DestBegin, EntryBB);
  653. Address DestElementCurrent =
  654. Address(DestElementPHI,
  655. DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  656. // Emit copy.
  657. {
  658. CodeGenFunction::RunCleanupsScope InitScope(CGF);
  659. if (EmitDeclareReductionInit) {
  660. emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
  661. SrcElementCurrent, ElementTy);
  662. } else
  663. CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
  664. /*IsInitializer=*/false);
  665. }
  666. if (DRD) {
  667. // Shift the address forward by one element.
  668. llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
  669. SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
  670. "omp.arraycpy.dest.element");
  671. SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  672. }
  673. // Shift the address forward by one element.
  674. llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
  675. DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
  676. "omp.arraycpy.dest.element");
  677. // Check whether we've reached the end.
  678. llvm::Value *Done =
  679. CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  680. CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  681. DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
  682. // Done.
  683. CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  684. }
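// Conceptually, the loop emitted above corresponds to something like the
// following C-level sketch (names are illustrative only):
//
//   Ty *dst = DestBegin, *end = DestBegin + NumElements, *src = SrcBegin;
//   if (dst != end) {
//     do {
//       init_element(dst, src);   // UDR initializer or default init of *dst
//       if (DRD) ++src;           // source element advances only for UDRs
//       ++dst;
//     } while (dst != end);
//   }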
  685. LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  686. return CGF.EmitOMPSharedLValue(E);
  687. }
  688. LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
  689. const Expr *E) {
  690. if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
  691. return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
  692. return LValue();
  693. }
  694. void ReductionCodeGen::emitAggregateInitialization(
  695. CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
  696. const OMPDeclareReductionDecl *DRD) {
  697. // Emit VarDecl with copy init for arrays.
  698. // Get the address of the original variable captured in current
  699. // captured region.
  700. const auto *PrivateVD =
  701. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  702. bool EmitDeclareReductionInit =
  703. DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
  704. EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
  705. EmitDeclareReductionInit,
  706. EmitDeclareReductionInit ? ClausesData[N].ReductionOp
  707. : PrivateVD->getInit(),
  708. DRD, SharedAddr);
  709. }
  710. ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
  711. ArrayRef<const Expr *> Origs,
  712. ArrayRef<const Expr *> Privates,
  713. ArrayRef<const Expr *> ReductionOps) {
  714. ClausesData.reserve(Shareds.size());
  715. SharedAddresses.reserve(Shareds.size());
  716. Sizes.reserve(Shareds.size());
  717. BaseDecls.reserve(Shareds.size());
  718. const auto *IOrig = Origs.begin();
  719. const auto *IPriv = Privates.begin();
  720. const auto *IRed = ReductionOps.begin();
  721. for (const Expr *Ref : Shareds) {
  722. ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
  723. std::advance(IOrig, 1);
  724. std::advance(IPriv, 1);
  725. std::advance(IRed, 1);
  726. }
  727. }
  728. void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
  729. assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
  730. "Number of generated lvalues must be exactly N.");
  731. LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
  732. LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
  733. SharedAddresses.emplace_back(First, Second);
  734. if (ClausesData[N].Shared == ClausesData[N].Ref) {
  735. OrigAddresses.emplace_back(First, Second);
  736. } else {
  737. LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
  738. LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
  739. OrigAddresses.emplace_back(First, Second);
  740. }
  741. }
  742. void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  743. const auto *PrivateVD =
  744. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  745. QualType PrivateType = PrivateVD->getType();
  746. bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  747. if (!PrivateType->isVariablyModifiedType()) {
  748. Sizes.emplace_back(
  749. CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
  750. nullptr);
  751. return;
  752. }
  753. llvm::Value *Size;
  754. llvm::Value *SizeInChars;
  755. auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
  756. auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  757. if (AsArraySection) {
  758. Size = CGF.Builder.CreatePtrDiff(ElemType,
  759. OrigAddresses[N].second.getPointer(CGF),
  760. OrigAddresses[N].first.getPointer(CGF));
  761. Size = CGF.Builder.CreateNUWAdd(
  762. Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
  763. SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  764. } else {
  765. SizeInChars =
  766. CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
  767. Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  768. }
  769. Sizes.emplace_back(SizeInChars, Size);
  770. CodeGenFunction::OpaqueValueMapping OpaqueMap(
  771. CGF,
  772. cast<OpaqueValueExpr>(
  773. CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
  774. RValue::get(Size));
  775. CGF.EmitVariablyModifiedType(PrivateType);
  776. }
  777. void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
  778. llvm::Value *Size) {
  779. const auto *PrivateVD =
  780. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  781. QualType PrivateType = PrivateVD->getType();
  782. if (!PrivateType->isVariablyModifiedType()) {
  783. assert(!Size && !Sizes[N].second &&
  784. "Size should be nullptr for non-variably modified reduction "
  785. "items.");
  786. return;
  787. }
  788. CodeGenFunction::OpaqueValueMapping OpaqueMap(
  789. CGF,
  790. cast<OpaqueValueExpr>(
  791. CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
  792. RValue::get(Size));
  793. CGF.EmitVariablyModifiedType(PrivateType);
  794. }
  795. void ReductionCodeGen::emitInitialization(
  796. CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
  797. llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  798. assert(SharedAddresses.size() > N && "No variable was generated");
  799. const auto *PrivateVD =
  800. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  801. const OMPDeclareReductionDecl *DRD =
  802. getReductionInit(ClausesData[N].ReductionOp);
  803. QualType PrivateType = PrivateVD->getType();
  804. PrivateAddr = CGF.Builder.CreateElementBitCast(
  805. PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
  806. if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
  807. if (DRD && DRD->getInitializer())
  808. (void)DefaultInit(CGF);
  809. emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
  810. } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
  811. (void)DefaultInit(CGF);
  812. QualType SharedType = SharedAddresses[N].first.getType();
  813. emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
  814. PrivateAddr, SharedAddr, SharedType);
  815. } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
  816. !CGF.isTrivialInitializer(PrivateVD->getInit())) {
  817. CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
  818. PrivateVD->getType().getQualifiers(),
  819. /*IsInitializer=*/false);
  820. }
  821. }
  822. bool ReductionCodeGen::needCleanups(unsigned N) {
  823. const auto *PrivateVD =
  824. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  825. QualType PrivateType = PrivateVD->getType();
  826. QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  827. return DTorKind != QualType::DK_none;
  828. }
  829. void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
  830. Address PrivateAddr) {
  831. const auto *PrivateVD =
  832. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  833. QualType PrivateType = PrivateVD->getType();
  834. QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  835. if (needCleanups(N)) {
  836. PrivateAddr = CGF.Builder.CreateElementBitCast(
  837. PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
  838. CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
  839. }
  840. }
  841. static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
  842. LValue BaseLV) {
  843. BaseTy = BaseTy.getNonReferenceType();
  844. while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
  845. !CGF.getContext().hasSameType(BaseTy, ElTy)) {
  846. if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
  847. BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
  848. } else {
  849. LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
  850. BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
  851. }
  852. BaseTy = BaseTy->getPointeeType();
  853. }
  854. return CGF.MakeAddrLValue(
  855. CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
  856. CGF.ConvertTypeForMem(ElTy)),
  857. BaseLV.getType(), BaseLV.getBaseInfo(),
  858. CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
  859. }
  860. static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
  861. llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
  862. llvm::Value *Addr) {
  863. Address Tmp = Address::invalid();
  864. Address TopTmp = Address::invalid();
  865. Address MostTopTmp = Address::invalid();
  866. BaseTy = BaseTy.getNonReferenceType();
  867. while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
  868. !CGF.getContext().hasSameType(BaseTy, ElTy)) {
  869. Tmp = CGF.CreateMemTemp(BaseTy);
  870. if (TopTmp.isValid())
  871. CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
  872. else
  873. MostTopTmp = Tmp;
  874. TopTmp = Tmp;
  875. BaseTy = BaseTy->getPointeeType();
  876. }
  877. llvm::Type *Ty = BaseLVType;
  878. if (Tmp.isValid())
  879. Ty = Tmp.getElementType();
  880. Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  881. if (Tmp.isValid()) {
  882. CGF.Builder.CreateStore(Addr, Tmp);
  883. return MostTopTmp;
  884. }
  885. return Address(Addr, BaseLVAlignment);
  886. }
  887. static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
  888. const VarDecl *OrigVD = nullptr;
  889. if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
  890. const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
  891. while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
  892. Base = TempOASE->getBase()->IgnoreParenImpCasts();
  893. while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
  894. Base = TempASE->getBase()->IgnoreParenImpCasts();
  895. DE = cast<DeclRefExpr>(Base);
  896. OrigVD = cast<VarDecl>(DE->getDecl());
  897. } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
  898. const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
  899. while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
  900. Base = TempASE->getBase()->IgnoreParenImpCasts();
  901. DE = cast<DeclRefExpr>(Base);
  902. OrigVD = cast<VarDecl>(DE->getDecl());
  903. }
  904. return OrigVD;
  905. }
  906. Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
  907. Address PrivateAddr) {
  908. const DeclRefExpr *DE;
  909. if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
  910. BaseDecls.emplace_back(OrigVD);
  911. LValue OriginalBaseLValue = CGF.EmitLValue(DE);
  912. LValue BaseLValue =
  913. loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
  914. OriginalBaseLValue);
  915. Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
  916. llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
  917. SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
  918. SharedAddr.getPointer());
  919. llvm::Value *PrivatePointer =
  920. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  921. PrivateAddr.getPointer(), SharedAddr.getType());
  922. llvm::Value *Ptr = CGF.Builder.CreateGEP(
  923. SharedAddr.getElementType(), PrivatePointer, Adjustment);
  924. return castToBase(CGF, OrigVD->getType(),
  925. SharedAddresses[N].first.getType(),
  926. OriginalBaseLValue.getAddress(CGF).getType(),
  927. OriginalBaseLValue.getAlignment(), Ptr);
  928. }
  929. BaseDecls.emplace_back(
  930. cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  931. return PrivateAddr;
  932. }
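// Worked example (hypothetical): for "reduction(+ : a[2:3])" with "int a[10]",
// the private copy holds only the 3 reduced elements. BaseLValue points at
// &a[0] and SharedAddr at &a[2], so Adjustment == -2 elements; the returned
// address is PrivateAddr - 2, which lets the original subscript expressions
// (a[2]..a[4]) transparently address the private storage instead.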
  933. bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
  934. const OMPDeclareReductionDecl *DRD =
  935. getReductionInit(ClausesData[N].ReductionOp);
  936. return DRD && DRD->getInitializer();
  937. }
  938. LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  939. return CGF.EmitLoadOfPointerLValue(
  940. CGF.GetAddrOfLocalVar(getThreadIDVariable()),
  941. getThreadIDVariable()->getType()->castAs<PointerType>());
  942. }
  943. void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
  944. if (!CGF.HaveInsertPoint())
  945. return;
  946. // 1.2.2 OpenMP Language Terminology
  947. // Structured block - An executable statement with a single entry at the
  948. // top and a single exit at the bottom.
  949. // The point of exit cannot be a branch out of the structured block.
  950. // longjmp() and throw() must not violate the entry/exit criteria.
  951. CGF.EHStack.pushTerminate();
  952. if (S)
  953. CGF.incrementProfileCounter(S);
  954. CodeGen(CGF);
  955. CGF.EHStack.popTerminate();
  956. }
  957. LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
  958. CodeGenFunction &CGF) {
  959. return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
  960. getThreadIDVariable()->getType(),
  961. AlignmentSource::Decl);
  962. }
  963. static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
  964. QualType FieldTy) {
  965. auto *Field = FieldDecl::Create(
  966. C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
  967. C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
  968. /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
  969. Field->setAccess(AS_public);
  970. DC->addDecl(Field);
  971. return Field;
  972. }
  973. CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
  974. StringRef Separator)
  975. : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
  976. OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
  977. KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
  978. // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
  979. OMPBuilder.initialize();
  980. loadOffloadInfoMetadata();
  981. }
  982. void CGOpenMPRuntime::clear() {
  983. InternalVars.clear();
984. // Clean up non-target variable declarations that may be used only in debug info.
  985. for (const auto &Data : EmittedNonTargetVariables) {
  986. if (!Data.getValue().pointsToAliveValue())
  987. continue;
  988. auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
  989. if (!GV)
  990. continue;
  991. if (!GV->isDeclaration() || GV->getNumUses() > 0)
  992. continue;
  993. GV->eraseFromParent();
  994. }
  995. }
  996. std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
  997. SmallString<128> Buffer;
  998. llvm::raw_svector_ostream OS(Buffer);
  999. StringRef Sep = FirstSeparator;
  1000. for (StringRef Part : Parts) {
  1001. OS << Sep << Part;
  1002. Sep = Separator;
  1003. }
  1004. return std::string(OS.str());
  1005. }
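// For example, assuming '.' is used for both the first and the regular
// separator (as the host runtime is constructed with),
// getName({"omp", "reduction", "reduction_func"}) yields
// ".omp.reduction.reduction_func".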
  1006. static llvm::Function *
  1007. emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
  1008. const Expr *CombinerInitializer, const VarDecl *In,
  1009. const VarDecl *Out, bool IsCombiner) {
  1010. // void .omp_combiner.(Ty *in, Ty *out);
  1011. ASTContext &C = CGM.getContext();
  1012. QualType PtrTy = C.getPointerType(Ty).withRestrict();
  1013. FunctionArgList Args;
  1014. ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
  1015. /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  1016. ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
  1017. /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  1018. Args.push_back(&OmpOutParm);
  1019. Args.push_back(&OmpInParm);
  1020. const CGFunctionInfo &FnInfo =
  1021. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  1022. llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  1023. std::string Name = CGM.getOpenMPRuntime().getName(
  1024. {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  1025. auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
  1026. Name, &CGM.getModule());
  1027. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  1028. if (CGM.getLangOpts().Optimize) {
  1029. Fn->removeFnAttr(llvm::Attribute::NoInline);
  1030. Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  1031. Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  1032. }
  1033. CodeGenFunction CGF(CGM);
  1034. // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  1035. // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  1036. CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
  1037. Out->getLocation());
  1038. CodeGenFunction::OMPPrivateScope Scope(CGF);
  1039. Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  1040. Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
  1041. return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
  1042. .getAddress(CGF);
  1043. });
  1044. Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  1045. Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
  1046. return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
  1047. .getAddress(CGF);
  1048. });
  1049. (void)Scope.Privatize();
  1050. if (!IsCombiner && Out->hasInit() &&
  1051. !CGF.isTrivialInitializer(Out->getInit())) {
  1052. CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
  1053. Out->getType().getQualifiers(),
  1054. /*IsInitializer=*/true);
  1055. }
  1056. if (CombinerInitializer)
  1057. CGF.EmitIgnoredExpr(CombinerInitializer);
  1058. Scope.ForceCleanup();
  1059. CGF.FinishFunction();
  1060. return Fn;
  1061. }
  1062. void CGOpenMPRuntime::emitUserDefinedReduction(
  1063. CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  1064. if (UDRMap.count(D) > 0)
  1065. return;
  1066. llvm::Function *Combiner = emitCombinerOrInitializer(
  1067. CGM, D->getType(), D->getCombiner(),
  1068. cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
  1069. cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
  1070. /*IsCombiner=*/true);
  1071. llvm::Function *Initializer = nullptr;
  1072. if (const Expr *Init = D->getInitializer()) {
  1073. Initializer = emitCombinerOrInitializer(
  1074. CGM, D->getType(),
  1075. D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
  1076. : nullptr,
  1077. cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
  1078. cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
  1079. /*IsCombiner=*/false);
  1080. }
  1081. UDRMap.try_emplace(D, Combiner, Initializer);
  1082. if (CGF) {
  1083. auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
  1084. Decls.second.push_back(D);
  1085. }
  1086. }
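// Example (hypothetical): for
//   #pragma omp declare reduction(my_add : int : omp_out += omp_in) \
//       initializer(omp_priv = 0)
// this emits an internal ".omp_combiner." function that performs
// "*omp_out += *omp_in" and an ".omp_initializer." that stores 0 into
// *omp_priv, and records the pair in UDRMap for later lookup by
// getUserDefinedReduction().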
  1087. std::pair<llvm::Function *, llvm::Function *>
  1088. CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
  1089. auto I = UDRMap.find(D);
  1090. if (I != UDRMap.end())
  1091. return I->second;
  1092. emitUserDefinedReduction(/*CGF=*/nullptr, D);
  1093. return UDRMap.lookup(D);
  1094. }
  1095. namespace {
  1096. // Temporary RAII solution to perform a push/pop stack event on the OpenMP IR
  1097. // Builder if one is present.
  1098. struct PushAndPopStackRAII {
  1099. PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
  1100. bool HasCancel, llvm::omp::Directive Kind)
  1101. : OMPBuilder(OMPBuilder) {
  1102. if (!OMPBuilder)
  1103. return;
1104. // The following callback is the crucial part of clang's cleanup process.
  1105. //
  1106. // NOTE:
  1107. // Once the OpenMPIRBuilder is used to create parallel regions (and
  1108. // similar), the cancellation destination (Dest below) is determined via
  1109. // IP. That means if we have variables to finalize we split the block at IP,
  1110. // use the new block (=BB) as destination to build a JumpDest (via
  1111. // getJumpDestInCurrentScope(BB)) which then is fed to
1112. // EmitBranchThroughCleanup. Furthermore, there will no longer be a need
1113. // to push & pop a FinalizationInfo object.
  1114. // The FiniCB will still be needed but at the point where the
  1115. // OpenMPIRBuilder is asked to construct a parallel (or similar) construct.
  1116. auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
  1117. assert(IP.getBlock()->end() == IP.getPoint() &&
  1118. "Clang CG should cause non-terminated block!");
  1119. CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  1120. CGF.Builder.restoreIP(IP);
  1121. CodeGenFunction::JumpDest Dest =
  1122. CGF.getOMPCancelDestination(OMPD_parallel);
  1123. CGF.EmitBranchThroughCleanup(Dest);
  1124. };
  1125. // TODO: Remove this once we emit parallel regions through the
  1126. // OpenMPIRBuilder as it can do this setup internally.
  1127. llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
  1128. OMPBuilder->pushFinalizationCB(std::move(FI));
  1129. }
  1130. ~PushAndPopStackRAII() {
  1131. if (OMPBuilder)
  1132. OMPBuilder->popFinalizationCB();
  1133. }
  1134. llvm::OpenMPIRBuilder *OMPBuilder;
  1135. };
  1136. } // namespace
  1137. static llvm::Function *emitParallelOrTeamsOutlinedFunction(
  1138. CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
  1139. const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
  1140. const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  1141. assert(ThreadIDVar->getType()->isPointerType() &&
  1142. "thread id variable must be of type kmp_int32 *");
  1143. CodeGenFunction CGF(CGM, true);
  1144. bool HasCancel = false;
  1145. if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
  1146. HasCancel = OPD->hasCancel();
  1147. else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
  1148. HasCancel = OPD->hasCancel();
  1149. else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
  1150. HasCancel = OPSD->hasCancel();
  1151. else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
  1152. HasCancel = OPFD->hasCancel();
  1153. else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
  1154. HasCancel = OPFD->hasCancel();
  1155. else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
  1156. HasCancel = OPFD->hasCancel();
  1157. else if (const auto *OPFD =
  1158. dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
  1159. HasCancel = OPFD->hasCancel();
  1160. else if (const auto *OPFD =
  1161. dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
  1162. HasCancel = OPFD->hasCancel();
  1163. // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
  1164. // parallel region to make cancellation barriers work properly.
  1165. llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  1166. PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
  1167. CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
  1168. HasCancel, OutlinedHelperName);
  1169. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  1170. return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
  1171. }
  1172. llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
  1173. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  1174. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  1175. const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
  1176. return emitParallelOrTeamsOutlinedFunction(
  1177. CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
  1178. }
  1179. llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
  1180. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  1181. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  1182. const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
  1183. return emitParallelOrTeamsOutlinedFunction(
  1184. CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
  1185. }
  1186. llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
  1187. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  1188. const VarDecl *PartIDVar, const VarDecl *TaskTVar,
  1189. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
  1190. bool Tied, unsigned &NumberOfParts) {
  1191. auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
  1192. PrePostActionTy &) {
  1193. llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
  1194. llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
  1195. llvm::Value *TaskArgs[] = {
  1196. UpLoc, ThreadID,
  1197. CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
  1198. TaskTVar->getType()->castAs<PointerType>())
  1199. .getPointer(CGF)};
  1200. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  1201. CGM.getModule(), OMPRTL___kmpc_omp_task),
  1202. TaskArgs);
  1203. };
  1204. CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
  1205. UntiedCodeGen);
  1206. CodeGen.setAction(Action);
  1207. assert(!ThreadIDVar->getType()->isPointerType() &&
  1208. "thread id variable must be of type kmp_int32 for tasks");
  1209. const OpenMPDirectiveKind Region =
  1210. isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
  1211. : OMPD_task;
  1212. const CapturedStmt *CS = D.getCapturedStmt(Region);
  1213. bool HasCancel = false;
  1214. if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
  1215. HasCancel = TD->hasCancel();
  1216. else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
  1217. HasCancel = TD->hasCancel();
  1218. else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
  1219. HasCancel = TD->hasCancel();
  1220. else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
  1221. HasCancel = TD->hasCancel();
  1222. CodeGenFunction CGF(CGM, true);
  1223. CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
  1224. InnermostKind, HasCancel, Action);
  1225. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  1226. llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
  1227. if (!Tied)
  1228. NumberOfParts = Action.getNumberOfParts();
  1229. return Res;
  1230. }
  1231. static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
  1232. const RecordDecl *RD, const CGRecordLayout &RL,
  1233. ArrayRef<llvm::Constant *> Data) {
  1234. llvm::StructType *StructTy = RL.getLLVMType();
  1235. unsigned PrevIdx = 0;
  1236. ConstantInitBuilder CIBuilder(CGM);
  1237. auto DI = Data.begin();
  1238. for (const FieldDecl *FD : RD->fields()) {
  1239. unsigned Idx = RL.getLLVMFieldNo(FD);
  1240. // Fill the alignment.
  1241. for (unsigned I = PrevIdx; I < Idx; ++I)
  1242. Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
  1243. PrevIdx = Idx + 1;
  1244. Fields.add(*DI);
  1245. ++DI;
  1246. }
  1247. }
  1248. template <class... As>
  1249. static llvm::GlobalVariable *
  1250. createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
  1251. ArrayRef<llvm::Constant *> Data, const Twine &Name,
  1252. As &&... Args) {
  1253. const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
  1254. const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
  1255. ConstantInitBuilder CIBuilder(CGM);
  1256. ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
  1257. buildStructValue(Fields, CGM, RD, RL, Data);
  1258. return Fields.finishAndCreateGlobal(
  1259. Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
  1260. std::forward<As>(Args)...);
  1261. }
  1262. template <typename T>
  1263. static void
  1264. createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
  1265. ArrayRef<llvm::Constant *> Data,
  1266. T &Parent) {
  1267. const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
  1268. const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
  1269. ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
  1270. buildStructValue(Fields, CGM, RD, RL, Data);
  1271. Fields.finishAndAddTo(Parent);
  1272. }
  1273. void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
  1274. bool AtCurrentPoint) {
  1275. auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  1276. assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
  1277. llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
  1278. if (AtCurrentPoint) {
  1279. Elem.second.ServiceInsertPt = new llvm::BitCastInst(
  1280. Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
  1281. } else {
  1282. Elem.second.ServiceInsertPt =
  1283. new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
  1284. Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
  1285. }
  1286. }
  1287. void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
  1288. auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  1289. if (Elem.second.ServiceInsertPt) {
  1290. llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
  1291. Elem.second.ServiceInsertPt = nullptr;
  1292. Ptr->eraseFromParent();
  1293. }
  1294. }
  1295. static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
  1296. SourceLocation Loc,
  1297. SmallString<128> &Buffer) {
  1298. llvm::raw_svector_ostream OS(Buffer);
  1299. // Build debug location
  1300. PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
  1301. OS << ";" << PLoc.getFilename() << ";";
  1302. if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
  1303. OS << FD->getQualifiedNameAsString();
  1304. OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
  1305. return OS.str();
  1306. }
  1307. llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
  1308. SourceLocation Loc,
  1309. unsigned Flags) {
  1310. uint32_t SrcLocStrSize;
  1311. llvm::Constant *SrcLocStr;
  1312. if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
  1313. Loc.isInvalid()) {
  1314. SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  1315. } else {
  1316. std::string FunctionName;
  1317. if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
  1318. FunctionName = FD->getQualifiedNameAsString();
  1319. PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
  1320. const char *FileName = PLoc.getFilename();
  1321. unsigned Line = PLoc.getLine();
  1322. unsigned Column = PLoc.getColumn();
  1323. SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
  1324. Column, SrcLocStrSize);
  1325. }
  1326. unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
  1327. return OMPBuilder.getOrCreateIdent(
  1328. SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
  1329. }
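// Rough sketch of the lookup order used below for the thread id:
//   1. when LangOpts.OpenMPIRBuilder is set, delegate to the OpenMPIRBuilder;
//   2. reuse a value already cached in OpenMPLocThreadIDMap for this function;
//   3. inside an outlined region, load the gtid from the thread-id parameter;
//   4. otherwise emit a call to __kmpc_global_thread_num at the service
//      insertion point and cache the result.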
  1330. llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
  1331. SourceLocation Loc) {
  1332. assert(CGF.CurFn && "No function in current CodeGenFunction.");
  1333. // If the OpenMPIRBuilder is used we need to use it for all thread id calls as
  1334. // the clang invariants used below might be broken.
  1335. if (CGM.getLangOpts().OpenMPIRBuilder) {
  1336. SmallString<128> Buffer;
  1337. OMPBuilder.updateToLocation(CGF.Builder.saveIP());
  1338. uint32_t SrcLocStrSize;
  1339. auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
  1340. getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
  1341. return OMPBuilder.getOrCreateThreadID(
  1342. OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
  1343. }
  1344. llvm::Value *ThreadID = nullptr;
  1345. // Check whether we've already cached a load of the thread id in this
  1346. // function.
  1347. auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  1348. if (I != OpenMPLocThreadIDMap.end()) {
  1349. ThreadID = I->second.ThreadID;
  1350. if (ThreadID != nullptr)
  1351. return ThreadID;
  1352. }
// If exceptions are enabled, do not use the parameter to avoid a possible
// crash.
  1354. if (auto *OMPRegionInfo =
  1355. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
  1356. if (OMPRegionInfo->getThreadIDVariable()) {
// Check if this is an outlined function with the thread id passed as an
// argument.
  1358. LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
  1359. llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
  1360. if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
  1361. !CGF.getLangOpts().CXXExceptions ||
  1362. CGF.Builder.GetInsertBlock() == TopBlock ||
  1363. !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
  1364. cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
  1365. TopBlock ||
  1366. cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
  1367. CGF.Builder.GetInsertBlock()) {
  1368. ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
// If the value was loaded in the entry block, cache it and use it everywhere
// in the function.
  1371. if (CGF.Builder.GetInsertBlock() == TopBlock) {
  1372. auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  1373. Elem.second.ThreadID = ThreadID;
  1374. }
  1375. return ThreadID;
  1376. }
  1377. }
  1378. }
// This is not an outlined function region - need to call kmp_int32
// __kmpc_global_thread_num(ident_t *loc).
// Generate the thread id value and cache it for use across the function.
  1383. auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  1384. if (!Elem.second.ServiceInsertPt)
  1385. setLocThreadIdInsertPt(CGF);
  1386. CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  1387. CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
  1388. llvm::CallInst *Call = CGF.Builder.CreateCall(
  1389. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  1390. OMPRTL___kmpc_global_thread_num),
  1391. emitUpdateLocation(CGF, Loc));
  1392. Call->setCallingConv(CGF.getRuntimeCC());
  1393. Elem.second.ThreadID = Call;
  1394. return Call;
  1395. }
  1396. void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
  1397. assert(CGF.CurFn && "No function in current CodeGenFunction.");
  1398. if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
  1399. clearLocThreadIdInsertPt(CGF);
  1400. OpenMPLocThreadIDMap.erase(CGF.CurFn);
  1401. }
  1402. if (FunctionUDRMap.count(CGF.CurFn) > 0) {
  1403. for(const auto *D : FunctionUDRMap[CGF.CurFn])
  1404. UDRMap.erase(D);
  1405. FunctionUDRMap.erase(CGF.CurFn);
  1406. }
  1407. auto I = FunctionUDMMap.find(CGF.CurFn);
  1408. if (I != FunctionUDMMap.end()) {
  1409. for(const auto *D : I->second)
  1410. UDMMap.erase(D);
  1411. FunctionUDMMap.erase(I);
  1412. }
  1413. LastprivateConditionalToTypes.erase(CGF.CurFn);
  1414. FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
  1415. }
  1416. llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
  1417. return OMPBuilder.IdentPtr;
  1418. }
  1419. llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
  1420. if (!Kmpc_MicroTy) {
  1421. // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
  1422. llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
  1423. llvm::PointerType::getUnqual(CGM.Int32Ty)};
  1424. Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
  1425. }
  1426. return llvm::PointerType::getUnqual(Kmpc_MicroTy);
  1427. }
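// Illustrative declaration assembled below (the 4/4u/8/8u and distribute
// variants differ only in name and induction-variable type):
//   void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 tid,
//                                 kmp_int32 schedtype, kmp_int32 *p_lastiter,
//                                 kmp_int32 *p_lower, kmp_int32 *p_upper,
//                                 kmp_int32 *p_stride, kmp_int32 incr,
//                                 kmp_int32 chunk);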
  1428. llvm::FunctionCallee
  1429. CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned,
  1430. bool IsGPUDistribute) {
  1431. assert((IVSize == 32 || IVSize == 64) &&
  1432. "IV size is not compatible with the omp runtime");
  1433. StringRef Name;
  1434. if (IsGPUDistribute)
  1435. Name = IVSize == 32 ? (IVSigned ? "__kmpc_distribute_static_init_4"
  1436. : "__kmpc_distribute_static_init_4u")
  1437. : (IVSigned ? "__kmpc_distribute_static_init_8"
  1438. : "__kmpc_distribute_static_init_8u");
  1439. else
  1440. Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
  1441. : "__kmpc_for_static_init_4u")
  1442. : (IVSigned ? "__kmpc_for_static_init_8"
  1443. : "__kmpc_for_static_init_8u");
  1444. llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  1445. auto *PtrTy = llvm::PointerType::getUnqual(ITy);
  1446. llvm::Type *TypeParams[] = {
  1447. getIdentTyPointerTy(), // loc
  1448. CGM.Int32Ty, // tid
  1449. CGM.Int32Ty, // schedtype
  1450. llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
  1451. PtrTy, // p_lower
  1452. PtrTy, // p_upper
  1453. PtrTy, // p_stride
  1454. ITy, // incr
  1455. ITy // chunk
  1456. };
  1457. auto *FnTy =
  1458. llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
  1459. return CGM.CreateRuntimeFunction(FnTy, Name);
  1460. }
  1461. llvm::FunctionCallee
  1462. CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
  1463. assert((IVSize == 32 || IVSize == 64) &&
  1464. "IV size is not compatible with the omp runtime");
  1465. StringRef Name =
  1466. IVSize == 32
  1467. ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
  1468. : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
  1469. llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  1470. llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
  1471. CGM.Int32Ty, // tid
  1472. CGM.Int32Ty, // schedtype
  1473. ITy, // lower
  1474. ITy, // upper
  1475. ITy, // stride
  1476. ITy // chunk
  1477. };
  1478. auto *FnTy =
  1479. llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
  1480. return CGM.CreateRuntimeFunction(FnTy, Name);
  1481. }
  1482. llvm::FunctionCallee
  1483. CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
  1484. assert((IVSize == 32 || IVSize == 64) &&
  1485. "IV size is not compatible with the omp runtime");
  1486. StringRef Name =
  1487. IVSize == 32
  1488. ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
  1489. : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
  1490. llvm::Type *TypeParams[] = {
  1491. getIdentTyPointerTy(), // loc
  1492. CGM.Int32Ty, // tid
  1493. };
  1494. auto *FnTy =
  1495. llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
  1496. return CGM.CreateRuntimeFunction(FnTy, Name);
  1497. }
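// Illustrative declaration assembled below (the 4/4u/8/8u variants differ only
// in name and induction-variable type):
//   kmp_int32 __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 tid,
//                                    kmp_int32 *p_lastiter, kmp_int32 *p_lower,
//                                    kmp_int32 *p_upper, kmp_int32 *p_stride);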
  1498. llvm::FunctionCallee
  1499. CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
  1500. assert((IVSize == 32 || IVSize == 64) &&
  1501. "IV size is not compatible with the omp runtime");
  1502. StringRef Name =
  1503. IVSize == 32
  1504. ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
  1505. : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
  1506. llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  1507. auto *PtrTy = llvm::PointerType::getUnqual(ITy);
  1508. llvm::Type *TypeParams[] = {
  1509. getIdentTyPointerTy(), // loc
  1510. CGM.Int32Ty, // tid
  1511. llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
  1512. PtrTy, // p_lower
  1513. PtrTy, // p_upper
  1514. PtrTy // p_stride
  1515. };
  1516. auto *FnTy =
  1517. llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
  1518. return CGM.CreateRuntimeFunction(FnTy, Name);
  1519. }
/// Obtain information that uniquely identifies a target entry. This
/// consists of the file and device IDs as well as the line number associated
/// with the relevant entry source location.
  1523. static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
  1524. unsigned &DeviceID, unsigned &FileID,
  1525. unsigned &LineNum) {
  1526. SourceManager &SM = C.getSourceManager();
// The loc should always be valid and have a file ID (the user cannot use
// #pragma directives in macros).
  1529. assert(Loc.isValid() && "Source location is expected to be always valid.");
  1530. PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  1531. assert(PLoc.isValid() && "Source location is expected to be always valid.");
  1532. llvm::sys::fs::UniqueID ID;
  1533. if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
  1534. PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
  1535. assert(PLoc.isValid() && "Source location is expected to be always valid.");
  1536. if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
  1537. SM.getDiagnostics().Report(diag::err_cannot_open_file)
  1538. << PLoc.getFilename() << EC.message();
  1539. }
  1540. DeviceID = ID.getDevice();
  1541. FileID = ID.getFile();
  1542. LineNum = PLoc.getLine();
  1543. }
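// Informal note: for "declare target link" variables (and "to" variables under
// unified shared memory), the function below returns the address of a weak
// reference pointer named roughly <mangled-name>[_<FileID>]_decl_tgt_ref_ptr
// rather than the variable itself; on the host the pointer is initialized to
// the original global.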
  1544. Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
  1545. if (CGM.getLangOpts().OpenMPSimd)
  1546. return Address::invalid();
  1547. llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
  1548. OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  1549. if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
  1550. (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  1551. HasRequiresUnifiedSharedMemory))) {
  1552. SmallString<64> PtrName;
  1553. {
  1554. llvm::raw_svector_ostream OS(PtrName);
  1555. OS << CGM.getMangledName(GlobalDecl(VD));
  1556. if (!VD->isExternallyVisible()) {
  1557. unsigned DeviceID, FileID, Line;
  1558. getTargetEntryUniqueInfo(CGM.getContext(),
  1559. VD->getCanonicalDecl()->getBeginLoc(),
  1560. DeviceID, FileID, Line);
  1561. OS << llvm::format("_%x", FileID);
  1562. }
  1563. OS << "_decl_tgt_ref_ptr";
  1564. }
  1565. llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
  1566. if (!Ptr) {
  1567. QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
  1568. Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
  1569. PtrName);
  1570. auto *GV = cast<llvm::GlobalVariable>(Ptr);
  1571. GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
  1572. if (!CGM.getLangOpts().OpenMPIsDevice)
  1573. GV->setInitializer(CGM.GetAddrOfGlobal(VD));
  1574. registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
  1575. }
  1576. return Address(Ptr, CGM.getContext().getDeclAlign(VD));
  1577. }
  1578. return Address::invalid();
  1579. }
  1580. llvm::Constant *
  1581. CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
  1582. assert(!CGM.getLangOpts().OpenMPUseTLS ||
  1583. !CGM.getContext().getTargetInfo().isTLSSupported());
// Look up the entry, lazily creating it if necessary.
  1585. std::string Suffix = getName({"cache", ""});
  1586. return getOrCreateInternalVariable(
  1587. CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
  1588. }
  1589. Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
  1590. const VarDecl *VD,
  1591. Address VDAddr,
  1592. SourceLocation Loc) {
  1593. if (CGM.getLangOpts().OpenMPUseTLS &&
  1594. CGM.getContext().getTargetInfo().isTLSSupported())
  1595. return VDAddr;
  1596. llvm::Type *VarTy = VDAddr.getElementType();
  1597. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  1598. CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
  1599. CGM.Int8PtrTy),
  1600. CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
  1601. getOrCreateThreadPrivateCache(VD)};
  1602. return Address(CGF.EmitRuntimeCall(
  1603. OMPBuilder.getOrCreateRuntimeFunction(
  1604. CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
  1605. Args),
  1606. VDAddr.getAlignment());
  1607. }
  1608. void CGOpenMPRuntime::emitThreadPrivateVarInit(
  1609. CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
  1610. llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
// Call kmp_int32 __kmpc_global_thread_num(&loc) to initialize the OpenMP
// runtime library.
  1613. llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
  1614. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  1615. CGM.getModule(), OMPRTL___kmpc_global_thread_num),
  1616. OMPLoc);
  1617. // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
  1618. // to register constructor/destructor for variable.
  1619. llvm::Value *Args[] = {
  1620. OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
  1621. Ctor, CopyCtor, Dtor};
  1622. CGF.EmitRuntimeCall(
  1623. OMPBuilder.getOrCreateRuntimeFunction(
  1624. CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
  1625. Args);
  1626. }
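// Rough shape of what emitThreadPrivateVarDefinition produces when no TLS is
// available (a sketch; the ctor/dtor are only emitted when actually needed):
//   void *__kmpc_global_ctor_(void *dst);   // re-runs the initializer
//   void __kmpc_global_dtor_(void *dst);    // runs the destructor
//   void __omp_threadprivate_init_(void) {
//     __kmpc_global_thread_num(&loc);
//     __kmpc_threadprivate_register(&loc, &var, ctor, /*cctor=*/NULL, dtor);
//   }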
  1627. llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
  1628. const VarDecl *VD, Address VDAddr, SourceLocation Loc,
  1629. bool PerformInit, CodeGenFunction *CGF) {
  1630. if (CGM.getLangOpts().OpenMPUseTLS &&
  1631. CGM.getContext().getTargetInfo().isTLSSupported())
  1632. return nullptr;
  1633. VD = VD->getDefinition(CGM.getContext());
  1634. if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
  1635. QualType ASTTy = VD->getType();
  1636. llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
  1637. const Expr *Init = VD->getAnyInitializer();
  1638. if (CGM.getLangOpts().CPlusPlus && PerformInit) {
// Generate a function that re-emits the declaration's initializer into the
// threadprivate copy of the variable VD.
  1641. CodeGenFunction CtorCGF(CGM);
  1642. FunctionArgList Args;
  1643. ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
  1644. /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
  1645. ImplicitParamDecl::Other);
  1646. Args.push_back(&Dst);
  1647. const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
  1648. CGM.getContext().VoidPtrTy, Args);
  1649. llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  1650. std::string Name = getName({"__kmpc_global_ctor_", ""});
  1651. llvm::Function *Fn =
  1652. CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
  1653. CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
  1654. Args, Loc, Loc);
  1655. llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
  1656. CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
  1657. CGM.getContext().VoidPtrTy, Dst.getLocation());
  1658. Address Arg = Address(ArgVal, VDAddr.getAlignment());
  1659. Arg = CtorCGF.Builder.CreateElementBitCast(
  1660. Arg, CtorCGF.ConvertTypeForMem(ASTTy));
  1661. CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
  1662. /*IsInitializer=*/true);
  1663. ArgVal = CtorCGF.EmitLoadOfScalar(
  1664. CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
  1665. CGM.getContext().VoidPtrTy, Dst.getLocation());
  1666. CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
  1667. CtorCGF.FinishFunction();
  1668. Ctor = Fn;
  1669. }
  1670. if (VD->getType().isDestructedType() != QualType::DK_none) {
// Generate a function that emits the destructor call for the threadprivate
// copy of the variable VD.
  1673. CodeGenFunction DtorCGF(CGM);
  1674. FunctionArgList Args;
  1675. ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
  1676. /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
  1677. ImplicitParamDecl::Other);
  1678. Args.push_back(&Dst);
  1679. const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
  1680. CGM.getContext().VoidTy, Args);
  1681. llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  1682. std::string Name = getName({"__kmpc_global_dtor_", ""});
  1683. llvm::Function *Fn =
  1684. CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
  1685. auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
  1686. DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
  1687. Loc, Loc);
  1688. // Create a scope with an artificial location for the body of this function.
  1689. auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
  1690. llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
  1691. DtorCGF.GetAddrOfLocalVar(&Dst),
  1692. /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
  1693. DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
  1694. DtorCGF.getDestroyer(ASTTy.isDestructedType()),
  1695. DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
  1696. DtorCGF.FinishFunction();
  1697. Dtor = Fn;
  1698. }
// Do not emit the init function if it is not required.
  1700. if (!Ctor && !Dtor)
  1701. return nullptr;
  1702. llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
  1703. auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
  1704. /*isVarArg=*/false)
  1705. ->getPointerTo();
// Copying constructor for the threadprivate variable.
// Must be NULL - reserved by the runtime, which currently requires that this
// parameter always be NULL; otherwise it fires an assertion.
  1709. CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
  1710. if (Ctor == nullptr) {
  1711. auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
  1712. /*isVarArg=*/false)
  1713. ->getPointerTo();
  1714. Ctor = llvm::Constant::getNullValue(CtorTy);
  1715. }
  1716. if (Dtor == nullptr) {
  1717. auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
  1718. /*isVarArg=*/false)
  1719. ->getPointerTo();
  1720. Dtor = llvm::Constant::getNullValue(DtorTy);
  1721. }
  1722. if (!CGF) {
  1723. auto *InitFunctionTy =
  1724. llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
  1725. std::string Name = getName({"__omp_threadprivate_init_", ""});
  1726. llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
  1727. InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
  1728. CodeGenFunction InitCGF(CGM);
  1729. FunctionArgList ArgList;
  1730. InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
  1731. CGM.getTypes().arrangeNullaryFunction(), ArgList,
  1732. Loc, Loc);
  1733. emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  1734. InitCGF.FinishFunction();
  1735. return InitFunction;
  1736. }
  1737. emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  1738. }
  1739. return nullptr;
  1740. }
  1741. bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
  1742. llvm::GlobalVariable *Addr,
  1743. bool PerformInit) {
  1744. if (CGM.getLangOpts().OMPTargetTriples.empty() &&
  1745. !CGM.getLangOpts().OpenMPIsDevice)
  1746. return false;
  1747. Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
  1748. OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  1749. if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
  1750. (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  1751. HasRequiresUnifiedSharedMemory))
  1752. return CGM.getLangOpts().OpenMPIsDevice;
  1753. VD = VD->getDefinition(CGM.getContext());
  1754. assert(VD && "Unknown VarDecl");
  1755. if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
  1756. return CGM.getLangOpts().OpenMPIsDevice;
  1757. QualType ASTTy = VD->getType();
  1758. SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
// Produce the unique prefix to identify the new target regions. We use
// the source location of the variable declaration, which we know does not
// conflict with any target region.
  1762. unsigned DeviceID;
  1763. unsigned FileID;
  1764. unsigned Line;
  1765. getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
  1766. SmallString<128> Buffer, Out;
  1767. {
  1768. llvm::raw_svector_ostream OS(Buffer);
  1769. OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
  1770. << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
  1771. }
  1772. const Expr *Init = VD->getAnyInitializer();
  1773. if (CGM.getLangOpts().CPlusPlus && PerformInit) {
  1774. llvm::Constant *Ctor;
  1775. llvm::Constant *ID;
  1776. if (CGM.getLangOpts().OpenMPIsDevice) {
// Generate a function that re-emits the declaration's initializer into
// the device copy of the variable VD.
  1779. CodeGenFunction CtorCGF(CGM);
  1780. const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  1781. llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  1782. llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
  1783. FTy, Twine(Buffer, "_ctor"), FI, Loc);
  1784. auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
  1785. CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
  1786. FunctionArgList(), Loc, Loc);
  1787. auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
  1788. CtorCGF.EmitAnyExprToMem(Init,
  1789. Address(Addr, CGM.getContext().getDeclAlign(VD)),
  1790. Init->getType().getQualifiers(),
  1791. /*IsInitializer=*/true);
  1792. CtorCGF.FinishFunction();
  1793. Ctor = Fn;
  1794. ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
  1795. CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
  1796. } else {
  1797. Ctor = new llvm::GlobalVariable(
  1798. CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
  1799. llvm::GlobalValue::PrivateLinkage,
  1800. llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
  1801. ID = Ctor;
  1802. }
  1803. // Register the information for the entry associated with the constructor.
  1804. Out.clear();
  1805. OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
  1806. DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
  1807. ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
  1808. }
  1809. if (VD->getType().isDestructedType() != QualType::DK_none) {
  1810. llvm::Constant *Dtor;
  1811. llvm::Constant *ID;
  1812. if (CGM.getLangOpts().OpenMPIsDevice) {
// Generate a function that emits the destructor call for the device
// copy of the variable VD.
  1815. CodeGenFunction DtorCGF(CGM);
  1816. const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  1817. llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  1818. llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
  1819. FTy, Twine(Buffer, "_dtor"), FI, Loc);
  1820. auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
  1821. DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
  1822. FunctionArgList(), Loc, Loc);
  1823. // Create a scope with an artificial location for the body of this
  1824. // function.
  1825. auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
  1826. DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
  1827. ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
  1828. DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
  1829. DtorCGF.FinishFunction();
  1830. Dtor = Fn;
  1831. ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
  1832. CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
  1833. } else {
  1834. Dtor = new llvm::GlobalVariable(
  1835. CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
  1836. llvm::GlobalValue::PrivateLinkage,
  1837. llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
  1838. ID = Dtor;
  1839. }
  1840. // Register the information for the entry associated with the destructor.
  1841. Out.clear();
  1842. OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
  1843. DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
  1844. ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
  1845. }
  1846. return CGM.getLangOpts().OpenMPIsDevice;
  1847. }
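// Informal sketch of the non-TLS path below: the artificial variable is backed
// by an internal global plus a cache, and each access goes through
//   __kmpc_threadprivate_cached(&loc, gtid, &global, sizeof(VarType), &cache)
// with the returned pointer cast back to the variable's type.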
  1848. Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
  1849. QualType VarType,
  1850. StringRef Name) {
  1851. std::string Suffix = getName({"artificial", ""});
  1852. llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
  1853. llvm::GlobalVariable *GAddr =
  1854. getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
  1855. if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
  1856. CGM.getTarget().isTLSSupported()) {
  1857. GAddr->setThreadLocal(/*Val=*/true);
  1858. return Address(GAddr, GAddr->getValueType(),
  1859. CGM.getContext().getTypeAlignInChars(VarType));
  1860. }
  1861. std::string CacheSuffix = getName({"cache", ""});
  1862. llvm::Value *Args[] = {
  1863. emitUpdateLocation(CGF, SourceLocation()),
  1864. getThreadID(CGF, SourceLocation()),
  1865. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
  1866. CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
  1867. /*isSigned=*/false),
  1868. getOrCreateInternalVariable(
  1869. CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
  1870. return Address(
  1871. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  1872. CGF.EmitRuntimeCall(
  1873. OMPBuilder.getOrCreateRuntimeFunction(
  1874. CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
  1875. Args),
  1876. VarLVType->getPointerTo(/*AddrSpace=*/0)),
  1877. CGM.getContext().getTypeAlignInChars(VarType));
  1878. }
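// Rough control-flow shape emitted by emitIfClause when the condition does not
// constant-fold:
//   br %cond, label %omp_if.then, label %omp_if.else
// omp_if.then:  ThenGen(); br label %omp_if.end
// omp_if.else:  ElseGen(); br label %omp_if.end
// omp_if.end: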
  1879. void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
  1880. const RegionCodeGenTy &ThenGen,
  1881. const RegionCodeGenTy &ElseGen) {
  1882. CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
  1883. // If the condition constant folds and can be elided, try to avoid emitting
  1884. // the condition and the dead arm of the if/else.
  1885. bool CondConstant;
  1886. if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
  1887. if (CondConstant)
  1888. ThenGen(CGF);
  1889. else
  1890. ElseGen(CGF);
  1891. return;
  1892. }
  1893. // Otherwise, the condition did not fold, or we couldn't elide it. Just
  1894. // emit the conditional branch.
  1895. llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
  1896. llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
  1897. llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
  1898. CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
  1899. // Emit the 'then' code.
  1900. CGF.EmitBlock(ThenBlock);
  1901. ThenGen(CGF);
  1902. CGF.EmitBranch(ContBlock);
  1903. // Emit the 'else' code if present.
// There is no need to emit a line number for the unconditional branch.
  1905. (void)ApplyDebugLocation::CreateEmpty(CGF);
  1906. CGF.EmitBlock(ElseBlock);
  1907. ElseGen(CGF);
// There is no need to emit a line number for the unconditional branch.
  1909. (void)ApplyDebugLocation::CreateEmpty(CGF);
  1910. CGF.EmitBranch(ContBlock);
  1911. // Emit the continuation block for code after the if.
  1912. CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
  1913. }
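// Sketch of the two paths emitted for a parallel region with an if clause:
//   if (cond) {
//     __kmpc_fork_call(&loc, nargs, microtask, captured vars...);
//   } else {
//     __kmpc_serialized_parallel(&loc, gtid);
//     outlined_fn(&gtid, &.bound.zero.addr, captured vars...);
//     __kmpc_end_serialized_parallel(&loc, gtid);
//   }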
  1914. void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
  1915. llvm::Function *OutlinedFn,
  1916. ArrayRef<llvm::Value *> CapturedVars,
  1917. const Expr *IfCond,
  1918. llvm::Value *NumThreads) {
  1919. if (!CGF.HaveInsertPoint())
  1920. return;
  1921. llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  1922. auto &M = CGM.getModule();
  1923. auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
  1924. this](CodeGenFunction &CGF, PrePostActionTy &) {
  1925. // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
  1926. CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
  1927. llvm::Value *Args[] = {
  1928. RTLoc,
  1929. CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
  1930. CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
  1931. llvm::SmallVector<llvm::Value *, 16> RealArgs;
  1932. RealArgs.append(std::begin(Args), std::end(Args));
  1933. RealArgs.append(CapturedVars.begin(), CapturedVars.end());
  1934. llvm::FunctionCallee RTLFn =
  1935. OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
  1936. CGF.EmitRuntimeCall(RTLFn, RealArgs);
  1937. };
  1938. auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
  1939. this](CodeGenFunction &CGF, PrePostActionTy &) {
  1940. CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
  1941. llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
  1942. // Build calls:
  1943. // __kmpc_serialized_parallel(&Loc, GTid);
  1944. llvm::Value *Args[] = {RTLoc, ThreadID};
  1945. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  1946. M, OMPRTL___kmpc_serialized_parallel),
  1947. Args);
  1948. // OutlinedFn(&GTid, &zero_bound, CapturedStruct);
  1949. Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
  1950. Address ZeroAddrBound =
  1951. CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
  1952. /*Name=*/".bound.zero.addr");
  1953. CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound);
  1954. llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
// The thread id for serialized parallel regions is 0.
  1956. OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
  1957. OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
  1958. OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
// Ensure we do not inline the function. This is trivially true for the ones
// passed to __kmpc_fork_call, but the ones called in serialized regions
// could be inlined. This is not perfect, but it is closer to the invariant
// we want, namely, that every data environment starts with a new function.
// TODO: We should pass the if condition to the runtime function and do the
// handling there. Much cleaner code.
  1965. OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline);
  1966. OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
  1967. RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
  1968. // __kmpc_end_serialized_parallel(&Loc, GTid);
  1969. llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
  1970. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  1971. M, OMPRTL___kmpc_end_serialized_parallel),
  1972. EndArgs);
  1973. };
  1974. if (IfCond) {
  1975. emitIfClause(CGF, IfCond, ThenGen, ElseGen);
  1976. } else {
  1977. RegionCodeGenTy ThenRCG(ThenGen);
  1978. ThenRCG(CGF);
  1979. }
  1980. }
// If we're inside an (outlined) parallel region, use the region info's
// thread-ID variable (it is passed as the first argument of the outlined
// function, as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
// region but in a regular serial code region, get the thread ID by calling
// kmp_int32 __kmpc_global_thread_num(ident_t *loc), stash it in a temporary,
// and return the address of that temp.
  1987. Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
  1988. SourceLocation Loc) {
  1989. if (auto *OMPRegionInfo =
  1990. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
  1991. if (OMPRegionInfo->getThreadIDVariable())
  1992. return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
  1993. llvm::Value *ThreadID = getThreadID(CGF, Loc);
  1994. QualType Int32Ty =
  1995. CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
  1996. Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
  1997. CGF.EmitStoreOfScalar(ThreadID,
  1998. CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
  1999. return ThreadIDTemp;
  2000. }
  2001. llvm::GlobalVariable *CGOpenMPRuntime::getOrCreateInternalVariable(
  2002. llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
  2003. SmallString<256> Buffer;
  2004. llvm::raw_svector_ostream Out(Buffer);
  2005. Out << Name;
  2006. StringRef RuntimeName = Out.str();
  2007. auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
  2008. if (Elem.second) {
  2009. assert(Elem.second->getType()->isOpaqueOrPointeeTypeMatches(Ty) &&
  2010. "OMP internal variable has different type than requested");
  2011. return &*Elem.second;
  2012. }
  2013. return Elem.second = new llvm::GlobalVariable(
  2014. CGM.getModule(), Ty, /*IsConstant*/ false,
  2015. llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
  2016. Elem.first(), /*InsertBefore=*/nullptr,
  2017. llvm::GlobalValue::NotThreadLocal, AddressSpace);
  2018. }
  2019. llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
  2020. std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
  2021. std::string Name = getName({Prefix, "var"});
  2022. return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
  2023. }
  2024. namespace {
  2025. /// Common pre(post)-action for different OpenMP constructs.
  2026. class CommonActionTy final : public PrePostActionTy {
  2027. llvm::FunctionCallee EnterCallee;
  2028. ArrayRef<llvm::Value *> EnterArgs;
  2029. llvm::FunctionCallee ExitCallee;
  2030. ArrayRef<llvm::Value *> ExitArgs;
  2031. bool Conditional;
  2032. llvm::BasicBlock *ContBlock = nullptr;
  2033. public:
  2034. CommonActionTy(llvm::FunctionCallee EnterCallee,
  2035. ArrayRef<llvm::Value *> EnterArgs,
  2036. llvm::FunctionCallee ExitCallee,
  2037. ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
  2038. : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
  2039. ExitArgs(ExitArgs), Conditional(Conditional) {}
  2040. void Enter(CodeGenFunction &CGF) override {
  2041. llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
  2042. if (Conditional) {
  2043. llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
  2044. auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
  2045. ContBlock = CGF.createBasicBlock("omp_if.end");
  2046. // Generate the branch (If-stmt)
  2047. CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
  2048. CGF.EmitBlock(ThenBlock);
  2049. }
  2050. }
  2051. void Done(CodeGenFunction &CGF) {
// Emit the remaining blocks and branches.
  2053. CGF.EmitBranch(ContBlock);
  2054. CGF.EmitBlock(ContBlock, true);
  2055. }
  2056. void Exit(CodeGenFunction &CGF) override {
  2057. CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  2058. }
  2059. };
  2060. } // anonymous namespace
  2061. void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
  2062. StringRef CriticalName,
  2063. const RegionCodeGenTy &CriticalOpGen,
  2064. SourceLocation Loc, const Expr *Hint) {
  2065. // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
  2066. // CriticalOpGen();
  2067. // __kmpc_end_critical(ident_t *, gtid, Lock);
  2068. // Prepare arguments and build a call to __kmpc_critical
  2069. if (!CGF.HaveInsertPoint())
  2070. return;
  2071. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  2072. getCriticalRegionLock(CriticalName)};
  2073. llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
  2074. std::end(Args));
  2075. if (Hint) {
  2076. EnterArgs.push_back(CGF.Builder.CreateIntCast(
  2077. CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
  2078. }
  2079. CommonActionTy Action(
  2080. OMPBuilder.getOrCreateRuntimeFunction(
  2081. CGM.getModule(),
  2082. Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
  2083. EnterArgs,
  2084. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  2085. OMPRTL___kmpc_end_critical),
  2086. Args);
  2087. CriticalOpGen.setAction(Action);
  2088. emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
  2089. }
  2090. void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
  2091. const RegionCodeGenTy &MasterOpGen,
  2092. SourceLocation Loc) {
  2093. if (!CGF.HaveInsertPoint())
  2094. return;
  2095. // if(__kmpc_master(ident_t *, gtid)) {
  2096. // MasterOpGen();
  2097. // __kmpc_end_master(ident_t *, gtid);
  2098. // }
  2099. // Prepare arguments and build a call to __kmpc_master
  2100. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  2101. CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
  2102. CGM.getModule(), OMPRTL___kmpc_master),
  2103. Args,
  2104. OMPBuilder.getOrCreateRuntimeFunction(
  2105. CGM.getModule(), OMPRTL___kmpc_end_master),
  2106. Args,
  2107. /*Conditional=*/true);
  2108. MasterOpGen.setAction(Action);
  2109. emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
  2110. Action.Done(CGF);
  2111. }
  2112. void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
  2113. const RegionCodeGenTy &MaskedOpGen,
  2114. SourceLocation Loc, const Expr *Filter) {
  2115. if (!CGF.HaveInsertPoint())
  2116. return;
  2117. // if(__kmpc_masked(ident_t *, gtid, filter)) {
  2118. // MaskedOpGen();
// __kmpc_end_masked(ident_t *, gtid);
  2120. // }
  2121. // Prepare arguments and build a call to __kmpc_masked
  2122. llvm::Value *FilterVal = Filter
  2123. ? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
  2124. : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
  2125. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  2126. FilterVal};
  2127. llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
  2128. getThreadID(CGF, Loc)};
  2129. CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
  2130. CGM.getModule(), OMPRTL___kmpc_masked),
  2131. Args,
  2132. OMPBuilder.getOrCreateRuntimeFunction(
  2133. CGM.getModule(), OMPRTL___kmpc_end_masked),
  2134. ArgsEnd,
  2135. /*Conditional=*/true);
  2136. MaskedOpGen.setAction(Action);
  2137. emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
  2138. Action.Done(CGF);
  2139. }
  2140. void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
  2141. SourceLocation Loc) {
  2142. if (!CGF.HaveInsertPoint())
  2143. return;
  2144. if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
  2145. OMPBuilder.createTaskyield(CGF.Builder);
  2146. } else {
  2147. // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
  2148. llvm::Value *Args[] = {
  2149. emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  2150. llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
  2151. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2152. CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
  2153. Args);
  2154. }
  2155. if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
  2156. Region->emitUntiedSwitch(CGF);
  2157. }
  2158. void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
  2159. const RegionCodeGenTy &TaskgroupOpGen,
  2160. SourceLocation Loc) {
  2161. if (!CGF.HaveInsertPoint())
  2162. return;
  2163. // __kmpc_taskgroup(ident_t *, gtid);
  2164. // TaskgroupOpGen();
  2165. // __kmpc_end_taskgroup(ident_t *, gtid);
  2166. // Prepare arguments and build a call to __kmpc_taskgroup
  2167. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  2168. CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
  2169. CGM.getModule(), OMPRTL___kmpc_taskgroup),
  2170. Args,
  2171. OMPBuilder.getOrCreateRuntimeFunction(
  2172. CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
  2173. Args);
  2174. TaskgroupOpGen.setAction(Action);
  2175. emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
  2176. }
  2177. /// Given an array of pointers to variables, project the address of a
  2178. /// given variable.
  2179. static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
  2180. unsigned Index, const VarDecl *Var) {
  2181. // Pull out the pointer to the variable.
  2182. Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
  2183. llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
  2184. Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
  2185. Addr = CGF.Builder.CreateElementBitCast(
  2186. Addr, CGF.ConvertTypeForMem(Var->getType()));
  2187. return Addr;
  2188. }
  2189. static llvm::Value *emitCopyprivateCopyFunction(
  2190. CodeGenModule &CGM, llvm::Type *ArgsType,
  2191. ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
  2192. ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
  2193. SourceLocation Loc) {
  2194. ASTContext &C = CGM.getContext();
  2195. // void copy_func(void *LHSArg, void *RHSArg);
  2196. FunctionArgList Args;
  2197. ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  2198. ImplicitParamDecl::Other);
  2199. ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  2200. ImplicitParamDecl::Other);
  2201. Args.push_back(&LHSArg);
  2202. Args.push_back(&RHSArg);
  2203. const auto &CGFI =
  2204. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  2205. std::string Name =
  2206. CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
  2207. auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
  2208. llvm::GlobalValue::InternalLinkage, Name,
  2209. &CGM.getModule());
  2210. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  2211. Fn->setDoesNotRecurse();
  2212. CodeGenFunction CGF(CGM);
  2213. CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  2214. // Dest = (void*[n])(LHSArg);
  2215. // Src = (void*[n])(RHSArg);
  2216. Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  2217. CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
  2218. ArgsType), CGF.getPointerAlign());
  2219. Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  2220. CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
  2221. ArgsType), CGF.getPointerAlign());
  2222. // *(Type0*)Dst[0] = *(Type0*)Src[0];
  2223. // *(Type1*)Dst[1] = *(Type1*)Src[1];
  2224. // ...
  2225. // *(Typen*)Dst[n] = *(Typen*)Src[n];
  2226. for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
  2227. const auto *DestVar =
  2228. cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
  2229. Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
  2230. const auto *SrcVar =
  2231. cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
  2232. Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
  2233. const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
  2234. QualType Type = VD->getType();
  2235. CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
  2236. }
  2237. CGF.FinishFunction();
  2238. return Fn;
  2239. }
  2240. void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
  2241. const RegionCodeGenTy &SingleOpGen,
  2242. SourceLocation Loc,
  2243. ArrayRef<const Expr *> CopyprivateVars,
  2244. ArrayRef<const Expr *> SrcExprs,
  2245. ArrayRef<const Expr *> DstExprs,
  2246. ArrayRef<const Expr *> AssignmentOps) {
  2247. if (!CGF.HaveInsertPoint())
  2248. return;
  2249. assert(CopyprivateVars.size() == SrcExprs.size() &&
  2250. CopyprivateVars.size() == DstExprs.size() &&
  2251. CopyprivateVars.size() == AssignmentOps.size());
  2252. ASTContext &C = CGM.getContext();
  2253. // int32 did_it = 0;
  2254. // if(__kmpc_single(ident_t *, gtid)) {
  2255. // SingleOpGen();
  2256. // __kmpc_end_single(ident_t *, gtid);
  2257. // did_it = 1;
  2258. // }
  2259. // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  2260. // <copy_func>, did_it);
  2261. Address DidIt = Address::invalid();
  2262. if (!CopyprivateVars.empty()) {
  2263. // int32 did_it = 0;
  2264. QualType KmpInt32Ty =
  2265. C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  2266. DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
  2267. CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
  2268. }
  2269. // Prepare arguments and build a call to __kmpc_single
  2270. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  2271. CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
  2272. CGM.getModule(), OMPRTL___kmpc_single),
  2273. Args,
  2274. OMPBuilder.getOrCreateRuntimeFunction(
  2275. CGM.getModule(), OMPRTL___kmpc_end_single),
  2276. Args,
  2277. /*Conditional=*/true);
  2278. SingleOpGen.setAction(Action);
  2279. emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
  2280. if (DidIt.isValid()) {
  2281. // did_it = 1;
  2282. CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
  2283. }
  2284. Action.Done(CGF);
  2285. // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  2286. // <copy_func>, did_it);
  2287. if (DidIt.isValid()) {
  2288. llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
  2289. QualType CopyprivateArrayTy = C.getConstantArrayType(
  2290. C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
  2291. /*IndexTypeQuals=*/0);
  2292. // Create a list of all private variables for copyprivate.
  2293. Address CopyprivateList =
  2294. CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
  2295. for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
  2296. Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
  2297. CGF.Builder.CreateStore(
  2298. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  2299. CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
  2300. CGF.VoidPtrTy),
  2301. Elem);
  2302. }
// Build the function that copies private values from the single region to
// all other threads in the corresponding parallel region.
  2305. llvm::Value *CpyFn = emitCopyprivateCopyFunction(
  2306. CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
  2307. CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
  2308. llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
  2309. Address CL =
  2310. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
  2311. CGF.VoidPtrTy);
  2312. llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
  2313. llvm::Value *Args[] = {
  2314. emitUpdateLocation(CGF, Loc), // ident_t *<loc>
  2315. getThreadID(CGF, Loc), // i32 <gtid>
  2316. BufSize, // size_t <buf_size>
  2317. CL.getPointer(), // void *<copyprivate list>
  2318. CpyFn, // void (*) (void *, void *) <copy_func>
  2319. DidItVal // i32 did_it
  2320. };
  2321. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2322. CGM.getModule(), OMPRTL___kmpc_copyprivate),
  2323. Args);
  2324. }
  2325. }
  2326. void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
  2327. const RegionCodeGenTy &OrderedOpGen,
  2328. SourceLocation Loc, bool IsThreads) {
  2329. if (!CGF.HaveInsertPoint())
  2330. return;
  2331. // __kmpc_ordered(ident_t *, gtid);
  2332. // OrderedOpGen();
  2333. // __kmpc_end_ordered(ident_t *, gtid);
  2334. // Prepare arguments and build a call to __kmpc_ordered
  2335. if (IsThreads) {
  2336. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  2337. CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
  2338. CGM.getModule(), OMPRTL___kmpc_ordered),
  2339. Args,
  2340. OMPBuilder.getOrCreateRuntimeFunction(
  2341. CGM.getModule(), OMPRTL___kmpc_end_ordered),
  2342. Args);
  2343. OrderedOpGen.setAction(Action);
  2344. emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
  2345. return;
  2346. }
  2347. emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
  2348. }
  2349. unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
  2350. unsigned Flags;
  2351. if (Kind == OMPD_for)
  2352. Flags = OMP_IDENT_BARRIER_IMPL_FOR;
  2353. else if (Kind == OMPD_sections)
  2354. Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
  2355. else if (Kind == OMPD_single)
  2356. Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
  2357. else if (Kind == OMPD_barrier)
  2358. Flags = OMP_IDENT_BARRIER_EXPL;
  2359. else
  2360. Flags = OMP_IDENT_BARRIER_IMPL;
  2361. return Flags;
  2362. }
  2363. void CGOpenMPRuntime::getDefaultScheduleAndChunk(
  2364. CodeGenFunction &CGF, const OMPLoopDirective &S,
  2365. OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
// Check if the loop directive is actually a doacross loop directive. In this
// case choose schedule(static, 1).
  2368. if (llvm::any_of(
  2369. S.getClausesOfKind<OMPOrderedClause>(),
  2370. [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
  2371. ScheduleKind = OMPC_SCHEDULE_static;
  2372. // Chunk size is 1 in this case.
  2373. llvm::APInt ChunkSize(32, 1);
  2374. ChunkExpr = IntegerLiteral::Create(
  2375. CGF.getContext(), ChunkSize,
  2376. CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
  2377. SourceLocation());
  2378. }
  2379. }
  2380. void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
  2381. OpenMPDirectiveKind Kind, bool EmitChecks,
  2382. bool ForceSimpleCall) {
  2383. // Check if we should use the OMPBuilder
  2384. auto *OMPRegionInfo =
  2385. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
  2386. if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
  2387. CGF.Builder.restoreIP(OMPBuilder.createBarrier(
  2388. CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
  2389. return;
  2390. }
  2391. if (!CGF.HaveInsertPoint())
  2392. return;
unsigned Flags = getDefaultFlagsForBarriers(Kind);
// Build a call to __kmpc_cancel_barrier(loc, thread_id) or
// __kmpc_barrier(loc, thread_id).
  2398. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
  2399. getThreadID(CGF, Loc)};
  2400. if (OMPRegionInfo) {
  2401. if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
  2402. llvm::Value *Result = CGF.EmitRuntimeCall(
  2403. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  2404. OMPRTL___kmpc_cancel_barrier),
  2405. Args);
  2406. if (EmitChecks) {
  2407. // if (__kmpc_cancel_barrier()) {
  2408. // exit from construct;
  2409. // }
  2410. llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
  2411. llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
  2412. llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
  2413. CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
  2414. CGF.EmitBlock(ExitBB);
  2415. // exit from construct;
  2416. CodeGenFunction::JumpDest CancelDestination =
  2417. CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
  2418. CGF.EmitBranchThroughCleanup(CancelDestination);
  2419. CGF.EmitBlock(ContBB, /*IsFinished=*/true);
  2420. }
  2421. return;
  2422. }
  2423. }
  2424. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2425. CGM.getModule(), OMPRTL___kmpc_barrier),
  2426. Args);
  2427. }
  2428. /// Map the OpenMP loop schedule to the runtime enumeration.
  2429. static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
  2430. bool Chunked, bool Ordered) {
  2431. switch (ScheduleKind) {
  2432. case OMPC_SCHEDULE_static:
  2433. return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
  2434. : (Ordered ? OMP_ord_static : OMP_sch_static);
  2435. case OMPC_SCHEDULE_dynamic:
  2436. return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
  2437. case OMPC_SCHEDULE_guided:
  2438. return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
  2439. case OMPC_SCHEDULE_runtime:
  2440. return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
  2441. case OMPC_SCHEDULE_auto:
  2442. return Ordered ? OMP_ord_auto : OMP_sch_auto;
  2443. case OMPC_SCHEDULE_unknown:
  2444. assert(!Chunked && "chunk was specified but schedule kind not known");
  2445. return Ordered ? OMP_ord_static : OMP_sch_static;
  2446. }
  2447. llvm_unreachable("Unexpected runtime schedule");
  2448. }
  2449. /// Map the OpenMP distribute schedule to the runtime enumeration.
  2450. static OpenMPSchedType
  2451. getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
// Only static is allowed for dist_schedule.
  2453. return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
  2454. }
  2455. bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
  2456. bool Chunked) const {
  2457. OpenMPSchedType Schedule =
  2458. getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
  2459. return Schedule == OMP_sch_static;
  2460. }
  2461. bool CGOpenMPRuntime::isStaticNonchunked(
  2462. OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
  2463. OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
  2464. return Schedule == OMP_dist_sch_static;
  2465. }
  2466. bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
  2467. bool Chunked) const {
  2468. OpenMPSchedType Schedule =
  2469. getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
  2470. return Schedule == OMP_sch_static_chunked;
  2471. }
  2472. bool CGOpenMPRuntime::isStaticChunked(
  2473. OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
  2474. OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
  2475. return Schedule == OMP_dist_sch_static_chunked;
  2476. }
  2477. bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
  2478. OpenMPSchedType Schedule =
  2479. getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
  2480. assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
  2481. return Schedule != OMP_sch_static;
  2482. }
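// Informal note: the helper below ORs the schedule enumeration with the
// monotonic/nonmonotonic modifier bits; for example, under OpenMP 5.0 a plain
// schedule(dynamic) effectively becomes
//   OMP_sch_dynamic_chunked | OMP_sch_modifier_nonmonotonic.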
  2483. static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
  2484. OpenMPScheduleClauseModifier M1,
  2485. OpenMPScheduleClauseModifier M2) {
  2486. int Modifier = 0;
  2487. switch (M1) {
  2488. case OMPC_SCHEDULE_MODIFIER_monotonic:
  2489. Modifier = OMP_sch_modifier_monotonic;
  2490. break;
  2491. case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
  2492. Modifier = OMP_sch_modifier_nonmonotonic;
  2493. break;
  2494. case OMPC_SCHEDULE_MODIFIER_simd:
  2495. if (Schedule == OMP_sch_static_chunked)
  2496. Schedule = OMP_sch_static_balanced_chunked;
  2497. break;
  2498. case OMPC_SCHEDULE_MODIFIER_last:
  2499. case OMPC_SCHEDULE_MODIFIER_unknown:
  2500. break;
  2501. }
  2502. switch (M2) {
  2503. case OMPC_SCHEDULE_MODIFIER_monotonic:
  2504. Modifier = OMP_sch_modifier_monotonic;
  2505. break;
  2506. case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
  2507. Modifier = OMP_sch_modifier_nonmonotonic;
  2508. break;
  2509. case OMPC_SCHEDULE_MODIFIER_simd:
  2510. if (Schedule == OMP_sch_static_chunked)
  2511. Schedule = OMP_sch_static_balanced_chunked;
  2512. break;
  2513. case OMPC_SCHEDULE_MODIFIER_last:
  2514. case OMPC_SCHEDULE_MODIFIER_unknown:
  2515. break;
  2516. }
2517. // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Description.
  2518. // If the static schedule kind is specified or if the ordered clause is
  2519. // specified, and if the nonmonotonic modifier is not specified, the effect is
  2520. // as if the monotonic modifier is specified. Otherwise, unless the monotonic
  2521. // modifier is specified, the effect is as if the nonmonotonic modifier is
  2522. // specified.
  2523. if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
  2524. if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
  2525. Schedule == OMP_sch_static_balanced_chunked ||
  2526. Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
  2527. Schedule == OMP_dist_sch_static_chunked ||
  2528. Schedule == OMP_dist_sch_static))
  2529. Modifier = OMP_sch_modifier_nonmonotonic;
  2530. }
  2531. return Schedule | Modifier;
  2532. }
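// Illustrative OpenMP 5.0 defaults applied by addMonoNonMonoModifier above
// (sketch): when no modifier is written in the source,
//   schedule(dynamic)            -> nonmonotonic is OR-ed into the schedule word
//   schedule(static, 8)          -> nothing is added (static kinds are excluded)
//   schedule(monotonic: dynamic) -> monotonic is kept regardless of the default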
  2533. void CGOpenMPRuntime::emitForDispatchInit(
  2534. CodeGenFunction &CGF, SourceLocation Loc,
  2535. const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
  2536. bool Ordered, const DispatchRTInput &DispatchValues) {
  2537. if (!CGF.HaveInsertPoint())
  2538. return;
  2539. OpenMPSchedType Schedule = getRuntimeSchedule(
  2540. ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  2541. assert(Ordered ||
  2542. (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
  2543. Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
  2544. Schedule != OMP_sch_static_balanced_chunked));
  2545. // Call __kmpc_dispatch_init(
  2546. // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  2547. // kmp_int[32|64] lower, kmp_int[32|64] upper,
  2548. // kmp_int[32|64] stride, kmp_int[32|64] chunk);
2549. // If the Chunk was not specified in the clause, use the default value 1.
  2550. llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
  2551. : CGF.Builder.getIntN(IVSize, 1);
  2552. llvm::Value *Args[] = {
  2553. emitUpdateLocation(CGF, Loc),
  2554. getThreadID(CGF, Loc),
  2555. CGF.Builder.getInt32(addMonoNonMonoModifier(
  2556. CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
  2557. DispatchValues.LB, // Lower
  2558. DispatchValues.UB, // Upper
  2559. CGF.Builder.getIntN(IVSize, 1), // Stride
  2560. Chunk // Chunk
  2561. };
  2562. CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
  2563. }
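// Illustrative lowering through emitForDispatchInit above (sketch; the actual
// entry point depends on IVSize/IVSigned, e.g. __kmpc_dispatch_init_4 for a
// signed 32-bit IV):
//   #pragma omp for schedule(dynamic, 4)
// emits roughly
//   __kmpc_dispatch_init_4(&loc, tid, sched, lb, ub, /*stride=*/1, /*chunk=*/4);
// where 'sched' already carries any monotonic/nonmonotonic modifier bits.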
  2564. static void emitForStaticInitCall(
  2565. CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
  2566. llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
  2567. OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
  2568. const CGOpenMPRuntime::StaticRTInput &Values) {
  2569. if (!CGF.HaveInsertPoint())
  2570. return;
  2571. assert(!Values.Ordered);
  2572. assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
  2573. Schedule == OMP_sch_static_balanced_chunked ||
  2574. Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
  2575. Schedule == OMP_dist_sch_static ||
  2576. Schedule == OMP_dist_sch_static_chunked);
  2577. // Call __kmpc_for_static_init(
  2578. // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  2579. // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  2580. // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  2581. // kmp_int[32|64] incr, kmp_int[32|64] chunk);
  2582. llvm::Value *Chunk = Values.Chunk;
  2583. if (Chunk == nullptr) {
  2584. assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
  2585. Schedule == OMP_dist_sch_static) &&
  2586. "expected static non-chunked schedule");
2587. // If the Chunk was not specified in the clause, use the default value 1.
  2588. Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
  2589. } else {
  2590. assert((Schedule == OMP_sch_static_chunked ||
  2591. Schedule == OMP_sch_static_balanced_chunked ||
  2592. Schedule == OMP_ord_static_chunked ||
  2593. Schedule == OMP_dist_sch_static_chunked) &&
  2594. "expected static chunked schedule");
  2595. }
  2596. llvm::Value *Args[] = {
  2597. UpdateLocation,
  2598. ThreadId,
  2599. CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
  2600. M2)), // Schedule type
  2601. Values.IL.getPointer(), // &isLastIter
  2602. Values.LB.getPointer(), // &LB
  2603. Values.UB.getPointer(), // &UB
  2604. Values.ST.getPointer(), // &Stride
  2605. CGF.Builder.getIntN(Values.IVSize, 1), // Incr
  2606. Chunk // Chunk
  2607. };
  2608. CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
  2609. }
  2610. void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
  2611. SourceLocation Loc,
  2612. OpenMPDirectiveKind DKind,
  2613. const OpenMPScheduleTy &ScheduleKind,
  2614. const StaticRTInput &Values) {
  2615. OpenMPSchedType ScheduleNum = getRuntimeSchedule(
  2616. ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
  2617. assert(isOpenMPWorksharingDirective(DKind) &&
  2618. "Expected loop-based or sections-based directive.");
  2619. llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
  2620. isOpenMPLoopDirective(DKind)
  2621. ? OMP_IDENT_WORK_LOOP
  2622. : OMP_IDENT_WORK_SECTIONS);
  2623. llvm::Value *ThreadId = getThreadID(CGF, Loc);
  2624. llvm::FunctionCallee StaticInitFunction =
  2625. createForStaticInitFunction(Values.IVSize, Values.IVSigned, false);
  2626. auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  2627. emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
  2628. ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
  2629. }
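// Illustrative lowering through emitForStaticInit above (sketch):
//   #pragma omp for schedule(static, 8)
// emits roughly
//   __kmpc_for_static_init_4(&loc, tid, OMP_sch_static_chunked, &last, &lb,
//                            &ub, &stride, /*incr=*/1, /*chunk=*/8);
// with the _4/_4u/_8/_8u suffix chosen from the IV width and signedness.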
  2630. void CGOpenMPRuntime::emitDistributeStaticInit(
  2631. CodeGenFunction &CGF, SourceLocation Loc,
  2632. OpenMPDistScheduleClauseKind SchedKind,
  2633. const CGOpenMPRuntime::StaticRTInput &Values) {
  2634. OpenMPSchedType ScheduleNum =
  2635. getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  2636. llvm::Value *UpdatedLocation =
  2637. emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  2638. llvm::Value *ThreadId = getThreadID(CGF, Loc);
  2639. llvm::FunctionCallee StaticInitFunction;
  2640. bool isGPUDistribute =
  2641. CGM.getLangOpts().OpenMPIsDevice &&
  2642. (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX());
  2643. StaticInitFunction = createForStaticInitFunction(
  2644. Values.IVSize, Values.IVSigned, isGPUDistribute);
  2645. emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
  2646. ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
  2647. OMPC_SCHEDULE_MODIFIER_unknown, Values);
  2648. }
  2649. void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
  2650. SourceLocation Loc,
  2651. OpenMPDirectiveKind DKind) {
  2652. if (!CGF.HaveInsertPoint())
  2653. return;
  2654. // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
  2655. llvm::Value *Args[] = {
  2656. emitUpdateLocation(CGF, Loc,
  2657. isOpenMPDistributeDirective(DKind)
  2658. ? OMP_IDENT_WORK_DISTRIBUTE
  2659. : isOpenMPLoopDirective(DKind)
  2660. ? OMP_IDENT_WORK_LOOP
  2661. : OMP_IDENT_WORK_SECTIONS),
  2662. getThreadID(CGF, Loc)};
  2663. auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  2664. if (isOpenMPDistributeDirective(DKind) && CGM.getLangOpts().OpenMPIsDevice &&
  2665. (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX()))
  2666. CGF.EmitRuntimeCall(
  2667. OMPBuilder.getOrCreateRuntimeFunction(
  2668. CGM.getModule(), OMPRTL___kmpc_distribute_static_fini),
  2669. Args);
  2670. else
  2671. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2672. CGM.getModule(), OMPRTL___kmpc_for_static_fini),
  2673. Args);
  2674. }
  2675. void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
  2676. SourceLocation Loc,
  2677. unsigned IVSize,
  2678. bool IVSigned) {
  2679. if (!CGF.HaveInsertPoint())
  2680. return;
2681. // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
  2682. llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  2683. CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
  2684. }
  2685. llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
  2686. SourceLocation Loc, unsigned IVSize,
  2687. bool IVSigned, Address IL,
  2688. Address LB, Address UB,
  2689. Address ST) {
  2690. // Call __kmpc_dispatch_next(
  2691. // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  2692. // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  2693. // kmp_int[32|64] *p_stride);
  2694. llvm::Value *Args[] = {
  2695. emitUpdateLocation(CGF, Loc),
  2696. getThreadID(CGF, Loc),
  2697. IL.getPointer(), // &isLastIter
  2698. LB.getPointer(), // &Lower
  2699. UB.getPointer(), // &Upper
  2700. ST.getPointer() // &Stride
  2701. };
  2702. llvm::Value *Call =
  2703. CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
  2704. return CGF.EmitScalarConversion(
  2705. Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
  2706. CGF.getContext().BoolTy, Loc);
  2707. }
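// Illustrative use of emitForNext above (sketch): callers wrap the returned
// flag in a dispatch loop of roughly this shape:
//   while (__kmpc_dispatch_next_4(&loc, tid, &last, &lb, &ub, &st)) {
//     for (iv = lb; iv <= ub; iv += st) { /* loop body */ }
//   }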
  2708. void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
  2709. llvm::Value *NumThreads,
  2710. SourceLocation Loc) {
  2711. if (!CGF.HaveInsertPoint())
  2712. return;
  2713. // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
  2714. llvm::Value *Args[] = {
  2715. emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  2716. CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
  2717. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2718. CGM.getModule(), OMPRTL___kmpc_push_num_threads),
  2719. Args);
  2720. }
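// Illustrative effect of emitNumThreadsClause above (sketch):
//   #pragma omp parallel num_threads(4)
// emits, before the parallel region is forked,
//   __kmpc_push_num_threads(&loc, tid, 4);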
  2721. void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
  2722. ProcBindKind ProcBind,
  2723. SourceLocation Loc) {
  2724. if (!CGF.HaveInsertPoint())
  2725. return;
  2726. assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
  2727. // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
  2728. llvm::Value *Args[] = {
  2729. emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  2730. llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
  2731. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2732. CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
  2733. Args);
  2734. }
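// Illustrative effect of emitProcBindClause above (sketch):
//   #pragma omp parallel proc_bind(close)
// emits
//   __kmpc_push_proc_bind(&loc, tid, /*proc_bind=*/OMP_PROC_BIND_close);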
  2735. void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
  2736. SourceLocation Loc, llvm::AtomicOrdering AO) {
  2737. if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
  2738. OMPBuilder.createFlush(CGF.Builder);
  2739. } else {
  2740. if (!CGF.HaveInsertPoint())
  2741. return;
  2742. // Build call void __kmpc_flush(ident_t *loc)
  2743. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  2744. CGM.getModule(), OMPRTL___kmpc_flush),
  2745. emitUpdateLocation(CGF, Loc));
  2746. }
  2747. }
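// Illustrative effect of emitFlush above (sketch): when the OpenMPIRBuilder is
// not in use,
//   #pragma omp flush
// becomes a plain runtime call
//   __kmpc_flush(&loc);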
  2748. namespace {
  2749. /// Indexes of fields for type kmp_task_t.
  2750. enum KmpTaskTFields {
  2751. /// List of shared variables.
  2752. KmpTaskTShareds,
  2753. /// Task routine.
  2754. KmpTaskTRoutine,
  2755. /// Partition id for the untied tasks.
  2756. KmpTaskTPartId,
  2757. /// Function with call of destructors for private variables.
  2758. Data1,
  2759. /// Task priority.
  2760. Data2,
  2761. /// (Taskloops only) Lower bound.
  2762. KmpTaskTLowerBound,
  2763. /// (Taskloops only) Upper bound.
  2764. KmpTaskTUpperBound,
  2765. /// (Taskloops only) Stride.
  2766. KmpTaskTStride,
  2767. /// (Taskloops only) Is last iteration flag.
  2768. KmpTaskTLastIter,
  2769. /// (Taskloops only) Reduction data.
  2770. KmpTaskTReductions,
  2771. };
  2772. } // anonymous namespace
  2773. bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
  2774. return OffloadEntriesTargetRegion.empty() &&
  2775. OffloadEntriesDeviceGlobalVar.empty();
  2776. }
  2777. /// Initialize target region entry.
  2778. void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
  2779. initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
  2780. StringRef ParentName, unsigned LineNum,
  2781. unsigned Order) {
  2782. assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
  2783. "only required for the device "
  2784. "code generation.");
  2785. OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
  2786. OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
  2787. OMPTargetRegionEntryTargetRegion);
  2788. ++OffloadingEntriesNum;
  2789. }
  2790. void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
  2791. registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
  2792. StringRef ParentName, unsigned LineNum,
  2793. llvm::Constant *Addr, llvm::Constant *ID,
  2794. OMPTargetRegionEntryKind Flags) {
2795. // If we are emitting code for a target, the entry is already initialized
2796. // and only has to be registered.
  2797. if (CGM.getLangOpts().OpenMPIsDevice) {
  2798. // This could happen if the device compilation is invoked standalone.
  2799. if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
  2800. return;
  2801. auto &Entry =
  2802. OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
  2803. Entry.setAddress(Addr);
  2804. Entry.setID(ID);
  2805. Entry.setFlags(Flags);
  2806. } else {
  2807. if (Flags ==
  2808. OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion &&
  2809. hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
  2810. /*IgnoreAddressId*/ true))
  2811. return;
  2812. assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
  2813. "Target region entry already registered!");
  2814. OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
  2815. OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
  2816. ++OffloadingEntriesNum;
  2817. }
  2818. }
  2819. bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
  2820. unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
  2821. bool IgnoreAddressId) const {
  2822. auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
  2823. if (PerDevice == OffloadEntriesTargetRegion.end())
  2824. return false;
  2825. auto PerFile = PerDevice->second.find(FileID);
  2826. if (PerFile == PerDevice->second.end())
  2827. return false;
  2828. auto PerParentName = PerFile->second.find(ParentName);
  2829. if (PerParentName == PerFile->second.end())
  2830. return false;
  2831. auto PerLine = PerParentName->second.find(LineNum);
  2832. if (PerLine == PerParentName->second.end())
  2833. return false;
  2834. // Fail if this entry is already registered.
  2835. if (!IgnoreAddressId &&
  2836. (PerLine->second.getAddress() || PerLine->second.getID()))
  2837. return false;
  2838. return true;
  2839. }
  2840. void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
  2841. const OffloadTargetRegionEntryInfoActTy &Action) {
  2842. // Scan all target region entries and perform the provided action.
  2843. for (const auto &D : OffloadEntriesTargetRegion)
  2844. for (const auto &F : D.second)
  2845. for (const auto &P : F.second)
  2846. for (const auto &L : P.second)
  2847. Action(D.first, F.first, P.first(), L.first, L.second);
  2848. }
  2849. void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
  2850. initializeDeviceGlobalVarEntryInfo(StringRef Name,
  2851. OMPTargetGlobalVarEntryKind Flags,
  2852. unsigned Order) {
  2853. assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
  2854. "only required for the device "
  2855. "code generation.");
  2856. OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
  2857. ++OffloadingEntriesNum;
  2858. }
  2859. void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
  2860. registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
  2861. CharUnits VarSize,
  2862. OMPTargetGlobalVarEntryKind Flags,
  2863. llvm::GlobalValue::LinkageTypes Linkage) {
  2864. if (CGM.getLangOpts().OpenMPIsDevice) {
  2865. // This could happen if the device compilation is invoked standalone.
  2866. if (!hasDeviceGlobalVarEntryInfo(VarName))
  2867. return;
  2868. auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
  2869. if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
  2870. if (Entry.getVarSize().isZero()) {
  2871. Entry.setVarSize(VarSize);
  2872. Entry.setLinkage(Linkage);
  2873. }
  2874. return;
  2875. }
  2876. Entry.setVarSize(VarSize);
  2877. Entry.setLinkage(Linkage);
  2878. Entry.setAddress(Addr);
  2879. } else {
  2880. if (hasDeviceGlobalVarEntryInfo(VarName)) {
  2881. auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
  2882. assert(Entry.isValid() && Entry.getFlags() == Flags &&
  2883. "Entry not initialized!");
  2884. if (Entry.getVarSize().isZero()) {
  2885. Entry.setVarSize(VarSize);
  2886. Entry.setLinkage(Linkage);
  2887. }
  2888. return;
  2889. }
  2890. OffloadEntriesDeviceGlobalVar.try_emplace(
  2891. VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
  2892. ++OffloadingEntriesNum;
  2893. }
  2894. }
  2895. void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
  2896. actOnDeviceGlobalVarEntriesInfo(
  2897. const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
2898. // Scan all device global variable entries and perform the provided action.
  2899. for (const auto &E : OffloadEntriesDeviceGlobalVar)
  2900. Action(E.getKey(), E.getValue());
  2901. }
  2902. void CGOpenMPRuntime::createOffloadEntry(
  2903. llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
  2904. llvm::GlobalValue::LinkageTypes Linkage) {
  2905. StringRef Name = Addr->getName();
  2906. llvm::Module &M = CGM.getModule();
  2907. llvm::LLVMContext &C = M.getContext();
  2908. // Create constant string with the name.
  2909. llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
  2910. std::string StringName = getName({"omp_offloading", "entry_name"});
  2911. auto *Str = new llvm::GlobalVariable(
  2912. M, StrPtrInit->getType(), /*isConstant=*/true,
  2913. llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
  2914. Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  2915. llvm::Constant *Data[] = {
  2916. llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy),
  2917. llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy),
  2918. llvm::ConstantInt::get(CGM.SizeTy, Size),
  2919. llvm::ConstantInt::get(CGM.Int32Ty, Flags),
  2920. llvm::ConstantInt::get(CGM.Int32Ty, 0)};
  2921. std::string EntryName = getName({"omp_offloading", "entry", ""});
  2922. llvm::GlobalVariable *Entry = createGlobalStruct(
  2923. CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
  2924. Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
  2925. // The entry has to be created in the section the linker expects it to be.
  2926. Entry->setSection("omp_offloading_entries");
  2927. }
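// Illustrative result of createOffloadEntry above (sketch; names, mangling and
// pointer types are approximate): each entry becomes two host-module globals,
//   @<prefix>entry_name... = internal unnamed_addr constant c"<entry name>\00"
//   @<prefix>entry.<entry name> = weak constant %struct.__tgt_offload_entry
//       { <addr>, <name ptr>, <size>, <flags>, i32 0 },
//       section "omp_offloading_entries"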
  2928. void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
  2929. // Emit the offloading entries and metadata so that the device codegen side
  2930. // can easily figure out what to emit. The produced metadata looks like
  2931. // this:
  2932. //
  2933. // !omp_offload.info = !{!1, ...}
  2934. //
2935. // Right now we only generate metadata for functions that contain target
  2936. // regions.
  2937. // If we are in simd mode or there are no entries, we don't need to do
  2938. // anything.
  2939. if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
  2940. return;
  2941. llvm::Module &M = CGM.getModule();
  2942. llvm::LLVMContext &C = M.getContext();
  2943. SmallVector<std::tuple<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *,
  2944. SourceLocation, StringRef>,
  2945. 16>
  2946. OrderedEntries(OffloadEntriesInfoManager.size());
  2947. llvm::SmallVector<StringRef, 16> ParentFunctions(
  2948. OffloadEntriesInfoManager.size());
  2949. // Auxiliary methods to create metadata values and strings.
  2950. auto &&GetMDInt = [this](unsigned V) {
  2951. return llvm::ConstantAsMetadata::get(
  2952. llvm::ConstantInt::get(CGM.Int32Ty, V));
  2953. };
  2954. auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
  2955. // Create the offloading info metadata node.
  2956. llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
2957. // Create a function that emits metadata for each target region entry.
  2958. auto &&TargetRegionMetadataEmitter =
  2959. [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
  2960. &GetMDString](
  2961. unsigned DeviceID, unsigned FileID, StringRef ParentName,
  2962. unsigned Line,
  2963. const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
  2964. // Generate metadata for target regions. Each entry of this metadata
  2965. // contains:
  2966. // - Entry 0 -> Kind of this type of metadata (0).
  2967. // - Entry 1 -> Device ID of the file where the entry was identified.
  2968. // - Entry 2 -> File ID of the file where the entry was identified.
  2969. // - Entry 3 -> Mangled name of the function where the entry was
  2970. // identified.
  2971. // - Entry 4 -> Line in the file where the entry was identified.
  2972. // - Entry 5 -> Order the entry was created.
  2973. // The first element of the metadata node is the kind.
  2974. llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
  2975. GetMDInt(FileID), GetMDString(ParentName),
  2976. GetMDInt(Line), GetMDInt(E.getOrder())};
  2977. SourceLocation Loc;
  2978. for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
  2979. E = CGM.getContext().getSourceManager().fileinfo_end();
  2980. I != E; ++I) {
  2981. if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
  2982. I->getFirst()->getUniqueID().getFile() == FileID) {
  2983. Loc = CGM.getContext().getSourceManager().translateFileLineCol(
  2984. I->getFirst(), Line, 1);
  2985. break;
  2986. }
  2987. }
  2988. // Save this entry in the right position of the ordered entries array.
  2989. OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
  2990. ParentFunctions[E.getOrder()] = ParentName;
  2991. // Add metadata to the named metadata node.
  2992. MD->addOperand(llvm::MDNode::get(C, Ops));
  2993. };
  2994. OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
  2995. TargetRegionMetadataEmitter);
2996. // Create a function that emits metadata for each device global variable entry.
  2997. auto &&DeviceGlobalVarMetadataEmitter =
  2998. [&C, &OrderedEntries, &GetMDInt, &GetMDString,
  2999. MD](StringRef MangledName,
  3000. const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
  3001. &E) {
  3002. // Generate metadata for global variables. Each entry of this metadata
  3003. // contains:
  3004. // - Entry 0 -> Kind of this type of metadata (1).
  3005. // - Entry 1 -> Mangled name of the variable.
  3006. // - Entry 2 -> Declare target kind.
  3007. // - Entry 3 -> Order the entry was created.
  3008. // The first element of the metadata node is the kind.
  3009. llvm::Metadata *Ops[] = {
  3010. GetMDInt(E.getKind()), GetMDString(MangledName),
  3011. GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
  3012. // Save this entry in the right position of the ordered entries array.
  3013. OrderedEntries[E.getOrder()] =
  3014. std::make_tuple(&E, SourceLocation(), MangledName);
  3015. // Add metadata to the named metadata node.
  3016. MD->addOperand(llvm::MDNode::get(C, Ops));
  3017. };
  3018. OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
  3019. DeviceGlobalVarMetadataEmitter);
  3020. for (const auto &E : OrderedEntries) {
  3021. assert(std::get<0>(E) && "All ordered entries must exist!");
  3022. if (const auto *CE =
  3023. dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
  3024. std::get<0>(E))) {
  3025. if (!CE->getID() || !CE->getAddress()) {
3026. // Do not blame the entry if the parent function is not emitted.
  3027. StringRef FnName = ParentFunctions[CE->getOrder()];
  3028. if (!CGM.GetGlobalValue(FnName))
  3029. continue;
  3030. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  3031. DiagnosticsEngine::Error,
  3032. "Offloading entry for target region in %0 is incorrect: either the "
  3033. "address or the ID is invalid.");
  3034. CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
  3035. continue;
  3036. }
  3037. createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
  3038. CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
  3039. } else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
  3040. OffloadEntryInfoDeviceGlobalVar>(
  3041. std::get<0>(E))) {
  3042. OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
  3043. static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
  3044. CE->getFlags());
  3045. switch (Flags) {
  3046. case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
  3047. if (CGM.getLangOpts().OpenMPIsDevice &&
  3048. CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
  3049. continue;
  3050. if (!CE->getAddress()) {
  3051. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  3052. DiagnosticsEngine::Error, "Offloading entry for declare target "
  3053. "variable %0 is incorrect: the "
  3054. "address is invalid.");
  3055. CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
  3056. continue;
  3057. }
3058. // The variable has no definition, so there is no need to add the entry.
  3059. if (CE->getVarSize().isZero())
  3060. continue;
  3061. break;
  3062. }
  3063. case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
3064. assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
3065. (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
3066. "Declare target link address is set.");
  3067. if (CGM.getLangOpts().OpenMPIsDevice)
  3068. continue;
  3069. if (!CE->getAddress()) {
  3070. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  3071. DiagnosticsEngine::Error,
  3072. "Offloading entry for declare target variable is incorrect: the "
  3073. "address is invalid.");
  3074. CGM.getDiags().Report(DiagID);
  3075. continue;
  3076. }
  3077. break;
  3078. }
  3079. createOffloadEntry(CE->getAddress(), CE->getAddress(),
  3080. CE->getVarSize().getQuantity(), Flags,
  3081. CE->getLinkage());
  3082. } else {
  3083. llvm_unreachable("Unsupported entry kind.");
  3084. }
  3085. }
  3086. }
3087. /// Loads all the offload entry information from the host IR
  3088. /// metadata.
  3089. void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  3090. // If we are in target mode, load the metadata from the host IR. This code has
3091. // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
  3092. if (!CGM.getLangOpts().OpenMPIsDevice)
  3093. return;
  3094. if (CGM.getLangOpts().OMPHostIRFile.empty())
  3095. return;
  3096. auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
  3097. if (auto EC = Buf.getError()) {
  3098. CGM.getDiags().Report(diag::err_cannot_open_file)
  3099. << CGM.getLangOpts().OMPHostIRFile << EC.message();
  3100. return;
  3101. }
  3102. llvm::LLVMContext C;
  3103. auto ME = expectedToErrorOrAndEmitErrors(
  3104. C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
  3105. if (auto EC = ME.getError()) {
  3106. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  3107. DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
  3108. CGM.getDiags().Report(DiagID)
  3109. << CGM.getLangOpts().OMPHostIRFile << EC.message();
  3110. return;
  3111. }
  3112. llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
  3113. if (!MD)
  3114. return;
  3115. for (llvm::MDNode *MN : MD->operands()) {
  3116. auto &&GetMDInt = [MN](unsigned Idx) {
  3117. auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
  3118. return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
  3119. };
  3120. auto &&GetMDString = [MN](unsigned Idx) {
  3121. auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
  3122. return V->getString();
  3123. };
  3124. switch (GetMDInt(0)) {
  3125. default:
  3126. llvm_unreachable("Unexpected metadata!");
  3127. break;
  3128. case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
  3129. OffloadingEntryInfoTargetRegion:
  3130. OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
  3131. /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
  3132. /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
  3133. /*Order=*/GetMDInt(5));
  3134. break;
  3135. case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
  3136. OffloadingEntryInfoDeviceGlobalVar:
  3137. OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
  3138. /*MangledName=*/GetMDString(1),
  3139. static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
  3140. /*Flags=*/GetMDInt(2)),
  3141. /*Order=*/GetMDInt(3));
  3142. break;
  3143. }
  3144. }
  3145. }
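// Illustrative host-IR metadata consumed by loadOffloadInfoMetadata above
// (sketch; IDs and names are placeholders):
//   !omp_offload.info = !{!0, !1}
//   !0 = !{i32 0, i32 <device-id>, i32 <file-id>, !"<parent fn>", i32 <line>, i32 0}
//   !1 = !{i32 1, !"<mangled global>", i32 <declare-target kind>, i32 1}
// Kind 0 initializes a target region entry, kind 1 a declare-target global.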
  3146. void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
  3147. if (!KmpRoutineEntryPtrTy) {
  3148. // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
  3149. ASTContext &C = CGM.getContext();
  3150. QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
  3151. FunctionProtoType::ExtProtoInfo EPI;
  3152. KmpRoutineEntryPtrQTy = C.getPointerType(
  3153. C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
  3154. KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
  3155. }
  3156. }
  3157. QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
  3158. // Make sure the type of the entry is already created. This is the type we
  3159. // have to create:
  3160. // struct __tgt_offload_entry{
  3161. // void *addr; // Pointer to the offload entry info.
  3162. // // (function or global)
  3163. // char *name; // Name of the function or global.
3164. // size_t size; // Size of the entry info (0 if it is a function).
  3165. // int32_t flags; // Flags associated with the entry, e.g. 'link'.
  3166. // int32_t reserved; // Reserved, to use by the runtime library.
  3167. // };
  3168. if (TgtOffloadEntryQTy.isNull()) {
  3169. ASTContext &C = CGM.getContext();
  3170. RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
  3171. RD->startDefinition();
  3172. addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  3173. addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
  3174. addFieldToRecordDecl(C, RD, C.getSizeType());
  3175. addFieldToRecordDecl(
  3176. C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
  3177. addFieldToRecordDecl(
  3178. C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
  3179. RD->completeDefinition();
  3180. RD->addAttr(PackedAttr::CreateImplicit(C));
  3181. TgtOffloadEntryQTy = C.getRecordType(RD);
  3182. }
  3183. return TgtOffloadEntryQTy;
  3184. }
  3185. namespace {
  3186. struct PrivateHelpersTy {
  3187. PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
  3188. const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
  3189. : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
  3190. PrivateElemInit(PrivateElemInit) {}
  3191. PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
  3192. const Expr *OriginalRef = nullptr;
  3193. const VarDecl *Original = nullptr;
  3194. const VarDecl *PrivateCopy = nullptr;
  3195. const VarDecl *PrivateElemInit = nullptr;
  3196. bool isLocalPrivate() const {
  3197. return !OriginalRef && !PrivateCopy && !PrivateElemInit;
  3198. }
  3199. };
  3200. typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
  3201. } // anonymous namespace
  3202. static bool isAllocatableDecl(const VarDecl *VD) {
  3203. const VarDecl *CVD = VD->getCanonicalDecl();
  3204. if (!CVD->hasAttr<OMPAllocateDeclAttr>())
  3205. return false;
  3206. const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  3207. // Use the default allocation.
  3208. return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
  3209. !AA->getAllocator());
  3210. }
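// Illustrative case for isAllocatableDecl above (sketch):
//   int x;
//   #pragma omp allocate(x) allocator(omp_high_bw_mem_alloc)
// attaches an OMPAllocateDeclAttr with a non-default allocator, so the
// predicate returns true and the privates record built below stores a pointer
// to the separately allocated storage instead of the value itself.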
  3211. static RecordDecl *
  3212. createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
  3213. if (!Privates.empty()) {
  3214. ASTContext &C = CGM.getContext();
  3215. // Build struct .kmp_privates_t. {
  3216. // /* private vars */
  3217. // };
  3218. RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
  3219. RD->startDefinition();
  3220. for (const auto &Pair : Privates) {
  3221. const VarDecl *VD = Pair.second.Original;
  3222. QualType Type = VD->getType().getNonReferenceType();
  3223. // If the private variable is a local variable with lvalue ref type,
  3224. // allocate the pointer instead of the pointee type.
  3225. if (Pair.second.isLocalPrivate()) {
  3226. if (VD->getType()->isLValueReferenceType())
  3227. Type = C.getPointerType(Type);
  3228. if (isAllocatableDecl(VD))
  3229. Type = C.getPointerType(Type);
  3230. }
  3231. FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
  3232. if (VD->hasAttrs()) {
  3233. for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
  3234. E(VD->getAttrs().end());
  3235. I != E; ++I)
  3236. FD->addAttr(*I);
  3237. }
  3238. }
  3239. RD->completeDefinition();
  3240. return RD;
  3241. }
  3242. return nullptr;
  3243. }
  3244. static RecordDecl *
  3245. createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
  3246. QualType KmpInt32Ty,
  3247. QualType KmpRoutineEntryPointerQTy) {
  3248. ASTContext &C = CGM.getContext();
  3249. // Build struct kmp_task_t {
  3250. // void * shareds;
  3251. // kmp_routine_entry_t routine;
  3252. // kmp_int32 part_id;
  3253. // kmp_cmplrdata_t data1;
  3254. // kmp_cmplrdata_t data2;
  3255. // For taskloops additional fields:
  3256. // kmp_uint64 lb;
  3257. // kmp_uint64 ub;
  3258. // kmp_int64 st;
  3259. // kmp_int32 liter;
  3260. // void * reductions;
  3261. // };
  3262. RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
  3263. UD->startDefinition();
  3264. addFieldToRecordDecl(C, UD, KmpInt32Ty);
  3265. addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
  3266. UD->completeDefinition();
  3267. QualType KmpCmplrdataTy = C.getRecordType(UD);
  3268. RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
  3269. RD->startDefinition();
  3270. addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  3271. addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
  3272. addFieldToRecordDecl(C, RD, KmpInt32Ty);
  3273. addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
  3274. addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
  3275. if (isOpenMPTaskLoopDirective(Kind)) {
  3276. QualType KmpUInt64Ty =
  3277. CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
  3278. QualType KmpInt64Ty =
  3279. CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  3280. addFieldToRecordDecl(C, RD, KmpUInt64Ty);
  3281. addFieldToRecordDecl(C, RD, KmpUInt64Ty);
  3282. addFieldToRecordDecl(C, RD, KmpInt64Ty);
  3283. addFieldToRecordDecl(C, RD, KmpInt32Ty);
  3284. addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  3285. }
  3286. RD->completeDefinition();
  3287. return RD;
  3288. }
  3289. static RecordDecl *
  3290. createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
  3291. ArrayRef<PrivateDataTy> Privates) {
  3292. ASTContext &C = CGM.getContext();
  3293. // Build struct kmp_task_t_with_privates {
  3294. // kmp_task_t task_data;
  3295. // .kmp_privates_t. privates;
  3296. // };
  3297. RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
  3298. RD->startDefinition();
  3299. addFieldToRecordDecl(C, RD, KmpTaskTQTy);
  3300. if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
  3301. addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
  3302. RD->completeDefinition();
  3303. return RD;
  3304. }
  3305. /// Emit a proxy function which accepts kmp_task_t as the second
  3306. /// argument.
  3307. /// \code
  3308. /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  3309. /// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
  3310. /// For taskloops:
  3311. /// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  3312. /// tt->reductions, tt->shareds);
  3313. /// return 0;
  3314. /// }
  3315. /// \endcode
  3316. static llvm::Function *
  3317. emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
  3318. OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
  3319. QualType KmpTaskTWithPrivatesPtrQTy,
  3320. QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
  3321. QualType SharedsPtrTy, llvm::Function *TaskFunction,
  3322. llvm::Value *TaskPrivatesMap) {
  3323. ASTContext &C = CGM.getContext();
  3324. FunctionArgList Args;
  3325. ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
  3326. ImplicitParamDecl::Other);
  3327. ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3328. KmpTaskTWithPrivatesPtrQTy.withRestrict(),
  3329. ImplicitParamDecl::Other);
  3330. Args.push_back(&GtidArg);
  3331. Args.push_back(&TaskTypeArg);
  3332. const auto &TaskEntryFnInfo =
  3333. CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  3334. llvm::FunctionType *TaskEntryTy =
  3335. CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  3336. std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
  3337. auto *TaskEntry = llvm::Function::Create(
  3338. TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  3339. CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
  3340. TaskEntry->setDoesNotRecurse();
  3341. CodeGenFunction CGF(CGM);
  3342. CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
  3343. Loc, Loc);
  3344. // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
  3345. // tt,
  3346. // For taskloops:
  3347. // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  3348. // tt->task_data.shareds);
  3349. llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
  3350. CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  3351. LValue TDBase = CGF.EmitLoadOfPointerLValue(
  3352. CGF.GetAddrOfLocalVar(&TaskTypeArg),
  3353. KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  3354. const auto *KmpTaskTWithPrivatesQTyRD =
  3355. cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  3356. LValue Base =
  3357. CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  3358. const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  3359. auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  3360. LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  3361. llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
  3362. auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  3363. LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  3364. llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  3365. CGF.EmitLoadOfScalar(SharedsLVal, Loc),
  3366. CGF.ConvertTypeForMem(SharedsPtrTy));
  3367. auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  3368. llvm::Value *PrivatesParam;
  3369. if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
  3370. LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
  3371. PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  3372. PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
  3373. } else {
  3374. PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  3375. }
  3376. llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
  3377. TaskPrivatesMap,
  3378. CGF.Builder
  3379. .CreatePointerBitCastOrAddrSpaceCast(
  3380. TDBase.getAddress(CGF), CGF.VoidPtrTy)
  3381. .getPointer()};
  3382. SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
  3383. std::end(CommonArgs));
  3384. if (isOpenMPTaskLoopDirective(Kind)) {
  3385. auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
  3386. LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
  3387. llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
  3388. auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
  3389. LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
  3390. llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
  3391. auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
  3392. LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
  3393. llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
  3394. auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
  3395. LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
  3396. llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
  3397. auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
  3398. LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
  3399. llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
  3400. CallArgs.push_back(LBParam);
  3401. CallArgs.push_back(UBParam);
  3402. CallArgs.push_back(StParam);
  3403. CallArgs.push_back(LIParam);
  3404. CallArgs.push_back(RParam);
  3405. }
  3406. CallArgs.push_back(SharedsParam);
  3407. CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
  3408. CallArgs);
  3409. CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
  3410. CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  3411. CGF.FinishFunction();
  3412. return TaskEntry;
  3413. }
  3414. static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
  3415. SourceLocation Loc,
  3416. QualType KmpInt32Ty,
  3417. QualType KmpTaskTWithPrivatesPtrQTy,
  3418. QualType KmpTaskTWithPrivatesQTy) {
  3419. ASTContext &C = CGM.getContext();
  3420. FunctionArgList Args;
  3421. ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
  3422. ImplicitParamDecl::Other);
  3423. ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3424. KmpTaskTWithPrivatesPtrQTy.withRestrict(),
  3425. ImplicitParamDecl::Other);
  3426. Args.push_back(&GtidArg);
  3427. Args.push_back(&TaskTypeArg);
  3428. const auto &DestructorFnInfo =
  3429. CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  3430. llvm::FunctionType *DestructorFnTy =
  3431. CGM.getTypes().GetFunctionType(DestructorFnInfo);
  3432. std::string Name =
  3433. CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
  3434. auto *DestructorFn =
  3435. llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
  3436. Name, &CGM.getModule());
  3437. CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
  3438. DestructorFnInfo);
  3439. DestructorFn->setDoesNotRecurse();
  3440. CodeGenFunction CGF(CGM);
  3441. CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
  3442. Args, Loc, Loc);
  3443. LValue Base = CGF.EmitLoadOfPointerLValue(
  3444. CGF.GetAddrOfLocalVar(&TaskTypeArg),
  3445. KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  3446. const auto *KmpTaskTWithPrivatesQTyRD =
  3447. cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  3448. auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  3449. Base = CGF.EmitLValueForField(Base, *FI);
  3450. for (const auto *Field :
  3451. cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
  3452. if (QualType::DestructionKind DtorKind =
  3453. Field->getType().isDestructedType()) {
  3454. LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
  3455. CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
  3456. }
  3457. }
  3458. CGF.FinishFunction();
  3459. return DestructorFn;
  3460. }
  3461. /// Emit a privates mapping function for correct handling of private and
  3462. /// firstprivate variables.
  3463. /// \code
  3464. /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
  3465. /// **noalias priv1,..., <tyn> **noalias privn) {
  3466. /// *priv1 = &.privates.priv1;
  3467. /// ...;
  3468. /// *privn = &.privates.privn;
  3469. /// }
  3470. /// \endcode
  3471. static llvm::Value *
  3472. emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
  3473. const OMPTaskDataTy &Data, QualType PrivatesQTy,
  3474. ArrayRef<PrivateDataTy> Privates) {
  3475. ASTContext &C = CGM.getContext();
  3476. FunctionArgList Args;
  3477. ImplicitParamDecl TaskPrivatesArg(
  3478. C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3479. C.getPointerType(PrivatesQTy).withConst().withRestrict(),
  3480. ImplicitParamDecl::Other);
  3481. Args.push_back(&TaskPrivatesArg);
  3482. llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
  3483. unsigned Counter = 1;
  3484. for (const Expr *E : Data.PrivateVars) {
  3485. Args.push_back(ImplicitParamDecl::Create(
  3486. C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3487. C.getPointerType(C.getPointerType(E->getType()))
  3488. .withConst()
  3489. .withRestrict(),
  3490. ImplicitParamDecl::Other));
  3491. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
  3492. PrivateVarsPos[VD] = Counter;
  3493. ++Counter;
  3494. }
  3495. for (const Expr *E : Data.FirstprivateVars) {
  3496. Args.push_back(ImplicitParamDecl::Create(
  3497. C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3498. C.getPointerType(C.getPointerType(E->getType()))
  3499. .withConst()
  3500. .withRestrict(),
  3501. ImplicitParamDecl::Other));
  3502. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
  3503. PrivateVarsPos[VD] = Counter;
  3504. ++Counter;
  3505. }
  3506. for (const Expr *E : Data.LastprivateVars) {
  3507. Args.push_back(ImplicitParamDecl::Create(
  3508. C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3509. C.getPointerType(C.getPointerType(E->getType()))
  3510. .withConst()
  3511. .withRestrict(),
  3512. ImplicitParamDecl::Other));
  3513. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
  3514. PrivateVarsPos[VD] = Counter;
  3515. ++Counter;
  3516. }
  3517. for (const VarDecl *VD : Data.PrivateLocals) {
  3518. QualType Ty = VD->getType().getNonReferenceType();
  3519. if (VD->getType()->isLValueReferenceType())
  3520. Ty = C.getPointerType(Ty);
  3521. if (isAllocatableDecl(VD))
  3522. Ty = C.getPointerType(Ty);
  3523. Args.push_back(ImplicitParamDecl::Create(
  3524. C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3525. C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
  3526. ImplicitParamDecl::Other));
  3527. PrivateVarsPos[VD] = Counter;
  3528. ++Counter;
  3529. }
  3530. const auto &TaskPrivatesMapFnInfo =
  3531. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  3532. llvm::FunctionType *TaskPrivatesMapTy =
  3533. CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
  3534. std::string Name =
  3535. CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
  3536. auto *TaskPrivatesMap = llvm::Function::Create(
  3537. TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
  3538. &CGM.getModule());
  3539. CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
  3540. TaskPrivatesMapFnInfo);
  3541. if (CGM.getLangOpts().Optimize) {
  3542. TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
  3543. TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
  3544. TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
  3545. }
  3546. CodeGenFunction CGF(CGM);
  3547. CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
  3548. TaskPrivatesMapFnInfo, Args, Loc, Loc);
  3549. // *privi = &.privates.privi;
  3550. LValue Base = CGF.EmitLoadOfPointerLValue(
  3551. CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
  3552. TaskPrivatesArg.getType()->castAs<PointerType>());
  3553. const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
  3554. Counter = 0;
  3555. for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
  3556. LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
  3557. const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
  3558. LValue RefLVal =
  3559. CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
  3560. LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
  3561. RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
  3562. CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
  3563. ++Counter;
  3564. }
  3565. CGF.FinishFunction();
  3566. return TaskPrivatesMap;
  3567. }
  3568. /// Emit initialization for private variables in task-based directives.
  3569. static void emitPrivatesInit(CodeGenFunction &CGF,
  3570. const OMPExecutableDirective &D,
  3571. Address KmpTaskSharedsPtr, LValue TDBase,
  3572. const RecordDecl *KmpTaskTWithPrivatesQTyRD,
  3573. QualType SharedsTy, QualType SharedsPtrTy,
  3574. const OMPTaskDataTy &Data,
  3575. ArrayRef<PrivateDataTy> Privates, bool ForDup) {
  3576. ASTContext &C = CGF.getContext();
  3577. auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  3578. LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  3579. OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
  3580. ? OMPD_taskloop
  3581. : OMPD_task;
  3582. const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  3583. CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  3584. LValue SrcBase;
  3585. bool IsTargetTask =
  3586. isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
  3587. isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  3588. // For target-based directives skip 4 firstprivate arrays BasePointersArray,
  3589. // PointersArray, SizesArray, and MappersArray. The original variables for
  3590. // these arrays are not captured and we get their addresses explicitly.
  3591. if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
  3592. (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
  3593. SrcBase = CGF.MakeAddrLValue(
  3594. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  3595. KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
  3596. SharedsTy);
  3597. }
  3598. FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
  3599. for (const PrivateDataTy &Pair : Privates) {
  3600. // Do not initialize private locals.
  3601. if (Pair.second.isLocalPrivate()) {
  3602. ++FI;
  3603. continue;
  3604. }
  3605. const VarDecl *VD = Pair.second.PrivateCopy;
  3606. const Expr *Init = VD->getAnyInitializer();
  3607. if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
  3608. !CGF.isTrivialInitializer(Init)))) {
  3609. LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
  3610. if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
  3611. const VarDecl *OriginalVD = Pair.second.Original;
  3612. // Check if the variable is the target-based BasePointersArray,
  3613. // PointersArray, SizesArray, or MappersArray.
  3614. LValue SharedRefLValue;
  3615. QualType Type = PrivateLValue.getType();
  3616. const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
  3617. if (IsTargetTask && !SharedField) {
  3618. assert(isa<ImplicitParamDecl>(OriginalVD) &&
  3619. isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
  3620. cast<CapturedDecl>(OriginalVD->getDeclContext())
  3621. ->getNumParams() == 0 &&
  3622. isa<TranslationUnitDecl>(
  3623. cast<CapturedDecl>(OriginalVD->getDeclContext())
  3624. ->getDeclContext()) &&
  3625. "Expected artificial target data variable.");
  3626. SharedRefLValue =
  3627. CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
  3628. } else if (ForDup) {
  3629. SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
  3630. SharedRefLValue = CGF.MakeAddrLValue(
  3631. Address(SharedRefLValue.getPointer(CGF),
  3632. C.getDeclAlign(OriginalVD)),
  3633. SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
  3634. SharedRefLValue.getTBAAInfo());
  3635. } else if (CGF.LambdaCaptureFields.count(
  3636. Pair.second.Original->getCanonicalDecl()) > 0 ||
  3637. isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl)) {
  3638. SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
  3639. } else {
  3640. // Processing for implicitly captured variables.
  3641. InlinedOpenMPRegionRAII Region(
  3642. CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
  3643. /*HasCancel=*/false, /*NoInheritance=*/true);
  3644. SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
  3645. }
  3646. if (Type->isArrayType()) {
  3647. // Initialize firstprivate array.
  3648. if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
  3649. // Perform simple memcpy.
  3650. CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
  3651. } else {
  3652. // Initialize firstprivate array using element-by-element
  3653. // initialization.
  3654. CGF.EmitOMPAggregateAssign(
  3655. PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
  3656. Type,
  3657. [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
  3658. Address SrcElement) {
  3659. // Clean up any temporaries needed by the initialization.
  3660. CodeGenFunction::OMPPrivateScope InitScope(CGF);
  3661. InitScope.addPrivate(
  3662. Elem, [SrcElement]() -> Address { return SrcElement; });
  3663. (void)InitScope.Privatize();
  3664. // Emit initialization for single element.
  3665. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
  3666. CGF, &CapturesInfo);
  3667. CGF.EmitAnyExprToMem(Init, DestElement,
  3668. Init->getType().getQualifiers(),
  3669. /*IsInitializer=*/false);
  3670. });
  3671. }
  3672. } else {
  3673. CodeGenFunction::OMPPrivateScope InitScope(CGF);
  3674. InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
  3675. return SharedRefLValue.getAddress(CGF);
  3676. });
  3677. (void)InitScope.Privatize();
  3678. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
  3679. CGF.EmitExprAsInit(Init, VD, PrivateLValue,
  3680. /*capturedByInit=*/false);
  3681. }
  3682. } else {
  3683. CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
  3684. }
  3685. }
  3686. ++FI;
  3687. }
  3688. }
3689. /// Check if non-trivial initialization (and thus a duplication function) is required for taskloops.
  3690. static bool checkInitIsRequired(CodeGenFunction &CGF,
  3691. ArrayRef<PrivateDataTy> Privates) {
  3692. bool InitRequired = false;
  3693. for (const PrivateDataTy &Pair : Privates) {
  3694. if (Pair.second.isLocalPrivate())
  3695. continue;
  3696. const VarDecl *VD = Pair.second.PrivateCopy;
  3697. const Expr *Init = VD->getAnyInitializer();
  3698. InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
  3699. !CGF.isTrivialInitializer(Init));
  3700. if (InitRequired)
  3701. break;
  3702. }
  3703. return InitRequired;
  3704. }
  3705. /// Emit task_dup function (for initialization of
  3706. /// private/firstprivate/lastprivate vars and last_iter flag)
  3707. /// \code
  3708. /// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
  3709. /// lastpriv) {
  3710. /// // setup lastprivate flag
  3711. /// task_dst->last = lastpriv;
  3712. /// // could be constructor calls here...
  3713. /// }
  3714. /// \endcode
  3715. static llvm::Value *
  3716. emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
  3717. const OMPExecutableDirective &D,
  3718. QualType KmpTaskTWithPrivatesPtrQTy,
  3719. const RecordDecl *KmpTaskTWithPrivatesQTyRD,
  3720. const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
  3721. QualType SharedsPtrTy, const OMPTaskDataTy &Data,
  3722. ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
  3723. ASTContext &C = CGM.getContext();
  3724. FunctionArgList Args;
  3725. ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3726. KmpTaskTWithPrivatesPtrQTy,
  3727. ImplicitParamDecl::Other);
  3728. ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  3729. KmpTaskTWithPrivatesPtrQTy,
  3730. ImplicitParamDecl::Other);
  3731. ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
  3732. ImplicitParamDecl::Other);
  3733. Args.push_back(&DstArg);
  3734. Args.push_back(&SrcArg);
  3735. Args.push_back(&LastprivArg);
  3736. const auto &TaskDupFnInfo =
  3737. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  3738. llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  3739. std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
  3740. auto *TaskDup = llvm::Function::Create(
  3741. TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  3742. CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
  3743. TaskDup->setDoesNotRecurse();
  3744. CodeGenFunction CGF(CGM);
  3745. CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
  3746. Loc);
  3747. LValue TDBase = CGF.EmitLoadOfPointerLValue(
  3748. CGF.GetAddrOfLocalVar(&DstArg),
  3749. KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  3750. // task_dst->liter = lastpriv;
  3751. if (WithLastIter) {
  3752. auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
  3753. LValue Base = CGF.EmitLValueForField(
  3754. TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  3755. LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
  3756. llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
  3757. CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
  3758. CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  3759. }
  3760. // Emit initial values for private copies (if any).
  3761. assert(!Privates.empty());
  3762. Address KmpTaskSharedsPtr = Address::invalid();
  3763. if (!Data.FirstprivateVars.empty()) {
  3764. LValue TDBase = CGF.EmitLoadOfPointerLValue(
  3765. CGF.GetAddrOfLocalVar(&SrcArg),
  3766. KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  3767. LValue Base = CGF.EmitLValueForField(
  3768. TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  3769. KmpTaskSharedsPtr = Address(
  3770. CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
  3771. Base, *std::next(KmpTaskTQTyRD->field_begin(),
  3772. KmpTaskTShareds)),
  3773. Loc),
  3774. CGM.getNaturalTypeAlignment(SharedsTy));
  3775. }
  3776. emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
  3777. SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  3778. CGF.FinishFunction();
  3779. return TaskDup;
  3780. }
  3781. /// Checks if destructor function is required to be generated.
  3782. /// \return true if cleanups are required, false otherwise.
  3783. static bool
  3784. checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
  3785. ArrayRef<PrivateDataTy> Privates) {
  3786. for (const PrivateDataTy &P : Privates) {
  3787. if (P.second.isLocalPrivate())
  3788. continue;
  3789. QualType Ty = P.second.Original->getType().getNonReferenceType();
  3790. if (Ty.isDestructedType())
  3791. return true;
  3792. }
  3793. return false;
  3794. }
  3795. namespace {
  3796. /// Loop generator for OpenMP iterator expression.
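/// The constructor privatizes the iterator and counter variables and emits the
/// loop headers (counter = 0; cont: if (counter < upper) goto body); the
/// destructor emits the counter increments and closes the loops in reverse
/// order.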
  3797. class OMPIteratorGeneratorScope final
  3798. : public CodeGenFunction::OMPPrivateScope {
  3799. CodeGenFunction &CGF;
  3800. const OMPIteratorExpr *E = nullptr;
  3801. SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
  3802. SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
  3803. OMPIteratorGeneratorScope() = delete;
  3804. OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
  3805. public:
  3806. OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
  3807. : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
  3808. if (!E)
  3809. return;
  3810. SmallVector<llvm::Value *, 4> Uppers;
  3811. for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
  3812. Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
  3813. const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
  3814. addPrivate(VD, [&CGF, VD]() {
  3815. return CGF.CreateMemTemp(VD->getType(), VD->getName());
  3816. });
  3817. const OMPIteratorHelperData &HelperData = E->getHelper(I);
  3818. addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
  3819. return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
  3820. "counter.addr");
  3821. });
  3822. }
  3823. Privatize();
  3824. for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
  3825. const OMPIteratorHelperData &HelperData = E->getHelper(I);
  3826. LValue CLVal =
  3827. CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
  3828. HelperData.CounterVD->getType());
  3829. // Counter = 0;
  3830. CGF.EmitStoreOfScalar(
  3831. llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
  3832. CLVal);
  3833. CodeGenFunction::JumpDest &ContDest =
  3834. ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
  3835. CodeGenFunction::JumpDest &ExitDest =
  3836. ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
3837. // N = <number-of-iterations>;
  3838. llvm::Value *N = Uppers[I];
  3839. // cont:
  3840. // if (Counter < N) goto body; else goto exit;
  3841. CGF.EmitBlock(ContDest.getBlock());
  3842. auto *CVal =
  3843. CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
  3844. llvm::Value *Cmp =
  3845. HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
  3846. ? CGF.Builder.CreateICmpSLT(CVal, N)
  3847. : CGF.Builder.CreateICmpULT(CVal, N);
  3848. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
  3849. CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
  3850. // body:
  3851. CGF.EmitBlock(BodyBB);
  3852. // Iteri = Begini + Counter * Stepi;
  3853. CGF.EmitIgnoredExpr(HelperData.Update);
  3854. }
  3855. }
  3856. ~OMPIteratorGeneratorScope() {
  3857. if (!E)
  3858. return;
  3859. for (unsigned I = E->numOfIterators(); I > 0; --I) {
  3860. // Counter = Counter + 1;
  3861. const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
  3862. CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
  3863. // goto cont;
  3864. CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
  3865. // exit:
  3866. CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
  3867. }
  3868. }
  3869. };
  3870. } // namespace
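/// Returns the base pointer and the size in bytes of the data referenced by
/// the expression \p E. For array shaping expressions the size is the element
/// size multiplied by all dimensions; for array sections it is the distance
/// from the base address to one past the upper-bound element; otherwise it is
/// the size of the expression's type.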
  3871. static std::pair<llvm::Value *, llvm::Value *>
  3872. getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
  3873. const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
  3874. llvm::Value *Addr;
  3875. if (OASE) {
  3876. const Expr *Base = OASE->getBase();
  3877. Addr = CGF.EmitScalarExpr(Base);
  3878. } else {
  3879. Addr = CGF.EmitLValue(E).getPointer(CGF);
  3880. }
  3881. llvm::Value *SizeVal;
  3882. QualType Ty = E->getType();
  3883. if (OASE) {
  3884. SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
  3885. for (const Expr *SE : OASE->getDimensions()) {
  3886. llvm::Value *Sz = CGF.EmitScalarExpr(SE);
  3887. Sz = CGF.EmitScalarConversion(
  3888. Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
  3889. SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
  3890. }
  3891. } else if (const auto *ASE =
  3892. dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
  3893. LValue UpAddrLVal =
  3894. CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
  3895. Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
  3896. llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
  3897. UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1);
  3898. llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
  3899. llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
  3900. SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
  3901. } else {
  3902. SizeVal = CGF.getTypeSize(Ty);
  3903. }
  3904. return std::make_pair(Addr, SizeVal);
  3905. }
3906. /// Builds kmp_task_affinity_info_t, if it is not built yet, and builds flags type.
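/// The implicit record built here roughly corresponds to:
/// \code
/// struct kmp_task_affinity_info_t {
///   intptr_t base_addr;
///   size_t len;
///   uint32_t flags;
/// };
/// \endcode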
  3907. static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
  3908. QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
  3909. if (KmpTaskAffinityInfoTy.isNull()) {
  3910. RecordDecl *KmpAffinityInfoRD =
  3911. C.buildImplicitRecord("kmp_task_affinity_info_t");
  3912. KmpAffinityInfoRD->startDefinition();
  3913. addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
  3914. addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
  3915. addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
  3916. KmpAffinityInfoRD->completeDefinition();
  3917. KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
  3918. }
  3919. }
  3920. CGOpenMPRuntime::TaskResultTy
  3921. CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
  3922. const OMPExecutableDirective &D,
  3923. llvm::Function *TaskFunction, QualType SharedsTy,
  3924. Address Shareds, const OMPTaskDataTy &Data) {
  3925. ASTContext &C = CGM.getContext();
  3926. llvm::SmallVector<PrivateDataTy, 4> Privates;
3927. // Aggregate privates and sort them by decreasing alignment.
  3928. const auto *I = Data.PrivateCopies.begin();
  3929. for (const Expr *E : Data.PrivateVars) {
  3930. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
  3931. Privates.emplace_back(
  3932. C.getDeclAlign(VD),
  3933. PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
  3934. /*PrivateElemInit=*/nullptr));
  3935. ++I;
  3936. }
  3937. I = Data.FirstprivateCopies.begin();
  3938. const auto *IElemInitRef = Data.FirstprivateInits.begin();
  3939. for (const Expr *E : Data.FirstprivateVars) {
  3940. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
  3941. Privates.emplace_back(
  3942. C.getDeclAlign(VD),
  3943. PrivateHelpersTy(
  3944. E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
  3945. cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
  3946. ++I;
  3947. ++IElemInitRef;
  3948. }
  3949. I = Data.LastprivateCopies.begin();
  3950. for (const Expr *E : Data.LastprivateVars) {
  3951. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
  3952. Privates.emplace_back(
  3953. C.getDeclAlign(VD),
  3954. PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
  3955. /*PrivateElemInit=*/nullptr));
  3956. ++I;
  3957. }
  3958. for (const VarDecl *VD : Data.PrivateLocals) {
  3959. if (isAllocatableDecl(VD))
  3960. Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
  3961. else
  3962. Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
  3963. }
  3964. llvm::stable_sort(Privates,
  3965. [](const PrivateDataTy &L, const PrivateDataTy &R) {
  3966. return L.first > R.first;
  3967. });
  3968. QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  3969. // Build type kmp_routine_entry_t (if not built yet).
  3970. emitKmpRoutineEntryT(KmpInt32Ty);
  3971. // Build type kmp_task_t (if not built yet).
  3972. if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
  3973. if (SavedKmpTaskloopTQTy.isNull()) {
  3974. SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
  3975. CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
  3976. }
  3977. KmpTaskTQTy = SavedKmpTaskloopTQTy;
  3978. } else {
  3979. assert((D.getDirectiveKind() == OMPD_task ||
  3980. isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
  3981. isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
  3982. "Expected taskloop, task or target directive");
  3983. if (SavedKmpTaskTQTy.isNull()) {
  3984. SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
  3985. CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
  3986. }
  3987. KmpTaskTQTy = SavedKmpTaskTQTy;
  3988. }
  3989. const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  3990. // Build particular struct kmp_task_t for the given task.
  3991. const RecordDecl *KmpTaskTWithPrivatesQTyRD =
  3992. createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  3993. QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  3994. QualType KmpTaskTWithPrivatesPtrQTy =
  3995. C.getPointerType(KmpTaskTWithPrivatesQTy);
  3996. llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  3997. llvm::Type *KmpTaskTWithPrivatesPtrTy =
  3998. KmpTaskTWithPrivatesTy->getPointerTo();
  3999. llvm::Value *KmpTaskTWithPrivatesTySize =
  4000. CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  4001. QualType SharedsPtrTy = C.getPointerType(SharedsTy);
  4002. // Emit initial values for private copies (if any).
  4003. llvm::Value *TaskPrivatesMap = nullptr;
  4004. llvm::Type *TaskPrivatesMapTy =
  4005. std::next(TaskFunction->arg_begin(), 3)->getType();
  4006. if (!Privates.empty()) {
  4007. auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  4008. TaskPrivatesMap =
  4009. emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
  4010. TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4011. TaskPrivatesMap, TaskPrivatesMapTy);
  4012. } else {
  4013. TaskPrivatesMap = llvm::ConstantPointerNull::get(
  4014. cast<llvm::PointerType>(TaskPrivatesMapTy));
  4015. }
  4016. // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  4017. // kmp_task_t *tt);
  4018. llvm::Function *TaskEntry = emitProxyTaskFunction(
  4019. CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
  4020. KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
  4021. TaskPrivatesMap);
  4022. // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  4023. // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  4024. // kmp_routine_entry_t *task_entry);
  4025. // Task flags. Format is taken from
  4026. // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
  4027. // description of kmp_tasking_flags struct.
  4028. enum {
  4029. TiedFlag = 0x1,
  4030. FinalFlag = 0x2,
  4031. DestructorsFlag = 0x8,
  4032. PriorityFlag = 0x20,
  4033. DetachableFlag = 0x40,
  4034. };
  4035. unsigned Flags = Data.Tied ? TiedFlag : 0;
  4036. bool NeedsCleanup = false;
  4037. if (!Privates.empty()) {
  4038. NeedsCleanup =
  4039. checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
  4040. if (NeedsCleanup)
  4041. Flags = Flags | DestructorsFlag;
  4042. }
  4043. if (Data.Priority.getInt())
  4044. Flags = Flags | PriorityFlag;
  4045. if (D.hasClausesOfKind<OMPDetachClause>())
  4046. Flags = Flags | DetachableFlag;
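// The 'final' flag is either selected at runtime from the final clause
// condition or folded into a constant when the condition is known at compile
// time.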
  4047. llvm::Value *TaskFlags =
  4048. Data.Final.getPointer()
  4049. ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
  4050. CGF.Builder.getInt32(FinalFlag),
  4051. CGF.Builder.getInt32(/*C=*/0))
  4052. : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  4053. TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  4054. llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  4055. SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
  4056. getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
  4057. SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4058. TaskEntry, KmpRoutineEntryPtrTy)};
  4059. llvm::Value *NewTask;
  4060. if (D.hasClausesOfKind<OMPNowaitClause>()) {
  4061. // Check if we have any device clause associated with the directive.
  4062. const Expr *Device = nullptr;
  4063. if (auto *C = D.getSingleClause<OMPDeviceClause>())
  4064. Device = C->getDevice();
  4065. // Emit device ID if any otherwise use default value.
  4066. llvm::Value *DeviceID;
  4067. if (Device)
  4068. DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
  4069. CGF.Int64Ty, /*isSigned=*/true);
  4070. else
  4071. DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
  4072. AllocArgs.push_back(DeviceID);
  4073. NewTask = CGF.EmitRuntimeCall(
  4074. OMPBuilder.getOrCreateRuntimeFunction(
  4075. CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
  4076. AllocArgs);
  4077. } else {
  4078. NewTask =
  4079. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  4080. CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
  4081. AllocArgs);
  4082. }
  4083. // Emit detach clause initialization.
  4084. // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
  4085. // task_descriptor);
  4086. if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
  4087. const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
  4088. LValue EvtLVal = CGF.EmitLValue(Evt);
  4089. // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
  4090. // int gtid, kmp_task_t *task);
  4091. llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
  4092. llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
  4093. Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
  4094. llvm::Value *EvtVal = CGF.EmitRuntimeCall(
  4095. OMPBuilder.getOrCreateRuntimeFunction(
  4096. CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
  4097. {Loc, Tid, NewTask});
  4098. EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
  4099. Evt->getExprLoc());
  4100. CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
  4101. }
  4102. // Process affinity clauses.
  4103. if (D.hasClausesOfKind<OMPAffinityClause>()) {
  4104. // Process list of affinity data.
  4105. ASTContext &C = CGM.getContext();
  4106. Address AffinitiesArray = Address::invalid();
  4107. // Calculate number of elements to form the array of affinity data.
  4108. llvm::Value *NumOfElements = nullptr;
  4109. unsigned NumAffinities = 0;
  4110. for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
  4111. if (const Expr *Modifier = C->getModifier()) {
  4112. const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
  4113. for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
  4114. llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
  4115. Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
  4116. NumOfElements =
  4117. NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
  4118. }
  4119. } else {
  4120. NumAffinities += C->varlist_size();
  4121. }
  4122. }
  4123. getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
4124. // Field ids in the kmp_task_affinity_info record.
  4125. enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
  4126. QualType KmpTaskAffinityInfoArrayTy;
  4127. if (NumOfElements) {
  4128. NumOfElements = CGF.Builder.CreateNUWAdd(
  4129. llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
  4130. auto *OVE = new (C) OpaqueValueExpr(
  4131. Loc,
  4132. C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
  4133. VK_PRValue);
  4134. CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
  4135. RValue::get(NumOfElements));
  4136. KmpTaskAffinityInfoArrayTy =
  4137. C.getVariableArrayType(KmpTaskAffinityInfoTy, OVE, ArrayType::Normal,
  4138. /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
  4139. // Properly emit variable-sized array.
  4140. auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
  4141. ImplicitParamDecl::Other);
  4142. CGF.EmitVarDecl(*PD);
  4143. AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
  4144. NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
  4145. /*isSigned=*/false);
  4146. } else {
  4147. KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
  4148. KmpTaskAffinityInfoTy,
  4149. llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
  4150. ArrayType::Normal, /*IndexTypeQuals=*/0);
  4151. AffinitiesArray =
  4152. CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
  4153. AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
  4154. NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
  4155. /*isSigned=*/false);
  4156. }
  4157. const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
  4158. // Fill array by elements without iterators.
  4159. unsigned Pos = 0;
  4160. bool HasIterator = false;
  4161. for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
  4162. if (C->getModifier()) {
  4163. HasIterator = true;
  4164. continue;
  4165. }
  4166. for (const Expr *E : C->varlists()) {
  4167. llvm::Value *Addr;
  4168. llvm::Value *Size;
  4169. std::tie(Addr, Size) = getPointerAndSize(CGF, E);
  4170. LValue Base =
  4171. CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
  4172. KmpTaskAffinityInfoTy);
  4173. // affs[i].base_addr = &<Affinities[i].second>;
  4174. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4175. Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
  4176. CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
  4177. BaseAddrLVal);
  4178. // affs[i].len = sizeof(<Affinities[i].second>);
  4179. LValue LenLVal = CGF.EmitLValueForField(
  4180. Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
  4181. CGF.EmitStoreOfScalar(Size, LenLVal);
  4182. ++Pos;
  4183. }
  4184. }
  4185. LValue PosLVal;
  4186. if (HasIterator) {
  4187. PosLVal = CGF.MakeAddrLValue(
  4188. CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
  4189. C.getSizeType());
  4190. CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
  4191. }
  4192. // Process elements with iterators.
  4193. for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
  4194. const Expr *Modifier = C->getModifier();
  4195. if (!Modifier)
  4196. continue;
  4197. OMPIteratorGeneratorScope IteratorScope(
  4198. CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
  4199. for (const Expr *E : C->varlists()) {
  4200. llvm::Value *Addr;
  4201. llvm::Value *Size;
  4202. std::tie(Addr, Size) = getPointerAndSize(CGF, E);
  4203. llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
  4204. LValue Base = CGF.MakeAddrLValue(
  4205. CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
  4206. // affs[i].base_addr = &<Affinities[i].second>;
  4207. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4208. Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
  4209. CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
  4210. BaseAddrLVal);
  4211. // affs[i].len = sizeof(<Affinities[i].second>);
  4212. LValue LenLVal = CGF.EmitLValueForField(
  4213. Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
  4214. CGF.EmitStoreOfScalar(Size, LenLVal);
  4215. Idx = CGF.Builder.CreateNUWAdd(
  4216. Idx, llvm::ConstantInt::get(Idx->getType(), 1));
  4217. CGF.EmitStoreOfScalar(Idx, PosLVal);
  4218. }
  4219. }
  4220. // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
  4221. // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
  4222. // naffins, kmp_task_affinity_info_t *affin_list);
  4223. llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
  4224. llvm::Value *GTid = getThreadID(CGF, Loc);
  4225. llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4226. AffinitiesArray.getPointer(), CGM.VoidPtrTy);
  4227. // FIXME: Emit the function and ignore its result for now unless the
  4228. // runtime function is properly implemented.
  4229. (void)CGF.EmitRuntimeCall(
  4230. OMPBuilder.getOrCreateRuntimeFunction(
  4231. CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
  4232. {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
  4233. }
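// Cast the allocated task to the kmp_task_t record with privates and form an
// lvalue for the embedded kmp_task_t part.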
  4234. llvm::Value *NewTaskNewTaskTTy =
  4235. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4236. NewTask, KmpTaskTWithPrivatesPtrTy);
  4237. LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
  4238. KmpTaskTWithPrivatesQTy);
  4239. LValue TDBase =
  4240. CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  4241. // Fill the data in the resulting kmp_task_t record.
  4242. // Copy shareds if there are any.
  4243. Address KmpTaskSharedsPtr = Address::invalid();
  4244. if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
  4245. KmpTaskSharedsPtr =
  4246. Address(CGF.EmitLoadOfScalar(
  4247. CGF.EmitLValueForField(
  4248. TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
  4249. KmpTaskTShareds)),
  4250. Loc),
  4251. CGM.getNaturalTypeAlignment(SharedsTy));
  4252. LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
  4253. LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
  4254. CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
  4255. }
  4256. // Emit initial values for private copies (if any).
  4257. TaskResultTy Result;
  4258. if (!Privates.empty()) {
  4259. emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
  4260. SharedsTy, SharedsPtrTy, Data, Privates,
  4261. /*ForDup=*/false);
  4262. if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
  4263. (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
  4264. Result.TaskDupFn = emitTaskDupFunction(
  4265. CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
  4266. KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
  4267. /*WithLastIter=*/!Data.LastprivateVars.empty());
  4268. }
  4269. }
  4270. // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  4271. enum { Priority = 0, Destructors = 1 };
  4272. // Provide pointer to function with destructors for privates.
  4273. auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  4274. const RecordDecl *KmpCmplrdataUD =
  4275. (*FI)->getType()->getAsUnionType()->getDecl();
  4276. if (NeedsCleanup) {
  4277. llvm::Value *DestructorFn = emitDestructorsFunction(
  4278. CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
  4279. KmpTaskTWithPrivatesQTy);
  4280. LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
  4281. LValue DestructorsLV = CGF.EmitLValueForField(
  4282. Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
  4283. CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4284. DestructorFn, KmpRoutineEntryPtrTy),
  4285. DestructorsLV);
  4286. }
  4287. // Set priority.
  4288. if (Data.Priority.getInt()) {
  4289. LValue Data2LV = CGF.EmitLValueForField(
  4290. TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
  4291. LValue PriorityLV = CGF.EmitLValueForField(
  4292. Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
  4293. CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  4294. }
  4295. Result.NewTask = NewTask;
  4296. Result.TaskEntry = TaskEntry;
  4297. Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  4298. Result.TDBase = TDBase;
  4299. Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  4300. return Result;
  4301. }
  4302. namespace {
  4303. /// Dependence kind for RTL.
  4304. enum RTLDependenceKindTy {
  4305. DepIn = 0x01,
  4306. DepInOut = 0x3,
  4307. DepMutexInOutSet = 0x4
  4308. };
4309. /// Field ids in the kmp_depend_info record.
  4310. enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
  4311. } // namespace
  4312. /// Translates internal dependency kind into the runtime kind.
  4313. static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
  4314. RTLDependenceKindTy DepKind;
  4315. switch (K) {
  4316. case OMPC_DEPEND_in:
  4317. DepKind = DepIn;
  4318. break;
  4319. // Out and InOut dependencies must use the same code.
  4320. case OMPC_DEPEND_out:
  4321. case OMPC_DEPEND_inout:
  4322. DepKind = DepInOut;
  4323. break;
  4324. case OMPC_DEPEND_mutexinoutset:
  4325. DepKind = DepMutexInOutSet;
  4326. break;
  4327. case OMPC_DEPEND_source:
  4328. case OMPC_DEPEND_sink:
  4329. case OMPC_DEPEND_depobj:
  4330. case OMPC_DEPEND_unknown:
  4331. llvm_unreachable("Unknown task dependence type");
  4332. }
  4333. return DepKind;
  4334. }
  4335. /// Builds kmp_depend_info, if it is not built yet, and builds flags type.
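/// The record roughly corresponds to (on typical targets where bool is 8 bits
/// wide):
/// \code
/// struct kmp_depend_info {
///   intptr_t base_addr;
///   size_t len;
///   unsigned char flags; // bool-width unsigned integer
/// };
/// \endcode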
  4336. static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
  4337. QualType &FlagsTy) {
  4338. FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
  4339. if (KmpDependInfoTy.isNull()) {
  4340. RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
  4341. KmpDependInfoRD->startDefinition();
  4342. addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
  4343. addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
  4344. addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
  4345. KmpDependInfoRD->completeDefinition();
  4346. KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
  4347. }
  4348. }
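/// Returns the number of dependency entries stored in a depobj object and the
/// base lvalue of its kmp_depend_info array. The count is read from the
/// base_addr field of the element preceding the array (deps[-1]).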
  4349. std::pair<llvm::Value *, LValue>
  4350. CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
  4351. SourceLocation Loc) {
  4352. ASTContext &C = CGM.getContext();
  4353. QualType FlagsTy;
  4354. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4355. RecordDecl *KmpDependInfoRD =
  4356. cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  4357. LValue Base = CGF.EmitLoadOfPointerLValue(
  4358. DepobjLVal.getAddress(CGF),
  4359. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  4360. QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  4361. Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4362. Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
  4363. Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
  4364. Base.getTBAAInfo());
  4365. Address DepObjAddr = CGF.Builder.CreateGEP(
  4366. Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  4367. LValue NumDepsBase = CGF.MakeAddrLValue(
  4368. DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
  4369. // NumDeps = deps[i].base_addr;
  4370. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4371. NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  4372. llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
  4373. return std::make_pair(NumDeps, Base);
  4374. }
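/// Emits kmp_depend_info entries for every expression in a single depend
/// clause into \p DependenciesArray, starting at \p Pos (either a constant
/// index or a runtime counter lvalue), and advances the position accordingly.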
  4375. static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
  4376. llvm::PointerUnion<unsigned *, LValue *> Pos,
  4377. const OMPTaskDataTy::DependData &Data,
  4378. Address DependenciesArray) {
  4379. CodeGenModule &CGM = CGF.CGM;
  4380. ASTContext &C = CGM.getContext();
  4381. QualType FlagsTy;
  4382. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4383. RecordDecl *KmpDependInfoRD =
  4384. cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  4385. llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
  4386. OMPIteratorGeneratorScope IteratorScope(
  4387. CGF, cast_or_null<OMPIteratorExpr>(
  4388. Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
  4389. : nullptr));
  4390. for (const Expr *E : Data.DepExprs) {
  4391. llvm::Value *Addr;
  4392. llvm::Value *Size;
  4393. std::tie(Addr, Size) = getPointerAndSize(CGF, E);
  4394. LValue Base;
  4395. if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
  4396. Base = CGF.MakeAddrLValue(
  4397. CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
  4398. } else {
  4399. LValue &PosLVal = *Pos.get<LValue *>();
  4400. llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
  4401. Base = CGF.MakeAddrLValue(
  4402. CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
  4403. }
  4404. // deps[i].base_addr = &<Dependencies[i].second>;
  4405. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4406. Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  4407. CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
  4408. BaseAddrLVal);
  4409. // deps[i].len = sizeof(<Dependencies[i].second>);
  4410. LValue LenLVal = CGF.EmitLValueForField(
  4411. Base, *std::next(KmpDependInfoRD->field_begin(), Len));
  4412. CGF.EmitStoreOfScalar(Size, LenLVal);
  4413. // deps[i].flags = <Dependencies[i].first>;
  4414. RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
  4415. LValue FlagsLVal = CGF.EmitLValueForField(
  4416. Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
  4417. CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
  4418. FlagsLVal);
  4419. if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
  4420. ++(*P);
  4421. } else {
  4422. LValue &PosLVal = *Pos.get<LValue *>();
  4423. llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
  4424. Idx = CGF.Builder.CreateNUWAdd(Idx,
  4425. llvm::ConstantInt::get(Idx->getType(), 1));
  4426. CGF.EmitStoreOfScalar(Idx, PosLVal);
  4427. }
  4428. }
  4429. }
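/// For a depobj-kind depend clause, returns, for each depobj expression, the
/// number of kmp_depend_info entries it stores (read from the base_addr field
/// of the element preceding the depobj array).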
  4430. static SmallVector<llvm::Value *, 4>
  4431. emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
  4432. const OMPTaskDataTy::DependData &Data) {
  4433. assert(Data.DepKind == OMPC_DEPEND_depobj &&
  4434. "Expected depobj dependecy kind.");
  4435. SmallVector<llvm::Value *, 4> Sizes;
  4436. SmallVector<LValue, 4> SizeLVals;
  4437. ASTContext &C = CGF.getContext();
  4438. QualType FlagsTy;
  4439. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4440. RecordDecl *KmpDependInfoRD =
  4441. cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  4442. QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  4443. llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
  4444. {
  4445. OMPIteratorGeneratorScope IteratorScope(
  4446. CGF, cast_or_null<OMPIteratorExpr>(
  4447. Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
  4448. : nullptr));
  4449. for (const Expr *E : Data.DepExprs) {
  4450. LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
  4451. LValue Base = CGF.EmitLoadOfPointerLValue(
  4452. DepobjLVal.getAddress(CGF),
  4453. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  4454. Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4455. Base.getAddress(CGF), KmpDependInfoPtrT);
  4456. Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
  4457. Base.getTBAAInfo());
  4458. Address DepObjAddr = CGF.Builder.CreateGEP(
  4459. Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  4460. LValue NumDepsBase = CGF.MakeAddrLValue(
  4461. DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
  4462. // NumDeps = deps[i].base_addr;
  4463. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4464. NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  4465. llvm::Value *NumDeps =
  4466. CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
  4467. LValue NumLVal = CGF.MakeAddrLValue(
  4468. CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
  4469. C.getUIntPtrType());
  4470. CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
  4471. NumLVal.getAddress(CGF));
  4472. llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
  4473. llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
  4474. CGF.EmitStoreOfScalar(Add, NumLVal);
  4475. SizeLVals.push_back(NumLVal);
  4476. }
  4477. }
  4478. for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
  4479. llvm::Value *Size =
  4480. CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
  4481. Sizes.push_back(Size);
  4482. }
  4483. return Sizes;
  4484. }
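/// For a depobj-kind depend clause, copies the kmp_depend_info entries of each
/// depobj expression into \p DependenciesArray at the runtime position given
/// by \p PosLVal and advances the position by the number of copied entries.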
  4485. static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
  4486. LValue PosLVal,
  4487. const OMPTaskDataTy::DependData &Data,
  4488. Address DependenciesArray) {
  4489. assert(Data.DepKind == OMPC_DEPEND_depobj &&
  4490. "Expected depobj dependecy kind.");
  4491. ASTContext &C = CGF.getContext();
  4492. QualType FlagsTy;
  4493. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4494. RecordDecl *KmpDependInfoRD =
  4495. cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  4496. QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  4497. llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
  4498. llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
  4499. {
  4500. OMPIteratorGeneratorScope IteratorScope(
  4501. CGF, cast_or_null<OMPIteratorExpr>(
  4502. Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
  4503. : nullptr));
  4504. for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
  4505. const Expr *E = Data.DepExprs[I];
  4506. LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
  4507. LValue Base = CGF.EmitLoadOfPointerLValue(
  4508. DepobjLVal.getAddress(CGF),
  4509. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  4510. Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4511. Base.getAddress(CGF), KmpDependInfoPtrT);
  4512. Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
  4513. Base.getTBAAInfo());
  4514. // Get number of elements in a single depobj.
  4515. Address DepObjAddr = CGF.Builder.CreateGEP(
  4516. Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  4517. LValue NumDepsBase = CGF.MakeAddrLValue(
  4518. DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
  4519. // NumDeps = deps[i].base_addr;
  4520. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4521. NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  4522. llvm::Value *NumDeps =
  4523. CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
4524. // memcpy the dependency data.
  4525. llvm::Value *Size = CGF.Builder.CreateNUWMul(
  4526. ElSize,
  4527. CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
  4528. llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
  4529. Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
  4530. CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
  4531. // Increase pos.
4532. // pos += numDeps;
  4533. llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
  4534. CGF.EmitStoreOfScalar(Add, PosLVal);
  4535. }
  4536. }
  4537. }
  4538. std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
  4539. CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
  4540. SourceLocation Loc) {
  4541. if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
  4542. return D.DepExprs.empty();
  4543. }))
  4544. return std::make_pair(nullptr, Address::invalid());
  4545. // Process list of dependencies.
  4546. ASTContext &C = CGM.getContext();
  4547. Address DependenciesArray = Address::invalid();
  4548. llvm::Value *NumOfElements = nullptr;
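// Count the regular dependencies (neither depobj nor iterator-based) that can
// be counted at compile time; depobj and iterator-based counts are computed at
// runtime below.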
  4549. unsigned NumDependencies = std::accumulate(
  4550. Dependencies.begin(), Dependencies.end(), 0,
  4551. [](unsigned V, const OMPTaskDataTy::DependData &D) {
  4552. return D.DepKind == OMPC_DEPEND_depobj
  4553. ? V
  4554. : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
  4555. });
  4556. QualType FlagsTy;
  4557. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4558. bool HasDepobjDeps = false;
  4559. bool HasRegularWithIterators = false;
  4560. llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
  4561. llvm::Value *NumOfRegularWithIterators =
  4562. llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4563. // Calculate the number of depobj dependencies and regular deps with iterators.
  4564. for (const OMPTaskDataTy::DependData &D : Dependencies) {
  4565. if (D.DepKind == OMPC_DEPEND_depobj) {
  4566. SmallVector<llvm::Value *, 4> Sizes =
  4567. emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
  4568. for (llvm::Value *Size : Sizes) {
  4569. NumOfDepobjElements =
  4570. CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
  4571. }
  4572. HasDepobjDeps = true;
  4573. continue;
  4574. }
  4575. // Include number of iterations, if any.
  4576. if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
  4577. for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
  4578. llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
  4579. Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
  4580. llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
  4581. Sz, llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
  4582. NumOfRegularWithIterators =
  4583. CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
  4584. }
  4585. HasRegularWithIterators = true;
  4586. continue;
  4587. }
  4588. }
  4589. QualType KmpDependInfoArrayTy;
  4590. if (HasDepobjDeps || HasRegularWithIterators) {
  4591. NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
  4592. /*isSigned=*/false);
  4593. if (HasDepobjDeps) {
  4594. NumOfElements =
  4595. CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
  4596. }
  4597. if (HasRegularWithIterators) {
  4598. NumOfElements =
  4599. CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
  4600. }
  4601. auto *OVE = new (C) OpaqueValueExpr(
  4602. Loc, C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
  4603. VK_PRValue);
  4604. CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
  4605. RValue::get(NumOfElements));
  4606. KmpDependInfoArrayTy =
  4607. C.getVariableArrayType(KmpDependInfoTy, OVE, ArrayType::Normal,
  4608. /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
  4609. // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
  4610. // Properly emit variable-sized array.
  4611. auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
  4612. ImplicitParamDecl::Other);
  4613. CGF.EmitVarDecl(*PD);
  4614. DependenciesArray = CGF.GetAddrOfLocalVar(PD);
  4615. NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
  4616. /*isSigned=*/false);
  4617. } else {
  4618. KmpDependInfoArrayTy = C.getConstantArrayType(
  4619. KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
  4620. ArrayType::Normal, /*IndexTypeQuals=*/0);
  4621. DependenciesArray =
  4622. CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
  4623. DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
  4624. NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
  4625. /*isSigned=*/false);
  4626. }
  4627. unsigned Pos = 0;
  4628. for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
  4629. if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
  4630. Dependencies[I].IteratorExpr)
  4631. continue;
  4632. emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
  4633. DependenciesArray);
  4634. }
4635. // Copy regular dependencies with iterators.
  4636. LValue PosLVal = CGF.MakeAddrLValue(
  4637. CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
  4638. CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
  4639. for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
  4640. if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
  4641. !Dependencies[I].IteratorExpr)
  4642. continue;
  4643. emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
  4644. DependenciesArray);
  4645. }
  4646. // Copy final depobj arrays without iterators.
  4647. if (HasDepobjDeps) {
  4648. for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
  4649. if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
  4650. continue;
  4651. emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
  4652. DependenciesArray);
  4653. }
  4654. }
  4655. DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4656. DependenciesArray, CGF.VoidPtrTy);
  4657. return std::make_pair(NumOfElements, DependenciesArray);
  4658. }
  4659. Address CGOpenMPRuntime::emitDepobjDependClause(
  4660. CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
  4661. SourceLocation Loc) {
  4662. if (Dependencies.DepExprs.empty())
  4663. return Address::invalid();
  4664. // Process list of dependencies.
  4665. ASTContext &C = CGM.getContext();
  4666. Address DependenciesArray = Address::invalid();
  4667. unsigned NumDependencies = Dependencies.DepExprs.size();
  4668. QualType FlagsTy;
  4669. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4670. RecordDecl *KmpDependInfoRD =
  4671. cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  4672. llvm::Value *Size;
  4673. // Define type kmp_depend_info[<Dependencies.size()>];
  4674. // For depobj reserve one extra element to store the number of elements.
  4675. // It is required to handle depobj(x) update(in) construct.
  4676. // kmp_depend_info[<Dependencies.size()>] deps;
  4677. llvm::Value *NumDepsVal;
  4678. CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
  4679. if (const auto *IE =
  4680. cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
  4681. NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
  4682. for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
  4683. llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
  4684. Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
  4685. NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
  4686. }
  4687. Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
  4688. NumDepsVal);
  4689. CharUnits SizeInBytes =
  4690. C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
  4691. llvm::Value *RecSize = CGM.getSize(SizeInBytes);
  4692. Size = CGF.Builder.CreateNUWMul(Size, RecSize);
  4693. NumDepsVal =
  4694. CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
  4695. } else {
  4696. QualType KmpDependInfoArrayTy = C.getConstantArrayType(
  4697. KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
  4698. nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
  4699. CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
  4700. Size = CGM.getSize(Sz.alignTo(Align));
  4701. NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
  4702. }
  4703. // Need to allocate on the dynamic memory.
  4704. llvm::Value *ThreadID = getThreadID(CGF, Loc);
  4705. // Use default allocator.
  4706. llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  4707. llvm::Value *Args[] = {ThreadID, Size, Allocator};
  4708. llvm::Value *Addr =
  4709. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  4710. CGM.getModule(), OMPRTL___kmpc_alloc),
  4711. Args, ".dep.arr.addr");
  4712. Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4713. Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
  4714. DependenciesArray = Address(Addr, Align);
  4715. // Write number of elements in the first element of array for depobj.
  4716. LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
  4717. // deps[i].base_addr = NumDependencies;
  4718. LValue BaseAddrLVal = CGF.EmitLValueForField(
  4719. Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  4720. CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
  4721. llvm::PointerUnion<unsigned *, LValue *> Pos;
  4722. unsigned Idx = 1;
  4723. LValue PosLVal;
  4724. if (Dependencies.IteratorExpr) {
  4725. PosLVal = CGF.MakeAddrLValue(
  4726. CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
  4727. C.getSizeType());
  4728. CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
  4729. /*IsInit=*/true);
  4730. Pos = &PosLVal;
  4731. } else {
  4732. Pos = &Idx;
  4733. }
  4734. emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
  4735. DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4736. CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
  4737. return DependenciesArray;
  4738. }
  4739. void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
  4740. SourceLocation Loc) {
  4741. ASTContext &C = CGM.getContext();
  4742. QualType FlagsTy;
  4743. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4744. LValue Base = CGF.EmitLoadOfPointerLValue(
  4745. DepobjLVal.getAddress(CGF),
  4746. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  4747. QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  4748. Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4749. Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
  4750. llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
  4751. Addr.getElementType(), Addr.getPointer(),
  4752. llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  4753. DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
  4754. CGF.VoidPtrTy);
  4755. llvm::Value *ThreadID = getThreadID(CGF, Loc);
  4756. // Use default allocator.
  4757. llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  4758. llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
4759. // __kmpc_free(gtid, addr, nullptr);
  4760. (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  4761. CGM.getModule(), OMPRTL___kmpc_free),
  4762. Args);
  4763. }
  4764. void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
  4765. OpenMPDependClauseKind NewDepKind,
  4766. SourceLocation Loc) {
  4767. ASTContext &C = CGM.getContext();
  4768. QualType FlagsTy;
  4769. getDependTypes(C, KmpDependInfoTy, FlagsTy);
  4770. RecordDecl *KmpDependInfoRD =
  4771. cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  4772. llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
  4773. llvm::Value *NumDeps;
  4774. LValue Base;
  4775. std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
  4776. Address Begin = Base.getAddress(CGF);
  4777. // Cast from pointer to array type to pointer to single element.
  4778. llvm::Value *End = CGF.Builder.CreateGEP(
  4779. Begin.getElementType(), Begin.getPointer(), NumDeps);
  4780. // The basic structure here is a while-do loop.
  4781. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
  4782. llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
  4783. llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  4784. CGF.EmitBlock(BodyBB);
  4785. llvm::PHINode *ElementPHI =
  4786. CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
  4787. ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
  4788. Begin = Address(ElementPHI, Begin.getAlignment());
  4789. Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
  4790. Base.getTBAAInfo());
  4791. // deps[i].flags = NewDepKind;
  4792. RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
  4793. LValue FlagsLVal = CGF.EmitLValueForField(
  4794. Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
  4795. CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
  4796. FlagsLVal);
  4797. // Shift the address forward by one element.
  4798. Address ElementNext =
  4799. CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
  4800. ElementPHI->addIncoming(ElementNext.getPointer(),
  4801. CGF.Builder.GetInsertBlock());
  4802. llvm::Value *IsEmpty =
  4803. CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
  4804. CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  4805. // Done.
  4806. CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  4807. }
  4808. void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
  4809. const OMPExecutableDirective &D,
  4810. llvm::Function *TaskFunction,
  4811. QualType SharedsTy, Address Shareds,
  4812. const Expr *IfCond,
  4813. const OMPTaskDataTy &Data) {
  4814. if (!CGF.HaveInsertPoint())
  4815. return;
  4816. TaskResultTy Result =
  4817. emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  4818. llvm::Value *NewTask = Result.NewTask;
  4819. llvm::Function *TaskEntry = Result.TaskEntry;
  4820. llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  4821. LValue TDBase = Result.TDBase;
  4822. const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  4823. // Process list of dependences.
  4824. Address DependenciesArray = Address::invalid();
  4825. llvm::Value *NumOfElements;
  4826. std::tie(NumOfElements, DependenciesArray) =
  4827. emitDependClause(CGF, Data.Dependences, Loc);
  4828. // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  4829. // libcall.
  4830. // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  4831. // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  4832. // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  4833. // list is not empty
  4834. llvm::Value *ThreadID = getThreadID(CGF, Loc);
  4835. llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  4836. llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  4837. llvm::Value *DepTaskArgs[7];
  4838. if (!Data.Dependences.empty()) {
  4839. DepTaskArgs[0] = UpLoc;
  4840. DepTaskArgs[1] = ThreadID;
  4841. DepTaskArgs[2] = NewTask;
  4842. DepTaskArgs[3] = NumOfElements;
  4843. DepTaskArgs[4] = DependenciesArray.getPointer();
  4844. DepTaskArgs[5] = CGF.Builder.getInt32(0);
  4845. DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  4846. }
  4847. auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
  4848. &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
  4849. if (!Data.Tied) {
  4850. auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  4851. LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
  4852. CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
  4853. }
  4854. if (!Data.Dependences.empty()) {
  4855. CGF.EmitRuntimeCall(
  4856. OMPBuilder.getOrCreateRuntimeFunction(
  4857. CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
  4858. DepTaskArgs);
  4859. } else {
  4860. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  4861. CGM.getModule(), OMPRTL___kmpc_omp_task),
  4862. TaskArgs);
  4863. }
4864. // Check if the parent region is untied and build a return for the untied task.
  4865. if (auto *Region =
  4866. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
  4867. Region->emitUntiedSwitch(CGF);
  4868. };
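// Arguments for __kmpc_omp_wait_deps, emitted on the 'else' branch of the if
// clause below.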
  4869. llvm::Value *DepWaitTaskArgs[6];
  4870. if (!Data.Dependences.empty()) {
  4871. DepWaitTaskArgs[0] = UpLoc;
  4872. DepWaitTaskArgs[1] = ThreadID;
  4873. DepWaitTaskArgs[2] = NumOfElements;
  4874. DepWaitTaskArgs[3] = DependenciesArray.getPointer();
  4875. DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
  4876. DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  4877. }
  4878. auto &M = CGM.getModule();
  4879. auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
  4880. TaskEntry, &Data, &DepWaitTaskArgs,
  4881. Loc](CodeGenFunction &CGF, PrePostActionTy &) {
  4882. CodeGenFunction::RunCleanupsScope LocalScope(CGF);
  4883. // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
  4884. // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  4885. // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
  4886. // is specified.
  4887. if (!Data.Dependences.empty())
  4888. CGF.EmitRuntimeCall(
  4889. OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
  4890. DepWaitTaskArgs);
  4891. // Call proxy_task_entry(gtid, new_task);
  4892. auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
  4893. Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
  4894. Action.Enter(CGF);
  4895. llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
  4896. CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
  4897. OutlinedFnArgs);
  4898. };
  4899. // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
  4900. // kmp_task_t *new_task);
  4901. // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
  4902. // kmp_task_t *new_task);
  4903. RegionCodeGenTy RCG(CodeGen);
  4904. CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
  4905. M, OMPRTL___kmpc_omp_task_begin_if0),
  4906. TaskArgs,
  4907. OMPBuilder.getOrCreateRuntimeFunction(
  4908. M, OMPRTL___kmpc_omp_task_complete_if0),
  4909. TaskArgs);
  4910. RCG.setAction(Action);
  4911. RCG(CGF);
  4912. };
  4913. if (IfCond) {
  4914. emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  4915. } else {
  4916. RegionCodeGenTy ThenRCG(ThenCodeGen);
  4917. ThenRCG(CGF);
  4918. }
  4919. }
  4920. void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
  4921. const OMPLoopDirective &D,
  4922. llvm::Function *TaskFunction,
  4923. QualType SharedsTy, Address Shareds,
  4924. const Expr *IfCond,
  4925. const OMPTaskDataTy &Data) {
  4926. if (!CGF.HaveInsertPoint())
  4927. return;
  4928. TaskResultTy Result =
  4929. emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  4930. // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  4931. // libcall.
  4932. // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  4933. // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  4934. // sched, kmp_uint64 grainsize, void *task_dup);
  4935. llvm::Value *ThreadID = getThreadID(CGF, Loc);
  4936. llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  4937. llvm::Value *IfVal;
  4938. if (IfCond) {
  4939. IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
  4940. /*isSigned=*/true);
  4941. } else {
  4942. IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
  4943. }
  4944. LValue LBLVal = CGF.EmitLValueForField(
  4945. Result.TDBase,
  4946. *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  4947. const auto *LBVar =
  4948. cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  4949. CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
  4950. LBLVal.getQuals(),
  4951. /*IsInitializer=*/true);
  4952. LValue UBLVal = CGF.EmitLValueForField(
  4953. Result.TDBase,
  4954. *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  4955. const auto *UBVar =
  4956. cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  4957. CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
  4958. UBLVal.getQuals(),
  4959. /*IsInitializer=*/true);
  4960. LValue StLVal = CGF.EmitLValueForField(
  4961. Result.TDBase,
  4962. *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  4963. const auto *StVar =
  4964. cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  4965. CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
  4966. StLVal.getQuals(),
  4967. /*IsInitializer=*/true);
  4968. // Store reductions address.
  4969. LValue RedLVal = CGF.EmitLValueForField(
  4970. Result.TDBase,
  4971. *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  4972. if (Data.Reductions) {
  4973. CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  4974. } else {
  4975. CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
  4976. CGF.getContext().VoidPtrTy);
  4977. }
  4978. enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  4979. llvm::Value *TaskArgs[] = {
  4980. UpLoc,
  4981. ThreadID,
  4982. Result.NewTask,
  4983. IfVal,
  4984. LBLVal.getPointer(CGF),
  4985. UBLVal.getPointer(CGF),
  4986. CGF.EmitLoadOfScalar(StLVal, Loc),
  4987. llvm::ConstantInt::getSigned(
  4988. CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler
  4989. llvm::ConstantInt::getSigned(
  4990. CGF.IntTy, Data.Schedule.getPointer()
  4991. ? Data.Schedule.getInt() ? NumTasks : Grainsize
  4992. : NoSchedule),
  4993. Data.Schedule.getPointer()
  4994. ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
  4995. /*isSigned=*/false)
  4996. : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
  4997. Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  4998. Result.TaskDupFn, CGF.VoidPtrTy)
  4999. : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
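// Illustrative mapping of the sched/grainsize arguments above (assuming the
// flag in Data.Schedule marks a num_tasks clause, as set up by the caller):
//   grainsize(G) -> sched = Grainsize (1), grainsize = G
//   num_tasks(N) -> sched = NumTasks (2),  grainsize = N
//   neither      -> sched = NoSchedule (0), grainsize = 0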
  5000. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  5001. CGM.getModule(), OMPRTL___kmpc_taskloop),
  5002. TaskArgs);
  5003. }
  5004. /// Emit reduction operation for each element of array (required for
  5005. /// array sections) LHS op = RHS.
  5006. /// \param Type Type of array.
  5007. /// \param LHSVar Variable on the left side of the reduction operation
  5008. /// (references element of array in original variable).
  5009. /// \param RHSVar Variable on the right side of the reduction operation
  5010. /// (references element of array in original variable).
  5011. /// \param RedOpGen Generator of reduction operation with use of LHSVar and
  5012. /// RHSVar.
  5013. static void EmitOMPAggregateReduction(
  5014. CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
  5015. const VarDecl *RHSVar,
  5016. const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
  5017. const Expr *, const Expr *)> &RedOpGen,
  5018. const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
  5019. const Expr *UpExpr = nullptr) {
  5020. // Perform element-by-element initialization.
  5021. QualType ElementTy;
  5022. Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  5023. Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
  5024. // Drill down to the base element type on both arrays.
  5025. const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  5026. llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
  5027. llvm::Value *RHSBegin = RHSAddr.getPointer();
  5028. llvm::Value *LHSBegin = LHSAddr.getPointer();
  5029. // Cast from pointer to array type to pointer to single element.
  5030. llvm::Value *LHSEnd =
  5031. CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
  5032. // The basic structure here is a while-do loop.
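// Sketch of the emitted control flow (illustrative):
//   if (lhs.begin == lhs.end) goto done;
// body:
//   <reduction op on current lhs/rhs elements>; ++lhs; ++rhs;
//   if (lhs != lhs.end) goto body;
// done: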
  5033. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  5034. llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  5035. llvm::Value *IsEmpty =
  5036. CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  5037. CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  5038. // Enter the loop body, making that address the current address.
  5039. llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  5040. CGF.EmitBlock(BodyBB);
  5041. CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
  5042. llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
  5043. RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  5044. RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  5045. Address RHSElementCurrent =
  5046. Address(RHSElementPHI,
  5047. RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  5048. llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
  5049. LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  5050. LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  5051. Address LHSElementCurrent =
  5052. Address(LHSElementPHI,
  5053. LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  5054. // Emit copy.
  5055. CodeGenFunction::OMPPrivateScope Scope(CGF);
  5056. Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
  5057. Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
  5058. Scope.Privatize();
  5059. RedOpGen(CGF, XExpr, EExpr, UpExpr);
  5060. Scope.ForceCleanup();
  5061. // Shift the address forward by one element.
  5062. llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
  5063. LHSAddr.getElementType(), LHSElementPHI, /*Idx0=*/1,
  5064. "omp.arraycpy.dest.element");
  5065. llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
  5066. RHSAddr.getElementType(), RHSElementPHI, /*Idx0=*/1,
  5067. "omp.arraycpy.src.element");
  5068. // Check whether we've reached the end.
  5069. llvm::Value *Done =
  5070. CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  5071. CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  5072. LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  5073. RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
  5074. // Done.
  5075. CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  5076. }
  5077. /// Emit reduction combiner. If the combiner is a simple expression emit it as
  5078. /// is, otherwise consider it as combiner of UDR decl and emit it as a call of
  5079. /// UDR combiner function.
  5080. static void emitReductionCombiner(CodeGenFunction &CGF,
  5081. const Expr *ReductionOp) {
  5082. if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
  5083. if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
  5084. if (const auto *DRE =
  5085. dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
  5086. if (const auto *DRD =
  5087. dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
  5088. std::pair<llvm::Function *, llvm::Function *> Reduction =
  5089. CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
  5090. RValue Func = RValue::get(Reduction.first);
  5091. CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
  5092. CGF.EmitIgnoredExpr(ReductionOp);
  5093. return;
  5094. }
  5095. CGF.EmitIgnoredExpr(ReductionOp);
  5096. }
  5097. llvm::Function *CGOpenMPRuntime::emitReductionFunction(
  5098. SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
  5099. ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
  5100. ArrayRef<const Expr *> ReductionOps) {
  5101. ASTContext &C = CGM.getContext();
  5102. // void reduction_func(void *LHSArg, void *RHSArg);
  5103. FunctionArgList Args;
  5104. ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  5105. ImplicitParamDecl::Other);
  5106. ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  5107. ImplicitParamDecl::Other);
  5108. Args.push_back(&LHSArg);
  5109. Args.push_back(&RHSArg);
  5110. const auto &CGFI =
  5111. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  5112. std::string Name = getName({"omp", "reduction", "reduction_func"});
  5113. auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
  5114. llvm::GlobalValue::InternalLinkage, Name,
  5115. &CGM.getModule());
  5116. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  5117. Fn->setDoesNotRecurse();
  5118. CodeGenFunction CGF(CGM);
  5119. CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  5120. // Dst = (void*[n])(LHSArg);
  5121. // Src = (void*[n])(RHSArg);
  5122. Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  5123. CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
  5124. ArgsType), CGF.getPointerAlign());
  5125. Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  5126. CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
  5127. ArgsType), CGF.getPointerAlign());
  5128. // ...
  5129. // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  5130. // ...
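// For example, a single scalar 'int' '+' reduction yields, conceptually
// (illustrative sketch only):
//   void reduction_func(void *lhs[1], void *rhs[1]) {
//     *(int *)lhs[0] = *(int *)lhs[0] + *(int *)rhs[0];
//   }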
  5131. CodeGenFunction::OMPPrivateScope Scope(CGF);
  5132. auto IPriv = Privates.begin();
  5133. unsigned Idx = 0;
  5134. for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
  5135. const auto *RHSVar =
  5136. cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
  5137. Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
  5138. return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
  5139. });
  5140. const auto *LHSVar =
  5141. cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
  5142. Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
  5143. return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
  5144. });
  5145. QualType PrivTy = (*IPriv)->getType();
  5146. if (PrivTy->isVariablyModifiedType()) {
  5147. // Get array size and emit VLA type.
  5148. ++Idx;
  5149. Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
  5150. llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
  5151. const VariableArrayType *VLA =
  5152. CGF.getContext().getAsVariableArrayType(PrivTy);
  5153. const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
  5154. CodeGenFunction::OpaqueValueMapping OpaqueMap(
  5155. CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
  5156. CGF.EmitVariablyModifiedType(PrivTy);
  5157. }
  5158. }
  5159. Scope.Privatize();
  5160. IPriv = Privates.begin();
  5161. auto ILHS = LHSExprs.begin();
  5162. auto IRHS = RHSExprs.begin();
  5163. for (const Expr *E : ReductionOps) {
  5164. if ((*IPriv)->getType()->isArrayType()) {
  5165. // Emit reduction for array section.
  5166. const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
  5167. const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
  5168. EmitOMPAggregateReduction(
  5169. CGF, (*IPriv)->getType(), LHSVar, RHSVar,
  5170. [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
  5171. emitReductionCombiner(CGF, E);
  5172. });
  5173. } else {
  5174. // Emit reduction for array subscript or single variable.
  5175. emitReductionCombiner(CGF, E);
  5176. }
  5177. ++IPriv;
  5178. ++ILHS;
  5179. ++IRHS;
  5180. }
  5181. Scope.ForceCleanup();
  5182. CGF.FinishFunction();
  5183. return Fn;
  5184. }
  5185. void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
  5186. const Expr *ReductionOp,
  5187. const Expr *PrivateRef,
  5188. const DeclRefExpr *LHS,
  5189. const DeclRefExpr *RHS) {
  5190. if (PrivateRef->getType()->isArrayType()) {
  5191. // Emit reduction for array section.
  5192. const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
  5193. const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
  5194. EmitOMPAggregateReduction(
  5195. CGF, PrivateRef->getType(), LHSVar, RHSVar,
  5196. [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
  5197. emitReductionCombiner(CGF, ReductionOp);
  5198. });
  5199. } else {
  5200. // Emit reduction for array subscript or single variable.
  5201. emitReductionCombiner(CGF, ReductionOp);
  5202. }
  5203. }
  5204. void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
  5205. ArrayRef<const Expr *> Privates,
  5206. ArrayRef<const Expr *> LHSExprs,
  5207. ArrayRef<const Expr *> RHSExprs,
  5208. ArrayRef<const Expr *> ReductionOps,
  5209. ReductionOptionsTy Options) {
  5210. if (!CGF.HaveInsertPoint())
  5211. return;
  5212. bool WithNowait = Options.WithNowait;
  5213. bool SimpleReduction = Options.SimpleReduction;
5214. // The following code should be emitted for reduction:
  5215. //
  5216. // static kmp_critical_name lock = { 0 };
  5217. //
  5218. // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  5219. // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
  5220. // ...
  5221. // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
  5222. // *(Type<n>-1*)rhs[<n>-1]);
  5223. // }
  5224. //
  5225. // ...
  5226. // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  5227. // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  5228. // RedList, reduce_func, &<lock>)) {
  5229. // case 1:
  5230. // ...
  5231. // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  5232. // ...
  5233. // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  5234. // break;
  5235. // case 2:
  5236. // ...
  5237. // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  5238. // ...
  5239. // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
  5240. // break;
  5241. // default:;
  5242. // }
  5243. //
5244. // If SimpleReduction is true, only the following code is generated:
  5245. // ...
  5246. // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  5247. // ...
  5248. ASTContext &C = CGM.getContext();
  5249. if (SimpleReduction) {
  5250. CodeGenFunction::RunCleanupsScope Scope(CGF);
  5251. auto IPriv = Privates.begin();
  5252. auto ILHS = LHSExprs.begin();
  5253. auto IRHS = RHSExprs.begin();
  5254. for (const Expr *E : ReductionOps) {
  5255. emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
  5256. cast<DeclRefExpr>(*IRHS));
  5257. ++IPriv;
  5258. ++ILHS;
  5259. ++IRHS;
  5260. }
  5261. return;
  5262. }
  5263. // 1. Build a list of reduction variables.
  5264. // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  5265. auto Size = RHSExprs.size();
  5266. for (const Expr *E : Privates) {
  5267. if (E->getType()->isVariablyModifiedType())
  5268. // Reserve place for array size.
  5269. ++Size;
  5270. }
  5271. llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  5272. QualType ReductionArrayTy =
  5273. C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
  5274. /*IndexTypeQuals=*/0);
  5275. Address ReductionList =
  5276. CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  5277. auto IPriv = Privates.begin();
  5278. unsigned Idx = 0;
  5279. for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
  5280. Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
  5281. CGF.Builder.CreateStore(
  5282. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  5283. CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
  5284. Elem);
  5285. if ((*IPriv)->getType()->isVariablyModifiedType()) {
  5286. // Store array size.
  5287. ++Idx;
  5288. Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
  5289. llvm::Value *Size = CGF.Builder.CreateIntCast(
  5290. CGF.getVLASize(
  5291. CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
  5292. .NumElts,
  5293. CGF.SizeTy, /*isSigned=*/false);
  5294. CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
  5295. Elem);
  5296. }
  5297. }
  5298. // 2. Emit reduce_func().
  5299. llvm::Function *ReductionFn = emitReductionFunction(
  5300. Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
  5301. LHSExprs, RHSExprs, ReductionOps);
  5302. // 3. Create static kmp_critical_name lock = { 0 };
  5303. std::string Name = getName({"reduction"});
  5304. llvm::Value *Lock = getCriticalRegionLock(Name);
  5305. // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  5306. // RedList, reduce_func, &<lock>);
  5307. llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
  5308. llvm::Value *ThreadId = getThreadID(CGF, Loc);
  5309. llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  5310. llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  5311. ReductionList.getPointer(), CGF.VoidPtrTy);
  5312. llvm::Value *Args[] = {
  5313. IdentTLoc, // ident_t *<loc>
  5314. ThreadId, // i32 <gtid>
  5315. CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
  5316. ReductionArrayTySize, // size_type sizeof(RedList)
  5317. RL, // void *RedList
  5318. ReductionFn, // void (*) (void *, void *) <reduce_func>
  5319. Lock // kmp_critical_name *&<lock>
  5320. };
  5321. llvm::Value *Res = CGF.EmitRuntimeCall(
  5322. OMPBuilder.getOrCreateRuntimeFunction(
  5323. CGM.getModule(),
  5324. WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
  5325. Args);
  5326. // 5. Build switch(res)
  5327. llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  5328. llvm::SwitchInst *SwInst =
  5329. CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
  5330. // 6. Build case 1:
  5331. // ...
  5332. // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  5333. // ...
  5334. // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  5335. // break;
  5336. llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  5337. SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  5338. CGF.EmitBlock(Case1BB);
  5339. // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  5340. llvm::Value *EndArgs[] = {
  5341. IdentTLoc, // ident_t *<loc>
  5342. ThreadId, // i32 <gtid>
  5343. Lock // kmp_critical_name *&<lock>
  5344. };
  5345. auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
  5346. CodeGenFunction &CGF, PrePostActionTy &Action) {
  5347. CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
  5348. auto IPriv = Privates.begin();
  5349. auto ILHS = LHSExprs.begin();
  5350. auto IRHS = RHSExprs.begin();
  5351. for (const Expr *E : ReductionOps) {
  5352. RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
  5353. cast<DeclRefExpr>(*IRHS));
  5354. ++IPriv;
  5355. ++ILHS;
  5356. ++IRHS;
  5357. }
  5358. };
  5359. RegionCodeGenTy RCG(CodeGen);
  5360. CommonActionTy Action(
  5361. nullptr, llvm::None,
  5362. OMPBuilder.getOrCreateRuntimeFunction(
  5363. CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
  5364. : OMPRTL___kmpc_end_reduce),
  5365. EndArgs);
  5366. RCG.setAction(Action);
  5367. RCG(CGF);
  5368. CGF.EmitBranch(DefaultBB);
  5369. // 7. Build case 2:
  5370. // ...
  5371. // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  5372. // ...
  5373. // break;
  5374. llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
  5375. SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
  5376. CGF.EmitBlock(Case2BB);
  5377. auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
  5378. CodeGenFunction &CGF, PrePostActionTy &Action) {
  5379. auto ILHS = LHSExprs.begin();
  5380. auto IRHS = RHSExprs.begin();
  5381. auto IPriv = Privates.begin();
  5382. for (const Expr *E : ReductionOps) {
  5383. const Expr *XExpr = nullptr;
  5384. const Expr *EExpr = nullptr;
  5385. const Expr *UpExpr = nullptr;
  5386. BinaryOperatorKind BO = BO_Comma;
  5387. if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
  5388. if (BO->getOpcode() == BO_Assign) {
  5389. XExpr = BO->getLHS();
  5390. UpExpr = BO->getRHS();
  5391. }
  5392. }
  5393. // Try to emit update expression as a simple atomic.
  5394. const Expr *RHSExpr = UpExpr;
  5395. if (RHSExpr) {
  5396. // Analyze RHS part of the whole expression.
  5397. if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
  5398. RHSExpr->IgnoreParenImpCasts())) {
  5399. // If this is a conditional operator, analyze its condition for
  5400. // min/max reduction operator.
  5401. RHSExpr = ACO->getCond();
  5402. }
  5403. if (const auto *BORHS =
  5404. dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
  5405. EExpr = BORHS->getRHS();
  5406. BO = BORHS->getOpcode();
  5407. }
  5408. }
  5409. if (XExpr) {
  5410. const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
  5411. auto &&AtomicRedGen = [BO, VD,
  5412. Loc](CodeGenFunction &CGF, const Expr *XExpr,
  5413. const Expr *EExpr, const Expr *UpExpr) {
  5414. LValue X = CGF.EmitLValue(XExpr);
  5415. RValue E;
  5416. if (EExpr)
  5417. E = CGF.EmitAnyExpr(EExpr);
  5418. CGF.EmitOMPAtomicSimpleUpdateExpr(
  5419. X, E, BO, /*IsXLHSInRHSPart=*/true,
  5420. llvm::AtomicOrdering::Monotonic, Loc,
  5421. [&CGF, UpExpr, VD, Loc](RValue XRValue) {
  5422. CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  5423. PrivateScope.addPrivate(
  5424. VD, [&CGF, VD, XRValue, Loc]() {
  5425. Address LHSTemp = CGF.CreateMemTemp(VD->getType());
  5426. CGF.emitOMPSimpleStore(
  5427. CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
  5428. VD->getType().getNonReferenceType(), Loc);
  5429. return LHSTemp;
  5430. });
  5431. (void)PrivateScope.Privatize();
  5432. return CGF.EmitAnyExpr(UpExpr);
  5433. });
  5434. };
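// For a scalar integer '+' reduction, EmitOMPAtomicSimpleUpdateExpr can
// typically lower this to a single 'atomicrmw add' (monotonic); otherwise it
// falls back to a compare-exchange loop that uses the generator above to
// recompute the update on a privatized copy of the LHS.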
  5435. if ((*IPriv)->getType()->isArrayType()) {
  5436. // Emit atomic reduction for array section.
  5437. const auto *RHSVar =
  5438. cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
  5439. EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
  5440. AtomicRedGen, XExpr, EExpr, UpExpr);
  5441. } else {
  5442. // Emit atomic reduction for array subscript or single variable.
  5443. AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
  5444. }
  5445. } else {
  5446. // Emit as a critical region.
  5447. auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
  5448. const Expr *, const Expr *) {
  5449. CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
  5450. std::string Name = RT.getName({"atomic_reduction"});
  5451. RT.emitCriticalRegion(
  5452. CGF, Name,
  5453. [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
  5454. Action.Enter(CGF);
  5455. emitReductionCombiner(CGF, E);
  5456. },
  5457. Loc);
  5458. };
  5459. if ((*IPriv)->getType()->isArrayType()) {
  5460. const auto *LHSVar =
  5461. cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
  5462. const auto *RHSVar =
  5463. cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
  5464. EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
  5465. CritRedGen);
  5466. } else {
  5467. CritRedGen(CGF, nullptr, nullptr, nullptr);
  5468. }
  5469. }
  5470. ++ILHS;
  5471. ++IRHS;
  5472. ++IPriv;
  5473. }
  5474. };
  5475. RegionCodeGenTy AtomicRCG(AtomicCodeGen);
  5476. if (!WithNowait) {
  5477. // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
  5478. llvm::Value *EndArgs[] = {
  5479. IdentTLoc, // ident_t *<loc>
  5480. ThreadId, // i32 <gtid>
  5481. Lock // kmp_critical_name *&<lock>
  5482. };
  5483. CommonActionTy Action(nullptr, llvm::None,
  5484. OMPBuilder.getOrCreateRuntimeFunction(
  5485. CGM.getModule(), OMPRTL___kmpc_end_reduce),
  5486. EndArgs);
  5487. AtomicRCG.setAction(Action);
  5488. AtomicRCG(CGF);
  5489. } else {
  5490. AtomicRCG(CGF);
  5491. }
  5492. CGF.EmitBranch(DefaultBB);
  5493. CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
  5494. }
  5495. /// Generates unique name for artificial threadprivate variables.
  5496. /// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
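/// For example (hypothetical values), a "reduction_size" name for a local
/// variable 'x' declared at raw source location 12345 would be
/// "reduction_size.x_12345".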
  5497. static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
  5498. const Expr *Ref) {
  5499. SmallString<256> Buffer;
  5500. llvm::raw_svector_ostream Out(Buffer);
  5501. const clang::DeclRefExpr *DE;
  5502. const VarDecl *D = ::getBaseDecl(Ref, DE);
  5503. if (!D)
  5504. D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
  5505. D = D->getCanonicalDecl();
  5506. std::string Name = CGM.getOpenMPRuntime().getName(
  5507. {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
  5508. Out << Prefix << Name << "_"
  5509. << D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
  5510. return std::string(Out.str());
  5511. }
  5512. /// Emits reduction initializer function:
  5513. /// \code
  5514. /// void @.red_init(void* %arg, void* %orig) {
  5515. /// %0 = bitcast void* %arg to <type>*
  5516. /// store <type> <init>, <type>* %0
  5517. /// ret void
  5518. /// }
  5519. /// \endcode
  5520. static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
  5521. SourceLocation Loc,
  5522. ReductionCodeGen &RCG, unsigned N) {
  5523. ASTContext &C = CGM.getContext();
  5524. QualType VoidPtrTy = C.VoidPtrTy;
  5525. VoidPtrTy.addRestrict();
  5526. FunctionArgList Args;
  5527. ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
  5528. ImplicitParamDecl::Other);
  5529. ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
  5530. ImplicitParamDecl::Other);
  5531. Args.emplace_back(&Param);
  5532. Args.emplace_back(&ParamOrig);
  5533. const auto &FnInfo =
  5534. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  5535. llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  5536. std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
  5537. auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
  5538. Name, &CGM.getModule());
  5539. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  5540. Fn->setDoesNotRecurse();
  5541. CodeGenFunction CGF(CGM);
  5542. CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  5543. Address PrivateAddr = CGF.EmitLoadOfPointer(
  5544. CGF.GetAddrOfLocalVar(&Param),
  5545. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  5546. llvm::Value *Size = nullptr;
5547. // If the size of the reduction item is non-constant, load it from the global
  5548. // threadprivate variable.
  5549. if (RCG.getSizes(N).second) {
  5550. Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
  5551. CGF, CGM.getContext().getSizeType(),
  5552. generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
  5553. Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
  5554. CGM.getContext().getSizeType(), Loc);
  5555. }
  5556. RCG.emitAggregateType(CGF, N, Size);
  5557. Address OrigAddr = Address::invalid();
5558. // If the initializer uses the initializer from a declare reduction construct,
5559. // emit a pointer to the address of the original reduction item (required by
5560. // the reduction initializer).
  5561. if (RCG.usesReductionInitializer(N)) {
  5562. Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
  5563. OrigAddr = CGF.EmitLoadOfPointer(
  5564. SharedAddr,
  5565. CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
  5566. }
  5567. // Emit the initializer:
  5568. // %0 = bitcast void* %arg to <type>*
  5569. // store <type> <init>, <type>* %0
  5570. RCG.emitInitialization(CGF, N, PrivateAddr, OrigAddr,
  5571. [](CodeGenFunction &) { return false; });
  5572. CGF.FinishFunction();
  5573. return Fn;
  5574. }
  5575. /// Emits reduction combiner function:
  5576. /// \code
  5577. /// void @.red_comb(void* %arg0, void* %arg1) {
  5578. /// %lhs = bitcast void* %arg0 to <type>*
  5579. /// %rhs = bitcast void* %arg1 to <type>*
  5580. /// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
  5581. /// store <type> %2, <type>* %lhs
  5582. /// ret void
  5583. /// }
  5584. /// \endcode
  5585. static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
  5586. SourceLocation Loc,
  5587. ReductionCodeGen &RCG, unsigned N,
  5588. const Expr *ReductionOp,
  5589. const Expr *LHS, const Expr *RHS,
  5590. const Expr *PrivateRef) {
  5591. ASTContext &C = CGM.getContext();
  5592. const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  5593. const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  5594. FunctionArgList Args;
  5595. ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  5596. C.VoidPtrTy, ImplicitParamDecl::Other);
  5597. ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  5598. ImplicitParamDecl::Other);
  5599. Args.emplace_back(&ParamInOut);
  5600. Args.emplace_back(&ParamIn);
  5601. const auto &FnInfo =
  5602. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  5603. llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  5604. std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
  5605. auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
  5606. Name, &CGM.getModule());
  5607. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  5608. Fn->setDoesNotRecurse();
  5609. CodeGenFunction CGF(CGM);
  5610. CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  5611. llvm::Value *Size = nullptr;
5612. // If the size of the reduction item is non-constant, load it from the global
  5613. // threadprivate variable.
  5614. if (RCG.getSizes(N).second) {
  5615. Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
  5616. CGF, CGM.getContext().getSizeType(),
  5617. generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
  5618. Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
  5619. CGM.getContext().getSizeType(), Loc);
  5620. }
  5621. RCG.emitAggregateType(CGF, N, Size);
  5622. // Remap lhs and rhs variables to the addresses of the function arguments.
  5623. // %lhs = bitcast void* %arg0 to <type>*
  5624. // %rhs = bitcast void* %arg1 to <type>*
  5625. CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  5626. PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
  5627. // Pull out the pointer to the variable.
  5628. Address PtrAddr = CGF.EmitLoadOfPointer(
  5629. CGF.GetAddrOfLocalVar(&ParamInOut),
  5630. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  5631. return CGF.Builder.CreateElementBitCast(
  5632. PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
  5633. });
  5634. PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
  5635. // Pull out the pointer to the variable.
  5636. Address PtrAddr = CGF.EmitLoadOfPointer(
  5637. CGF.GetAddrOfLocalVar(&ParamIn),
  5638. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  5639. return CGF.Builder.CreateElementBitCast(
  5640. PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
  5641. });
  5642. PrivateScope.Privatize();
  5643. // Emit the combiner body:
  5644. // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  5645. // store <type> %2, <type>* %lhs
  5646. CGM.getOpenMPRuntime().emitSingleReductionCombiner(
  5647. CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
  5648. cast<DeclRefExpr>(RHS));
  5649. CGF.FinishFunction();
  5650. return Fn;
  5651. }
  5652. /// Emits reduction finalizer function:
  5653. /// \code
  5654. /// void @.red_fini(void* %arg) {
  5655. /// %0 = bitcast void* %arg to <type>*
  5656. /// <destroy>(<type>* %0)
  5657. /// ret void
  5658. /// }
  5659. /// \endcode
  5660. static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
  5661. SourceLocation Loc,
  5662. ReductionCodeGen &RCG, unsigned N) {
  5663. if (!RCG.needCleanups(N))
  5664. return nullptr;
  5665. ASTContext &C = CGM.getContext();
  5666. FunctionArgList Args;
  5667. ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  5668. ImplicitParamDecl::Other);
  5669. Args.emplace_back(&Param);
  5670. const auto &FnInfo =
  5671. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  5672. llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  5673. std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
  5674. auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
  5675. Name, &CGM.getModule());
  5676. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  5677. Fn->setDoesNotRecurse();
  5678. CodeGenFunction CGF(CGM);
  5679. CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  5680. Address PrivateAddr = CGF.EmitLoadOfPointer(
  5681. CGF.GetAddrOfLocalVar(&Param),
  5682. C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  5683. llvm::Value *Size = nullptr;
5684. // If the size of the reduction item is non-constant, load it from the global
  5685. // threadprivate variable.
  5686. if (RCG.getSizes(N).second) {
  5687. Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
  5688. CGF, CGM.getContext().getSizeType(),
  5689. generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
  5690. Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
  5691. CGM.getContext().getSizeType(), Loc);
  5692. }
  5693. RCG.emitAggregateType(CGF, N, Size);
  5694. // Emit the finalizer body:
  5695. // <destroy>(<type>* %0)
  5696. RCG.emitCleanups(CGF, N, PrivateAddr);
  5697. CGF.FinishFunction(Loc);
  5698. return Fn;
  5699. }
  5700. llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
  5701. CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
  5702. ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  5703. if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
  5704. return nullptr;
  5705. // Build typedef struct:
  5706. // kmp_taskred_input {
  5707. // void *reduce_shar; // shared reduction item
  5708. // void *reduce_orig; // original reduction item used for initialization
  5709. // size_t reduce_size; // size of data item
  5710. // void *reduce_init; // data initialization routine
  5711. // void *reduce_fini; // data finalization routine
  5712. // void *reduce_comb; // data combiner routine
  5713. // kmp_task_red_flags_t flags; // flags for additional info from compiler
  5714. // } kmp_taskred_input_t;
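// One such descriptor is filled in below for every reduction item; the
// resulting .rd_input. array is then passed to __kmpc_taskred_modifier_init
// (task modifier case) or __kmpc_taskred_init.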
  5715. ASTContext &C = CGM.getContext();
  5716. RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
  5717. RD->startDefinition();
  5718. const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  5719. const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  5720. const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  5721. const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  5722. const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  5723. const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  5724. const FieldDecl *FlagsFD = addFieldToRecordDecl(
  5725. C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  5726. RD->completeDefinition();
  5727. QualType RDType = C.getRecordType(RD);
  5728. unsigned Size = Data.ReductionVars.size();
  5729. llvm::APInt ArraySize(/*numBits=*/64, Size);
  5730. QualType ArrayRDType = C.getConstantArrayType(
  5731. RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
  5732. // kmp_task_red_input_t .rd_input.[Size];
  5733. Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  5734. ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
  5735. Data.ReductionCopies, Data.ReductionOps);
  5736. for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
  5737. // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
  5738. llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
  5739. llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
  5740. llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
  5741. TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
  5742. /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
  5743. ".rd_input.gep.");
  5744. LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
  5745. // ElemLVal.reduce_shar = &Shareds[Cnt];
  5746. LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
  5747. RCG.emitSharedOrigLValue(CGF, Cnt);
  5748. llvm::Value *CastedShared =
  5749. CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
  5750. CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
  5751. // ElemLVal.reduce_orig = &Origs[Cnt];
  5752. LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
  5753. llvm::Value *CastedOrig =
  5754. CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
  5755. CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
  5756. RCG.emitAggregateType(CGF, Cnt);
  5757. llvm::Value *SizeValInChars;
  5758. llvm::Value *SizeVal;
  5759. std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
5760. // We use delayed creation/initialization for VLAs and array sections. It is
5761. // required because the runtime does not provide a way to pass the sizes of
5762. // VLAs/array sections to the initializer/combiner/finalizer functions.
5763. // Instead, threadprivate global variables are used to store these values,
5764. // which are then read back in those functions.
  5765. bool DelayedCreation = !!SizeVal;
  5766. SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
  5767. /*isSigned=*/false);
  5768. LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
  5769. CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
  5770. // ElemLVal.reduce_init = init;
  5771. LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
  5772. llvm::Value *InitAddr =
  5773. CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
  5774. CGF.EmitStoreOfScalar(InitAddr, InitLVal);
  5775. // ElemLVal.reduce_fini = fini;
  5776. LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
  5777. llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
  5778. llvm::Value *FiniAddr = Fini
  5779. ? CGF.EmitCastToVoidPtr(Fini)
  5780. : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
  5781. CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
  5782. // ElemLVal.reduce_comb = comb;
  5783. LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
  5784. llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
  5785. CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
  5786. RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
  5787. CGF.EmitStoreOfScalar(CombAddr, CombLVal);
  5788. // ElemLVal.flags = 0;
  5789. LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
  5790. if (DelayedCreation) {
  5791. CGF.EmitStoreOfScalar(
  5792. llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
  5793. FlagsLVal);
  5794. } else
  5795. CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
  5796. FlagsLVal.getType());
  5797. }
  5798. if (Data.IsReductionWithTaskMod) {
  5799. // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
  5800. // is_ws, int num, void *data);
  5801. llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
  5802. llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
  5803. CGM.IntTy, /*isSigned=*/true);
  5804. llvm::Value *Args[] = {
  5805. IdentTLoc, GTid,
  5806. llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
  5807. /*isSigned=*/true),
  5808. llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
  5809. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  5810. TaskRedInput.getPointer(), CGM.VoidPtrTy)};
  5811. return CGF.EmitRuntimeCall(
  5812. OMPBuilder.getOrCreateRuntimeFunction(
  5813. CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
  5814. Args);
  5815. }
  5816. // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data);
  5817. llvm::Value *Args[] = {
  5818. CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
  5819. /*isSigned=*/true),
  5820. llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
  5821. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
  5822. CGM.VoidPtrTy)};
  5823. return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  5824. CGM.getModule(), OMPRTL___kmpc_taskred_init),
  5825. Args);
  5826. }
  5827. void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
  5828. SourceLocation Loc,
  5829. bool IsWorksharingReduction) {
5830. // Build call void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid,
5831. // int is_ws);
  5832. llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
  5833. llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
  5834. CGM.IntTy, /*isSigned=*/true);
  5835. llvm::Value *Args[] = {IdentTLoc, GTid,
  5836. llvm::ConstantInt::get(CGM.IntTy,
  5837. IsWorksharingReduction ? 1 : 0,
  5838. /*isSigned=*/true)};
  5839. (void)CGF.EmitRuntimeCall(
  5840. OMPBuilder.getOrCreateRuntimeFunction(
  5841. CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
  5842. Args);
  5843. }
  5844. void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
  5845. SourceLocation Loc,
  5846. ReductionCodeGen &RCG,
  5847. unsigned N) {
  5848. auto Sizes = RCG.getSizes(N);
5849. // Emit a threadprivate global variable if the size of the reduction item is
5850. // non-constant (Sizes.second != nullptr).
  5851. if (Sizes.second) {
  5852. llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
  5853. /*isSigned=*/false);
  5854. Address SizeAddr = getAddrOfArtificialThreadPrivate(
  5855. CGF, CGM.getContext().getSizeType(),
  5856. generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
  5857. CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
  5858. }
  5859. }
  5860. Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
  5861. SourceLocation Loc,
  5862. llvm::Value *ReductionsPtr,
  5863. LValue SharedLVal) {
  5864. // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  5865. // *d);
  5866. llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
  5867. CGM.IntTy,
  5868. /*isSigned=*/true),
  5869. ReductionsPtr,
  5870. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  5871. SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
  5872. return Address(
  5873. CGF.EmitRuntimeCall(
  5874. OMPBuilder.getOrCreateRuntimeFunction(
  5875. CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
  5876. Args),
  5877. SharedLVal.getAlignment());
  5878. }
  5879. void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
  5880. const OMPTaskDataTy &Data) {
  5881. if (!CGF.HaveInsertPoint())
  5882. return;
  5883. if (CGF.CGM.getLangOpts().OpenMPIRBuilder && Data.Dependences.empty()) {
  5884. // TODO: Need to support taskwait with dependences in the OpenMPIRBuilder.
  5885. OMPBuilder.createTaskwait(CGF.Builder);
  5886. } else {
  5887. llvm::Value *ThreadID = getThreadID(CGF, Loc);
  5888. llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  5889. auto &M = CGM.getModule();
  5890. Address DependenciesArray = Address::invalid();
  5891. llvm::Value *NumOfElements;
  5892. std::tie(NumOfElements, DependenciesArray) =
  5893. emitDependClause(CGF, Data.Dependences, Loc);
  5894. llvm::Value *DepWaitTaskArgs[6];
  5895. if (!Data.Dependences.empty()) {
  5896. DepWaitTaskArgs[0] = UpLoc;
  5897. DepWaitTaskArgs[1] = ThreadID;
  5898. DepWaitTaskArgs[2] = NumOfElements;
  5899. DepWaitTaskArgs[3] = DependenciesArray.getPointer();
  5900. DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
  5901. DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  5902. CodeGenFunction::RunCleanupsScope LocalScope(CGF);
  5903. // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
  5904. // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  5905. // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
  5906. // is specified.
  5907. CGF.EmitRuntimeCall(
  5908. OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
  5909. DepWaitTaskArgs);
  5910. } else {
  5911. // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  5912. // global_tid);
  5913. llvm::Value *Args[] = {UpLoc, ThreadID};
  5914. // Ignore return result until untied tasks are supported.
  5915. CGF.EmitRuntimeCall(
  5916. OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_taskwait),
  5917. Args);
  5918. }
  5919. }
  5920. if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
  5921. Region->emitUntiedSwitch(CGF);
  5922. }
  5923. void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
  5924. OpenMPDirectiveKind InnerKind,
  5925. const RegionCodeGenTy &CodeGen,
  5926. bool HasCancel) {
  5927. if (!CGF.HaveInsertPoint())
  5928. return;
  5929. InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel,
  5930. InnerKind != OMPD_critical &&
  5931. InnerKind != OMPD_master &&
  5932. InnerKind != OMPD_masked);
  5933. CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
  5934. }
  5935. namespace {
  5936. enum RTCancelKind {
  5937. CancelNoreq = 0,
  5938. CancelParallel = 1,
  5939. CancelLoop = 2,
  5940. CancelSections = 3,
  5941. CancelTaskgroup = 4
  5942. };
  5943. } // anonymous namespace
  5944. static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
  5945. RTCancelKind CancelKind = CancelNoreq;
  5946. if (CancelRegion == OMPD_parallel)
  5947. CancelKind = CancelParallel;
  5948. else if (CancelRegion == OMPD_for)
  5949. CancelKind = CancelLoop;
  5950. else if (CancelRegion == OMPD_sections)
  5951. CancelKind = CancelSections;
  5952. else {
  5953. assert(CancelRegion == OMPD_taskgroup);
  5954. CancelKind = CancelTaskgroup;
  5955. }
  5956. return CancelKind;
  5957. }
  5958. void CGOpenMPRuntime::emitCancellationPointCall(
  5959. CodeGenFunction &CGF, SourceLocation Loc,
  5960. OpenMPDirectiveKind CancelRegion) {
  5961. if (!CGF.HaveInsertPoint())
  5962. return;
  5963. // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  5964. // global_tid, kmp_int32 cncl_kind);
  5965. if (auto *OMPRegionInfo =
  5966. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
  5967. // For 'cancellation point taskgroup', the task region info may not have a
  5968. // cancel. This may instead happen in another adjacent task.
  5969. if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
  5970. llvm::Value *Args[] = {
  5971. emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
  5972. CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
  5973. // Ignore return result until untied tasks are supported.
  5974. llvm::Value *Result = CGF.EmitRuntimeCall(
  5975. OMPBuilder.getOrCreateRuntimeFunction(
  5976. CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
  5977. Args);
  5978. // if (__kmpc_cancellationpoint()) {
  5979. // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
  5980. // exit from construct;
  5981. // }
  5982. llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
  5983. llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
  5984. llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
  5985. CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
  5986. CGF.EmitBlock(ExitBB);
  5987. if (CancelRegion == OMPD_parallel)
  5988. emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
  5989. // exit from construct;
  5990. CodeGenFunction::JumpDest CancelDest =
  5991. CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
  5992. CGF.EmitBranchThroughCleanup(CancelDest);
  5993. CGF.EmitBlock(ContBB, /*IsFinished=*/true);
  5994. }
  5995. }
  5996. }
  5997. void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
  5998. const Expr *IfCond,
  5999. OpenMPDirectiveKind CancelRegion) {
  6000. if (!CGF.HaveInsertPoint())
  6001. return;
  6002. // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  6003. // kmp_int32 cncl_kind);
  6004. auto &M = CGM.getModule();
  6005. if (auto *OMPRegionInfo =
  6006. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
  6007. auto &&ThenGen = [this, &M, Loc, CancelRegion,
  6008. OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
  6009. CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
  6010. llvm::Value *Args[] = {
  6011. RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
  6012. CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
  6013. // Ignore return result until untied tasks are supported.
  6014. llvm::Value *Result = CGF.EmitRuntimeCall(
  6015. OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
  6016. // if (__kmpc_cancel()) {
  6017. // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
  6018. // exit from construct;
  6019. // }
  6020. llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
  6021. llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
  6022. llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
  6023. CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
  6024. CGF.EmitBlock(ExitBB);
  6025. if (CancelRegion == OMPD_parallel)
  6026. RT.emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
  6027. // exit from construct;
  6028. CodeGenFunction::JumpDest CancelDest =
  6029. CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
  6030. CGF.EmitBranchThroughCleanup(CancelDest);
  6031. CGF.EmitBlock(ContBB, /*IsFinished=*/true);
  6032. };
  6033. if (IfCond) {
  6034. emitIfClause(CGF, IfCond, ThenGen,
  6035. [](CodeGenFunction &, PrePostActionTy &) {});
  6036. } else {
  6037. RegionCodeGenTy ThenRCG(ThenGen);
  6038. ThenRCG(CGF);
  6039. }
  6040. }
  6041. }
  6042. namespace {
  6043. /// Cleanup action for uses_allocators support.
  6044. class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
  6045. ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
  6046. public:
  6047. OMPUsesAllocatorsActionTy(
  6048. ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
  6049. : Allocators(Allocators) {}
  6050. void Enter(CodeGenFunction &CGF) override {
  6051. if (!CGF.HaveInsertPoint())
  6052. return;
  6053. for (const auto &AllocatorData : Allocators) {
  6054. CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
  6055. CGF, AllocatorData.first, AllocatorData.second);
  6056. }
  6057. }
  6058. void Exit(CodeGenFunction &CGF) override {
  6059. if (!CGF.HaveInsertPoint())
  6060. return;
  6061. for (const auto &AllocatorData : Allocators) {
  6062. CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
  6063. AllocatorData.first);
  6064. }
  6065. }
  6066. };
  6067. } // namespace
  6068. void CGOpenMPRuntime::emitTargetOutlinedFunction(
  6069. const OMPExecutableDirective &D, StringRef ParentName,
  6070. llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
  6071. bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  6072. assert(!ParentName.empty() && "Invalid target region parent name!");
  6073. HasEmittedTargetRegion = true;
  6074. SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
  6075. for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
  6076. for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
  6077. const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
  6078. if (!D.AllocatorTraits)
  6079. continue;
  6080. Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
  6081. }
  6082. }
  6083. OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
  6084. CodeGen.setAction(UsesAllocatorAction);
  6085. emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
  6086. IsOffloadEntry, CodeGen);
  6087. }
  6088. void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
  6089. const Expr *Allocator,
  6090. const Expr *AllocatorTraits) {
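// Sketch of the lowering for 'uses_allocators(my_alloc(my_traits))'
// (illustrative names):
//   my_alloc = __kmpc_init_allocator(gtid, /*default memspace=*/nullptr,
//                                    ntraits, &my_traits[0]);
// where ntraits is the constant array size of the traits expression.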
  6091. llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
  6092. ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
  6093. // Use default memspace handle.
  6094. llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  6095. llvm::Value *NumTraits = llvm::ConstantInt::get(
  6096. CGF.IntTy, cast<ConstantArrayType>(
  6097. AllocatorTraits->getType()->getAsArrayTypeUnsafe())
  6098. ->getSize()
  6099. .getLimitedValue());
  6100. LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
  6101. Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  6102. AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
  6103. AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
  6104. AllocatorTraitsLVal.getBaseInfo(),
  6105. AllocatorTraitsLVal.getTBAAInfo());
  6106. llvm::Value *Traits =
  6107. CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
  6108. llvm::Value *AllocatorVal =
  6109. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  6110. CGM.getModule(), OMPRTL___kmpc_init_allocator),
  6111. {ThreadId, MemSpaceHandle, NumTraits, Traits});
  6112. // Store to allocator.
  6113. CGF.EmitVarDecl(*cast<VarDecl>(
  6114. cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
  6115. LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
  6116. AllocatorVal =
  6117. CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
  6118. Allocator->getType(), Allocator->getExprLoc());
  6119. CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
  6120. }
  6121. void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
  6122. const Expr *Allocator) {
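// Mirror of emitUsesAllocatorsInit: load the allocator handle back from the
// allocator variable and pass it to __kmpc_destroy_allocator(gtid, allocator).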
  6123. llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
  6124. ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
  6125. LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
  6126. llvm::Value *AllocatorVal =
  6127. CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
  6128. AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
  6129. CGF.getContext().VoidPtrTy,
  6130. Allocator->getExprLoc());
  6131. (void)CGF.EmitRuntimeCall(
  6132. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  6133. OMPRTL___kmpc_destroy_allocator),
  6134. {ThreadId, AllocatorVal});
  6135. }
  6136. void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
  6137. const OMPExecutableDirective &D, StringRef ParentName,
  6138. llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
  6139. bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  6140. // Create a unique name for the entry function using the source location
  6141. // information of the current target region. The name will be something like:
  6142. //
  6143. // __omp_offloading_DD_FFFF_PP_lBB
  6144. //
  6145. // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  6146. // mangled name of the function that encloses the target region and BB is the
  6147. // line number of the target region.
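// For example (illustrative values only): a region at line 42 of a function
// whose mangled name is 'foo', in a file with device ID 0x1 and file ID
// 0x2abc, would be named __omp_offloading_1_2abc_foo_l42.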
  6148. unsigned DeviceID;
  6149. unsigned FileID;
  6150. unsigned Line;
  6151. getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
  6152. Line);
  6153. SmallString<64> EntryFnName;
  6154. {
  6155. llvm::raw_svector_ostream OS(EntryFnName);
  6156. OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
  6157. << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
  6158. }
  6159. const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  6160. CodeGenFunction CGF(CGM, true);
  6161. CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  6162. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  6163. OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
// If this target outlined function is not an offload entry, we don't need to
// register it.
  6166. if (!IsOffloadEntry)
  6167. return;
// The target region ID is used by the runtime library to identify the current
// target region, so it only has to be unique and not necessarily point to
// anything. It could be the pointer to the outlined function that implements
// the target region, but we aren't using that so that the compiler doesn't
// need to keep it alive and can therefore inline the host function if that
// proves worthwhile during optimization. On the other hand, if emitting code
// for the device, the ID has to be the function address so that it can be
// retrieved from the offloading entry and launched by the runtime library. We
// also mark the outlined function to have external linkage when emitting code
// for the device, because these functions will be entry points to the device.
  6178. if (CGM.getLangOpts().OpenMPIsDevice) {
  6179. OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
  6180. OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
  6181. OutlinedFn->setDSOLocal(false);
  6182. if (CGM.getTriple().isAMDGCN())
  6183. OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
  6184. } else {
  6185. std::string Name = getName({EntryFnName, "region_id"});
  6186. OutlinedFnID = new llvm::GlobalVariable(
  6187. CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
  6188. llvm::GlobalValue::WeakAnyLinkage,
  6189. llvm::Constant::getNullValue(CGM.Int8Ty), Name);
  6190. }
  6191. // Register the information for the entry associated with this target region.
  6192. OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
  6193. DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
  6194. OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
  6195. // Add NumTeams and ThreadLimit attributes to the outlined GPU function
  6196. int32_t DefaultValTeams = -1;
  6197. getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
  6198. if (DefaultValTeams > 0) {
  6199. OutlinedFn->addFnAttr("omp_target_num_teams",
  6200. std::to_string(DefaultValTeams));
  6201. }
  6202. int32_t DefaultValThreads = -1;
  6203. getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
  6204. if (DefaultValThreads > 0) {
  6205. OutlinedFn->addFnAttr("omp_target_thread_limit",
  6206. std::to_string(DefaultValThreads));
  6207. }
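// E.g. (hypothetical source) "#pragma omp target teams num_teams(4)
// thread_limit(64)" results in the outlined function carrying the attributes
// "omp_target_num_teams"="4" and "omp_target_thread_limit"="64".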
  6208. CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
  6209. }
  6210. /// Checks if the expression is constant or does not have non-trivial function
  6211. /// calls.
  6212. static bool isTrivial(ASTContext &Ctx, const Expr * E) {
  6213. // We can skip constant expressions.
  6214. // We can skip expressions with trivial calls or simple expressions.
  6215. return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
  6216. !E->hasNonTrivialCall(Ctx)) &&
  6217. !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
  6218. }
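/// Return the single non-trivial statement nested in \a Body, looking through
/// compound statements and skipping trivially ignorable statements. A sketch
/// (hypothetical bodies): for "{ int unused; foo(); }" the call to foo() is
/// returned, while "{ foo(); bar(); }" yields nullptr because there is more
/// than one interesting child.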
  6219. const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
  6220. const Stmt *Body) {
  6221. const Stmt *Child = Body->IgnoreContainers();
  6222. while (const auto *C = dyn_cast_or_null<CompoundStmt>(Child)) {
  6223. Child = nullptr;
  6224. for (const Stmt *S : C->body()) {
  6225. if (const auto *E = dyn_cast<Expr>(S)) {
  6226. if (isTrivial(Ctx, E))
  6227. continue;
  6228. }
  6229. // Some of the statements can be ignored.
  6230. if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
  6231. isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
  6232. continue;
  6233. // Analyze declarations.
  6234. if (const auto *DS = dyn_cast<DeclStmt>(S)) {
  6235. if (llvm::all_of(DS->decls(), [](const Decl *D) {
  6236. if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
  6237. isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
  6238. isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
  6239. isa<UsingDirectiveDecl>(D) ||
  6240. isa<OMPDeclareReductionDecl>(D) ||
  6241. isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
  6242. return true;
  6243. const auto *VD = dyn_cast<VarDecl>(D);
  6244. if (!VD)
  6245. return false;
  6246. return VD->hasGlobalStorage() || !VD->isUsed();
  6247. }))
  6248. continue;
  6249. }
// Found multiple children - cannot return a single child.
  6251. if (Child)
  6252. return nullptr;
  6253. Child = S;
  6254. }
  6255. if (Child)
  6256. Child = Child->IgnoreContainers();
  6257. }
  6258. return Child;
  6259. }
  6260. const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
  6261. CodeGenFunction &CGF, const OMPExecutableDirective &D,
  6262. int32_t &DefaultVal) {
  6263. OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  6264. assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
  6265. "Expected target-based executable directive.");
  6266. switch (DirectiveKind) {
  6267. case OMPD_target: {
  6268. const auto *CS = D.getInnermostCapturedStmt();
  6269. const auto *Body =
  6270. CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  6271. const Stmt *ChildStmt =
  6272. CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body);
  6273. if (const auto *NestedDir =
  6274. dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
  6275. if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
  6276. if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
  6277. const Expr *NumTeams =
  6278. NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
  6279. if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
  6280. if (auto Constant =
  6281. NumTeams->getIntegerConstantExpr(CGF.getContext()))
  6282. DefaultVal = Constant->getExtValue();
  6283. return NumTeams;
  6284. }
  6285. DefaultVal = 0;
  6286. return nullptr;
  6287. }
  6288. if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
  6289. isOpenMPSimdDirective(NestedDir->getDirectiveKind())) {
  6290. DefaultVal = 1;
  6291. return nullptr;
  6292. }
  6293. DefaultVal = 1;
  6294. return nullptr;
  6295. }
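// E.g. (hypothetical source): for a '#pragma omp target' whose body is a
// single '#pragma omp teams num_teams(8)' construct, the branch above
// returns the num_teams expression and sets DefaultVal to 8; a lone nested
// parallel or simd construct yields DefaultVal = 1 instead.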
// A value of -1 is used to indicate that no teams region needs to be emitted.
  6297. DefaultVal = -1;
  6298. return nullptr;
  6299. }
  6300. case OMPD_target_teams:
  6301. case OMPD_target_teams_distribute:
  6302. case OMPD_target_teams_distribute_simd:
  6303. case OMPD_target_teams_distribute_parallel_for:
  6304. case OMPD_target_teams_distribute_parallel_for_simd: {
  6305. if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
  6306. const Expr *NumTeams =
  6307. D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
  6308. if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
  6309. if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
  6310. DefaultVal = Constant->getExtValue();
  6311. return NumTeams;
  6312. }
  6313. DefaultVal = 0;
  6314. return nullptr;
  6315. }
  6316. case OMPD_target_parallel:
  6317. case OMPD_target_parallel_for:
  6318. case OMPD_target_parallel_for_simd:
  6319. case OMPD_target_simd:
  6320. DefaultVal = 1;
  6321. return nullptr;
  6322. case OMPD_parallel:
  6323. case OMPD_for:
  6324. case OMPD_parallel_for:
  6325. case OMPD_parallel_master:
  6326. case OMPD_parallel_sections:
  6327. case OMPD_for_simd:
  6328. case OMPD_parallel_for_simd:
  6329. case OMPD_cancel:
  6330. case OMPD_cancellation_point:
  6331. case OMPD_ordered:
  6332. case OMPD_threadprivate:
  6333. case OMPD_allocate:
  6334. case OMPD_task:
  6335. case OMPD_simd:
  6336. case OMPD_tile:
  6337. case OMPD_unroll:
  6338. case OMPD_sections:
  6339. case OMPD_section:
  6340. case OMPD_single:
  6341. case OMPD_master:
  6342. case OMPD_critical:
  6343. case OMPD_taskyield:
  6344. case OMPD_barrier:
  6345. case OMPD_taskwait:
  6346. case OMPD_taskgroup:
  6347. case OMPD_atomic:
  6348. case OMPD_flush:
  6349. case OMPD_depobj:
  6350. case OMPD_scan:
  6351. case OMPD_teams:
  6352. case OMPD_target_data:
  6353. case OMPD_target_exit_data:
  6354. case OMPD_target_enter_data:
  6355. case OMPD_distribute:
  6356. case OMPD_distribute_simd:
  6357. case OMPD_distribute_parallel_for:
  6358. case OMPD_distribute_parallel_for_simd:
  6359. case OMPD_teams_distribute:
  6360. case OMPD_teams_distribute_simd:
  6361. case OMPD_teams_distribute_parallel_for:
  6362. case OMPD_teams_distribute_parallel_for_simd:
  6363. case OMPD_target_update:
  6364. case OMPD_declare_simd:
  6365. case OMPD_declare_variant:
  6366. case OMPD_begin_declare_variant:
  6367. case OMPD_end_declare_variant:
  6368. case OMPD_declare_target:
  6369. case OMPD_end_declare_target:
  6370. case OMPD_declare_reduction:
  6371. case OMPD_declare_mapper:
  6372. case OMPD_taskloop:
  6373. case OMPD_taskloop_simd:
  6374. case OMPD_master_taskloop:
  6375. case OMPD_master_taskloop_simd:
  6376. case OMPD_parallel_master_taskloop:
  6377. case OMPD_parallel_master_taskloop_simd:
  6378. case OMPD_requires:
  6379. case OMPD_metadirective:
  6380. case OMPD_unknown:
  6381. break;
  6382. default:
  6383. break;
  6384. }
  6385. llvm_unreachable("Unexpected directive kind.");
  6386. }
  6387. llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
  6388. CodeGenFunction &CGF, const OMPExecutableDirective &D) {
  6389. assert(!CGF.getLangOpts().OpenMPIsDevice &&
  6390. "Clauses associated with the teams directive expected to be emitted "
  6391. "only for the host!");
  6392. CGBuilderTy &Bld = CGF.Builder;
  6393. int32_t DefaultNT = -1;
  6394. const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT);
  6395. if (NumTeams != nullptr) {
  6396. OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  6397. switch (DirectiveKind) {
  6398. case OMPD_target: {
  6399. const auto *CS = D.getInnermostCapturedStmt();
  6400. CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
  6401. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  6402. llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
  6403. /*IgnoreResultAssign*/ true);
  6404. return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
  6405. /*isSigned=*/true);
  6406. }
  6407. case OMPD_target_teams:
  6408. case OMPD_target_teams_distribute:
  6409. case OMPD_target_teams_distribute_simd:
  6410. case OMPD_target_teams_distribute_parallel_for:
  6411. case OMPD_target_teams_distribute_parallel_for_simd: {
  6412. CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
  6413. llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
  6414. /*IgnoreResultAssign*/ true);
  6415. return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
  6416. /*isSigned=*/true);
  6417. }
  6418. default:
  6419. break;
  6420. }
  6421. } else if (DefaultNT == -1) {
  6422. return nullptr;
  6423. }
  6424. return Bld.getInt32(DefaultNT);
  6425. }
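/// Compute the number of threads implied by the construct nested directly
/// inside a target region. A rough sketch of the behaviour (assuming the
/// single child is a parallel construct): 'num_threads(n)' yields
/// min(n, DefaultThreadLimitVal) when a default thread limit is provided and
/// n otherwise, an 'if' clause that folds to false yields 1, a simd child
/// yields 1, and a value of 0 means "let the runtime pick the default".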
  6426. static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
  6427. llvm::Value *DefaultThreadLimitVal) {
  6428. const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
  6429. CGF.getContext(), CS->getCapturedStmt());
  6430. if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
  6431. if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
  6432. llvm::Value *NumThreads = nullptr;
  6433. llvm::Value *CondVal = nullptr;
// Handle the 'if' clause. If the 'if' clause is present, the number of
// threads is calculated as <cond> ? (<numthreads> ? <numthreads> : 0) : 1.
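// For illustration (a hypothetical nested construct): given
//   #pragma omp parallel if(c) num_threads(n)
// inside the target region, this computes c ? n : 1, with 0 standing in for
// n when no num_threads clause is present (meaning "runtime default").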
  6436. if (Dir->hasClausesOfKind<OMPIfClause>()) {
  6437. CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
  6438. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  6439. const OMPIfClause *IfClause = nullptr;
  6440. for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
  6441. if (C->getNameModifier() == OMPD_unknown ||
  6442. C->getNameModifier() == OMPD_parallel) {
  6443. IfClause = C;
  6444. break;
  6445. }
  6446. }
  6447. if (IfClause) {
  6448. const Expr *Cond = IfClause->getCondition();
  6449. bool Result;
  6450. if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
  6451. if (!Result)
  6452. return CGF.Builder.getInt32(1);
  6453. } else {
  6454. CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
  6455. if (const auto *PreInit =
  6456. cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
  6457. for (const auto *I : PreInit->decls()) {
  6458. if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
  6459. CGF.EmitVarDecl(cast<VarDecl>(*I));
  6460. } else {
  6461. CodeGenFunction::AutoVarEmission Emission =
  6462. CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
  6463. CGF.EmitAutoVarCleanups(Emission);
  6464. }
  6465. }
  6466. }
  6467. CondVal = CGF.EvaluateExprAsBool(Cond);
  6468. }
  6469. }
  6470. }
// Check the value of the num_threads clause only if the 'if' clause was not
// specified or did not evaluate to false.
  6473. if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
  6474. CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
  6475. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  6476. const auto *NumThreadsClause =
  6477. Dir->getSingleClause<OMPNumThreadsClause>();
  6478. CodeGenFunction::LexicalScope Scope(
  6479. CGF, NumThreadsClause->getNumThreads()->getSourceRange());
  6480. if (const auto *PreInit =
  6481. cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
  6482. for (const auto *I : PreInit->decls()) {
  6483. if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
  6484. CGF.EmitVarDecl(cast<VarDecl>(*I));
  6485. } else {
  6486. CodeGenFunction::AutoVarEmission Emission =
  6487. CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
  6488. CGF.EmitAutoVarCleanups(Emission);
  6489. }
  6490. }
  6491. }
  6492. NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
  6493. NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty,
  6494. /*isSigned=*/false);
  6495. if (DefaultThreadLimitVal)
  6496. NumThreads = CGF.Builder.CreateSelect(
  6497. CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
  6498. DefaultThreadLimitVal, NumThreads);
  6499. } else {
  6500. NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
  6501. : CGF.Builder.getInt32(0);
  6502. }
  6503. // Process condition of the if clause.
  6504. if (CondVal) {
  6505. NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
  6506. CGF.Builder.getInt32(1));
  6507. }
  6508. return NumThreads;
  6509. }
  6510. if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
  6511. return CGF.Builder.getInt32(1);
  6512. return DefaultThreadLimitVal;
  6513. }
  6514. return DefaultThreadLimitVal ? DefaultThreadLimitVal
  6515. : CGF.Builder.getInt32(0);
  6516. }
  6517. const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
  6518. CodeGenFunction &CGF, const OMPExecutableDirective &D,
  6519. int32_t &DefaultVal) {
  6520. OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  6521. assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
  6522. "Expected target-based executable directive.");
  6523. switch (DirectiveKind) {
  6524. case OMPD_target:
// The 'target' construct itself carries no thread_limit clause.
  6526. return nullptr;
  6527. case OMPD_target_teams:
  6528. case OMPD_target_teams_distribute:
  6529. if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
  6530. const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
  6531. const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit();
  6532. if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
  6533. if (auto Constant =
  6534. ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
  6535. DefaultVal = Constant->getExtValue();
  6536. return ThreadLimit;
  6537. }
  6538. return nullptr;
  6539. case OMPD_target_parallel:
  6540. case OMPD_target_parallel_for:
  6541. case OMPD_target_parallel_for_simd:
  6542. case OMPD_target_teams_distribute_parallel_for:
  6543. case OMPD_target_teams_distribute_parallel_for_simd: {
  6544. Expr *ThreadLimit = nullptr;
  6545. Expr *NumThreads = nullptr;
  6546. if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
  6547. const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
  6548. ThreadLimit = ThreadLimitClause->getThreadLimit();
  6549. if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
  6550. if (auto Constant =
  6551. ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
  6552. DefaultVal = Constant->getExtValue();
  6553. }
  6554. if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
  6555. const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
  6556. NumThreads = NumThreadsClause->getNumThreads();
  6557. if (NumThreads->isIntegerConstantExpr(CGF.getContext())) {
  6558. if (auto Constant =
  6559. NumThreads->getIntegerConstantExpr(CGF.getContext())) {
  6560. if (Constant->getExtValue() < DefaultVal) {
  6561. DefaultVal = Constant->getExtValue();
  6562. ThreadLimit = NumThreads;
  6563. }
  6564. }
  6565. }
  6566. }
  6567. return ThreadLimit;
  6568. }
  6569. case OMPD_target_teams_distribute_simd:
  6570. case OMPD_target_simd:
  6571. DefaultVal = 1;
  6572. return nullptr;
  6573. case OMPD_parallel:
  6574. case OMPD_for:
  6575. case OMPD_parallel_for:
  6576. case OMPD_parallel_master:
  6577. case OMPD_parallel_sections:
  6578. case OMPD_for_simd:
  6579. case OMPD_parallel_for_simd:
  6580. case OMPD_cancel:
  6581. case OMPD_cancellation_point:
  6582. case OMPD_ordered:
  6583. case OMPD_threadprivate:
  6584. case OMPD_allocate:
  6585. case OMPD_task:
  6586. case OMPD_simd:
  6587. case OMPD_tile:
  6588. case OMPD_unroll:
  6589. case OMPD_sections:
  6590. case OMPD_section:
  6591. case OMPD_single:
  6592. case OMPD_master:
  6593. case OMPD_critical:
  6594. case OMPD_taskyield:
  6595. case OMPD_barrier:
  6596. case OMPD_taskwait:
  6597. case OMPD_taskgroup:
  6598. case OMPD_atomic:
  6599. case OMPD_flush:
  6600. case OMPD_depobj:
  6601. case OMPD_scan:
  6602. case OMPD_teams:
  6603. case OMPD_target_data:
  6604. case OMPD_target_exit_data:
  6605. case OMPD_target_enter_data:
  6606. case OMPD_distribute:
  6607. case OMPD_distribute_simd:
  6608. case OMPD_distribute_parallel_for:
  6609. case OMPD_distribute_parallel_for_simd:
  6610. case OMPD_teams_distribute:
  6611. case OMPD_teams_distribute_simd:
  6612. case OMPD_teams_distribute_parallel_for:
  6613. case OMPD_teams_distribute_parallel_for_simd:
  6614. case OMPD_target_update:
  6615. case OMPD_declare_simd:
  6616. case OMPD_declare_variant:
  6617. case OMPD_begin_declare_variant:
  6618. case OMPD_end_declare_variant:
  6619. case OMPD_declare_target:
  6620. case OMPD_end_declare_target:
  6621. case OMPD_declare_reduction:
  6622. case OMPD_declare_mapper:
  6623. case OMPD_taskloop:
  6624. case OMPD_taskloop_simd:
  6625. case OMPD_master_taskloop:
  6626. case OMPD_master_taskloop_simd:
  6627. case OMPD_parallel_master_taskloop:
  6628. case OMPD_parallel_master_taskloop_simd:
  6629. case OMPD_requires:
  6630. case OMPD_unknown:
  6631. break;
  6632. default:
  6633. break;
  6634. }
  6635. llvm_unreachable("Unsupported directive kind.");
  6636. }
  6637. llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
  6638. CodeGenFunction &CGF, const OMPExecutableDirective &D) {
  6639. assert(!CGF.getLangOpts().OpenMPIsDevice &&
  6640. "Clauses associated with the teams directive expected to be emitted "
  6641. "only for the host!");
  6642. OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  6643. assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
  6644. "Expected target-based executable directive.");
  6645. CGBuilderTy &Bld = CGF.Builder;
  6646. llvm::Value *ThreadLimitVal = nullptr;
  6647. llvm::Value *NumThreadsVal = nullptr;
  6648. switch (DirectiveKind) {
  6649. case OMPD_target: {
  6650. const CapturedStmt *CS = D.getInnermostCapturedStmt();
  6651. if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
  6652. return NumThreads;
  6653. const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
  6654. CGF.getContext(), CS->getCapturedStmt());
  6655. if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
  6656. if (Dir->hasClausesOfKind<OMPThreadLimitClause>()) {
  6657. CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
  6658. CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  6659. const auto *ThreadLimitClause =
  6660. Dir->getSingleClause<OMPThreadLimitClause>();
  6661. CodeGenFunction::LexicalScope Scope(
  6662. CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
  6663. if (const auto *PreInit =
  6664. cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
  6665. for (const auto *I : PreInit->decls()) {
  6666. if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
  6667. CGF.EmitVarDecl(cast<VarDecl>(*I));
  6668. } else {
  6669. CodeGenFunction::AutoVarEmission Emission =
  6670. CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
  6671. CGF.EmitAutoVarCleanups(Emission);
  6672. }
  6673. }
  6674. }
  6675. llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
  6676. ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
  6677. ThreadLimitVal =
  6678. Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
  6679. }
  6680. if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
  6681. !isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
  6682. CS = Dir->getInnermostCapturedStmt();
  6683. const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
  6684. CGF.getContext(), CS->getCapturedStmt());
  6685. Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
  6686. }
  6687. if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
  6688. !isOpenMPSimdDirective(Dir->getDirectiveKind())) {
  6689. CS = Dir->getInnermostCapturedStmt();
  6690. if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
  6691. return NumThreads;
  6692. }
  6693. if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
  6694. return Bld.getInt32(1);
  6695. }
  6696. return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
  6697. }
  6698. case OMPD_target_teams: {
  6699. if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
  6700. CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
  6701. const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
  6702. llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
  6703. ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
  6704. ThreadLimitVal =
  6705. Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
  6706. }
  6707. const CapturedStmt *CS = D.getInnermostCapturedStmt();
  6708. if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
  6709. return NumThreads;
  6710. const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
  6711. CGF.getContext(), CS->getCapturedStmt());
  6712. if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
  6713. if (Dir->getDirectiveKind() == OMPD_distribute) {
  6714. CS = Dir->getInnermostCapturedStmt();
  6715. if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
  6716. return NumThreads;
  6717. }
  6718. }
  6719. return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
  6720. }
  6721. case OMPD_target_teams_distribute:
  6722. if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
  6723. CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
  6724. const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
  6725. llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
  6726. ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
  6727. ThreadLimitVal =
  6728. Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
  6729. }
  6730. return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal);
  6731. case OMPD_target_parallel:
  6732. case OMPD_target_parallel_for:
  6733. case OMPD_target_parallel_for_simd:
  6734. case OMPD_target_teams_distribute_parallel_for:
  6735. case OMPD_target_teams_distribute_parallel_for_simd: {
  6736. llvm::Value *CondVal = nullptr;
// Handle the 'if' clause. If the 'if' clause is present, the number of
// threads is calculated as <cond> ? (<numthreads> ? <numthreads> : 0) : 1.
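// As a sketch (hypothetical clauses): for
//   #pragma omp target parallel if(c) num_threads(n) thread_limit(t)
// the value emitted below is c ? min(n, t) : 1, and 0 stands in when neither
// num_threads nor thread_limit is present.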
  6739. if (D.hasClausesOfKind<OMPIfClause>()) {
  6740. const OMPIfClause *IfClause = nullptr;
  6741. for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
  6742. if (C->getNameModifier() == OMPD_unknown ||
  6743. C->getNameModifier() == OMPD_parallel) {
  6744. IfClause = C;
  6745. break;
  6746. }
  6747. }
  6748. if (IfClause) {
  6749. const Expr *Cond = IfClause->getCondition();
  6750. bool Result;
  6751. if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
  6752. if (!Result)
  6753. return Bld.getInt32(1);
  6754. } else {
  6755. CodeGenFunction::RunCleanupsScope Scope(CGF);
  6756. CondVal = CGF.EvaluateExprAsBool(Cond);
  6757. }
  6758. }
  6759. }
  6760. if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
  6761. CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
  6762. const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
  6763. llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
  6764. ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
  6765. ThreadLimitVal =
  6766. Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
  6767. }
  6768. if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
  6769. CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
  6770. const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
  6771. llvm::Value *NumThreads = CGF.EmitScalarExpr(
  6772. NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true);
  6773. NumThreadsVal =
  6774. Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
  6775. ThreadLimitVal = ThreadLimitVal
  6776. ? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
  6777. ThreadLimitVal),
  6778. NumThreadsVal, ThreadLimitVal)
  6779. : NumThreadsVal;
  6780. }
  6781. if (!ThreadLimitVal)
  6782. ThreadLimitVal = Bld.getInt32(0);
  6783. if (CondVal)
  6784. return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
  6785. return ThreadLimitVal;
  6786. }
  6787. case OMPD_target_teams_distribute_simd:
  6788. case OMPD_target_simd:
  6789. return Bld.getInt32(1);
  6790. case OMPD_parallel:
  6791. case OMPD_for:
  6792. case OMPD_parallel_for:
  6793. case OMPD_parallel_master:
  6794. case OMPD_parallel_sections:
  6795. case OMPD_for_simd:
  6796. case OMPD_parallel_for_simd:
  6797. case OMPD_cancel:
  6798. case OMPD_cancellation_point:
  6799. case OMPD_ordered:
  6800. case OMPD_threadprivate:
  6801. case OMPD_allocate:
  6802. case OMPD_task:
  6803. case OMPD_simd:
  6804. case OMPD_tile:
  6805. case OMPD_unroll:
  6806. case OMPD_sections:
  6807. case OMPD_section:
  6808. case OMPD_single:
  6809. case OMPD_master:
  6810. case OMPD_critical:
  6811. case OMPD_taskyield:
  6812. case OMPD_barrier:
  6813. case OMPD_taskwait:
  6814. case OMPD_taskgroup:
  6815. case OMPD_atomic:
  6816. case OMPD_flush:
  6817. case OMPD_depobj:
  6818. case OMPD_scan:
  6819. case OMPD_teams:
  6820. case OMPD_target_data:
  6821. case OMPD_target_exit_data:
  6822. case OMPD_target_enter_data:
  6823. case OMPD_distribute:
  6824. case OMPD_distribute_simd:
  6825. case OMPD_distribute_parallel_for:
  6826. case OMPD_distribute_parallel_for_simd:
  6827. case OMPD_teams_distribute:
  6828. case OMPD_teams_distribute_simd:
  6829. case OMPD_teams_distribute_parallel_for:
  6830. case OMPD_teams_distribute_parallel_for_simd:
  6831. case OMPD_target_update:
  6832. case OMPD_declare_simd:
  6833. case OMPD_declare_variant:
  6834. case OMPD_begin_declare_variant:
  6835. case OMPD_end_declare_variant:
  6836. case OMPD_declare_target:
  6837. case OMPD_end_declare_target:
  6838. case OMPD_declare_reduction:
  6839. case OMPD_declare_mapper:
  6840. case OMPD_taskloop:
  6841. case OMPD_taskloop_simd:
  6842. case OMPD_master_taskloop:
  6843. case OMPD_master_taskloop_simd:
  6844. case OMPD_parallel_master_taskloop:
  6845. case OMPD_parallel_master_taskloop_simd:
  6846. case OMPD_requires:
  6847. case OMPD_metadirective:
  6848. case OMPD_unknown:
  6849. break;
  6850. default:
  6851. break;
  6852. }
  6853. llvm_unreachable("Unsupported directive kind.");
  6854. }
  6855. namespace {
  6856. LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
  6857. // Utility to handle information from clauses associated with a given
  6858. // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
  6859. // It provides a convenient interface to obtain the information and generate
  6860. // code for that information.
  6861. class MappableExprsHandler {
  6862. public:
  6863. /// Values for bit flags used to specify the mapping type for
  6864. /// offloading.
  6865. enum OpenMPOffloadMappingFlags : uint64_t {
  6866. /// No flags
  6867. OMP_MAP_NONE = 0x0,
  6868. /// Allocate memory on the device and move data from host to device.
  6869. OMP_MAP_TO = 0x01,
  6870. /// Allocate memory on the device and move data from device to host.
  6871. OMP_MAP_FROM = 0x02,
  6872. /// Always perform the requested mapping action on the element, even
  6873. /// if it was already mapped before.
  6874. OMP_MAP_ALWAYS = 0x04,
  6875. /// Delete the element from the device environment, ignoring the
  6876. /// current reference count associated with the element.
  6877. OMP_MAP_DELETE = 0x08,
  6878. /// The element being mapped is a pointer-pointee pair; both the
  6879. /// pointer and the pointee should be mapped.
  6880. OMP_MAP_PTR_AND_OBJ = 0x10,
/// This flag signals that the base address of an entry should be
/// passed to the target kernel as an argument.
  6883. OMP_MAP_TARGET_PARAM = 0x20,
  6884. /// Signal that the runtime library has to return the device pointer
  6885. /// in the current position for the data being mapped. Used when we have the
  6886. /// use_device_ptr or use_device_addr clause.
  6887. OMP_MAP_RETURN_PARAM = 0x40,
  6888. /// This flag signals that the reference being passed is a pointer to
  6889. /// private data.
  6890. OMP_MAP_PRIVATE = 0x80,
  6891. /// Pass the element to the device by value.
  6892. OMP_MAP_LITERAL = 0x100,
  6893. /// Implicit map
  6894. OMP_MAP_IMPLICIT = 0x200,
  6895. /// Close is a hint to the runtime to allocate memory close to
  6896. /// the target device.
  6897. OMP_MAP_CLOSE = 0x400,
  6898. /// 0x800 is reserved for compatibility with XLC.
  6899. /// Produce a runtime error if the data is not already allocated.
  6900. OMP_MAP_PRESENT = 0x1000,
  6901. // Increment and decrement a separate reference counter so that the data
  6902. // cannot be unmapped within the associated region. Thus, this flag is
  6903. // intended to be used on 'target' and 'target data' directives because they
  6904. // are inherently structured. It is not intended to be used on 'target
  6905. // enter data' and 'target exit data' directives because they are inherently
  6906. // dynamic.
  6907. // This is an OpenMP extension for the sake of OpenACC support.
  6908. OMP_MAP_OMPX_HOLD = 0x2000,
  6909. /// Signal that the runtime library should use args as an array of
  6910. /// descriptor_dim pointers and use args_size as dims. Used when we have
/// non-contiguous list items in the target update directive.
  6912. OMP_MAP_NON_CONTIG = 0x100000000000,
  6913. /// The 16 MSBs of the flags indicate whether the entry is member of some
  6914. /// struct/class.
  6915. OMP_MAP_MEMBER_OF = 0xffff000000000000,
  6916. LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
  6917. };
  6918. /// Get the offset of the OMP_MAP_MEMBER_OF field.
  6919. static unsigned getFlagMemberOffset() {
  6920. unsigned Offset = 0;
  6921. for (uint64_t Remain = OMP_MAP_MEMBER_OF; !(Remain & 1);
  6922. Remain = Remain >> 1)
  6923. Offset++;
  6924. return Offset;
  6925. }
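// For example, with OMP_MAP_MEMBER_OF == 0xffff000000000000 the lowest set
// bit is bit 48, so this returns 48: member-of positions such as the
// MEMBER_OF(1) / MEMBER_OF(2) annotations in the comments below are encoded
// in the top 16 bits of the flags.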
  6926. /// Class that holds debugging information for a data mapping to be passed to
  6927. /// the runtime library.
  6928. class MappingExprInfo {
  6929. /// The variable declaration used for the data mapping.
  6930. const ValueDecl *MapDecl = nullptr;
  6931. /// The original expression used in the map clause, or null if there is
  6932. /// none.
  6933. const Expr *MapExpr = nullptr;
  6934. public:
  6935. MappingExprInfo(const ValueDecl *MapDecl, const Expr *MapExpr = nullptr)
  6936. : MapDecl(MapDecl), MapExpr(MapExpr) {}
  6937. const ValueDecl *getMapDecl() const { return MapDecl; }
  6938. const Expr *getMapExpr() const { return MapExpr; }
  6939. };
  6940. /// Class that associates information with a base pointer to be passed to the
  6941. /// runtime library.
  6942. class BasePointerInfo {
  6943. /// The base pointer.
  6944. llvm::Value *Ptr = nullptr;
  6945. /// The base declaration that refers to this device pointer, or null if
  6946. /// there is none.
  6947. const ValueDecl *DevPtrDecl = nullptr;
  6948. public:
  6949. BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
  6950. : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
  6951. llvm::Value *operator*() const { return Ptr; }
  6952. const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
  6953. void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
  6954. };
  6955. using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
  6956. using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
  6957. using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
  6958. using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
  6959. using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
  6960. using MapDimArrayTy = SmallVector<uint64_t, 4>;
  6961. using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
  6962. /// This structure contains combined information generated for mappable
  6963. /// clauses, including base pointers, pointers, sizes, map types, user-defined
  6964. /// mappers, and non-contiguous information.
  6965. struct MapCombinedInfoTy {
  6966. struct StructNonContiguousInfo {
  6967. bool IsNonContiguous = false;
  6968. MapDimArrayTy Dims;
  6969. MapNonContiguousArrayTy Offsets;
  6970. MapNonContiguousArrayTy Counts;
  6971. MapNonContiguousArrayTy Strides;
  6972. };
  6973. MapExprsArrayTy Exprs;
  6974. MapBaseValuesArrayTy BasePointers;
  6975. MapValuesArrayTy Pointers;
  6976. MapValuesArrayTy Sizes;
  6977. MapFlagsArrayTy Types;
  6978. MapMappersArrayTy Mappers;
  6979. StructNonContiguousInfo NonContigInfo;
  6980. /// Append arrays in \a CurInfo.
  6981. void append(MapCombinedInfoTy &CurInfo) {
  6982. Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
  6983. BasePointers.append(CurInfo.BasePointers.begin(),
  6984. CurInfo.BasePointers.end());
  6985. Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
  6986. Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
  6987. Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
  6988. Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
  6989. NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
  6990. CurInfo.NonContigInfo.Dims.end());
  6991. NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
  6992. CurInfo.NonContigInfo.Offsets.end());
  6993. NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
  6994. CurInfo.NonContigInfo.Counts.end());
  6995. NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
  6996. CurInfo.NonContigInfo.Strides.end());
  6997. }
  6998. };
/// Map between a struct and its lowest & highest elements which have been
/// mapped.
  7001. /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
  7002. /// HE(FieldIndex, Pointer)}
  7003. struct StructRangeInfoTy {
  7004. MapCombinedInfoTy PreliminaryMapData;
  7005. std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
  7006. 0, Address::invalid()};
  7007. std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
  7008. 0, Address::invalid()};
  7009. Address Base = Address::invalid();
  7010. Address LB = Address::invalid();
  7011. bool IsArraySection = false;
  7012. bool HasCompleteRecord = false;
  7013. };
  7014. private:
/// Information about a mappable expression component list, including whether
/// the corresponding device pointer has to be returned.
  7016. struct MapInfo {
  7017. OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
  7018. OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  7019. ArrayRef<OpenMPMapModifierKind> MapModifiers;
  7020. ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
  7021. bool ReturnDevicePointer = false;
  7022. bool IsImplicit = false;
  7023. const ValueDecl *Mapper = nullptr;
  7024. const Expr *VarRef = nullptr;
  7025. bool ForDeviceAddr = false;
  7026. MapInfo() = default;
  7027. MapInfo(
  7028. OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
  7029. OpenMPMapClauseKind MapType,
  7030. ArrayRef<OpenMPMapModifierKind> MapModifiers,
  7031. ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
  7032. bool ReturnDevicePointer, bool IsImplicit,
  7033. const ValueDecl *Mapper = nullptr, const Expr *VarRef = nullptr,
  7034. bool ForDeviceAddr = false)
  7035. : Components(Components), MapType(MapType), MapModifiers(MapModifiers),
  7036. MotionModifiers(MotionModifiers),
  7037. ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
  7038. Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {}
  7039. };
  7040. /// If use_device_ptr or use_device_addr is used on a decl which is a struct
  7041. /// member and there is no map information about it, then emission of that
  7042. /// entry is deferred until the whole struct has been processed.
  7043. struct DeferredDevicePtrEntryTy {
  7044. const Expr *IE = nullptr;
  7045. const ValueDecl *VD = nullptr;
  7046. bool ForDeviceAddr = false;
  7047. DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
  7048. bool ForDeviceAddr)
  7049. : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
  7050. };
/// The target directive from where the mappable clauses were extracted. It
/// is either an executable directive or a user-defined mapper directive.
  7053. llvm::PointerUnion<const OMPExecutableDirective *,
  7054. const OMPDeclareMapperDecl *>
  7055. CurDir;
  7056. /// Function the directive is being generated for.
  7057. CodeGenFunction &CGF;
  7058. /// Set of all first private variables in the current directive.
  7059. /// bool data is set to true if the variable is implicitly marked as
  7060. /// firstprivate, false otherwise.
  7061. llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, bool> FirstPrivateDecls;
  7062. /// Map between device pointer declarations and their expression components.
  7063. /// The key value for declarations in 'this' is null.
  7064. llvm::DenseMap<
  7065. const ValueDecl *,
  7066. SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
  7067. DevPointersMap;
  7068. /// Map between lambda declarations and their map type.
  7069. llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
  7070. llvm::Value *getExprTypeSize(const Expr *E) const {
  7071. QualType ExprTy = E->getType().getCanonicalType();
  7072. // Calculate the size for array shaping expression.
  7073. if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
  7074. llvm::Value *Size =
  7075. CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
  7076. for (const Expr *SE : OAE->getDimensions()) {
  7077. llvm::Value *Sz = CGF.EmitScalarExpr(SE);
  7078. Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
  7079. CGF.getContext().getSizeType(),
  7080. SE->getExprLoc());
  7081. Size = CGF.Builder.CreateNUWMul(Size, Sz);
  7082. }
  7083. return Size;
  7084. }
  7085. // Reference types are ignored for mapping purposes.
  7086. if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
  7087. ExprTy = RefTy->getPointeeType().getCanonicalType();
  7088. // Given that an array section is considered a built-in type, we need to
  7089. // do the calculation based on the length of the section instead of relying
  7090. // on CGF.getTypeSize(E->getType()).
  7091. if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
  7092. QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
  7093. OAE->getBase()->IgnoreParenImpCasts())
  7094. .getCanonicalType();
// If there is no length associated with the expression and the lower bound
// is not specified either, that means we are using the whole length of the
// base.
  7098. if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
  7099. !OAE->getLowerBound())
  7100. return CGF.getTypeSize(BaseTy);
  7101. llvm::Value *ElemSize;
  7102. if (const auto *PTy = BaseTy->getAs<PointerType>()) {
  7103. ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
  7104. } else {
  7105. const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
  7106. assert(ATy && "Expecting array type if not a pointer type.");
  7107. ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
  7108. }
  7109. // If we don't have a length at this point, that is because we have an
  7110. // array section with a single element.
  7111. if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
  7112. return ElemSize;
  7113. if (const Expr *LenExpr = OAE->getLength()) {
  7114. llvm::Value *LengthVal = CGF.EmitScalarExpr(LenExpr);
  7115. LengthVal = CGF.EmitScalarConversion(LengthVal, LenExpr->getType(),
  7116. CGF.getContext().getSizeType(),
  7117. LenExpr->getExprLoc());
  7118. return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
  7119. }
  7120. assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
  7121. OAE->getLowerBound() && "expected array_section[lb:].");
// Size = sizeof(base type) - lb * sizeof(element type), clamped to zero
// below if it would be negative.
  7123. llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
  7124. llvm::Value *LBVal = CGF.EmitScalarExpr(OAE->getLowerBound());
  7125. LBVal = CGF.EmitScalarConversion(LBVal, OAE->getLowerBound()->getType(),
  7126. CGF.getContext().getSizeType(),
  7127. OAE->getLowerBound()->getExprLoc());
  7128. LBVal = CGF.Builder.CreateNUWMul(LBVal, ElemSize);
  7129. llvm::Value *Cmp = CGF.Builder.CreateICmpUGT(LengthVal, LBVal);
  7130. llvm::Value *TrueVal = CGF.Builder.CreateNUWSub(LengthVal, LBVal);
  7131. LengthVal = CGF.Builder.CreateSelect(
  7132. Cmp, TrueVal, llvm::ConstantInt::get(CGF.SizeTy, 0));
  7133. return LengthVal;
  7134. }
  7135. return CGF.getTypeSize(ExprTy);
  7136. }
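// A few size computations by the function above, as an illustration
// (hypothetical declarations "double x[100]; double *p;"): the section
// x[10:20] yields 20 * sizeof(double) = 160 bytes, x[:] yields the full
// 800 bytes, x[10:] yields 800 - 10 * sizeof(double) = 720 bytes (clamped
// to 0 if the lower bound were past the end), and the array shaping
// expression ([5][6])p yields 5 * 6 * sizeof(double) = 240 bytes.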
  7137. /// Return the corresponding bits for a given map clause modifier. Add
  7138. /// a flag marking the map as a pointer if requested. Add a flag marking the
  7139. /// map as the first one of a series of maps that relate to the same map
  7140. /// expression.
  7141. OpenMPOffloadMappingFlags getMapTypeBits(
  7142. OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
  7143. ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
  7144. bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const {
  7145. OpenMPOffloadMappingFlags Bits =
  7146. IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
  7147. switch (MapType) {
  7148. case OMPC_MAP_alloc:
  7149. case OMPC_MAP_release:
  7150. // alloc and release is the default behavior in the runtime library, i.e.
  7151. // if we don't pass any bits alloc/release that is what the runtime is
  7152. // going to do. Therefore, we don't need to signal anything for these two
  7153. // type modifiers.
  7154. break;
  7155. case OMPC_MAP_to:
  7156. Bits |= OMP_MAP_TO;
  7157. break;
  7158. case OMPC_MAP_from:
  7159. Bits |= OMP_MAP_FROM;
  7160. break;
  7161. case OMPC_MAP_tofrom:
  7162. Bits |= OMP_MAP_TO | OMP_MAP_FROM;
  7163. break;
  7164. case OMPC_MAP_delete:
  7165. Bits |= OMP_MAP_DELETE;
  7166. break;
  7167. case OMPC_MAP_unknown:
  7168. llvm_unreachable("Unexpected map type!");
  7169. }
  7170. if (AddPtrFlag)
  7171. Bits |= OMP_MAP_PTR_AND_OBJ;
  7172. if (AddIsTargetParamFlag)
  7173. Bits |= OMP_MAP_TARGET_PARAM;
  7174. if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_always))
  7175. Bits |= OMP_MAP_ALWAYS;
  7176. if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_close))
  7177. Bits |= OMP_MAP_CLOSE;
  7178. if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_present) ||
  7179. llvm::is_contained(MotionModifiers, OMPC_MOTION_MODIFIER_present))
  7180. Bits |= OMP_MAP_PRESENT;
  7181. if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_ompx_hold))
  7182. Bits |= OMP_MAP_OMPX_HOLD;
  7183. if (IsNonContiguous)
  7184. Bits |= OMP_MAP_NON_CONTIG;
  7185. return Bits;
  7186. }
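// A worked example for getMapTypeBits above (hypothetical clause): for
// "map(always, tofrom: x)" with AddIsTargetParamFlag set, the result is
//   OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS | OMP_MAP_TARGET_PARAM
//   = 0x1 | 0x2 | 0x4 | 0x20 = 0x27.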
/// Return true if the provided expression is a final array section. A
/// final array section is one whose length can't be proved to be one.
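/// For illustration (hypothetical declarations "int a[10]; int *p;"):
/// a[2:5] and p[0:n] are final array sections, while a[3:1] is not, and a
/// plain subscript such as a[3] is not an array section at all.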
  7189. bool isFinalArraySectionExpression(const Expr *E) const {
  7190. const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
  7191. // It is not an array section and therefore not a unity-size one.
  7192. if (!OASE)
  7193. return false;
// An array section with no colon always refers to a single element.
  7195. if (OASE->getColonLocFirst().isInvalid())
  7196. return false;
  7197. const Expr *Length = OASE->getLength();
// If we don't have a length, we have to check if the array has size 1
// for this dimension. Also, we should always expect a length if the
// base type is a pointer.
  7201. if (!Length) {
  7202. QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
  7203. OASE->getBase()->IgnoreParenImpCasts())
  7204. .getCanonicalType();
  7205. if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
  7206. return ATy->getSize().getSExtValue() != 1;
// If we don't have a constant dimension length, we have to consider
// the current section as having any size, so it is not necessarily
// unitary. If it happens to be of unity size, that's the user's fault.
  7210. return true;
  7211. }
  7212. // Check if the length evaluates to 1.
  7213. Expr::EvalResult Result;
  7214. if (!Length->EvaluateAsInt(Result, CGF.getContext()))
return true; // Can have a size greater than 1.
  7216. llvm::APSInt ConstLength = Result.Val.getInt();
  7217. return ConstLength.getSExtValue() != 1;
  7218. }
  7219. /// Generate the base pointers, section pointers, sizes, map type bits, and
  7220. /// user-defined mappers (all included in \a CombinedInfo) for the provided
  7221. /// map type, map or motion modifiers, and expression components.
  7222. /// \a IsFirstComponent should be set to true if the provided set of
  7223. /// components is the first associated with a capture.
  7224. void generateInfoForComponentList(
  7225. OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
  7226. ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
  7227. OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
  7228. MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
  7229. bool IsFirstComponentList, bool IsImplicit,
  7230. const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
  7231. const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
  7232. ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
  7233. OverlappedElements = llvm::None) const {
  7234. // The following summarizes what has to be generated for each map and the
  7235. // types below. The generated information is expressed in this order:
  7236. // base pointer, section pointer, size, flags
  7237. // (to add to the ones that come from the map type and modifier).
  7238. //
  7239. // double d;
  7240. // int i[100];
  7241. // float *p;
  7242. //
  7243. // struct S1 {
  7244. // int i;
  7245. // float f[50];
  7246. // }
  7247. // struct S2 {
  7248. // int i;
  7249. // float f[50];
  7250. // S1 s;
  7251. // double *p;
  7252. // struct S2 *ps;
  7253. // int &ref;
  7254. // }
  7255. // S2 s;
  7256. // S2 *ps;
  7257. //
  7258. // map(d)
  7259. // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
  7260. //
  7261. // map(i)
  7262. // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
  7263. //
  7264. // map(i[1:23])
  7265. // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
  7266. //
  7267. // map(p)
  7268. // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
  7269. //
  7270. // map(p[1:24])
  7271. // &p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM | PTR_AND_OBJ
  7272. // in unified shared memory mode or for local pointers
  7273. // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
  7274. //
  7275. // map(s)
  7276. // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
  7277. //
  7278. // map(s.i)
  7279. // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
  7280. //
  7281. // map(s.s.f)
  7282. // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
  7283. //
  7284. // map(s.p)
  7285. // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
  7286. //
  7287. // map(to: s.p[:22])
  7288. // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
  7289. // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
  7290. // &(s.p), &(s.p[0]), 22*sizeof(double),
  7291. // MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
  7292. // (*) alloc space for struct members, only this is a target parameter
  7293. // (**) map the pointer (nothing to be mapped in this example) (the compiler
  7294. // optimizes this entry out, same in the examples below)
  7295. // (***) map the pointee (map: to)
  7296. //
  7297. // map(to: s.ref)
  7298. // &s, &(s.ref), sizeof(int*), TARGET_PARAM (*)
  7299. // &s, &(s.ref), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
  7300. // (*) alloc space for struct members, only this is a target parameter
  7301. // (**) map the pointer (nothing to be mapped in this example) (the compiler
  7302. // optimizes this entry out, same in the examples below)
  7303. // (***) map the pointee (map: to)
  7304. //
  7305. // map(s.ps)
  7306. // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
  7307. //
  7308. // map(from: s.ps->s.i)
  7309. // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
  7310. // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
  7311. // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
  7312. //
  7313. // map(to: s.ps->ps)
  7314. // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
  7315. // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
  7316. // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
  7317. //
  7318. // map(s.ps->ps->ps)
  7319. // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
  7320. // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
  7321. // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
  7322. // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
  7323. //
  7324. // map(to: s.ps->ps->s.f[:22])
  7325. // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
  7326. // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
  7327. // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
  7328. // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
  7329. //
  7330. // map(ps)
  7331. // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
  7332. //
  7333. // map(ps->i)
  7334. // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
  7335. //
  7336. // map(ps->s.f)
  7337. // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
  7338. //
  7339. // map(from: ps->p)
  7340. // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
  7341. //
  7342. // map(to: ps->p[:22])
  7343. // ps, &(ps->p), sizeof(double*), TARGET_PARAM
  7344. // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
  7345. // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
  7346. //
  7347. // map(ps->ps)
  7348. // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
  7349. //
  7350. // map(from: ps->ps->s.i)
  7351. // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
  7352. // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
  7353. // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
  7354. //
  7355. // map(from: ps->ps->ps)
  7356. // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
  7357. // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
  7358. // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
  7359. //
  7360. // map(ps->ps->ps->ps)
  7361. // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
  7362. // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
  7363. // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
  7364. // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
  7365. //
  7366. // map(to: ps->ps->ps->s.f[:22])
  7367. // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
  7368. // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
  7369. // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
  7370. // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
  7371. //
  7372. // map(to: s.f[:22]) map(from: s.p[:33])
  7373. // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
  7374. // sizeof(double*) (**), TARGET_PARAM
  7375. // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
  7376. // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
  7377. // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
  7378. // (*) allocate contiguous space needed to fit all mapped members even if
  7379. // we allocate space for members not mapped (in this example,
  7380. // s.f[22..49] and s.s are not mapped, yet we must allocate space for
  7381. // them as well because they fall between &s.f[0] and &s.p)
  7382. //
  7383. // map(from: s.f[:22]) map(to: ps->p[:33])
  7384. // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
  7385. // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
  7386. // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
  7387. // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
  7388. // (*) the struct this entry pertains to is the 2nd element in the list of
  7389. // arguments, hence MEMBER_OF(2)
  7390. //
  7391. // map(from: s.f[:22], s.s) map(to: ps->p[:33])
  7392. // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
  7393. // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
  7394. // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
  7395. // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
  7396. // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
  7397. // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
  7398. // (*) the struct this entry pertains to is the 4th element in the list
  7399. // of arguments, hence MEMBER_OF(4)
  7400. // Track if the map information being generated is the first for a capture.
  7401. bool IsCaptureFirstInfo = IsFirstComponentList;
  7402. // When the variable is on a declare target link or in a to clause with
  7403. // unified memory, a reference is needed to hold the host/device address
  7404. // of the variable.
  7405. bool RequiresReference = false;
  7406. // Scan the components from the base to the complete expression.
  7407. auto CI = Components.rbegin();
  7408. auto CE = Components.rend();
  7409. auto I = CI;
  7410. // Track if the map information being generated is the first for a list of
  7411. // components.
  7412. bool IsExpressionFirstInfo = true;
  7413. bool FirstPointerInComplexData = false;
  7414. Address BP = Address::invalid();
  7415. const Expr *AssocExpr = I->getAssociatedExpression();
  7416. const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
  7417. const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
  7418. const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
  7419. if (isa<MemberExpr>(AssocExpr)) {
  7420. // The base is the 'this' pointer. The content of the pointer is going
  7421. // to be the base of the field being mapped.
  7422. BP = CGF.LoadCXXThisAddress();
  7423. } else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
  7424. (OASE &&
  7425. isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
  7426. BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
  7427. } else if (OAShE &&
  7428. isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
  7429. BP = Address(
  7430. CGF.EmitScalarExpr(OAShE->getBase()),
  7431. CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
  7432. } else {
  7433. // The base is the reference to the variable.
  7434. // BP = &Var.
  7435. BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
  7436. if (const auto *VD =
  7437. dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
  7438. if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
  7439. OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
  7440. if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
  7441. (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  7442. CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
  7443. RequiresReference = true;
  7444. BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  7445. }
  7446. }
  7447. }
  7448. // If the variable is a pointer and is being dereferenced (i.e. is not
  7449. // the last component), the base has to be the pointer itself, not its
  7450. // reference. References are ignored for mapping purposes.
  7451. QualType Ty =
  7452. I->getAssociatedDeclaration()->getType().getNonReferenceType();
  7453. if (Ty->isAnyPointerType() && std::next(I) != CE) {
// No need to generate individual map information for the pointer; it
// can be associated with the combined storage if shared memory mode is
// active or the base declaration is not a global variable.
  7457. const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
  7458. if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
  7459. !VD || VD->hasLocalStorage())
  7460. BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
  7461. else
  7462. FirstPointerInComplexData = true;
  7463. ++I;
  7464. }
  7465. }
// Track whether a component of the list should be marked as MEMBER_OF some
// combined entry (for partial structs). Only the first PTR_AND_OBJ entry
// in a component list should be marked as MEMBER_OF; all subsequent entries
// do not belong to the base struct. E.g.
// struct S2 s;
// s.ps->ps->ps->f[:]
//   (1)  (2)  (3)  (4)
// ps(1) is a member pointer and ps(2) is the pointee of ps(1), so ps(2) is a
// PTR_AND_OBJ entry; its PTR is ps(1), hence it is MEMBER_OF the base
// struct. ps(3) is the pointee of ps(2), which is not a member of the base
// struct, so it should not be marked as such (it is still PTR_AND_OBJ).
// The variable is initialized to false so that PTR_AND_OBJ entries which
// are not struct members are not considered (e.g. array of pointers to
// data).
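// Added illustrative sketch (flags only, map-type bits omitted): for the
// example above the generated entries would roughly be
//   &s              -> &(s.ps)              MEMBER_OF(1)
//   &(s.ps)         -> &(s.ps->ps)          MEMBER_OF(1) | PTR_AND_OBJ
//   &(s.ps->ps)     -> &(s.ps->ps->ps)      PTR_AND_OBJ
//   &(s.ps->ps->ps) -> &(s.ps->ps->ps->f[0])  PTR_AND_OBJ | TO/FROM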
  7480. bool ShouldBeMemberOf = false;
  7481. // Variable keeping track of whether or not we have encountered a component
  7482. // in the component list which is a member expression. Useful when we have a
  7483. // pointer or a final array section, in which case it is the previous
  7484. // component in the list which tells us whether we have a member expression.
  7485. // E.g. X.f[:]
  7486. // While processing the final array section "[:]" it is "f" which tells us
  7487. // whether we are dealing with a member of a declared struct.
  7488. const MemberExpr *EncounteredME = nullptr;
// Track the total number of dimensions. Start from one for the dummy
// dimension.
  7491. uint64_t DimSize = 1;
  7492. bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
  7493. bool IsPrevMemberReference = false;
  7494. for (; I != CE; ++I) {
// If the current component is a member of a struct (parent struct), mark it.
  7496. if (!EncounteredME) {
  7497. EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
  7498. // If we encounter a PTR_AND_OBJ entry from now on it should be marked
  7499. // as MEMBER_OF the parent struct.
  7500. if (EncounteredME) {
  7501. ShouldBeMemberOf = true;
// Do not emit as a complex pointer if this is actually not an array-like
// expression.
  7504. if (FirstPointerInComplexData) {
  7505. QualType Ty = std::prev(I)
  7506. ->getAssociatedDeclaration()
  7507. ->getType()
  7508. .getNonReferenceType();
  7509. BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
  7510. FirstPointerInComplexData = false;
  7511. }
  7512. }
  7513. }
  7514. auto Next = std::next(I);
// We need to generate the addresses and sizes if this is the last
// component, if the component is a pointer, or if it is an array section
// whose length can't be proved to be one. If this is a pointer, it
// becomes the base address for the following components.
// A final array section is one whose length can't be proved to be one.
// If the map item is non-contiguous then we don't treat any array section
// as a final array section.
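// Added example (assumption for illustration): in `a[3][0:n]` the trailing
// section `[0:n]` is a final array section because its length cannot be
// proved to be one; `a[3][0:1]` would not be treated as one.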
  7522. bool IsFinalArraySection =
  7523. !IsNonContiguous &&
  7524. isFinalArraySectionExpression(I->getAssociatedExpression());
// If we have a declaration for the mapping, use that; otherwise use
// the base declaration of the map clause.
  7527. const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
  7528. ? I->getAssociatedDeclaration()
  7529. : BaseDecl;
  7530. MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression()
  7531. : MapExpr;
// Get information on whether the element is a pointer. Array sections
// need special treatment given that they are built-in types.
  7535. const auto *OASE =
  7536. dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
  7537. const auto *OAShE =
  7538. dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
  7539. const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
  7540. const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
  7541. bool IsPointer =
  7542. OAShE ||
  7543. (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
  7544. .getCanonicalType()
  7545. ->isAnyPointerType()) ||
  7546. I->getAssociatedExpression()->getType()->isAnyPointerType();
  7547. bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
  7548. MapDecl &&
  7549. MapDecl->getType()->isLValueReferenceType();
  7550. bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
  7551. if (OASE)
  7552. ++DimSize;
  7553. if (Next == CE || IsMemberReference || IsNonDerefPointer ||
  7554. IsFinalArraySection) {
  7555. // If this is not the last component, we expect the pointer to be
  7556. // associated with an array expression or member expression.
  7557. assert((Next == CE ||
  7558. isa<MemberExpr>(Next->getAssociatedExpression()) ||
  7559. isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
  7560. isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
  7561. isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||
  7562. isa<UnaryOperator>(Next->getAssociatedExpression()) ||
  7563. isa<BinaryOperator>(Next->getAssociatedExpression())) &&
  7564. "Unexpected expression");
  7565. Address LB = Address::invalid();
  7566. Address LowestElem = Address::invalid();
  7567. auto &&EmitMemberExprBase = [](CodeGenFunction &CGF,
  7568. const MemberExpr *E) {
  7569. const Expr *BaseExpr = E->getBase();
  7570. // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a
  7571. // scalar.
  7572. LValue BaseLV;
  7573. if (E->isArrow()) {
  7574. LValueBaseInfo BaseInfo;
  7575. TBAAAccessInfo TBAAInfo;
  7576. Address Addr =
  7577. CGF.EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
  7578. QualType PtrTy = BaseExpr->getType()->getPointeeType();
  7579. BaseLV = CGF.MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  7580. } else {
  7581. BaseLV = CGF.EmitOMPSharedLValue(BaseExpr);
  7582. }
  7583. return BaseLV;
  7584. };
  7585. if (OAShE) {
  7586. LowestElem = LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
  7587. CGF.getContext().getTypeAlignInChars(
  7588. OAShE->getBase()->getType()));
  7589. } else if (IsMemberReference) {
  7590. const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
  7591. LValue BaseLVal = EmitMemberExprBase(CGF, ME);
  7592. LowestElem = CGF.EmitLValueForFieldInitialization(
  7593. BaseLVal, cast<FieldDecl>(MapDecl))
  7594. .getAddress(CGF);
  7595. LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
  7596. .getAddress(CGF);
  7597. } else {
  7598. LowestElem = LB =
  7599. CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
  7600. .getAddress(CGF);
  7601. }
  7602. // If this component is a pointer inside the base struct then we don't
  7603. // need to create any entry for it - it will be combined with the object
  7604. // it is pointing to into a single PTR_AND_OBJ entry.
  7605. bool IsMemberPointerOrAddr =
  7606. EncounteredME &&
  7607. (((IsPointer || ForDeviceAddr) &&
  7608. I->getAssociatedExpression() == EncounteredME) ||
  7609. (IsPrevMemberReference && !IsPointer) ||
  7610. (IsMemberReference && Next != CE &&
  7611. !Next->getAssociatedExpression()->getType()->isPointerType()));
  7612. if (!OverlappedElements.empty() && Next == CE) {
  7613. // Handle base element with the info for overlapped elements.
  7614. assert(!PartialStruct.Base.isValid() && "The base element is set.");
  7615. assert(!IsPointer &&
  7616. "Unexpected base element with the pointer type.");
  7617. // Mark the whole struct as the struct that requires allocation on the
  7618. // device.
  7619. PartialStruct.LowestElem = {0, LowestElem};
  7620. CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
  7621. I->getAssociatedExpression()->getType());
  7622. Address HB = CGF.Builder.CreateConstGEP(
  7623. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LowestElem,
  7624. CGF.VoidPtrTy),
  7625. TypeSize.getQuantity() - 1);
  7626. PartialStruct.HighestElem = {
  7627. std::numeric_limits<decltype(
  7628. PartialStruct.HighestElem.first)>::max(),
  7629. HB};
  7630. PartialStruct.Base = BP;
  7631. PartialStruct.LB = LB;
  7632. assert(
  7633. PartialStruct.PreliminaryMapData.BasePointers.empty() &&
  7634. "Overlapped elements must be used only once for the variable.");
  7635. std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
// Emit entries for the non-overlapped data.
  7637. OpenMPOffloadMappingFlags Flags =
  7638. OMP_MAP_MEMBER_OF |
  7639. getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
  7640. /*AddPtrFlag=*/false,
  7641. /*AddIsTargetParamFlag=*/false, IsNonContiguous);
  7642. llvm::Value *Size = nullptr;
// Do a bitcopy of all non-overlapped structure elements.
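// Added illustrative example (hypothetical types): given
//   struct S { double a; double *p; double b; } s;
//   #pragma omp target map(tofrom: s) map(tofrom: s.p[:10])
// the member s.p overlaps with its own PTR_AND_OBJ attachment, so the
// struct is copied as separate pieces, one ending right before s.p and one
// starting right after it, leaving the device copy of s.p untouched.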
  7644. for (OMPClauseMappableExprCommon::MappableExprComponentListRef
  7645. Component : OverlappedElements) {
  7646. Address ComponentLB = Address::invalid();
  7647. for (const OMPClauseMappableExprCommon::MappableComponent &MC :
  7648. Component) {
  7649. if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
  7650. const auto *FD = dyn_cast<FieldDecl>(VD);
  7651. if (FD && FD->getType()->isLValueReferenceType()) {
  7652. const auto *ME =
  7653. cast<MemberExpr>(MC.getAssociatedExpression());
  7654. LValue BaseLVal = EmitMemberExprBase(CGF, ME);
  7655. ComponentLB =
  7656. CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
  7657. .getAddress(CGF);
  7658. } else {
  7659. ComponentLB =
  7660. CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
  7661. .getAddress(CGF);
  7662. }
  7663. Size = CGF.Builder.CreatePtrDiff(
  7664. CGF.Int8Ty, CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
  7665. CGF.EmitCastToVoidPtr(LB.getPointer()));
  7666. break;
  7667. }
  7668. }
  7669. assert(Size && "Failed to determine structure size");
  7670. CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
  7671. CombinedInfo.BasePointers.push_back(BP.getPointer());
  7672. CombinedInfo.Pointers.push_back(LB.getPointer());
  7673. CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
  7674. Size, CGF.Int64Ty, /*isSigned=*/true));
  7675. CombinedInfo.Types.push_back(Flags);
  7676. CombinedInfo.Mappers.push_back(nullptr);
  7677. CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
  7678. : 1);
  7679. LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
  7680. }
  7681. CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
  7682. CombinedInfo.BasePointers.push_back(BP.getPointer());
  7683. CombinedInfo.Pointers.push_back(LB.getPointer());
  7684. Size = CGF.Builder.CreatePtrDiff(
  7685. CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
  7686. CGF.EmitCastToVoidPtr(LB.getPointer()));
  7687. CombinedInfo.Sizes.push_back(
  7688. CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
  7689. CombinedInfo.Types.push_back(Flags);
  7690. CombinedInfo.Mappers.push_back(nullptr);
  7691. CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
  7692. : 1);
  7693. break;
  7694. }
  7695. llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
  7696. if (!IsMemberPointerOrAddr ||
  7697. (Next == CE && MapType != OMPC_MAP_unknown)) {
  7698. CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
  7699. CombinedInfo.BasePointers.push_back(BP.getPointer());
  7700. CombinedInfo.Pointers.push_back(LB.getPointer());
  7701. CombinedInfo.Sizes.push_back(
  7702. CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
  7703. CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
  7704. : 1);
  7705. // If Mapper is valid, the last component inherits the mapper.
  7706. bool HasMapper = Mapper && Next == CE;
  7707. CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
  7708. // We need to add a pointer flag for each map that comes from the
  7709. // same expression except for the first one. We also need to signal
  7710. // this map is the first one that relates with the current capture
  7711. // (there is a set of entries for each capture).
  7712. OpenMPOffloadMappingFlags Flags = getMapTypeBits(
  7713. MapType, MapModifiers, MotionModifiers, IsImplicit,
  7714. !IsExpressionFirstInfo || RequiresReference ||
  7715. FirstPointerInComplexData || IsMemberReference,
  7716. IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
  7717. if (!IsExpressionFirstInfo || IsMemberReference) {
  7718. // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
  7719. // then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags.
  7720. if (IsPointer || (IsMemberReference && Next != CE))
  7721. Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
  7722. OMP_MAP_DELETE | OMP_MAP_CLOSE);
  7723. if (ShouldBeMemberOf) {
  7724. // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
  7725. // should be later updated with the correct value of MEMBER_OF.
  7726. Flags |= OMP_MAP_MEMBER_OF;
  7727. // From now on, all subsequent PTR_AND_OBJ entries should not be
  7728. // marked as MEMBER_OF.
  7729. ShouldBeMemberOf = false;
  7730. }
  7731. }
  7732. CombinedInfo.Types.push_back(Flags);
  7733. }
  7734. // If we have encountered a member expression so far, keep track of the
  7735. // mapped member. If the parent is "*this", then the value declaration
  7736. // is nullptr.
  7737. if (EncounteredME) {
  7738. const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
  7739. unsigned FieldIndex = FD->getFieldIndex();
  7740. // Update info about the lowest and highest elements for this struct
  7741. if (!PartialStruct.Base.isValid()) {
  7742. PartialStruct.LowestElem = {FieldIndex, LowestElem};
  7743. if (IsFinalArraySection) {
  7744. Address HB =
  7745. CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
  7746. .getAddress(CGF);
  7747. PartialStruct.HighestElem = {FieldIndex, HB};
  7748. } else {
  7749. PartialStruct.HighestElem = {FieldIndex, LowestElem};
  7750. }
  7751. PartialStruct.Base = BP;
  7752. PartialStruct.LB = BP;
  7753. } else if (FieldIndex < PartialStruct.LowestElem.first) {
  7754. PartialStruct.LowestElem = {FieldIndex, LowestElem};
  7755. } else if (FieldIndex > PartialStruct.HighestElem.first) {
  7756. PartialStruct.HighestElem = {FieldIndex, LowestElem};
  7757. }
  7758. }
  7759. // Need to emit combined struct for array sections.
  7760. if (IsFinalArraySection || IsNonContiguous)
  7761. PartialStruct.IsArraySection = true;
  7762. // If we have a final array section, we are done with this expression.
  7763. if (IsFinalArraySection)
  7764. break;
  7765. // The pointer becomes the base for the next element.
  7766. if (Next != CE)
  7767. BP = IsMemberReference ? LowestElem : LB;
  7768. IsExpressionFirstInfo = false;
  7769. IsCaptureFirstInfo = false;
  7770. FirstPointerInComplexData = false;
  7771. IsPrevMemberReference = IsMemberReference;
  7772. } else if (FirstPointerInComplexData) {
  7773. QualType Ty = Components.rbegin()
  7774. ->getAssociatedDeclaration()
  7775. ->getType()
  7776. .getNonReferenceType();
  7777. BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
  7778. FirstPointerInComplexData = false;
  7779. }
  7780. }
// If we went through the whole component list without encountering a
// member expression, allocate space for the whole record.
  7783. if (!EncounteredME)
  7784. PartialStruct.HasCompleteRecord = true;
  7785. if (!IsNonContiguous)
  7786. return;
  7787. const ASTContext &Context = CGF.getContext();
// To support strides in array sections, we need to initialize the first
// dimension size as 1, the first offset as 0, and the first count as 1.
  7790. MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 0)};
  7791. MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
  7792. MapValuesArrayTy CurStrides;
  7793. MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.CGM.Int64Ty, 1)};
  7794. uint64_t ElementTypeSize;
// Collect size information for each dimension and get the element size as
// the first stride. For example, for `int arr[10][10]`, the DimSizes
// should be [10, 10] and the first stride is 4 bytes.
  7798. for (const OMPClauseMappableExprCommon::MappableComponent &Component :
  7799. Components) {
  7800. const Expr *AssocExpr = Component.getAssociatedExpression();
  7801. const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
  7802. if (!OASE)
  7803. continue;
  7804. QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
  7805. auto *CAT = Context.getAsConstantArrayType(Ty);
  7806. auto *VAT = Context.getAsVariableArrayType(Ty);
// We need all the dimension sizes except for the last dimension.
  7808. assert((VAT || CAT || &Component == &*Components.begin()) &&
  7809. "Should be either ConstantArray or VariableArray if not the "
  7810. "first Component");
  7811. // Get element size if CurStrides is empty.
  7812. if (CurStrides.empty()) {
  7813. const Type *ElementType = nullptr;
  7814. if (CAT)
  7815. ElementType = CAT->getElementType().getTypePtr();
  7816. else if (VAT)
  7817. ElementType = VAT->getElementType().getTypePtr();
  7818. else
  7819. assert(&Component == &*Components.begin() &&
  7820. "Only expect pointer (non CAT or VAT) when this is the "
  7821. "first Component");
// If ElementType is null, then it means the base is a pointer
// (neither CAT nor VAT) and we'll attempt to get ElementType again
// in the next iteration.
  7825. if (ElementType) {
// When the base is a pointer, we need to remove one level of
// indirection.
  7828. if (&Component != &*Components.begin())
  7829. ElementType = ElementType->getPointeeOrArrayElementType();
  7830. ElementTypeSize =
  7831. Context.getTypeSizeInChars(ElementType).getQuantity();
  7832. CurStrides.push_back(
  7833. llvm::ConstantInt::get(CGF.Int64Ty, ElementTypeSize));
  7834. }
  7835. }
// Get the dimension value, except for the last dimension, since we don't
// need it.
  7838. if (DimSizes.size() < Components.size() - 1) {
  7839. if (CAT)
  7840. DimSizes.push_back(llvm::ConstantInt::get(
  7841. CGF.Int64Ty, CAT->getSize().getZExtValue()));
  7842. else if (VAT)
  7843. DimSizes.push_back(CGF.Builder.CreateIntCast(
  7844. CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
  7845. /*IsSigned=*/false));
  7846. }
  7847. }
// Skip the dummy dimension since we already have its information.
  7849. auto DI = DimSizes.begin() + 1;
// Running product of the dimension sizes.
  7851. llvm::Value *DimProd =
  7852. llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
// Collect info for non-contiguous maps. Notice that offset, count, and
// stride are only meaningful for array sections, so we insert a null for
// anything other than an array section.
// Also, the sizes of offset, count, and stride are not the same as those of
// pointers, base_pointers, sizes, or dims. Instead, the sizes of offset,
// count, and stride equal the number of non-contiguous declarations in the
// target update to/from clause.
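// Added example (illustration only): for
//   #pragma omp target update to(a[0:2:2], b[1:n:3])
// with two non-contiguous list items, Offsets, Counts, and Strides each
// receive two entries (one per list item), each holding roughly one value
// per mapped dimension plus the dummy dimension.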
  7860. for (const OMPClauseMappableExprCommon::MappableComponent &Component :
  7861. Components) {
  7862. const Expr *AssocExpr = Component.getAssociatedExpression();
  7863. if (const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr)) {
  7864. llvm::Value *Offset = CGF.Builder.CreateIntCast(
  7865. CGF.EmitScalarExpr(AE->getIdx()), CGF.Int64Ty,
  7866. /*isSigned=*/false);
  7867. CurOffsets.push_back(Offset);
  7868. CurCounts.push_back(llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/1));
  7869. CurStrides.push_back(CurStrides.back());
  7870. continue;
  7871. }
  7872. const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
  7873. if (!OASE)
  7874. continue;
  7875. // Offset
  7876. const Expr *OffsetExpr = OASE->getLowerBound();
  7877. llvm::Value *Offset = nullptr;
  7878. if (!OffsetExpr) {
// If the offset is absent, then we just set it to zero.
  7880. Offset = llvm::ConstantInt::get(CGF.Int64Ty, 0);
  7881. } else {
  7882. Offset = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(OffsetExpr),
  7883. CGF.Int64Ty,
  7884. /*isSigned=*/false);
  7885. }
  7886. CurOffsets.push_back(Offset);
  7887. // Count
  7888. const Expr *CountExpr = OASE->getLength();
  7889. llvm::Value *Count = nullptr;
  7890. if (!CountExpr) {
// In Clang, once an outer dimension is an array section, all of the inner
// dimensions are constructed as array sections as well. However, for a
// case like arr[0:2][2], Clang constructs the inner dimension as an array
// section even though it is not one according to the spec.
  7895. if (!OASE->getColonLocFirst().isValid() &&
  7896. !OASE->getColonLocSecond().isValid()) {
  7897. Count = llvm::ConstantInt::get(CGF.Int64Ty, 1);
  7898. } else {
  7899. // OpenMP 5.0, 2.1.5 Array Sections, Description.
  7900. // When the length is absent it defaults to ⌈(size −
  7901. // lower-bound)/stride⌉, where size is the size of the array
  7902. // dimension.
  7903. const Expr *StrideExpr = OASE->getStride();
  7904. llvm::Value *Stride =
  7905. StrideExpr
  7906. ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
  7907. CGF.Int64Ty, /*isSigned=*/false)
  7908. : nullptr;
  7909. if (Stride)
  7910. Count = CGF.Builder.CreateUDiv(
  7911. CGF.Builder.CreateNUWSub(*DI, Offset), Stride);
  7912. else
  7913. Count = CGF.Builder.CreateNUWSub(*DI, Offset);
  7914. }
  7915. } else {
  7916. Count = CGF.EmitScalarExpr(CountExpr);
  7917. }
  7918. Count = CGF.Builder.CreateIntCast(Count, CGF.Int64Ty, /*isSigned=*/false);
  7919. CurCounts.push_back(Count);
// Stride_n' = Stride_n * (D_0 * D_1 ... * D_n-1) * Unit size
// Take `int arr[5][5][5]` and `arr[0:2:2][1:2:1][0:2:2]` as an example:
//        Offset  Count  Stride
//    D0    0       1      4    (int)  <- dummy dimension
//    D1    0       2      8    (2 * (1) * 4)
//    D2    1       2      20   (1 * (1 * 5) * 4)
//    D3    0       2      200  (2 * (1 * 5 * 5) * 4)
  7927. const Expr *StrideExpr = OASE->getStride();
  7928. llvm::Value *Stride =
  7929. StrideExpr
  7930. ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(StrideExpr),
  7931. CGF.Int64Ty, /*isSigned=*/false)
  7932. : nullptr;
  7933. DimProd = CGF.Builder.CreateNUWMul(DimProd, *(DI - 1));
  7934. if (Stride)
  7935. CurStrides.push_back(CGF.Builder.CreateNUWMul(DimProd, Stride));
  7936. else
  7937. CurStrides.push_back(DimProd);
  7938. if (DI != DimSizes.end())
  7939. ++DI;
  7940. }
  7941. CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets);
  7942. CombinedInfo.NonContigInfo.Counts.push_back(CurCounts);
  7943. CombinedInfo.NonContigInfo.Strides.push_back(CurStrides);
  7944. }
  7945. /// Return the adjusted map modifiers if the declaration a capture refers to
  7946. /// appears in a first-private clause. This is expected to be used only with
  7947. /// directives that start with 'target'.
  7948. MappableExprsHandler::OpenMPOffloadMappingFlags
  7949. getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
  7950. assert(Cap.capturesVariable() && "Expected capture by reference only!");
// A firstprivate variable captured by reference will use only the
// 'private ptr' and 'map to' flags. Return the right flags if the captured
// declaration is known as firstprivate in this handler.
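// Added example (illustration): a firstprivate capture of `int *p` yields
// OMP_MAP_TO | OMP_MAP_PTR_AND_OBJ, while a firstprivate capture of
// `int x` yields OMP_MAP_PRIVATE | OMP_MAP_TO, matching the code below.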
  7954. if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
  7955. if (Cap.getCapturedVar()->getType()->isAnyPointerType())
  7956. return MappableExprsHandler::OMP_MAP_TO |
  7957. MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
  7958. return MappableExprsHandler::OMP_MAP_PRIVATE |
  7959. MappableExprsHandler::OMP_MAP_TO;
  7960. }
  7961. auto I = LambdasMap.find(Cap.getCapturedVar()->getCanonicalDecl());
  7962. if (I != LambdasMap.end())
// For map(to: lambda): use the user-specified map type.
  7964. return getMapTypeBits(
  7965. I->getSecond()->getMapType(), I->getSecond()->getMapTypeModifiers(),
  7966. /*MotionModifiers=*/llvm::None, I->getSecond()->isImplicit(),
  7967. /*AddPtrFlag=*/false,
  7968. /*AddIsTargetParamFlag=*/false,
  7969. /*isNonContiguous=*/false);
  7970. return MappableExprsHandler::OMP_MAP_TO |
  7971. MappableExprsHandler::OMP_MAP_FROM;
  7972. }
  7973. static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
  7974. // Rotate by getFlagMemberOffset() bits.
  7975. return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
  7976. << getFlagMemberOffset());
  7977. }
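// Added note (illustration): getMemberOfFlag(0) encodes MEMBER_OF(1), i.e.
// the entry belongs to the first argument in the argument list; the encoded
// value is ((uint64_t)0 + 1) << getFlagMemberOffset().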
  7978. static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
  7979. OpenMPOffloadMappingFlags MemberOfFlag) {
  7980. // If the entry is PTR_AND_OBJ but has not been marked with the special
  7981. // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
  7982. // marked as MEMBER_OF.
  7983. if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
  7984. ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
  7985. return;
  7986. // Reset the placeholder value to prepare the flag for the assignment of the
  7987. // proper MEMBER_OF value.
  7988. Flags &= ~OMP_MAP_MEMBER_OF;
  7989. Flags |= MemberOfFlag;
  7990. }
  7991. void getPlainLayout(const CXXRecordDecl *RD,
  7992. llvm::SmallVectorImpl<const FieldDecl *> &Layout,
  7993. bool AsBase) const {
  7994. const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
  7995. llvm::StructType *St =
  7996. AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();
  7997. unsigned NumElements = St->getNumElements();
  7998. llvm::SmallVector<
  7999. llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
  8000. RecordLayout(NumElements);
  8001. // Fill bases.
  8002. for (const auto &I : RD->bases()) {
  8003. if (I.isVirtual())
  8004. continue;
  8005. const auto *Base = I.getType()->getAsCXXRecordDecl();
  8006. // Ignore empty bases.
  8007. if (Base->isEmpty() || CGF.getContext()
  8008. .getASTRecordLayout(Base)
  8009. .getNonVirtualSize()
  8010. .isZero())
  8011. continue;
  8012. unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
  8013. RecordLayout[FieldIndex] = Base;
  8014. }
  8015. // Fill in virtual bases.
  8016. for (const auto &I : RD->vbases()) {
  8017. const auto *Base = I.getType()->getAsCXXRecordDecl();
  8018. // Ignore empty bases.
  8019. if (Base->isEmpty())
  8020. continue;
  8021. unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
  8022. if (RecordLayout[FieldIndex])
  8023. continue;
  8024. RecordLayout[FieldIndex] = Base;
  8025. }
  8026. // Fill in all the fields.
  8027. assert(!RD->isUnion() && "Unexpected union.");
  8028. for (const auto *Field : RD->fields()) {
  8029. // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
  8030. // will fill in later.)
  8031. if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
  8032. unsigned FieldIndex = RL.getLLVMFieldNo(Field);
  8033. RecordLayout[FieldIndex] = Field;
  8034. }
  8035. }
  8036. for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
  8037. &Data : RecordLayout) {
  8038. if (Data.isNull())
  8039. continue;
  8040. if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
  8041. getPlainLayout(Base, Layout, /*AsBase=*/true);
  8042. else
  8043. Layout.push_back(Data.get<const FieldDecl *>());
  8044. }
  8045. }
  8046. /// Generate all the base pointers, section pointers, sizes, map types, and
  8047. /// mappers for the extracted mappable expressions (all included in \a
  8048. /// CombinedInfo). Also, for each item that relates with a device pointer, a
  8049. /// pair of the relevant declaration and index where it occurs is appended to
  8050. /// the device pointers info array.
  8051. void generateAllInfoForClauses(
  8052. ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
  8053. const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
  8054. llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
  8055. // We have to process the component lists that relate with the same
  8056. // declaration in a single chunk so that we can generate the map flags
  8057. // correctly. Therefore, we organize all lists in a map.
  8058. enum MapKind { Present, Allocs, Other, Total };
  8059. llvm::MapVector<CanonicalDeclPtr<const Decl>,
  8060. SmallVector<SmallVector<MapInfo, 8>, 4>>
  8061. Info;
  8062. // Helper function to fill the information map for the different supported
  8063. // clauses.
  8064. auto &&InfoGen =
  8065. [&Info, &SkipVarSet](
  8066. const ValueDecl *D, MapKind Kind,
  8067. OMPClauseMappableExprCommon::MappableExprComponentListRef L,
  8068. OpenMPMapClauseKind MapType,
  8069. ArrayRef<OpenMPMapModifierKind> MapModifiers,
  8070. ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
  8071. bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
  8072. const Expr *VarRef = nullptr, bool ForDeviceAddr = false) {
  8073. if (SkipVarSet.contains(D))
  8074. return;
  8075. auto It = Info.find(D);
  8076. if (It == Info.end())
  8077. It = Info
  8078. .insert(std::make_pair(
  8079. D, SmallVector<SmallVector<MapInfo, 8>, 4>(Total)))
  8080. .first;
  8081. It->second[Kind].emplace_back(
  8082. L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer,
  8083. IsImplicit, Mapper, VarRef, ForDeviceAddr);
  8084. };
  8085. for (const auto *Cl : Clauses) {
  8086. const auto *C = dyn_cast<OMPMapClause>(Cl);
  8087. if (!C)
  8088. continue;
  8089. MapKind Kind = Other;
  8090. if (llvm::is_contained(C->getMapTypeModifiers(),
  8091. OMPC_MAP_MODIFIER_present))
  8092. Kind = Present;
  8093. else if (C->getMapType() == OMPC_MAP_alloc)
  8094. Kind = Allocs;
  8095. const auto *EI = C->getVarRefs().begin();
  8096. for (const auto L : C->component_lists()) {
  8097. const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
  8098. InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(),
  8099. C->getMapTypeModifiers(), llvm::None,
  8100. /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
  8101. E);
  8102. ++EI;
  8103. }
  8104. }
  8105. for (const auto *Cl : Clauses) {
  8106. const auto *C = dyn_cast<OMPToClause>(Cl);
  8107. if (!C)
  8108. continue;
  8109. MapKind Kind = Other;
  8110. if (llvm::is_contained(C->getMotionModifiers(),
  8111. OMPC_MOTION_MODIFIER_present))
  8112. Kind = Present;
  8113. const auto *EI = C->getVarRefs().begin();
  8114. for (const auto L : C->component_lists()) {
  8115. InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, llvm::None,
  8116. C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
  8117. C->isImplicit(), std::get<2>(L), *EI);
  8118. ++EI;
  8119. }
  8120. }
  8121. for (const auto *Cl : Clauses) {
  8122. const auto *C = dyn_cast<OMPFromClause>(Cl);
  8123. if (!C)
  8124. continue;
  8125. MapKind Kind = Other;
  8126. if (llvm::is_contained(C->getMotionModifiers(),
  8127. OMPC_MOTION_MODIFIER_present))
  8128. Kind = Present;
  8129. const auto *EI = C->getVarRefs().begin();
  8130. for (const auto L : C->component_lists()) {
  8131. InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, llvm::None,
  8132. C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
  8133. C->isImplicit(), std::get<2>(L), *EI);
  8134. ++EI;
  8135. }
  8136. }
// Look at the use_device_ptr clause information and mark the existing map
// entries as such. If there is no map information for an entry in the
// use_device_ptr list, we create one with map type 'alloc' and zero size
// section. It is the user's fault if that was not mapped before. If there
// is no map information and the pointer is a struct member, then we defer
// the emission of that entry until the whole struct has been processed.
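// Added example (illustration only):
//   struct T { int *p; } t;
//   #pragma omp target data map(tofrom: t.p[0:10]) use_device_ptr(t.p)
// Because t.p is a struct member, the RETURN_PARAM entry for it is deferred
// (recorded in DeferredInfo) until the entries for the enclosing struct
// have been emitted.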
  8143. llvm::MapVector<CanonicalDeclPtr<const Decl>,
  8144. SmallVector<DeferredDevicePtrEntryTy, 4>>
  8145. DeferredInfo;
  8146. MapCombinedInfoTy UseDevicePtrCombinedInfo;
  8147. for (const auto *Cl : Clauses) {
  8148. const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
  8149. if (!C)
  8150. continue;
  8151. for (const auto L : C->component_lists()) {
  8152. OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
  8153. std::get<1>(L);
  8154. assert(!Components.empty() &&
  8155. "Not expecting empty list of components!");
  8156. const ValueDecl *VD = Components.back().getAssociatedDeclaration();
  8157. VD = cast<ValueDecl>(VD->getCanonicalDecl());
  8158. const Expr *IE = Components.back().getAssociatedExpression();
  8159. // If the first component is a member expression, we have to look into
  8160. // 'this', which maps to null in the map of map information. Otherwise
  8161. // look directly for the information.
  8162. auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
  8163. // We potentially have map information for this declaration already.
  8164. // Look for the first set of components that refer to it.
  8165. if (It != Info.end()) {
  8166. bool Found = false;
  8167. for (auto &Data : It->second) {
  8168. auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
  8169. return MI.Components.back().getAssociatedDeclaration() == VD;
  8170. });
// If we found a map entry, signal that the pointer has to be
// returned and move on to the next declaration. Exclude cases where
// the base pointer is mapped as an array subscript, array section, or
// array shaping expression. The base address is passed as a pointer to
// the base in this case and cannot be used as a base for a
// use_device_ptr list item.
  8177. if (CI != Data.end()) {
  8178. auto PrevCI = std::next(CI->Components.rbegin());
  8179. const auto *VarD = dyn_cast<VarDecl>(VD);
  8180. if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
  8181. isa<MemberExpr>(IE) ||
  8182. !VD->getType().getNonReferenceType()->isPointerType() ||
  8183. PrevCI == CI->Components.rend() ||
  8184. isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
  8185. VarD->hasLocalStorage()) {
  8186. CI->ReturnDevicePointer = true;
  8187. Found = true;
  8188. break;
  8189. }
  8190. }
  8191. }
  8192. if (Found)
  8193. continue;
  8194. }
// We didn't find any match in our map information; generate a zero-size
// array section. If the pointer is a struct member, we defer this action
// until the whole struct has been processed.
  8198. if (isa<MemberExpr>(IE)) {
  8199. // Insert the pointer into Info to be processed by
  8200. // generateInfoForComponentList. Because it is a member pointer
  8201. // without a pointee, no entry will be generated for it, therefore
  8202. // we need to generate one after the whole struct has been processed.
  8203. // Nonetheless, generateInfoForComponentList must be called to take
  8204. // the pointer into account for the calculation of the range of the
  8205. // partial struct.
  8206. InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, llvm::None,
  8207. llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
  8208. nullptr);
  8209. DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
  8210. } else {
  8211. llvm::Value *Ptr =
  8212. CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
  8213. UseDevicePtrCombinedInfo.Exprs.push_back(VD);
  8214. UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD);
  8215. UseDevicePtrCombinedInfo.Pointers.push_back(Ptr);
  8216. UseDevicePtrCombinedInfo.Sizes.push_back(
  8217. llvm::Constant::getNullValue(CGF.Int64Ty));
  8218. UseDevicePtrCombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
  8219. UseDevicePtrCombinedInfo.Mappers.push_back(nullptr);
  8220. }
  8221. }
  8222. }
// Look at the use_device_addr clause information and mark the existing map
// entries as such. If there is no map information for an entry in the
// use_device_addr list, we create one with map type 'alloc' and zero size
// section. It is the user's fault if that was not mapped before. If there
// is no map information and the pointer is a struct member, then we defer
// the emission of that entry until the whole struct has been processed.
  8229. llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
  8230. for (const auto *Cl : Clauses) {
  8231. const auto *C = dyn_cast<OMPUseDeviceAddrClause>(Cl);
  8232. if (!C)
  8233. continue;
  8234. for (const auto L : C->component_lists()) {
  8235. assert(!std::get<1>(L).empty() &&
  8236. "Not expecting empty list of components!");
  8237. const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
  8238. if (!Processed.insert(VD).second)
  8239. continue;
  8240. VD = cast<ValueDecl>(VD->getCanonicalDecl());
  8241. const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
  8242. // If the first component is a member expression, we have to look into
  8243. // 'this', which maps to null in the map of map information. Otherwise
  8244. // look directly for the information.
  8245. auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
  8246. // We potentially have map information for this declaration already.
  8247. // Look for the first set of components that refer to it.
  8248. if (It != Info.end()) {
  8249. bool Found = false;
  8250. for (auto &Data : It->second) {
  8251. auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
  8252. return MI.Components.back().getAssociatedDeclaration() == VD;
  8253. });
  8254. // If we found a map entry, signal that the pointer has to be
  8255. // returned and move on to the next declaration.
  8256. if (CI != Data.end()) {
  8257. CI->ReturnDevicePointer = true;
  8258. Found = true;
  8259. break;
  8260. }
  8261. }
  8262. if (Found)
  8263. continue;
  8264. }
// We didn't find any match in our map information; generate a zero-size
// array section. If the pointer is a struct member, we defer this action
// until the whole struct has been processed.
  8268. if (isa<MemberExpr>(IE)) {
  8269. // Insert the pointer into Info to be processed by
  8270. // generateInfoForComponentList. Because it is a member pointer
  8271. // without a pointee, no entry will be generated for it, therefore
  8272. // we need to generate one after the whole struct has been processed.
  8273. // Nonetheless, generateInfoForComponentList must be called to take
  8274. // the pointer into account for the calculation of the range of the
  8275. // partial struct.
  8276. InfoGen(nullptr, Other, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
  8277. llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
  8278. nullptr, nullptr, /*ForDeviceAddr=*/true);
  8279. DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
  8280. } else {
  8281. llvm::Value *Ptr;
  8282. if (IE->isGLValue())
  8283. Ptr = CGF.EmitLValue(IE).getPointer(CGF);
  8284. else
  8285. Ptr = CGF.EmitScalarExpr(IE);
  8286. CombinedInfo.Exprs.push_back(VD);
  8287. CombinedInfo.BasePointers.emplace_back(Ptr, VD);
  8288. CombinedInfo.Pointers.push_back(Ptr);
  8289. CombinedInfo.Sizes.push_back(
  8290. llvm::Constant::getNullValue(CGF.Int64Ty));
  8291. CombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
  8292. CombinedInfo.Mappers.push_back(nullptr);
  8293. }
  8294. }
  8295. }
  8296. for (const auto &Data : Info) {
  8297. StructRangeInfoTy PartialStruct;
  8298. // Temporary generated information.
  8299. MapCombinedInfoTy CurInfo;
  8300. const Decl *D = Data.first;
  8301. const ValueDecl *VD = cast_or_null<ValueDecl>(D);
  8302. for (const auto &M : Data.second) {
  8303. for (const MapInfo &L : M) {
  8304. assert(!L.Components.empty() &&
  8305. "Not expecting declaration with no component lists.");
  8306. // Remember the current base pointer index.
  8307. unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
  8308. CurInfo.NonContigInfo.IsNonContiguous =
  8309. L.Components.back().isNonContiguous();
  8310. generateInfoForComponentList(
  8311. L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
  8312. CurInfo, PartialStruct, /*IsFirstComponentList=*/false,
  8313. L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
  8314. // If this entry relates with a device pointer, set the relevant
  8315. // declaration and add the 'return pointer' flag.
  8316. if (L.ReturnDevicePointer) {
  8317. assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
  8318. "Unexpected number of mapped base pointers.");
  8319. const ValueDecl *RelevantVD =
  8320. L.Components.back().getAssociatedDeclaration();
  8321. assert(RelevantVD &&
  8322. "No relevant declaration related with device pointer??");
  8323. CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
  8324. RelevantVD);
  8325. CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
  8326. }
  8327. }
  8328. }
  8329. // Append any pending zero-length pointers which are struct members and
  8330. // used with use_device_ptr or use_device_addr.
  8331. auto CI = DeferredInfo.find(Data.first);
  8332. if (CI != DeferredInfo.end()) {
  8333. for (const DeferredDevicePtrEntryTy &L : CI->second) {
  8334. llvm::Value *BasePtr;
  8335. llvm::Value *Ptr;
  8336. if (L.ForDeviceAddr) {
  8337. if (L.IE->isGLValue())
  8338. Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
  8339. else
  8340. Ptr = this->CGF.EmitScalarExpr(L.IE);
  8341. BasePtr = Ptr;
  8342. // Entry is RETURN_PARAM. Also, set the placeholder value
  8343. // MEMBER_OF=FFFF so that the entry is later updated with the
  8344. // correct value of MEMBER_OF.
  8345. CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
  8346. } else {
  8347. BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
  8348. Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
  8349. L.IE->getExprLoc());
  8350. // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the
  8351. // placeholder value MEMBER_OF=FFFF so that the entry is later
  8352. // updated with the correct value of MEMBER_OF.
  8353. CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
  8354. OMP_MAP_MEMBER_OF);
  8355. }
  8356. CurInfo.Exprs.push_back(L.VD);
  8357. CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
  8358. CurInfo.Pointers.push_back(Ptr);
  8359. CurInfo.Sizes.push_back(
  8360. llvm::Constant::getNullValue(this->CGF.Int64Ty));
  8361. CurInfo.Mappers.push_back(nullptr);
  8362. }
  8363. }
  8364. // If there is an entry in PartialStruct it means we have a struct with
  8365. // individual members mapped. Emit an extra combined entry.
  8366. if (PartialStruct.Base.isValid()) {
  8367. CurInfo.NonContigInfo.Dims.push_back(0);
  8368. emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
  8369. }
  8370. // We need to append the results of this capture to what we already
  8371. // have.
  8372. CombinedInfo.append(CurInfo);
  8373. }
  8374. // Append data for use_device_ptr clauses.
  8375. CombinedInfo.append(UseDevicePtrCombinedInfo);
  8376. }
  8377. public:
  8378. MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
  8379. : CurDir(&Dir), CGF(CGF) {
  8380. // Extract firstprivate clause information.
  8381. for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
  8382. for (const auto *D : C->varlists())
  8383. FirstPrivateDecls.try_emplace(
  8384. cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
  8385. // Extract implicit firstprivates from uses_allocators clauses.
  8386. for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
  8387. for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
  8388. OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
  8389. if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
  8390. FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
  8391. /*Implicit=*/true);
  8392. else if (const auto *VD = dyn_cast<VarDecl>(
  8393. cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
  8394. ->getDecl()))
  8395. FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
  8396. }
  8397. }
  8398. // Extract device pointer clause information.
  8399. for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
  8400. for (auto L : C->component_lists())
  8401. DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
  8402. // Extract map information.
  8403. for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
  8404. if (C->getMapType() != OMPC_MAP_to)
  8405. continue;
  8406. for (auto L : C->component_lists()) {
  8407. const ValueDecl *VD = std::get<0>(L);
  8408. const auto *RD = VD ? VD->getType()
  8409. .getCanonicalType()
  8410. .getNonReferenceType()
  8411. ->getAsCXXRecordDecl()
  8412. : nullptr;
  8413. if (RD && RD->isLambda())
  8414. LambdasMap.try_emplace(std::get<0>(L), C);
  8415. }
  8416. }
  8417. }
  8418. /// Constructor for the declare mapper directive.
  8419. MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
  8420. : CurDir(&Dir), CGF(CGF) {}
  8421. /// Generate code for the combined entry if we have a partially mapped struct
  8422. /// and take care of the mapping flags of the arguments corresponding to
  8423. /// individual struct members.
  8424. void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
  8425. MapFlagsArrayTy &CurTypes,
  8426. const StructRangeInfoTy &PartialStruct,
  8427. const ValueDecl *VD = nullptr,
  8428. bool NotTargetParams = true) const {
  8429. if (CurTypes.size() == 1 &&
  8430. ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
  8431. !PartialStruct.IsArraySection)
  8432. return;
  8433. Address LBAddr = PartialStruct.LowestElem.second;
  8434. Address HBAddr = PartialStruct.HighestElem.second;
  8435. if (PartialStruct.HasCompleteRecord) {
  8436. LBAddr = PartialStruct.LB;
  8437. HBAddr = PartialStruct.LB;
  8438. }
  8439. CombinedInfo.Exprs.push_back(VD);
  8440. // Base is the base of the struct
  8441. CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
  8442. // Pointer is the address of the lowest element
  8443. llvm::Value *LB = LBAddr.getPointer();
  8444. CombinedInfo.Pointers.push_back(LB);
  8445. // There should not be a mapper for a combined entry.
  8446. CombinedInfo.Mappers.push_back(nullptr);
  8447. // Size is (addr of {highest+1} element) - (addr of lowest element)
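// Added worked example (illustration): if the lowest mapped member starts
// at offset 8 of the struct and the highest mapped member occupies bytes
// [24, 32), the combined entry's size is 32 - 8 = 24 bytes.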
  8448. llvm::Value *HB = HBAddr.getPointer();
  8449. llvm::Value *HAddr =
  8450. CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1);
  8451. llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
  8452. llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
  8453. llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, CHAddr, CLAddr);
  8454. llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
  8455. /*isSigned=*/false);
  8456. CombinedInfo.Sizes.push_back(Size);
// The map type is always TARGET_PARAM when generating info for captures.
  8458. CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
  8459. : OMP_MAP_TARGET_PARAM);
  8460. // If any element has the present modifier, then make sure the runtime
  8461. // doesn't attempt to allocate the struct.
  8462. if (CurTypes.end() !=
  8463. llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
  8464. return Type & OMP_MAP_PRESENT;
  8465. }))
  8466. CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
  8467. // Remove TARGET_PARAM flag from the first element
  8468. (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
  8469. // If any element has the ompx_hold modifier, then make sure the runtime
  8470. // uses the hold reference count for the struct as a whole so that it won't
  8471. // be unmapped by an extra dynamic reference count decrement. Add it to all
  8472. // elements as well so the runtime knows which reference count to check
  8473. // when determining whether it's time for device-to-host transfers of
  8474. // individual elements.
  8475. if (CurTypes.end() !=
  8476. llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
  8477. return Type & OMP_MAP_OMPX_HOLD;
  8478. })) {
  8479. CombinedInfo.Types.back() |= OMP_MAP_OMPX_HOLD;
  8480. for (auto &M : CurTypes)
  8481. M |= OMP_MAP_OMPX_HOLD;
  8482. }
  8483. // All other current entries will be MEMBER_OF the combined entry
  8484. // (except for PTR_AND_OBJ entries which do not have a placeholder value
  8485. // 0xFFFF in the MEMBER_OF field).
  8486. OpenMPOffloadMappingFlags MemberOfFlag =
  8487. getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
  8488. for (auto &M : CurTypes)
  8489. setCorrectMemberOfFlag(M, MemberOfFlag);
  8490. }
  8491. /// Generate all the base pointers, section pointers, sizes, map types, and
  8492. /// mappers for the extracted mappable expressions (all included in \a
  8493. /// CombinedInfo). Also, for each item that relates with a device pointer, a
  8494. /// pair of the relevant declaration and index where it occurs is appended to
  8495. /// the device pointers info array.
  8496. void generateAllInfo(
  8497. MapCombinedInfoTy &CombinedInfo,
  8498. const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
  8499. llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
  8500. assert(CurDir.is<const OMPExecutableDirective *>() &&
  8501. "Expect a executable directive");
  8502. const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
  8503. generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet);
  8504. }
  8505. /// Generate all the base pointers, section pointers, sizes, map types, and
  8506. /// mappers for the extracted map clauses of user-defined mapper (all included
  8507. /// in \a CombinedInfo).
  8508. void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
  8509. assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
  8510. "Expect a declare mapper directive");
  8511. const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
  8512. generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo);
  8513. }
  8514. /// Emit capture info for lambdas for variables captured by reference.
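/// Added example (illustration only):
///   int x = 0; auto l = [&x]() { return x; };
///   #pragma omp target map(to: l)
/// The by-reference capture of x gets an implicit PTR_AND_OBJ | LITERAL |
/// MEMBER_OF entry so that the device copy of the lambda's capture field
/// points at the device copy of x.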
  8515. void generateInfoForLambdaCaptures(
  8516. const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
  8517. llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
  8518. const auto *RD = VD->getType()
  8519. .getCanonicalType()
  8520. .getNonReferenceType()
  8521. ->getAsCXXRecordDecl();
  8522. if (!RD || !RD->isLambda())
  8523. return;
  8524. Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
  8525. LValue VDLVal = CGF.MakeAddrLValue(
  8526. VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
  8527. llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
  8528. FieldDecl *ThisCapture = nullptr;
  8529. RD->getCaptureFields(Captures, ThisCapture);
  8530. if (ThisCapture) {
  8531. LValue ThisLVal =
  8532. CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
  8533. LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
  8534. LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
  8535. VDLVal.getPointer(CGF));
  8536. CombinedInfo.Exprs.push_back(VD);
  8537. CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
  8538. CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
  8539. CombinedInfo.Sizes.push_back(
  8540. CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
  8541. CGF.Int64Ty, /*isSigned=*/true));
  8542. CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
  8543. OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
  8544. CombinedInfo.Mappers.push_back(nullptr);
  8545. }
  8546. for (const LambdaCapture &LC : RD->captures()) {
  8547. if (!LC.capturesVariable())
  8548. continue;
  8549. const VarDecl *VD = LC.getCapturedVar();
  8550. if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
  8551. continue;
  8552. auto It = Captures.find(VD);
  8553. assert(It != Captures.end() && "Found lambda capture without field.");
  8554. LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
  8555. if (LC.getCaptureKind() == LCK_ByRef) {
  8556. LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
  8557. LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
  8558. VDLVal.getPointer(CGF));
  8559. CombinedInfo.Exprs.push_back(VD);
  8560. CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
  8561. CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
  8562. CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
  8563. CGF.getTypeSize(
  8564. VD->getType().getCanonicalType().getNonReferenceType()),
  8565. CGF.Int64Ty, /*isSigned=*/true));
  8566. } else {
  8567. RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
  8568. LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
  8569. VDLVal.getPointer(CGF));
  8570. CombinedInfo.Exprs.push_back(VD);
  8571. CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
  8572. CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
  8573. CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
  8574. }
  8575. CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
  8576. OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
  8577. CombinedInfo.Mappers.push_back(nullptr);
  8578. }
  8579. }
/// Set correct indices for lambda captures.
  8581. void adjustMemberOfForLambdaCaptures(
  8582. const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
  8583. MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
  8584. MapFlagsArrayTy &Types) const {
  8585. for (unsigned I = 0, E = Types.size(); I < E; ++I) {
  8586. // Set correct member_of idx for all implicit lambda captures.
  8587. if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
  8588. OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
  8589. continue;
  8590. llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
  8591. assert(BasePtr && "Unable to find base lambda address.");
  8592. int TgtIdx = -1;
  8593. for (unsigned J = I; J > 0; --J) {
  8594. unsigned Idx = J - 1;
  8595. if (Pointers[Idx] != BasePtr)
  8596. continue;
  8597. TgtIdx = Idx;
  8598. break;
  8599. }
  8600. assert(TgtIdx != -1 && "Unable to find parent lambda.");
  8601. // All other current entries will be MEMBER_OF the combined entry
  8602. // (except for PTR_AND_OBJ entries which do not have a placeholder value
  8603. // 0xFFFF in the MEMBER_OF field).
  8604. OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
  8605. setCorrectMemberOfFlag(Types[I], MemberOfFlag);
  8606. }
  8607. }
  8608. /// Generate the base pointers, section pointers, sizes, map types, and
  8609. /// mappers associated to a given capture (all included in \a CombinedInfo).
  8610. void generateInfoForCapture(const CapturedStmt::Capture *Cap,
  8611. llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
  8612. StructRangeInfoTy &PartialStruct) const {
  8613. assert(!Cap->capturesVariableArrayType() &&
  8614. "Not expecting to generate map info for a variable array type!");
// We need to know when we are generating information for the first component.
  8616. const ValueDecl *VD = Cap->capturesThis()
  8617. ? nullptr
  8618. : Cap->getCapturedVar()->getCanonicalDecl();
// For map(to: lambda): skip here; it is processed in generateDefaultMapInfo.
  8621. if (LambdasMap.count(VD))
  8622. return;
// If this declaration appears in an is_device_ptr clause, we just have to
// pass the pointer by value. If it is a reference to a declaration, we just
// pass its value.
  8626. if (DevPointersMap.count(VD)) {
  8627. CombinedInfo.Exprs.push_back(VD);
  8628. CombinedInfo.BasePointers.emplace_back(Arg, VD);
  8629. CombinedInfo.Pointers.push_back(Arg);
  8630. CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
  8631. CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
  8632. /*isSigned=*/true));
  8633. CombinedInfo.Types.push_back(
  8634. (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
  8635. OMP_MAP_TARGET_PARAM);
  8636. CombinedInfo.Mappers.push_back(nullptr);
  8637. return;
  8638. }
  8639. using MapData =
  8640. std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
  8641. OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
  8642. const ValueDecl *, const Expr *>;
  8643. SmallVector<MapData, 4> DeclComponentLists;
assert(CurDir.is<const OMPExecutableDirective *>() &&
       "Expect an executable directive");
  8646. const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
  8647. for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
  8648. const auto *EI = C->getVarRefs().begin();
  8649. for (const auto L : C->decl_component_lists(VD)) {
  8650. const ValueDecl *VDecl, *Mapper;
  8651. // The Expression is not correct if the mapping is implicit
  8652. const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
  8653. OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
  8654. std::tie(VDecl, Components, Mapper) = L;
  8655. assert(VDecl == VD && "We got information for the wrong declaration??");
  8656. assert(!Components.empty() &&
  8657. "Not expecting declaration with no component lists.");
  8658. DeclComponentLists.emplace_back(Components, C->getMapType(),
  8659. C->getMapTypeModifiers(),
  8660. C->isImplicit(), Mapper, E);
  8661. ++EI;
  8662. }
  8663. }
  8664. llvm::stable_sort(DeclComponentLists, [](const MapData &LHS,
  8665. const MapData &RHS) {
  8666. ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
  8667. OpenMPMapClauseKind MapType = std::get<1>(RHS);
  8668. bool HasPresent =
  8669. llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
  8670. bool HasAllocs = MapType == OMPC_MAP_alloc;
  8671. MapModifiers = std::get<2>(RHS);
  8672. MapType = std::get<1>(LHS);
  8673. bool HasPresentR =
  8674. llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
  8675. bool HasAllocsR = MapType == OMPC_MAP_alloc;
  8676. return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
  8677. });
  8678. // Find overlapping elements (including the offset from the base element).
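// For illustration only (assumed example): for
//   #pragma omp target map(tofrom: s) map(to: s.a)
// the component list for 's.a' shares its base with the list for 's', so it
// is recorded in OverlappedData as an overlapped element of the 's' entry.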
  8679. llvm::SmallDenseMap<
  8680. const MapData *,
  8681. llvm::SmallVector<
  8682. OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
  8683. 4>
  8684. OverlappedData;
  8685. size_t Count = 0;
  8686. for (const MapData &L : DeclComponentLists) {
  8687. OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
  8688. OpenMPMapClauseKind MapType;
  8689. ArrayRef<OpenMPMapModifierKind> MapModifiers;
  8690. bool IsImplicit;
  8691. const ValueDecl *Mapper;
  8692. const Expr *VarRef;
  8693. std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
  8694. L;
  8695. ++Count;
  8696. for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
  8697. OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
  8698. std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
  8699. VarRef) = L1;
  8700. auto CI = Components.rbegin();
  8701. auto CE = Components.rend();
  8702. auto SI = Components1.rbegin();
  8703. auto SE = Components1.rend();
  8704. for (; CI != CE && SI != SE; ++CI, ++SI) {
  8705. if (CI->getAssociatedExpression()->getStmtClass() !=
  8706. SI->getAssociatedExpression()->getStmtClass())
  8707. break;
  8708. // Are we dealing with different variables/fields?
  8709. if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
  8710. break;
  8711. }
// We found an overlap if, for at least one of the two component lists, we
// reached the head of the list.
  8714. if (CI == CE || SI == SE) {
  8715. // Ignore it if it is the same component.
  8716. if (CI == CE && SI == SE)
  8717. continue;
  8718. const auto It = (SI == SE) ? CI : SI;
  8719. // If one component is a pointer and another one is a kind of
  8720. // dereference of this pointer (array subscript, section, dereference,
  8721. // etc.), it is not an overlapping.
  8722. // Same, if one component is a base and another component is a
  8723. // dereferenced pointer memberexpr with the same base.
  8724. if (!isa<MemberExpr>(It->getAssociatedExpression()) ||
  8725. (std::prev(It)->getAssociatedDeclaration() &&
  8726. std::prev(It)
  8727. ->getAssociatedDeclaration()
  8728. ->getType()
  8729. ->isPointerType()) ||
  8730. (It->getAssociatedDeclaration() &&
  8731. It->getAssociatedDeclaration()->getType()->isPointerType() &&
  8732. std::next(It) != CE && std::next(It) != SE))
  8733. continue;
  8734. const MapData &BaseData = CI == CE ? L : L1;
  8735. OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
  8736. SI == SE ? Components : Components1;
  8737. auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
  8738. OverlappedElements.getSecond().push_back(SubData);
  8739. }
  8740. }
  8741. }
  8742. // Sort the overlapped elements for each item.
  8743. llvm::SmallVector<const FieldDecl *, 4> Layout;
  8744. if (!OverlappedData.empty()) {
  8745. const Type *BaseType = VD->getType().getCanonicalType().getTypePtr();
  8746. const Type *OrigType = BaseType->getPointeeOrArrayElementType();
  8747. while (BaseType != OrigType) {
  8748. BaseType = OrigType->getCanonicalTypeInternal().getTypePtr();
  8749. OrigType = BaseType->getPointeeOrArrayElementType();
  8750. }
  8751. if (const auto *CRD = BaseType->getAsCXXRecordDecl())
  8752. getPlainLayout(CRD, Layout, /*AsBase=*/false);
  8753. else {
  8754. const auto *RD = BaseType->getAsRecordDecl();
  8755. Layout.append(RD->field_begin(), RD->field_end());
  8756. }
  8757. }
  8758. for (auto &Pair : OverlappedData) {
  8759. llvm::stable_sort(
  8760. Pair.getSecond(),
  8761. [&Layout](
  8762. OMPClauseMappableExprCommon::MappableExprComponentListRef First,
  8763. OMPClauseMappableExprCommon::MappableExprComponentListRef
  8764. Second) {
  8765. auto CI = First.rbegin();
  8766. auto CE = First.rend();
  8767. auto SI = Second.rbegin();
  8768. auto SE = Second.rend();
  8769. for (; CI != CE && SI != SE; ++CI, ++SI) {
  8770. if (CI->getAssociatedExpression()->getStmtClass() !=
  8771. SI->getAssociatedExpression()->getStmtClass())
  8772. break;
  8773. // Are we dealing with different variables/fields?
  8774. if (CI->getAssociatedDeclaration() !=
  8775. SI->getAssociatedDeclaration())
  8776. break;
  8777. }
  8778. // Lists contain the same elements.
  8779. if (CI == CE && SI == SE)
  8780. return false;
// A list with fewer elements is less than a list with more elements.
  8782. if (CI == CE || SI == SE)
  8783. return CI == CE;
  8784. const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
  8785. const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
  8786. if (FD1->getParent() == FD2->getParent())
  8787. return FD1->getFieldIndex() < FD2->getFieldIndex();
  8788. const auto *It =
  8789. llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
  8790. return FD == FD1 || FD == FD2;
  8791. });
  8792. return *It == FD1;
  8793. });
  8794. }
// Associated with a capture, because the mapping flags depend on it.
// First, go through all of the elements that have overlapped elements.
  8797. bool IsFirstComponentList = true;
  8798. for (const auto &Pair : OverlappedData) {
  8799. const MapData &L = *Pair.getFirst();
  8800. OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
  8801. OpenMPMapClauseKind MapType;
  8802. ArrayRef<OpenMPMapModifierKind> MapModifiers;
  8803. bool IsImplicit;
  8804. const ValueDecl *Mapper;
  8805. const Expr *VarRef;
  8806. std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
  8807. L;
  8808. ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
  8809. OverlappedComponents = Pair.getSecond();
  8810. generateInfoForComponentList(
  8811. MapType, MapModifiers, llvm::None, Components, CombinedInfo,
  8812. PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
  8813. /*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
  8814. IsFirstComponentList = false;
  8815. }
  8816. // Go through other elements without overlapped elements.
  8817. for (const MapData &L : DeclComponentLists) {
  8818. OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
  8819. OpenMPMapClauseKind MapType;
  8820. ArrayRef<OpenMPMapModifierKind> MapModifiers;
  8821. bool IsImplicit;
  8822. const ValueDecl *Mapper;
  8823. const Expr *VarRef;
  8824. std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
  8825. L;
  8826. auto It = OverlappedData.find(&L);
  8827. if (It == OverlappedData.end())
  8828. generateInfoForComponentList(MapType, MapModifiers, llvm::None,
  8829. Components, CombinedInfo, PartialStruct,
  8830. IsFirstComponentList, IsImplicit, Mapper,
  8831. /*ForDeviceAddr=*/false, VD, VarRef);
  8832. IsFirstComponentList = false;
  8833. }
  8834. }
  8835. /// Generate the default map information for a given capture \a CI,
  8836. /// record field declaration \a RI and captured value \a CV.
  8837. void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
  8838. const FieldDecl &RI, llvm::Value *CV,
  8839. MapCombinedInfoTy &CombinedInfo) const {
  8840. bool IsImplicit = true;
  8841. // Do the default mapping.
  8842. if (CI.capturesThis()) {
  8843. CombinedInfo.Exprs.push_back(nullptr);
  8844. CombinedInfo.BasePointers.push_back(CV);
  8845. CombinedInfo.Pointers.push_back(CV);
  8846. const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
  8847. CombinedInfo.Sizes.push_back(
  8848. CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
  8849. CGF.Int64Ty, /*isSigned=*/true));
  8850. // Default map type.
  8851. CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
  8852. } else if (CI.capturesVariableByCopy()) {
  8853. const VarDecl *VD = CI.getCapturedVar();
  8854. CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
  8855. CombinedInfo.BasePointers.push_back(CV);
  8856. CombinedInfo.Pointers.push_back(CV);
  8857. if (!RI.getType()->isAnyPointerType()) {
// We have to signal to the runtime which captures passed by value are
// not pointers.
  8860. CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
  8861. CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
  8862. CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
  8863. } else {
  8864. // Pointers are implicitly mapped with a zero size and no flags
  8865. // (other than first map that is added for all implicit maps).
  8866. CombinedInfo.Types.push_back(OMP_MAP_NONE);
  8867. CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
  8868. }
  8869. auto I = FirstPrivateDecls.find(VD);
  8870. if (I != FirstPrivateDecls.end())
  8871. IsImplicit = I->getSecond();
  8872. } else {
  8873. assert(CI.capturesVariable() && "Expected captured reference.");
  8874. const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
  8875. QualType ElementType = PtrTy->getPointeeType();
  8876. CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
  8877. CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
  8878. // The default map type for a scalar/complex type is 'to' because by
  8879. // default the value doesn't have to be retrieved. For an aggregate
  8880. // type, the default is 'tofrom'.
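// For illustration only (assumed example): a captured reference to
//   struct S s;
// used inside '#pragma omp target' defaults to 'tofrom', while a captured
// scalar reference defaults to 'to'.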
  8881. CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
  8882. const VarDecl *VD = CI.getCapturedVar();
  8883. auto I = FirstPrivateDecls.find(VD);
  8884. CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
  8885. CombinedInfo.BasePointers.push_back(CV);
  8886. if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
  8887. Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
  8888. CV, ElementType, CGF.getContext().getDeclAlign(VD),
  8889. AlignmentSource::Decl));
  8890. CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
  8891. } else {
  8892. CombinedInfo.Pointers.push_back(CV);
  8893. }
  8894. if (I != FirstPrivateDecls.end())
  8895. IsImplicit = I->getSecond();
  8896. }
  8897. // Every default map produces a single argument which is a target parameter.
  8898. CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;
  8899. // Add flag stating this is an implicit map.
  8900. if (IsImplicit)
  8901. CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;
  8902. // No user-defined mapper for default mapping.
  8903. CombinedInfo.Mappers.push_back(nullptr);
  8904. }
  8905. };
  8906. } // anonymous namespace
  8907. static void emitNonContiguousDescriptor(
  8908. CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
  8909. CGOpenMPRuntime::TargetDataInfo &Info) {
  8910. CodeGenModule &CGM = CGF.CGM;
  8911. MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
  8912. &NonContigInfo = CombinedInfo.NonContigInfo;
  8913. // Build an array of struct descriptor_dim and then assign it to
  8914. // offload_args.
  8915. //
  8916. // struct descriptor_dim {
  8917. // uint64_t offset;
  8918. // uint64_t count;
  8919. // uint64_t stride
  8920. // };
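//
// For illustration only (assumed example): for a strided update such as
//   #pragma omp target update to(a[0:4:2])
// one descriptor_dim is emitted per non-unit dimension, carrying the offset,
// count, and stride recorded in NonContigInfo for that dimension.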
  8921. ASTContext &C = CGF.getContext();
  8922. QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
  8923. RecordDecl *RD;
  8924. RD = C.buildImplicitRecord("descriptor_dim");
  8925. RD->startDefinition();
  8926. addFieldToRecordDecl(C, RD, Int64Ty);
  8927. addFieldToRecordDecl(C, RD, Int64Ty);
  8928. addFieldToRecordDecl(C, RD, Int64Ty);
  8929. RD->completeDefinition();
  8930. QualType DimTy = C.getRecordType(RD);
  8931. enum { OffsetFD = 0, CountFD, StrideFD };
// We need two index variables here since the size of "Dims" is the same as
// the size of Components; however, the sizes of the offset, count, and stride
// arrays equal the number of base declarations that are non-contiguous.
  8935. for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
// Skip emitting IR if the dimension size is 1, since it cannot be
// non-contiguous.
  8938. if (NonContigInfo.Dims[I] == 1)
  8939. continue;
  8940. llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
  8941. QualType ArrayTy =
  8942. C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
  8943. Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
  8944. for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
  8945. unsigned RevIdx = EE - II - 1;
  8946. LValue DimsLVal = CGF.MakeAddrLValue(
  8947. CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
  8948. // Offset
  8949. LValue OffsetLVal = CGF.EmitLValueForField(
  8950. DimsLVal, *std::next(RD->field_begin(), OffsetFD));
  8951. CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
  8952. // Count
  8953. LValue CountLVal = CGF.EmitLValueForField(
  8954. DimsLVal, *std::next(RD->field_begin(), CountFD));
  8955. CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
  8956. // Stride
  8957. LValue StrideLVal = CGF.EmitLValueForField(
  8958. DimsLVal, *std::next(RD->field_begin(), StrideFD));
  8959. CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
  8960. }
  8961. // args[I] = &dims
  8962. Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  8963. DimsAddr, CGM.Int8PtrTy);
  8964. llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
  8965. llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
  8966. Info.PointersArray, 0, I);
  8967. Address PAddr(P, CGF.getPointerAlign());
  8968. CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
  8969. ++L;
  8970. }
  8971. }
  8972. // Try to extract the base declaration from a `this->x` expression if possible.
  8973. static ValueDecl *getDeclFromThisExpr(const Expr *E) {
  8974. if (!E)
  8975. return nullptr;
  8976. if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenCasts()))
  8977. if (const MemberExpr *ME =
  8978. dyn_cast<MemberExpr>(OASE->getBase()->IgnoreParenImpCasts()))
  8979. return ME->getMemberDecl();
  8980. return nullptr;
  8981. }
  8982. /// Emit a string constant containing the names of the values mapped to the
  8983. /// offloading runtime library.
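/// For illustration only (assumed format): for 'map(tofrom: s.a)' written at
/// foo.c:12:7 this produces a source-location string resembling
/// ";foo.c;s.a;12;7;;", following the layout used by
/// llvm::OpenMPIRBuilder::getOrCreateSrcLocStr.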
  8984. llvm::Constant *
  8985. emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
  8986. MappableExprsHandler::MappingExprInfo &MapExprs) {
  8987. uint32_t SrcLocStrSize;
  8988. if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
  8989. return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  8990. SourceLocation Loc;
  8991. if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
  8992. if (const ValueDecl *VD = getDeclFromThisExpr(MapExprs.getMapExpr()))
  8993. Loc = VD->getLocation();
  8994. else
  8995. Loc = MapExprs.getMapExpr()->getExprLoc();
  8996. } else {
  8997. Loc = MapExprs.getMapDecl()->getLocation();
  8998. }
  8999. std::string ExprName;
  9000. if (MapExprs.getMapExpr()) {
  9001. PrintingPolicy P(CGF.getContext().getLangOpts());
  9002. llvm::raw_string_ostream OS(ExprName);
  9003. MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
  9004. OS.flush();
  9005. } else {
  9006. ExprName = MapExprs.getMapDecl()->getNameAsString();
  9007. }
  9008. PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
  9009. return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName,
  9010. PLoc.getLine(), PLoc.getColumn(),
  9011. SrcLocStrSize);
  9012. }
  9013. /// Emit the arrays used to pass the captures and map information to the
  9014. /// offloading runtime library. If there is no map or capture information,
  9015. /// return nullptr by reference.
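/// For illustration only (assumed shape): with two mapped captures this
/// creates local arrays along the lines of
///   void *.offload_baseptrs[2], *.offload_ptrs[2], *.offload_mappers[2];
///   int64_t .offload_sizes[2];
/// plus constant globals such as @.offload_maptypes (and @.offload_mapnames
/// when debug information is enabled).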
  9016. static void emitOffloadingArrays(
  9017. CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
  9018. CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
  9019. bool IsNonContiguous = false) {
  9020. CodeGenModule &CGM = CGF.CGM;
  9021. ASTContext &Ctx = CGF.getContext();
  9022. // Reset the array information.
  9023. Info.clearArrayInfo();
  9024. Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
  9025. if (Info.NumberOfPtrs) {
  9026. // Detect if we have any capture size requiring runtime evaluation of the
  9027. // size so that a constant array could be eventually used.
  9028. bool hasRuntimeEvaluationCaptureSize = false;
  9029. for (llvm::Value *S : CombinedInfo.Sizes)
  9030. if (!isa<llvm::Constant>(S)) {
  9031. hasRuntimeEvaluationCaptureSize = true;
  9032. break;
  9033. }
  9034. llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
  9035. QualType PointerArrayType = Ctx.getConstantArrayType(
  9036. Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
  9037. /*IndexTypeQuals=*/0);
  9038. Info.BasePointersArray =
  9039. CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
  9040. Info.PointersArray =
  9041. CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
  9042. Address MappersArray =
  9043. CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
  9044. Info.MappersArray = MappersArray.getPointer();
  9045. // If we don't have any VLA types or other types that require runtime
  9046. // evaluation, we can use a constant array for the map sizes, otherwise we
  9047. // need to fill up the arrays as we do for the pointers.
  9048. QualType Int64Ty =
  9049. Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  9050. if (hasRuntimeEvaluationCaptureSize) {
  9051. QualType SizeArrayType = Ctx.getConstantArrayType(
  9052. Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
  9053. /*IndexTypeQuals=*/0);
  9054. Info.SizesArray =
  9055. CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
  9056. } else {
  9057. // We expect all the sizes to be constant, so we collect them to create
  9058. // a constant array.
  9059. SmallVector<llvm::Constant *, 16> ConstSizes;
  9060. for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
  9061. if (IsNonContiguous &&
  9062. (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
  9063. ConstSizes.push_back(llvm::ConstantInt::get(
  9064. CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
  9065. } else {
  9066. ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
  9067. }
  9068. }
  9069. auto *SizesArrayInit = llvm::ConstantArray::get(
  9070. llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
  9071. std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
  9072. auto *SizesArrayGbl = new llvm::GlobalVariable(
  9073. CGM.getModule(), SizesArrayInit->getType(),
  9074. /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
  9075. SizesArrayInit, Name);
  9076. SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  9077. Info.SizesArray = SizesArrayGbl;
  9078. }
  9079. // The map types are always constant so we don't need to generate code to
  9080. // fill arrays. Instead, we create an array constant.
  9081. SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
  9082. llvm::copy(CombinedInfo.Types, Mapping.begin());
  9083. std::string MaptypesName =
  9084. CGM.getOpenMPRuntime().getName({"offload_maptypes"});
  9085. auto *MapTypesArrayGbl =
  9086. OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
  9087. Info.MapTypesArray = MapTypesArrayGbl;
// The map name strings are only built if debug information is requested.
  9090. if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
  9091. Info.MapNamesArray = llvm::Constant::getNullValue(
  9092. llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
  9093. } else {
  9094. auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
  9095. return emitMappingInformation(CGF, OMPBuilder, MapExpr);
  9096. };
  9097. SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
  9098. llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
  9099. std::string MapnamesName =
  9100. CGM.getOpenMPRuntime().getName({"offload_mapnames"});
  9101. auto *MapNamesArrayGbl =
  9102. OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
  9103. Info.MapNamesArray = MapNamesArrayGbl;
  9104. }
  9105. // If there's a present map type modifier, it must not be applied to the end
  9106. // of a region, so generate a separate map type array in that case.
  9107. if (Info.separateBeginEndCalls()) {
  9108. bool EndMapTypesDiffer = false;
  9109. for (uint64_t &Type : Mapping) {
  9110. if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
  9111. Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
  9112. EndMapTypesDiffer = true;
  9113. }
  9114. }
  9115. if (EndMapTypesDiffer) {
  9116. MapTypesArrayGbl =
  9117. OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
  9118. Info.MapTypesArrayEnd = MapTypesArrayGbl;
  9119. }
  9120. }
  9121. for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
  9122. llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
  9123. llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
  9124. llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
  9125. Info.BasePointersArray, 0, I);
  9126. BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  9127. BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
  9128. Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
  9129. CGF.Builder.CreateStore(BPVal, BPAddr);
  9130. if (Info.requiresDevicePointerInfo())
  9131. if (const ValueDecl *DevVD =
  9132. CombinedInfo.BasePointers[I].getDevicePtrDecl())
  9133. Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
  9134. llvm::Value *PVal = CombinedInfo.Pointers[I];
  9135. llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
  9136. llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
  9137. Info.PointersArray, 0, I);
  9138. P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  9139. P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
  9140. Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
  9141. CGF.Builder.CreateStore(PVal, PAddr);
  9142. if (hasRuntimeEvaluationCaptureSize) {
  9143. llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
  9144. llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
  9145. Info.SizesArray,
  9146. /*Idx0=*/0,
  9147. /*Idx1=*/I);
  9148. Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
  9149. CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
  9150. CGM.Int64Ty,
  9151. /*isSigned=*/true),
  9152. SAddr);
  9153. }
  9154. // Fill up the mapper array.
  9155. llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
  9156. if (CombinedInfo.Mappers[I]) {
  9157. MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
  9158. cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
  9159. MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
  9160. Info.HasMapper = true;
  9161. }
  9162. Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
  9163. CGF.Builder.CreateStore(MFunc, MAddr);
  9164. }
  9165. }
  9166. if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
  9167. Info.NumberOfPtrs == 0)
  9168. return;
  9169. emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
  9170. }
  9171. namespace {
  9172. /// Additional arguments for emitOffloadingArraysArgument function.
  9173. struct ArgumentsOptions {
  9174. bool ForEndCall = false;
  9175. ArgumentsOptions() = default;
  9176. ArgumentsOptions(bool ForEndCall) : ForEndCall(ForEndCall) {}
  9177. };
  9178. } // namespace
  9179. /// Emit the arguments to be passed to the runtime library based on the
  9180. /// arrays of base pointers, pointers, sizes, map types, and mappers. If
  9181. /// ForEndCall, emit map types to be passed for the end of the region instead of
  9182. /// the beginning.
  9183. static void emitOffloadingArraysArgument(
  9184. CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
  9185. llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
  9186. llvm::Value *&MapTypesArrayArg, llvm::Value *&MapNamesArrayArg,
  9187. llvm::Value *&MappersArrayArg, CGOpenMPRuntime::TargetDataInfo &Info,
  9188. const ArgumentsOptions &Options = ArgumentsOptions()) {
  9189. assert((!Options.ForEndCall || Info.separateBeginEndCalls()) &&
  9190. "expected region end call to runtime only when end call is separate");
  9191. CodeGenModule &CGM = CGF.CGM;
  9192. if (Info.NumberOfPtrs) {
  9193. BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
  9194. llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
  9195. Info.BasePointersArray,
  9196. /*Idx0=*/0, /*Idx1=*/0);
  9197. PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
  9198. llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
  9199. Info.PointersArray,
  9200. /*Idx0=*/0,
  9201. /*Idx1=*/0);
  9202. SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
  9203. llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
  9204. /*Idx0=*/0, /*Idx1=*/0);
  9205. MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
  9206. llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
  9207. Options.ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
  9208. : Info.MapTypesArray,
  9209. /*Idx0=*/0,
  9210. /*Idx1=*/0);
// Only emit the map names array if debug information is requested.
  9213. if (CGF.CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
  9214. MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  9215. else
  9216. MapNamesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
  9217. llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
  9218. Info.MapNamesArray,
  9219. /*Idx0=*/0,
  9220. /*Idx1=*/0);
  9221. // If there is no user-defined mapper, set the mapper array to nullptr to
  9222. // avoid an unnecessary data privatization
  9223. if (!Info.HasMapper)
  9224. MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  9225. else
  9226. MappersArrayArg =
  9227. CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy);
  9228. } else {
  9229. BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  9230. PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  9231. SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
  9232. MapTypesArrayArg =
  9233. llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
  9234. MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  9235. MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
  9236. }
  9237. }
  9238. /// Check for inner distribute directive.
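/// For illustration only (assumed example): in
///   #pragma omp target
///   #pragma omp teams
///   #pragma omp distribute parallel for
///   for (int i = 0; i < n; ++i) {}
/// the nested 'distribute parallel for' is the directive returned here.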
  9239. static const OMPExecutableDirective *
  9240. getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
  9241. const auto *CS = D.getInnermostCapturedStmt();
  9242. const auto *Body =
  9243. CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  9244. const Stmt *ChildStmt =
  9245. CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
  9246. if (const auto *NestedDir =
  9247. dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
  9248. OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
  9249. switch (D.getDirectiveKind()) {
  9250. case OMPD_target:
  9251. if (isOpenMPDistributeDirective(DKind))
  9252. return NestedDir;
  9253. if (DKind == OMPD_teams) {
  9254. Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
  9255. /*IgnoreCaptured=*/true);
  9256. if (!Body)
  9257. return nullptr;
  9258. ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
  9259. if (const auto *NND =
  9260. dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
  9261. DKind = NND->getDirectiveKind();
  9262. if (isOpenMPDistributeDirective(DKind))
  9263. return NND;
  9264. }
  9265. }
  9266. return nullptr;
  9267. case OMPD_target_teams:
  9268. if (isOpenMPDistributeDirective(DKind))
  9269. return NestedDir;
  9270. return nullptr;
  9271. case OMPD_target_parallel:
  9272. case OMPD_target_simd:
  9273. case OMPD_target_parallel_for:
  9274. case OMPD_target_parallel_for_simd:
  9275. return nullptr;
  9276. case OMPD_target_teams_distribute:
  9277. case OMPD_target_teams_distribute_simd:
  9278. case OMPD_target_teams_distribute_parallel_for:
  9279. case OMPD_target_teams_distribute_parallel_for_simd:
  9280. case OMPD_parallel:
  9281. case OMPD_for:
  9282. case OMPD_parallel_for:
  9283. case OMPD_parallel_master:
  9284. case OMPD_parallel_sections:
  9285. case OMPD_for_simd:
  9286. case OMPD_parallel_for_simd:
  9287. case OMPD_cancel:
  9288. case OMPD_cancellation_point:
  9289. case OMPD_ordered:
  9290. case OMPD_threadprivate:
  9291. case OMPD_allocate:
  9292. case OMPD_task:
  9293. case OMPD_simd:
  9294. case OMPD_tile:
  9295. case OMPD_unroll:
  9296. case OMPD_sections:
  9297. case OMPD_section:
  9298. case OMPD_single:
  9299. case OMPD_master:
  9300. case OMPD_critical:
  9301. case OMPD_taskyield:
  9302. case OMPD_barrier:
  9303. case OMPD_taskwait:
  9304. case OMPD_taskgroup:
  9305. case OMPD_atomic:
  9306. case OMPD_flush:
  9307. case OMPD_depobj:
  9308. case OMPD_scan:
  9309. case OMPD_teams:
  9310. case OMPD_target_data:
  9311. case OMPD_target_exit_data:
  9312. case OMPD_target_enter_data:
  9313. case OMPD_distribute:
  9314. case OMPD_distribute_simd:
  9315. case OMPD_distribute_parallel_for:
  9316. case OMPD_distribute_parallel_for_simd:
  9317. case OMPD_teams_distribute:
  9318. case OMPD_teams_distribute_simd:
  9319. case OMPD_teams_distribute_parallel_for:
  9320. case OMPD_teams_distribute_parallel_for_simd:
  9321. case OMPD_target_update:
  9322. case OMPD_declare_simd:
  9323. case OMPD_declare_variant:
  9324. case OMPD_begin_declare_variant:
  9325. case OMPD_end_declare_variant:
  9326. case OMPD_declare_target:
  9327. case OMPD_end_declare_target:
  9328. case OMPD_declare_reduction:
  9329. case OMPD_declare_mapper:
  9330. case OMPD_taskloop:
  9331. case OMPD_taskloop_simd:
  9332. case OMPD_master_taskloop:
  9333. case OMPD_master_taskloop_simd:
  9334. case OMPD_parallel_master_taskloop:
  9335. case OMPD_parallel_master_taskloop_simd:
  9336. case OMPD_requires:
  9337. case OMPD_metadirective:
  9338. case OMPD_unknown:
  9339. default:
  9340. llvm_unreachable("Unexpected directive.");
  9341. }
  9342. }
  9343. return nullptr;
  9344. }
  9345. /// Emit the user-defined mapper function. The code generation follows the
  9346. /// pattern in the example below.
  9347. /// \code
  9348. /// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
  9349. /// void *base, void *begin,
  9350. /// int64_t size, int64_t type,
  9351. /// void *name = nullptr) {
  9352. /// // Allocate space for an array section first or add a base/begin for
  9353. /// // pointer dereference.
  9354. /// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
  9355. /// !maptype.IsDelete)
  9356. /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
  9357. /// size*sizeof(Ty), clearToFromMember(type));
  9358. /// // Map members.
  9359. /// for (unsigned i = 0; i < size; i++) {
  9360. /// // For each component specified by this mapper:
  9361. /// for (auto c : begin[i]->all_components) {
  9362. /// if (c.hasMapper())
  9363. /// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
  9364. /// c.arg_type, c.arg_name);
  9365. /// else
  9366. /// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
  9367. /// c.arg_begin, c.arg_size, c.arg_type,
  9368. /// c.arg_name);
  9369. /// }
  9370. /// }
  9371. /// // Delete the array section.
  9372. /// if (size > 1 && maptype.IsDelete)
  9373. /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
  9374. /// size*sizeof(Ty), clearToFromMember(type));
  9375. /// }
  9376. /// \endcode
  9377. void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
  9378. CodeGenFunction *CGF) {
  9379. if (UDMMap.count(D) > 0)
  9380. return;
  9381. ASTContext &C = CGM.getContext();
  9382. QualType Ty = D->getType();
  9383. QualType PtrTy = C.getPointerType(Ty).withRestrict();
  9384. QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  9385. auto *MapperVarDecl =
  9386. cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
  9387. SourceLocation Loc = D->getLocation();
  9388. CharUnits ElementSize = C.getTypeSizeInChars(Ty);
  9389. // Prepare mapper function arguments and attributes.
  9390. ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  9391. C.VoidPtrTy, ImplicitParamDecl::Other);
  9392. ImplicitParamDecl BaseArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  9393. ImplicitParamDecl::Other);
  9394. ImplicitParamDecl BeginArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
  9395. C.VoidPtrTy, ImplicitParamDecl::Other);
  9396. ImplicitParamDecl SizeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
  9397. ImplicitParamDecl::Other);
  9398. ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
  9399. ImplicitParamDecl::Other);
  9400. ImplicitParamDecl NameArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
  9401. ImplicitParamDecl::Other);
  9402. FunctionArgList Args;
  9403. Args.push_back(&HandleArg);
  9404. Args.push_back(&BaseArg);
  9405. Args.push_back(&BeginArg);
  9406. Args.push_back(&SizeArg);
  9407. Args.push_back(&TypeArg);
  9408. Args.push_back(&NameArg);
  9409. const CGFunctionInfo &FnInfo =
  9410. CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  9411. llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  9412. SmallString<64> TyStr;
  9413. llvm::raw_svector_ostream Out(TyStr);
  9414. CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
  9415. std::string Name = getName({"omp_mapper", TyStr, D->getName()});
  9416. auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
  9417. Name, &CGM.getModule());
  9418. CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  9419. Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  9420. // Start the mapper function code generation.
  9421. CodeGenFunction MapperCGF(CGM);
  9422. MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  9423. // Compute the starting and end addresses of array elements.
  9424. llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
  9425. MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
  9426. C.getPointerType(Int64Ty), Loc);
// Prepare common arguments for array initialization and deletion.
  9428. llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
  9429. MapperCGF.GetAddrOfLocalVar(&HandleArg),
  9430. /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  9431. llvm::Value *BaseIn = MapperCGF.EmitLoadOfScalar(
  9432. MapperCGF.GetAddrOfLocalVar(&BaseArg),
  9433. /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  9434. llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
  9435. MapperCGF.GetAddrOfLocalVar(&BeginArg),
  9436. /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  9437. // Convert the size in bytes into the number of array elements.
  9438. Size = MapperCGF.Builder.CreateExactUDiv(
  9439. Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
  9440. llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
  9441. BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
  9442. llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(
  9443. PtrBegin->getType()->getPointerElementType(), PtrBegin, Size);
  9444. llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
  9445. MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
  9446. C.getPointerType(Int64Ty), Loc);
  9447. llvm::Value *MapName = MapperCGF.EmitLoadOfScalar(
  9448. MapperCGF.GetAddrOfLocalVar(&NameArg),
  9449. /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
// Emit array initialization if this is an array section and \p MapType
// indicates that memory allocation is required.
  9452. llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
  9453. emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
  9454. MapName, ElementSize, HeadBB, /*IsInit=*/true);
// Emit a for loop to iterate through SizeArg elements and map all of them.
  9456. // Emit the loop header block.
  9457. MapperCGF.EmitBlock(HeadBB);
  9458. llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.arraymap.body");
  9459. llvm::BasicBlock *DoneBB = MapperCGF.createBasicBlock("omp.done");
  9460. // Evaluate whether the initial condition is satisfied.
  9461. llvm::Value *IsEmpty =
  9462. MapperCGF.Builder.CreateICmpEQ(PtrBegin, PtrEnd, "omp.arraymap.isempty");
  9463. MapperCGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  9464. llvm::BasicBlock *EntryBB = MapperCGF.Builder.GetInsertBlock();
  9465. // Emit the loop body block.
  9466. MapperCGF.EmitBlock(BodyBB);
  9467. llvm::BasicBlock *LastBB = BodyBB;
  9468. llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
  9469. PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
  9470. PtrPHI->addIncoming(PtrBegin, EntryBB);
  9471. Address PtrCurrent =
  9472. Address(PtrPHI, MapperCGF.GetAddrOfLocalVar(&BeginArg)
  9473. .getAlignment()
  9474. .alignmentOfArrayElement(ElementSize));
  9475. // Privatize the declared variable of mapper to be the current array element.
  9476. CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
  9477. Scope.addPrivate(MapperVarDecl, [PtrCurrent]() { return PtrCurrent; });
  9478. (void)Scope.Privatize();
  9479. // Get map clause information. Fill up the arrays with all mapped variables.
  9480. MappableExprsHandler::MapCombinedInfoTy Info;
  9481. MappableExprsHandler MEHandler(*D, MapperCGF);
  9482. MEHandler.generateAllInfoForMapper(Info);
  9483. // Call the runtime API __tgt_mapper_num_components to get the number of
  9484. // pre-existing components.
  9485. llvm::Value *OffloadingArgs[] = {Handle};
  9486. llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
  9487. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  9488. OMPRTL___tgt_mapper_num_components),
  9489. OffloadingArgs);
  9490. llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
  9491. PreviousSize,
  9492. MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
  9493. // Fill up the runtime mapper handle for all components.
  9494. for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
  9495. llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
  9496. *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
  9497. llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
  9498. Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
  9499. llvm::Value *CurSizeArg = Info.Sizes[I];
  9500. llvm::Value *CurNameArg =
  9501. (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
  9502. ? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
  9503. : emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
  9504. // Extract the MEMBER_OF field from the map type.
  9505. llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
  9506. llvm::Value *MemberMapType =
  9507. MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
  9508. // Combine the map type inherited from user-defined mapper with that
  9509. // specified in the program. According to the OMP_MAP_TO and OMP_MAP_FROM
  9510. // bits of the \a MapType, which is the input argument of the mapper
  9511. // function, the following code will set the OMP_MAP_TO and OMP_MAP_FROM
  9512. // bits of MemberMapType.
  9513. // [OpenMP 5.0], 1.2.6. map-type decay.
  9514. // | alloc | to | from | tofrom | release | delete
  9515. // ----------------------------------------------------------
  9516. // alloc | alloc | alloc | alloc | alloc | release | delete
  9517. // to | alloc | to | alloc | to | release | delete
  9518. // from | alloc | alloc | from | from | release | delete
  9519. // tofrom | alloc | to | from | tofrom | release | delete
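// For illustration only (assumed example): if the mapper declaration used
// map type 'to' for a member and the program maps the variable with 'from',
// the decayed map type for that member is 'alloc', per the table above.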
  9520. llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
  9521. MapType,
  9522. MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO |
  9523. MappableExprsHandler::OMP_MAP_FROM));
  9524. llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
  9525. llvm::BasicBlock *AllocElseBB =
  9526. MapperCGF.createBasicBlock("omp.type.alloc.else");
  9527. llvm::BasicBlock *ToBB = MapperCGF.createBasicBlock("omp.type.to");
  9528. llvm::BasicBlock *ToElseBB = MapperCGF.createBasicBlock("omp.type.to.else");
  9529. llvm::BasicBlock *FromBB = MapperCGF.createBasicBlock("omp.type.from");
  9530. llvm::BasicBlock *EndBB = MapperCGF.createBasicBlock("omp.type.end");
  9531. llvm::Value *IsAlloc = MapperCGF.Builder.CreateIsNull(LeftToFrom);
  9532. MapperCGF.Builder.CreateCondBr(IsAlloc, AllocBB, AllocElseBB);
  9533. // In case of alloc, clear OMP_MAP_TO and OMP_MAP_FROM.
  9534. MapperCGF.EmitBlock(AllocBB);
  9535. llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
  9536. MemberMapType,
  9537. MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
  9538. MappableExprsHandler::OMP_MAP_FROM)));
  9539. MapperCGF.Builder.CreateBr(EndBB);
  9540. MapperCGF.EmitBlock(AllocElseBB);
  9541. llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
  9542. LeftToFrom,
  9543. MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO));
  9544. MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
  9545. // In case of to, clear OMP_MAP_FROM.
  9546. MapperCGF.EmitBlock(ToBB);
  9547. llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
  9548. MemberMapType,
  9549. MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_FROM));
  9550. MapperCGF.Builder.CreateBr(EndBB);
  9551. MapperCGF.EmitBlock(ToElseBB);
  9552. llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
  9553. LeftToFrom,
  9554. MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_FROM));
  9555. MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
  9556. // In case of from, clear OMP_MAP_TO.
  9557. MapperCGF.EmitBlock(FromBB);
  9558. llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
  9559. MemberMapType,
  9560. MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
  9561. // In case of tofrom, do nothing.
  9562. MapperCGF.EmitBlock(EndBB);
  9563. LastBB = EndBB;
  9564. llvm::PHINode *CurMapType =
  9565. MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
  9566. CurMapType->addIncoming(AllocMapType, AllocBB);
  9567. CurMapType->addIncoming(ToMapType, ToBB);
  9568. CurMapType->addIncoming(FromMapType, FromBB);
  9569. CurMapType->addIncoming(MemberMapType, ToElseBB);
  9570. llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
  9571. CurSizeArg, CurMapType, CurNameArg};
  9572. if (Info.Mappers[I]) {
  9573. // Call the corresponding mapper function.
  9574. llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
  9575. cast<OMPDeclareMapperDecl>(Info.Mappers[I]));
  9576. assert(MapperFunc && "Expect a valid mapper function is available.");
  9577. MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
  9578. } else {
  9579. // Call the runtime API __tgt_push_mapper_component to fill up the runtime
  9580. // data structure.
  9581. MapperCGF.EmitRuntimeCall(
  9582. OMPBuilder.getOrCreateRuntimeFunction(
  9583. CGM.getModule(), OMPRTL___tgt_push_mapper_component),
  9584. OffloadingArgs);
  9585. }
  9586. }
  9587. // Update the pointer to point to the next element that needs to be mapped,
  9588. // and check whether we have mapped all elements.
  9589. llvm::Type *ElemTy = PtrPHI->getType()->getPointerElementType();
  9590. llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
  9591. ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
  9592. PtrPHI->addIncoming(PtrNext, LastBB);
  9593. llvm::Value *IsDone =
  9594. MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
  9595. llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
  9596. MapperCGF.Builder.CreateCondBr(IsDone, ExitBB, BodyBB);
  9597. MapperCGF.EmitBlock(ExitBB);
  9598. // Emit array deletion if this is an array section and \p MapType indicates
  9599. // that deletion is required.
  9600. emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
  9601. MapName, ElementSize, DoneBB, /*IsInit=*/false);
  9602. // Emit the function exit block.
  9603. MapperCGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  9604. MapperCGF.FinishFunction();
  9605. UDMMap.try_emplace(D, Fn);
  9606. if (CGF) {
  9607. auto &Decls = FunctionUDMMap.FindAndConstruct(CGF->CurFn);
  9608. Decls.second.push_back(D);
  9609. }
  9610. }
  9611. /// Emit the array initialization or deletion portion for user-defined mapper
  9612. /// code generation. First, it evaluates whether an array section is mapped and
  9613. /// whether the \a MapType instructs to delete this section. If \a IsInit is
  9614. /// true, and \a MapType indicates to not delete this array, array
  9615. /// initialization code is generated. If \a IsInit is false, and \a MapType
/// indicates to delete this array, array deletion code is generated.
  9617. void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
  9618. CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
  9619. llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
  9620. llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB,
  9621. bool IsInit) {
  9622. StringRef Prefix = IsInit ? ".init" : ".del";
  9623. // Evaluate if this is an array section.
  9624. llvm::BasicBlock *BodyBB =
  9625. MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
  9626. llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGT(
  9627. Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
  9628. llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
  9629. MapType,
  9630. MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
  9631. llvm::Value *DeleteCond;
  9632. llvm::Value *Cond;
  9633. if (IsInit) {
  9634. // base != begin?
  9635. llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateICmpNE(Base, Begin);
  9636. // IsPtrAndObj?
  9637. llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
  9638. MapType,
  9639. MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_PTR_AND_OBJ));
  9640. PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
  9641. BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
  9642. Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
  9643. DeleteCond = MapperCGF.Builder.CreateIsNull(
  9644. DeleteBit, getName({"omp.array", Prefix, ".delete"}));
  9645. } else {
  9646. Cond = IsArray;
  9647. DeleteCond = MapperCGF.Builder.CreateIsNotNull(
  9648. DeleteBit, getName({"omp.array", Prefix, ".delete"}));
  9649. }
  9650. Cond = MapperCGF.Builder.CreateAnd(Cond, DeleteCond);
  9651. MapperCGF.Builder.CreateCondBr(Cond, BodyBB, ExitBB);
  9652. MapperCGF.EmitBlock(BodyBB);
  9653. // Get the array size by multiplying element size and element number (i.e., \p
  9654. // Size).
  9655. llvm::Value *ArraySize = MapperCGF.Builder.CreateNUWMul(
  9656. Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
  9657. // Remove OMP_MAP_TO and OMP_MAP_FROM from the map type, so that it achieves
  9658. // memory allocation/deletion purpose only.
  9659. llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
  9660. MapType,
  9661. MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
  9662. MappableExprsHandler::OMP_MAP_FROM)));
  9663. MapTypeArg = MapperCGF.Builder.CreateOr(
  9664. MapTypeArg,
  9665. MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_IMPLICIT));
  9666. // Call the runtime API __tgt_push_mapper_component to fill up the runtime
  9667. // data structure.
  9668. llvm::Value *OffloadingArgs[] = {Handle, Base, Begin,
  9669. ArraySize, MapTypeArg, MapName};
  9670. MapperCGF.EmitRuntimeCall(
  9671. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  9672. OMPRTL___tgt_push_mapper_component),
  9673. OffloadingArgs);
  9674. }
  9675. llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
  9676. const OMPDeclareMapperDecl *D) {
  9677. auto I = UDMMap.find(D);
  9678. if (I != UDMMap.end())
  9679. return I->second;
  9680. emitUserDefinedMapper(D);
  9681. return UDMMap.lookup(D);
  9682. }
  9683. void CGOpenMPRuntime::emitTargetNumIterationsCall(
  9684. CodeGenFunction &CGF, const OMPExecutableDirective &D,
  9685. llvm::Value *DeviceID,
  9686. llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
  9687. const OMPLoopDirective &D)>
  9688. SizeEmitter) {
  9689. OpenMPDirectiveKind Kind = D.getDirectiveKind();
  9690. const OMPExecutableDirective *TD = &D;
  9691. // Get nested teams distribute kind directive, if any.
  9692. if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
  9693. TD = getNestedDistributeDirective(CGM.getContext(), D);
  9694. if (!TD)
  9695. return;
  9696. const auto *LD = cast<OMPLoopDirective>(TD);
  9697. auto &&CodeGen = [LD, DeviceID, SizeEmitter, &D, this](CodeGenFunction &CGF,
  9698. PrePostActionTy &) {
  9699. if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
  9700. llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
  9701. llvm::Value *Args[] = {RTLoc, DeviceID, NumIterations};
  9702. CGF.EmitRuntimeCall(
  9703. OMPBuilder.getOrCreateRuntimeFunction(
  9704. CGM.getModule(), OMPRTL___kmpc_push_target_tripcount_mapper),
  9705. Args);
  9706. }
  9707. };
  9708. emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
  9709. }
  9710. void CGOpenMPRuntime::emitTargetCall(
  9711. CodeGenFunction &CGF, const OMPExecutableDirective &D,
  9712. llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
  9713. llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
  9714. llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
  9715. const OMPLoopDirective &D)>
  9716. SizeEmitter) {
  9717. if (!CGF.HaveInsertPoint())
  9718. return;
  9719. assert(OutlinedFn && "Invalid outlined function!");
  9720. const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
  9721. D.hasClausesOfKind<OMPNowaitClause>();
  9722. llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  9723. const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  9724. auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
  9725. PrePostActionTy &) {
  9726. CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  9727. };
  9728. emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
  9729. CodeGenFunction::OMPTargetDataInfo InputInfo;
  9730. llvm::Value *MapTypesArray = nullptr;
  9731. llvm::Value *MapNamesArray = nullptr;
  9732. // Fill up the pointer arrays and transfer execution to the device.
  9733. auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
  9734. &MapTypesArray, &MapNamesArray, &CS, RequiresOuterTask,
  9735. &CapturedVars,
  9736. SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
  9737. if (Device.getInt() == OMPC_DEVICE_ancestor) {
  9738. // Reverse offloading is not supported, so just execute on the host.
  9739. if (RequiresOuterTask) {
  9740. CapturedVars.clear();
  9741. CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  9742. }
  9743. emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
  9744. return;
  9745. }
  9746. // On top of the arrays that were filled up, the target offloading call
  9747. // takes as arguments the device id as well as the host pointer. The host
  9748. // pointer is used by the runtime library to identify the current target
  9749. // region, so it only has to be unique and not necessarily point to
  9750. // anything. It could be the pointer to the outlined function that
  9751. // implements the target region, but we aren't using that so that the
  9752. // compiler doesn't need to keep that, and could therefore inline the host
  9753. // function if proven worthwhile during optimization.
  9754. // From this point on, we need to have an ID of the target region defined.
  9755. assert(OutlinedFnID && "Invalid outlined function ID!");
  9756. // Emit device ID if any.
  9757. llvm::Value *DeviceID;
  9758. if (Device.getPointer()) {
  9759. assert((Device.getInt() == OMPC_DEVICE_unknown ||
  9760. Device.getInt() == OMPC_DEVICE_device_num) &&
  9761. "Expected device_num modifier.");
  9762. llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
  9763. DeviceID =
  9764. CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
  9765. } else {
  9766. DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
  9767. }
  9768. // Emit the number of elements in the offloading arrays.
  9769. llvm::Value *PointerNum =
  9770. CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
  9771. // Return value of the runtime offloading call.
  9772. llvm::Value *Return;
  9773. llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
  9774. llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
  9775. // Source location for the ident struct
  9776. llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
  9777. // Emit tripcount for the target loop-based directive.
  9778. emitTargetNumIterationsCall(CGF, D, DeviceID, SizeEmitter);
  9779. bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
  9780. // The target region is an outlined function launched by the runtime
  9781. // via calls __tgt_target() or __tgt_target_teams().
  9782. //
  9783. // __tgt_target() launches a target region with one team and one thread,
  9784. // executing a serial region. This master thread may in turn launch
// more threads within its team upon encountering a parallel region;
// however, no additional teams can be launched on the device.
  9787. //
  9788. // __tgt_target_teams() launches a target region with one or more teams,
  9789. // each with one or more threads. This call is required for target
  9790. // constructs such as:
  9791. // 'target teams'
  9792. // 'target' / 'teams'
  9793. // 'target teams distribute parallel for'
  9794. // 'target parallel'
  9795. // and so on.
  9796. //
// Note that on the host and CPU targets, the runtime implementation of
// these calls simply calls the outlined function without forking threads.
  9799. // The outlined functions themselves have runtime calls to
  9800. // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
  9801. // the compiler in emitTeamsCall() and emitParallelCall().
  9802. //
  9803. // In contrast, on the NVPTX target, the implementation of
  9804. // __tgt_target_teams() launches a GPU kernel with the requested number
  9805. // of teams and threads so no additional calls to the runtime are required.
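//
// As an illustrative sketch only (argument names below are descriptive, not
// taken from the runtime headers), the *_mapper entry points used in this
// function are passed roughly:
//   __tgt_target_teams_mapper(ident, device_id, host_ptr, num_args,
//                             base_ptrs, ptrs, sizes, map_types, map_names,
//                             mappers, num_teams, thread_limit);
// which mirrors the OffloadingArgs vectors assembled below.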
  9806. if (NumTeams) {
// If NumTeams is defined, it means that we have an enclosed teams region.
// Therefore we also expect NumThreads to be defined. These two values should
// be defined in the presence of a teams directive, regardless of whether any
// clauses are associated with it. If the user uses teams but no clauses,
// these two values will be the defaults that should be passed to the runtime
// library - a 32-bit integer with the value zero.
  9813. assert(NumThreads && "Thread limit expression should be available along "
  9814. "with number of teams.");
  9815. SmallVector<llvm::Value *> OffloadingArgs = {
  9816. RTLoc,
  9817. DeviceID,
  9818. OutlinedFnID,
  9819. PointerNum,
  9820. InputInfo.BasePointersArray.getPointer(),
  9821. InputInfo.PointersArray.getPointer(),
  9822. InputInfo.SizesArray.getPointer(),
  9823. MapTypesArray,
  9824. MapNamesArray,
  9825. InputInfo.MappersArray.getPointer(),
  9826. NumTeams,
  9827. NumThreads};
  9828. if (HasNowait) {
  9829. // Add int32_t depNum = 0, void *depList = nullptr, int32_t
  9830. // noAliasDepNum = 0, void *noAliasDepList = nullptr.
  9831. OffloadingArgs.push_back(CGF.Builder.getInt32(0));
  9832. OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
  9833. OffloadingArgs.push_back(CGF.Builder.getInt32(0));
  9834. OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
  9835. }
  9836. Return = CGF.EmitRuntimeCall(
  9837. OMPBuilder.getOrCreateRuntimeFunction(
  9838. CGM.getModule(), HasNowait
  9839. ? OMPRTL___tgt_target_teams_nowait_mapper
  9840. : OMPRTL___tgt_target_teams_mapper),
  9841. OffloadingArgs);
  9842. } else {
  9843. SmallVector<llvm::Value *> OffloadingArgs = {
  9844. RTLoc,
  9845. DeviceID,
  9846. OutlinedFnID,
  9847. PointerNum,
  9848. InputInfo.BasePointersArray.getPointer(),
  9849. InputInfo.PointersArray.getPointer(),
  9850. InputInfo.SizesArray.getPointer(),
  9851. MapTypesArray,
  9852. MapNamesArray,
  9853. InputInfo.MappersArray.getPointer()};
  9854. if (HasNowait) {
  9855. // Add int32_t depNum = 0, void *depList = nullptr, int32_t
  9856. // noAliasDepNum = 0, void *noAliasDepList = nullptr.
  9857. OffloadingArgs.push_back(CGF.Builder.getInt32(0));
  9858. OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
  9859. OffloadingArgs.push_back(CGF.Builder.getInt32(0));
  9860. OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
  9861. }
  9862. Return = CGF.EmitRuntimeCall(
  9863. OMPBuilder.getOrCreateRuntimeFunction(
  9864. CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
  9865. : OMPRTL___tgt_target_mapper),
  9866. OffloadingArgs);
  9867. }
  9868. // Check the error code and execute the host version if required.
  9869. llvm::BasicBlock *OffloadFailedBlock =
  9870. CGF.createBasicBlock("omp_offload.failed");
  9871. llvm::BasicBlock *OffloadContBlock =
  9872. CGF.createBasicBlock("omp_offload.cont");
  9873. llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
  9874. CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
  9875. CGF.EmitBlock(OffloadFailedBlock);
  9876. if (RequiresOuterTask) {
  9877. CapturedVars.clear();
  9878. CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  9879. }
  9880. emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
  9881. CGF.EmitBranch(OffloadContBlock);
  9882. CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
  9883. };
  9884. // Notify that the host version must be executed.
  9885. auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
  9886. RequiresOuterTask](CodeGenFunction &CGF,
  9887. PrePostActionTy &) {
  9888. if (RequiresOuterTask) {
  9889. CapturedVars.clear();
  9890. CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  9891. }
  9892. emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
  9893. };
  9894. auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
  9895. &MapNamesArray, &CapturedVars, RequiresOuterTask,
  9896. &CS](CodeGenFunction &CGF, PrePostActionTy &) {
  9897. // Fill up the arrays with all the captured variables.
  9898. MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
  9899. // Get mappable expression information.
  9900. MappableExprsHandler MEHandler(D, CGF);
  9901. llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
  9902. llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
  9903. auto RI = CS.getCapturedRecordDecl()->field_begin();
  9904. auto *CV = CapturedVars.begin();
  9905. for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
  9906. CE = CS.capture_end();
  9907. CI != CE; ++CI, ++RI, ++CV) {
  9908. MappableExprsHandler::MapCombinedInfoTy CurInfo;
  9909. MappableExprsHandler::StructRangeInfoTy PartialStruct;
  9910. // VLA sizes are passed to the outlined region by copy and do not have map
  9911. // information associated.
  9912. if (CI->capturesVariableArrayType()) {
  9913. CurInfo.Exprs.push_back(nullptr);
  9914. CurInfo.BasePointers.push_back(*CV);
  9915. CurInfo.Pointers.push_back(*CV);
  9916. CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
  9917. CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
  9918. // Copy to the device as an argument. No need to retrieve it.
  9919. CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
  9920. MappableExprsHandler::OMP_MAP_TARGET_PARAM |
  9921. MappableExprsHandler::OMP_MAP_IMPLICIT);
  9922. CurInfo.Mappers.push_back(nullptr);
  9923. } else {
// If we have any information in the map clause, we use it; otherwise we
// just do a default mapping.
  9926. MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
  9927. if (!CI->capturesThis())
  9928. MappedVarSet.insert(CI->getCapturedVar());
  9929. else
  9930. MappedVarSet.insert(nullptr);
  9931. if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
  9932. MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
  9933. // Generate correct mapping for variables captured by reference in
  9934. // lambdas.
  9935. if (CI->capturesVariable())
  9936. MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
  9937. CurInfo, LambdaPointers);
  9938. }
  9939. // We expect to have at least an element of information for this capture.
  9940. assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
  9941. "Non-existing map pointer for capture!");
  9942. assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
  9943. CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
  9944. CurInfo.BasePointers.size() == CurInfo.Types.size() &&
  9945. CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
  9946. "Inconsistent map information sizes!");
  9947. // If there is an entry in PartialStruct it means we have a struct with
  9948. // individual members mapped. Emit an extra combined entry.
  9949. if (PartialStruct.Base.isValid()) {
  9950. CombinedInfo.append(PartialStruct.PreliminaryMapData);
  9951. MEHandler.emitCombinedEntry(
  9952. CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
  9953. !PartialStruct.PreliminaryMapData.BasePointers.empty());
  9954. }
  9955. // We need to append the results of this capture to what we already have.
  9956. CombinedInfo.append(CurInfo);
  9957. }
  9958. // Adjust MEMBER_OF flags for the lambdas captures.
  9959. MEHandler.adjustMemberOfForLambdaCaptures(
  9960. LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
  9961. CombinedInfo.Types);
// Map any list items in a map clause that were not captured because they
// weren't referenced within the construct.
  9964. MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
  9965. TargetDataInfo Info;
  9966. // Fill up the arrays and create the arguments.
  9967. emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
  9968. emitOffloadingArraysArgument(
  9969. CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
  9970. Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
  9971. {/*ForEndCall=*/false});
  9972. InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
  9973. InputInfo.BasePointersArray =
  9974. Address(Info.BasePointersArray, CGM.getPointerAlign());
  9975. InputInfo.PointersArray =
  9976. Address(Info.PointersArray, CGM.getPointerAlign());
  9977. InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
  9978. InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
  9979. MapTypesArray = Info.MapTypesArray;
  9980. MapNamesArray = Info.MapNamesArray;
  9981. if (RequiresOuterTask)
  9982. CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
  9983. else
  9984. emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  9985. };
  9986. auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
  9987. CodeGenFunction &CGF, PrePostActionTy &) {
  9988. if (RequiresOuterTask) {
  9989. CodeGenFunction::OMPTargetDataInfo InputInfo;
  9990. CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
  9991. } else {
  9992. emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
  9993. }
  9994. };
// If we have a target function ID, it means that we need to support
// offloading; otherwise, just execute on the host. We need to execute on the
// host regardless of the conditional in the if clause if, e.g., the user
// does not specify target triples.
  9999. if (OutlinedFnID) {
  10000. if (IfCond) {
  10001. emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
  10002. } else {
  10003. RegionCodeGenTy ThenRCG(TargetThenGen);
  10004. ThenRCG(CGF);
  10005. }
  10006. } else {
  10007. RegionCodeGenTy ElseRCG(TargetElseGen);
  10008. ElseRCG(CGF);
  10009. }
  10010. }
  10011. void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
  10012. StringRef ParentName) {
  10013. if (!S)
  10014. return;
  10015. // Codegen OMP target directives that offload compute to the device.
  10016. bool RequiresDeviceCodegen =
  10017. isa<OMPExecutableDirective>(S) &&
  10018. isOpenMPTargetExecutionDirective(
  10019. cast<OMPExecutableDirective>(S)->getDirectiveKind());
  10020. if (RequiresDeviceCodegen) {
  10021. const auto &E = *cast<OMPExecutableDirective>(S);
  10022. unsigned DeviceID;
  10023. unsigned FileID;
  10024. unsigned Line;
  10025. getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
  10026. FileID, Line);
  10027. // Is this a target region that should not be emitted as an entry point? If
  10028. // so just signal we are done with this target region.
  10029. if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
  10030. ParentName, Line))
  10031. return;
  10032. switch (E.getDirectiveKind()) {
  10033. case OMPD_target:
  10034. CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
  10035. cast<OMPTargetDirective>(E));
  10036. break;
  10037. case OMPD_target_parallel:
  10038. CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
  10039. CGM, ParentName, cast<OMPTargetParallelDirective>(E));
  10040. break;
  10041. case OMPD_target_teams:
  10042. CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
  10043. CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
  10044. break;
  10045. case OMPD_target_teams_distribute:
  10046. CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
  10047. CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
  10048. break;
  10049. case OMPD_target_teams_distribute_simd:
  10050. CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
  10051. CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
  10052. break;
  10053. case OMPD_target_parallel_for:
  10054. CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
  10055. CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
  10056. break;
  10057. case OMPD_target_parallel_for_simd:
  10058. CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
  10059. CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
  10060. break;
  10061. case OMPD_target_simd:
  10062. CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
  10063. CGM, ParentName, cast<OMPTargetSimdDirective>(E));
  10064. break;
  10065. case OMPD_target_teams_distribute_parallel_for:
  10066. CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
  10067. CGM, ParentName,
  10068. cast<OMPTargetTeamsDistributeParallelForDirective>(E));
  10069. break;
  10070. case OMPD_target_teams_distribute_parallel_for_simd:
  10071. CodeGenFunction::
  10072. EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
  10073. CGM, ParentName,
  10074. cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
  10075. break;
  10076. case OMPD_parallel:
  10077. case OMPD_for:
  10078. case OMPD_parallel_for:
  10079. case OMPD_parallel_master:
  10080. case OMPD_parallel_sections:
  10081. case OMPD_for_simd:
  10082. case OMPD_parallel_for_simd:
  10083. case OMPD_cancel:
  10084. case OMPD_cancellation_point:
  10085. case OMPD_ordered:
  10086. case OMPD_threadprivate:
  10087. case OMPD_allocate:
  10088. case OMPD_task:
  10089. case OMPD_simd:
  10090. case OMPD_tile:
  10091. case OMPD_unroll:
  10092. case OMPD_sections:
  10093. case OMPD_section:
  10094. case OMPD_single:
  10095. case OMPD_master:
  10096. case OMPD_critical:
  10097. case OMPD_taskyield:
  10098. case OMPD_barrier:
  10099. case OMPD_taskwait:
  10100. case OMPD_taskgroup:
  10101. case OMPD_atomic:
  10102. case OMPD_flush:
  10103. case OMPD_depobj:
  10104. case OMPD_scan:
  10105. case OMPD_teams:
  10106. case OMPD_target_data:
  10107. case OMPD_target_exit_data:
  10108. case OMPD_target_enter_data:
  10109. case OMPD_distribute:
  10110. case OMPD_distribute_simd:
  10111. case OMPD_distribute_parallel_for:
  10112. case OMPD_distribute_parallel_for_simd:
  10113. case OMPD_teams_distribute:
  10114. case OMPD_teams_distribute_simd:
  10115. case OMPD_teams_distribute_parallel_for:
  10116. case OMPD_teams_distribute_parallel_for_simd:
  10117. case OMPD_target_update:
  10118. case OMPD_declare_simd:
  10119. case OMPD_declare_variant:
  10120. case OMPD_begin_declare_variant:
  10121. case OMPD_end_declare_variant:
  10122. case OMPD_declare_target:
  10123. case OMPD_end_declare_target:
  10124. case OMPD_declare_reduction:
  10125. case OMPD_declare_mapper:
  10126. case OMPD_taskloop:
  10127. case OMPD_taskloop_simd:
  10128. case OMPD_master_taskloop:
  10129. case OMPD_master_taskloop_simd:
  10130. case OMPD_parallel_master_taskloop:
  10131. case OMPD_parallel_master_taskloop_simd:
  10132. case OMPD_requires:
  10133. case OMPD_metadirective:
  10134. case OMPD_unknown:
  10135. default:
  10136. llvm_unreachable("Unknown target directive for OpenMP device codegen.");
  10137. }
  10138. return;
  10139. }
  10140. if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
  10141. if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
  10142. return;
  10143. scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
  10144. return;
  10145. }
  10146. // If this is a lambda function, look into its body.
  10147. if (const auto *L = dyn_cast<LambdaExpr>(S))
  10148. S = L->getBody();
  10149. // Keep looking for target regions recursively.
  10150. for (const Stmt *II : S->children())
  10151. scanForTargetRegionsFunctions(II, ParentName);
  10152. }
  10153. static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
  10154. Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
  10155. OMPDeclareTargetDeclAttr::getDeviceType(VD);
  10156. if (!DevTy)
  10157. return false;
  10158. // Do not emit device_type(nohost) functions for the host.
  10159. if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
  10160. return true;
  10161. // Do not emit device_type(host) functions for the device.
  10162. if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
  10163. return true;
  10164. return false;
  10165. }
  10166. bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
  10167. // If emitting code for the host, we do not process FD here. Instead we do
  10168. // the normal code generation.
  10169. if (!CGM.getLangOpts().OpenMPIsDevice) {
  10170. if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
  10171. if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
  10172. CGM.getLangOpts().OpenMPIsDevice))
  10173. return true;
  10174. return false;
  10175. }
  10176. const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
  10177. // Try to detect target regions in the function.
  10178. if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
  10179. StringRef Name = CGM.getMangledName(GD);
  10180. scanForTargetRegionsFunctions(FD->getBody(), Name);
  10181. if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
  10182. CGM.getLangOpts().OpenMPIsDevice))
  10183. return true;
  10184. }
// Do not emit the function if it is not marked as declare target.
  10186. return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
  10187. AlreadyEmittedTargetDecls.count(VD) == 0;
  10188. }
  10189. bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  10190. if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
  10191. CGM.getLangOpts().OpenMPIsDevice))
  10192. return true;
  10193. if (!CGM.getLangOpts().OpenMPIsDevice)
  10194. return false;
  10195. // Check if there are Ctors/Dtors in this declaration and look for target
  10196. // regions in it. We use the complete variant to produce the kernel name
  10197. // mangling.
  10198. QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
  10199. if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
  10200. for (const CXXConstructorDecl *Ctor : RD->ctors()) {
  10201. StringRef ParentName =
  10202. CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
  10203. scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
  10204. }
  10205. if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
  10206. StringRef ParentName =
  10207. CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
  10208. scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
  10209. }
  10210. }
// Do not emit the variable if it is not marked as declare target.
  10212. llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
  10213. OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
  10214. cast<VarDecl>(GD.getDecl()));
  10215. if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
  10216. (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  10217. HasRequiresUnifiedSharedMemory)) {
  10218. DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
  10219. return true;
  10220. }
  10221. return false;
  10222. }
  10223. void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
  10224. llvm::Constant *Addr) {
  10225. if (CGM.getLangOpts().OMPTargetTriples.empty() &&
  10226. !CGM.getLangOpts().OpenMPIsDevice)
  10227. return;
  10228. // If we have host/nohost variables, they do not need to be registered.
  10229. Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
  10230. OMPDeclareTargetDeclAttr::getDeviceType(VD);
  10231. if (DevTy && DevTy.getValue() != OMPDeclareTargetDeclAttr::DT_Any)
  10232. return;
  10233. llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
  10234. OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  10235. if (!Res) {
  10236. if (CGM.getLangOpts().OpenMPIsDevice) {
  10237. // Register non-target variables being emitted in device code (debug info
  10238. // may cause this).
  10239. StringRef VarName = CGM.getMangledName(VD);
  10240. EmittedNonTargetVariables.try_emplace(VarName, Addr);
  10241. }
  10242. return;
  10243. }
  10244. // Register declare target variables.
  10245. OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
  10246. StringRef VarName;
  10247. CharUnits VarSize;
  10248. llvm::GlobalValue::LinkageTypes Linkage;
  10249. if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  10250. !HasRequiresUnifiedSharedMemory) {
  10251. Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
  10252. VarName = CGM.getMangledName(VD);
  10253. if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
  10254. VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
  10255. assert(!VarSize.isZero() && "Expected non-zero size of the variable");
  10256. } else {
  10257. VarSize = CharUnits::Zero();
  10258. }
  10259. Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
  10260. // Temp solution to prevent optimizations of the internal variables.
  10261. if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
  10262. // Do not create a "ref-variable" if the original is not also available
  10263. // on the host.
  10264. if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
  10265. return;
  10266. std::string RefName = getName({VarName, "ref"});
  10267. if (!CGM.GetGlobalValue(RefName)) {
  10268. llvm::Constant *AddrRef =
  10269. getOrCreateInternalVariable(Addr->getType(), RefName);
  10270. auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
  10271. GVAddrRef->setConstant(/*Val=*/true);
  10272. GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
  10273. GVAddrRef->setInitializer(Addr);
  10274. CGM.addCompilerUsedGlobal(GVAddrRef);
  10275. }
  10276. }
  10277. } else {
  10278. assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
  10279. (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  10280. HasRequiresUnifiedSharedMemory)) &&
"Declare target attribute must be 'link', or 'to' with unified memory.");
  10282. if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
  10283. Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
  10284. else
  10285. Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
  10286. if (CGM.getLangOpts().OpenMPIsDevice) {
  10287. VarName = Addr->getName();
  10288. Addr = nullptr;
  10289. } else {
  10290. VarName = getAddrOfDeclareTargetVar(VD).getName();
  10291. Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
  10292. }
  10293. VarSize = CGM.getPointerSize();
  10294. Linkage = llvm::GlobalValue::WeakAnyLinkage;
  10295. }
  10296. OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
  10297. VarName, Addr, VarSize, Flags, Linkage);
  10298. }
  10299. bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
  10300. if (isa<FunctionDecl>(GD.getDecl()) ||
  10301. isa<OMPDeclareReductionDecl>(GD.getDecl()))
  10302. return emitTargetFunctions(GD);
  10303. return emitTargetGlobalVariable(GD);
  10304. }
  10305. void CGOpenMPRuntime::emitDeferredTargetDecls() const {
  10306. for (const VarDecl *VD : DeferredGlobalVariables) {
  10307. llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
  10308. OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  10309. if (!Res)
  10310. continue;
  10311. if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  10312. !HasRequiresUnifiedSharedMemory) {
  10313. CGM.EmitGlobal(VD);
  10314. } else {
  10315. assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
  10316. (*Res == OMPDeclareTargetDeclAttr::MT_To &&
  10317. HasRequiresUnifiedSharedMemory)) &&
  10318. "Expected link clause or to clause with unified memory.");
  10319. (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  10320. }
  10321. }
  10322. }
  10323. void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
  10324. CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  10325. assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
"Expected target-based directive.");
  10327. }
  10328. void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
  10329. for (const OMPClause *Clause : D->clauselists()) {
  10330. if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
  10331. HasRequiresUnifiedSharedMemory = true;
  10332. } else if (const auto *AC =
  10333. dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
  10334. switch (AC->getAtomicDefaultMemOrderKind()) {
  10335. case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
  10336. RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
  10337. break;
  10338. case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
  10339. RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
  10340. break;
  10341. case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
  10342. RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
  10343. break;
  10344. case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
  10345. break;
  10346. }
  10347. }
  10348. }
  10349. }
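// For example, a translation unit containing
//   #pragma omp requires atomic_default_mem_order(seq_cst)
// takes the OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst branch above, so
// getDefaultMemoryOrdering() below returns
// llvm::AtomicOrdering::SequentiallyConsistent.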
  10350. llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
  10351. return RequiresAtomicOrdering;
  10352. }
  10353. bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
  10354. LangAS &AS) {
  10355. if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
  10356. return false;
  10357. const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
switch (A->getAllocatorType()) {
  10359. case OMPAllocateDeclAttr::OMPNullMemAlloc:
  10360. case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
  10361. // Not supported, fallback to the default mem space.
  10362. case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
  10363. case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
  10364. case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
  10365. case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
  10366. case OMPAllocateDeclAttr::OMPThreadMemAlloc:
  10367. case OMPAllocateDeclAttr::OMPConstMemAlloc:
  10368. case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
  10369. AS = LangAS::Default;
  10370. return true;
  10371. case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
llvm_unreachable("Expected a predefined allocator for variables with "
"static storage.");
  10374. }
  10375. return false;
  10376. }
  10377. bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
  10378. return HasRequiresUnifiedSharedMemory;
  10379. }
  10380. CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
  10381. CodeGenModule &CGM)
  10382. : CGM(CGM) {
  10383. if (CGM.getLangOpts().OpenMPIsDevice) {
  10384. SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
  10385. CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
  10386. }
  10387. }
  10388. CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
  10389. if (CGM.getLangOpts().OpenMPIsDevice)
  10390. CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
  10391. }
  10392. bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
  10393. if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
  10394. return true;
  10395. const auto *D = cast<FunctionDecl>(GD.getDecl());
// Do not emit the function if it is marked as declare target, as it was
// already emitted.
  10398. if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
  10399. if (D->hasBody() && AlreadyEmittedTargetDecls.count(D) == 0) {
  10400. if (auto *F = dyn_cast_or_null<llvm::Function>(
  10401. CGM.GetGlobalValue(CGM.getMangledName(GD))))
  10402. return !F->isDeclaration();
  10403. return false;
  10404. }
  10405. return true;
  10406. }
  10407. return !AlreadyEmittedTargetDecls.insert(D).second;
  10408. }
  10409. llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
  10410. // If we don't have entries or if we are emitting code for the device, we
  10411. // don't need to do anything.
  10412. if (CGM.getLangOpts().OMPTargetTriples.empty() ||
  10413. CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
  10414. (OffloadEntriesInfoManager.empty() &&
  10415. !HasEmittedDeclareTargetRegion &&
  10416. !HasEmittedTargetRegion))
  10417. return nullptr;
  10418. // Create and register the function that handles the requires directives.
  10419. ASTContext &C = CGM.getContext();
  10420. llvm::Function *RequiresRegFn;
  10421. {
  10422. CodeGenFunction CGF(CGM);
  10423. const auto &FI = CGM.getTypes().arrangeNullaryFunction();
  10424. llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  10425. std::string ReqName = getName({"omp_offloading", "requires_reg"});
  10426. RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
  10427. CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
  10428. OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
  10429. // TODO: check for other requires clauses.
  10430. // The requires directive takes effect only when a target region is
  10431. // present in the compilation unit. Otherwise it is ignored and not
// passed to the runtime. This prevents the runtime from raising an error
// for mismatched requires clauses across compilation units that don't
// contain at least one target region.
  10435. assert((HasEmittedTargetRegion ||
  10436. HasEmittedDeclareTargetRegion ||
  10437. !OffloadEntriesInfoManager.empty()) &&
  10438. "Target or declare target region expected.");
  10439. if (HasRequiresUnifiedSharedMemory)
  10440. Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
  10441. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  10442. CGM.getModule(), OMPRTL___tgt_register_requires),
  10443. llvm::ConstantInt::get(CGM.Int64Ty, Flags));
  10444. CGF.FinishFunction();
  10445. }
  10446. return RequiresRegFn;
  10447. }
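// Conceptually (an illustrative sketch; the exact symbol comes from getName()
// above and registration of the function is handled by the caller), the
// emitted function behaves like:
//   void omp_offloading_requires_reg() {
//     __tgt_register_requires(Flags); // e.g. OMP_REQ_UNIFIED_SHARED_MEMORY
//   }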
  10448. void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
  10449. const OMPExecutableDirective &D,
  10450. SourceLocation Loc,
  10451. llvm::Function *OutlinedFn,
  10452. ArrayRef<llvm::Value *> CapturedVars) {
  10453. if (!CGF.HaveInsertPoint())
  10454. return;
  10455. llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  10456. CodeGenFunction::RunCleanupsScope Scope(CGF);
  10457. // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
  10458. llvm::Value *Args[] = {
  10459. RTLoc,
  10460. CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
  10461. CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
  10462. llvm::SmallVector<llvm::Value *, 16> RealArgs;
  10463. RealArgs.append(std::begin(Args), std::end(Args));
  10464. RealArgs.append(CapturedVars.begin(), CapturedVars.end());
  10465. llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
  10466. CGM.getModule(), OMPRTL___kmpc_fork_teams);
  10467. CGF.EmitRuntimeCall(RTLFn, RealArgs);
  10468. }
  10469. void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
  10470. const Expr *NumTeams,
  10471. const Expr *ThreadLimit,
  10472. SourceLocation Loc) {
  10473. if (!CGF.HaveInsertPoint())
  10474. return;
  10475. llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  10476. llvm::Value *NumTeamsVal =
  10477. NumTeams
  10478. ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
  10479. CGF.CGM.Int32Ty, /* isSigned = */ true)
  10480. : CGF.Builder.getInt32(0);
  10481. llvm::Value *ThreadLimitVal =
  10482. ThreadLimit
  10483. ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
  10484. CGF.CGM.Int32Ty, /* isSigned = */ true)
  10485. : CGF.Builder.getInt32(0);
// Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
  10487. llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
  10488. ThreadLimitVal};
  10489. CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
  10490. CGM.getModule(), OMPRTL___kmpc_push_num_teams),
  10491. PushNumTeamsArgs);
  10492. }
  10493. void CGOpenMPRuntime::emitTargetDataCalls(
  10494. CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
  10495. const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  10496. if (!CGF.HaveInsertPoint())
  10497. return;
  10498. // Action used to replace the default codegen action and turn privatization
  10499. // off.
  10500. PrePostActionTy NoPrivAction;
  10501. // Generate the code for the opening of the data environment. Capture all the
  10502. // arguments of the runtime call by reference because they are used in the
  10503. // closing of the region.
  10504. auto &&BeginThenGen = [this, &D, Device, &Info,
  10505. &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
  10506. // Fill up the arrays with all the mapped variables.
  10507. MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
  10508. // Get map clause information.
  10509. MappableExprsHandler MEHandler(D, CGF);
  10510. MEHandler.generateAllInfo(CombinedInfo);
  10511. // Fill up the arrays and create the arguments.
  10512. emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
  10513. /*IsNonContiguous=*/true);
  10514. llvm::Value *BasePointersArrayArg = nullptr;
  10515. llvm::Value *PointersArrayArg = nullptr;
  10516. llvm::Value *SizesArrayArg = nullptr;
  10517. llvm::Value *MapTypesArrayArg = nullptr;
  10518. llvm::Value *MapNamesArrayArg = nullptr;
  10519. llvm::Value *MappersArrayArg = nullptr;
  10520. emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
  10521. SizesArrayArg, MapTypesArrayArg,
  10522. MapNamesArrayArg, MappersArrayArg, Info);
  10523. // Emit device ID if any.
  10524. llvm::Value *DeviceID = nullptr;
  10525. if (Device) {
  10526. DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
  10527. CGF.Int64Ty, /*isSigned=*/true);
  10528. } else {
  10529. DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
  10530. }
  10531. // Emit the number of elements in the offloading arrays.
  10532. llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
  10533. //
  10534. // Source location for the ident struct
  10535. llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
  10536. llvm::Value *OffloadingArgs[] = {RTLoc,
  10537. DeviceID,
  10538. PointerNum,
  10539. BasePointersArrayArg,
  10540. PointersArrayArg,
  10541. SizesArrayArg,
  10542. MapTypesArrayArg,
  10543. MapNamesArrayArg,
  10544. MappersArrayArg};
  10545. CGF.EmitRuntimeCall(
  10546. OMPBuilder.getOrCreateRuntimeFunction(
  10547. CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
  10548. OffloadingArgs);
  10549. // If device pointer privatization is required, emit the body of the region
  10550. // here. It will have to be duplicated: with and without privatization.
  10551. if (!Info.CaptureDeviceAddrMap.empty())
  10552. CodeGen(CGF);
  10553. };
  10554. // Generate code for the closing of the data region.
  10555. auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
  10556. PrePostActionTy &) {
  10557. assert(Info.isValid() && "Invalid data environment closing arguments.");
  10558. llvm::Value *BasePointersArrayArg = nullptr;
  10559. llvm::Value *PointersArrayArg = nullptr;
  10560. llvm::Value *SizesArrayArg = nullptr;
  10561. llvm::Value *MapTypesArrayArg = nullptr;
  10562. llvm::Value *MapNamesArrayArg = nullptr;
  10563. llvm::Value *MappersArrayArg = nullptr;
  10564. emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
  10565. SizesArrayArg, MapTypesArrayArg,
  10566. MapNamesArrayArg, MappersArrayArg, Info,
  10567. {/*ForEndCall=*/true});
  10568. // Emit device ID if any.
  10569. llvm::Value *DeviceID = nullptr;
  10570. if (Device) {
  10571. DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
  10572. CGF.Int64Ty, /*isSigned=*/true);
  10573. } else {
  10574. DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
  10575. }
  10576. // Emit the number of elements in the offloading arrays.
  10577. llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
  10578. // Source location for the ident struct
  10579. llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
  10580. llvm::Value *OffloadingArgs[] = {RTLoc,
  10581. DeviceID,
  10582. PointerNum,
  10583. BasePointersArrayArg,
  10584. PointersArrayArg,
  10585. SizesArrayArg,
  10586. MapTypesArrayArg,
  10587. MapNamesArrayArg,
  10588. MappersArrayArg};
  10589. CGF.EmitRuntimeCall(
  10590. OMPBuilder.getOrCreateRuntimeFunction(
  10591. CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
  10592. OffloadingArgs);
  10593. };
  10594. // If we need device pointer privatization, we need to emit the body of the
  10595. // region with no privatization in the 'else' branch of the conditional.
  10596. // Otherwise, we don't have to do anything.
  10597. auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
  10598. PrePostActionTy &) {
  10599. if (!Info.CaptureDeviceAddrMap.empty()) {
  10600. CodeGen.setAction(NoPrivAction);
  10601. CodeGen(CGF);
  10602. }
  10603. };
  10604. // We don't have to do anything to close the region if the if clause evaluates
  10605. // to false.
  10606. auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
  10607. if (IfCond) {
  10608. emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
  10609. } else {
  10610. RegionCodeGenTy RCG(BeginThenGen);
  10611. RCG(CGF);
  10612. }
  10613. // If we don't require privatization of device pointers, we emit the body in
  10614. // between the runtime calls. This avoids duplicating the body code.
  10615. if (Info.CaptureDeviceAddrMap.empty()) {
  10616. CodeGen.setAction(NoPrivAction);
  10617. CodeGen(CGF);
  10618. }
  10619. if (IfCond) {
  10620. emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
  10621. } else {
  10622. RegionCodeGenTy RCG(EndThenGen);
  10623. RCG(CGF);
  10624. }
  10625. }
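// Sketch of the structure produced above for a 'target data' region when no
// device-pointer privatization is needed (argument names are illustrative):
//   __tgt_target_data_begin_mapper(loc, device_id, n, bases, ptrs, sizes,
//                                  map_types, map_names, mappers);
//   <body of the target data region>
//   __tgt_target_data_end_mapper(loc, device_id, n, bases, ptrs, sizes,
//                                map_types, map_names, mappers);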
  10626. void CGOpenMPRuntime::emitTargetDataStandAloneCall(
  10627. CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
  10628. const Expr *Device) {
  10629. if (!CGF.HaveInsertPoint())
  10630. return;
  10631. assert((isa<OMPTargetEnterDataDirective>(D) ||
  10632. isa<OMPTargetExitDataDirective>(D) ||
  10633. isa<OMPTargetUpdateDirective>(D)) &&
  10634. "Expecting either target enter, exit data, or update directives.");
  10635. CodeGenFunction::OMPTargetDataInfo InputInfo;
  10636. llvm::Value *MapTypesArray = nullptr;
  10637. llvm::Value *MapNamesArray = nullptr;
  10638. // Generate the code for the opening of the data environment.
  10639. auto &&ThenGen = [this, &D, Device, &InputInfo, &MapTypesArray,
  10640. &MapNamesArray](CodeGenFunction &CGF, PrePostActionTy &) {
  10641. // Emit device ID if any.
  10642. llvm::Value *DeviceID = nullptr;
  10643. if (Device) {
  10644. DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
  10645. CGF.Int64Ty, /*isSigned=*/true);
  10646. } else {
  10647. DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
  10648. }
  10649. // Emit the number of elements in the offloading arrays.
  10650. llvm::Constant *PointerNum =
  10651. CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
  10652. // Source location for the ident struct
  10653. llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
  10654. llvm::Value *OffloadingArgs[] = {RTLoc,
  10655. DeviceID,
  10656. PointerNum,
  10657. InputInfo.BasePointersArray.getPointer(),
  10658. InputInfo.PointersArray.getPointer(),
  10659. InputInfo.SizesArray.getPointer(),
  10660. MapTypesArray,
  10661. MapNamesArray,
  10662. InputInfo.MappersArray.getPointer()};
  10663. // Select the right runtime function call for each standalone
  10664. // directive.
  10665. const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
  10666. RuntimeFunction RTLFn;
  10667. switch (D.getDirectiveKind()) {
  10668. case OMPD_target_enter_data:
  10669. RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
  10670. : OMPRTL___tgt_target_data_begin_mapper;
  10671. break;
  10672. case OMPD_target_exit_data:
  10673. RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
  10674. : OMPRTL___tgt_target_data_end_mapper;
  10675. break;
  10676. case OMPD_target_update:
  10677. RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
  10678. : OMPRTL___tgt_target_data_update_mapper;
  10679. break;
  10680. case OMPD_parallel:
  10681. case OMPD_for:
  10682. case OMPD_parallel_for:
  10683. case OMPD_parallel_master:
  10684. case OMPD_parallel_sections:
  10685. case OMPD_for_simd:
  10686. case OMPD_parallel_for_simd:
  10687. case OMPD_cancel:
  10688. case OMPD_cancellation_point:
  10689. case OMPD_ordered:
  10690. case OMPD_threadprivate:
  10691. case OMPD_allocate:
  10692. case OMPD_task:
  10693. case OMPD_simd:
  10694. case OMPD_tile:
  10695. case OMPD_unroll:
  10696. case OMPD_sections:
  10697. case OMPD_section:
  10698. case OMPD_single:
  10699. case OMPD_master:
  10700. case OMPD_critical:
  10701. case OMPD_taskyield:
  10702. case OMPD_barrier:
  10703. case OMPD_taskwait:
  10704. case OMPD_taskgroup:
  10705. case OMPD_atomic:
  10706. case OMPD_flush:
  10707. case OMPD_depobj:
  10708. case OMPD_scan:
  10709. case OMPD_teams:
  10710. case OMPD_target_data:
  10711. case OMPD_distribute:
  10712. case OMPD_distribute_simd:
  10713. case OMPD_distribute_parallel_for:
  10714. case OMPD_distribute_parallel_for_simd:
  10715. case OMPD_teams_distribute:
  10716. case OMPD_teams_distribute_simd:
  10717. case OMPD_teams_distribute_parallel_for:
  10718. case OMPD_teams_distribute_parallel_for_simd:
  10719. case OMPD_declare_simd:
  10720. case OMPD_declare_variant:
  10721. case OMPD_begin_declare_variant:
  10722. case OMPD_end_declare_variant:
  10723. case OMPD_declare_target:
  10724. case OMPD_end_declare_target:
  10725. case OMPD_declare_reduction:
  10726. case OMPD_declare_mapper:
  10727. case OMPD_taskloop:
  10728. case OMPD_taskloop_simd:
  10729. case OMPD_master_taskloop:
  10730. case OMPD_master_taskloop_simd:
  10731. case OMPD_parallel_master_taskloop:
  10732. case OMPD_parallel_master_taskloop_simd:
  10733. case OMPD_target:
  10734. case OMPD_target_simd:
  10735. case OMPD_target_teams_distribute:
  10736. case OMPD_target_teams_distribute_simd:
  10737. case OMPD_target_teams_distribute_parallel_for:
  10738. case OMPD_target_teams_distribute_parallel_for_simd:
  10739. case OMPD_target_teams:
  10740. case OMPD_target_parallel:
  10741. case OMPD_target_parallel_for:
  10742. case OMPD_target_parallel_for_simd:
  10743. case OMPD_requires:
  10744. case OMPD_metadirective:
  10745. case OMPD_unknown:
  10746. default:
  10747. llvm_unreachable("Unexpected standalone target data directive.");
  10748. break;
  10749. }
  10750. CGF.EmitRuntimeCall(
  10751. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
  10752. OffloadingArgs);
  10753. };
  10754. auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
  10755. &MapNamesArray](CodeGenFunction &CGF,
  10756. PrePostActionTy &) {
  10757. // Fill up the arrays with all the mapped variables.
  10758. MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
  10759. // Get map clause information.
  10760. MappableExprsHandler MEHandler(D, CGF);
  10761. MEHandler.generateAllInfo(CombinedInfo);
  10762. TargetDataInfo Info;
  10763. // Fill up the arrays and create the arguments.
  10764. emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
  10765. /*IsNonContiguous=*/true);
  10766. bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
  10767. D.hasClausesOfKind<OMPNowaitClause>();
  10768. emitOffloadingArraysArgument(
  10769. CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
  10770. Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
  10771. {/*ForEndCall=*/false});
  10772. InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
  10773. InputInfo.BasePointersArray =
  10774. Address(Info.BasePointersArray, CGM.getPointerAlign());
  10775. InputInfo.PointersArray =
  10776. Address(Info.PointersArray, CGM.getPointerAlign());
  10777. InputInfo.SizesArray =
  10778. Address(Info.SizesArray, CGM.getPointerAlign());
  10779. InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
  10780. MapTypesArray = Info.MapTypesArray;
  10781. MapNamesArray = Info.MapNamesArray;
  10782. if (RequiresOuterTask)
  10783. CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
  10784. else
  10785. emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  10786. };
  10787. if (IfCond) {
  10788. emitIfClause(CGF, IfCond, TargetThenGen,
  10789. [](CodeGenFunction &CGF, PrePostActionTy &) {});
  10790. } else {
  10791. RegionCodeGenTy ThenRCG(TargetThenGen);
  10792. ThenRCG(CGF);
  10793. }
  10794. }
  10795. namespace {
  10796. /// Kind of parameter in a function with 'declare simd' directive.
  10797. enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
  10798. /// Attribute set of the parameter.
  10799. struct ParamAttrTy {
  10800. ParamKindTy Kind = Vector;
  10801. llvm::APSInt StrideOrArg;
  10802. llvm::APSInt Alignment;
  10803. };
  10804. } // namespace
  10805. static unsigned evaluateCDTSize(const FunctionDecl *FD,
  10806. ArrayRef<ParamAttrTy> ParamAttrs) {
  10807. // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
// If the OpenMP clause "simdlen" is used, the VLEN is the value of the
// argument of that clause. The VLEN value must be a power of 2.
// Otherwise, the notion of the function's "characteristic data type" (CDT)
// is used to compute the vector length.
  10812. // CDT is defined in the following order:
  10813. // a) For non-void function, the CDT is the return type.
  10814. // b) If the function has any non-uniform, non-linear parameters, then the
  10815. // CDT is the type of the first such parameter.
  10816. // c) If the CDT determined by a) or b) above is struct, union, or class
  10817. // type which is pass-by-value (except for the type that maps to the
  10818. // built-in complex data type), the characteristic data type is int.
  10819. // d) If none of the above three cases is applicable, the CDT is int.
  10820. // The VLEN is then determined based on the CDT and the size of vector
  10821. // register of that ISA for which current vector version is generated. The
  10822. // VLEN is computed using the formula below:
  10823. // VLEN = sizeof(vector_register) / sizeof(CDT),
// where the vector register size is specified in section 3.2.1, "Registers
// and the Stack Frame", of the original AMD64 ABI document.
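// Worked example (illustrative, not from the ABI text): for an AVX-512
// variant (the 'e' ISA entry below uses 512-bit vector registers) of a
// function whose CDT is double (64 bits), VLEN = 512 / 64 = 8.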
  10826. QualType RetType = FD->getReturnType();
  10827. if (RetType.isNull())
  10828. return 0;
  10829. ASTContext &C = FD->getASTContext();
  10830. QualType CDT;
  10831. if (!RetType.isNull() && !RetType->isVoidType()) {
  10832. CDT = RetType;
  10833. } else {
  10834. unsigned Offset = 0;
  10835. if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
  10836. if (ParamAttrs[Offset].Kind == Vector)
  10837. CDT = C.getPointerType(C.getRecordType(MD->getParent()));
  10838. ++Offset;
  10839. }
  10840. if (CDT.isNull()) {
  10841. for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
  10842. if (ParamAttrs[I + Offset].Kind == Vector) {
  10843. CDT = FD->getParamDecl(I)->getType();
  10844. break;
  10845. }
  10846. }
  10847. }
  10848. }
  10849. if (CDT.isNull())
  10850. CDT = C.IntTy;
  10851. CDT = CDT->getCanonicalTypeUnqualified();
  10852. if (CDT->isRecordType() || CDT->isUnionType())
  10853. CDT = C.IntTy;
  10854. return C.getTypeSize(CDT);
  10855. }
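// For illustration only (the exact set of variants emitted is driven by the
// ISAData and Masked tables below): a declaration such as
//   #pragma omp declare simd simdlen(4) notinbranch
//   float foo(float x);
// produces, for the SSE entry ('b', 128-bit), the vector-variant attribute
//   "_ZGVbN4v_foo"
// i.e. "_ZGV" + ISA + mask + VLEN + parameter mangling + "_" + function name.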
  10856. static void
  10857. emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
  10858. const llvm::APSInt &VLENVal,
  10859. ArrayRef<ParamAttrTy> ParamAttrs,
  10860. OMPDeclareSimdDeclAttr::BranchStateTy State) {
  10861. struct ISADataTy {
  10862. char ISA;
  10863. unsigned VecRegSize;
  10864. };
ISADataTy ISAData[] = {
{'b', 128}, // SSE
{'c', 256}, // AVX
{'d', 256}, // AVX2
{'e', 512}, // AVX512
};
  10879. llvm::SmallVector<char, 2> Masked;
  10880. switch (State) {
  10881. case OMPDeclareSimdDeclAttr::BS_Undefined:
  10882. Masked.push_back('N');
  10883. Masked.push_back('M');
  10884. break;
  10885. case OMPDeclareSimdDeclAttr::BS_Notinbranch:
  10886. Masked.push_back('N');
  10887. break;
  10888. case OMPDeclareSimdDeclAttr::BS_Inbranch:
  10889. Masked.push_back('M');
  10890. break;
  10891. }
  10892. for (char Mask : Masked) {
  10893. for (const ISADataTy &Data : ISAData) {
  10894. SmallString<256> Buffer;
  10895. llvm::raw_svector_ostream Out(Buffer);
  10896. Out << "_ZGV" << Data.ISA << Mask;
  10897. if (!VLENVal) {
  10898. unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
  10899. assert(NumElts && "Non-zero simdlen/cdtsize expected");
  10900. Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);
  10901. } else {
  10902. Out << VLENVal;
  10903. }
  10904. for (const ParamAttrTy &ParamAttr : ParamAttrs) {
switch (ParamAttr.Kind) {
  10906. case LinearWithVarStride:
  10907. Out << 's' << ParamAttr.StrideOrArg;
  10908. break;
  10909. case Linear:
  10910. Out << 'l';
  10911. if (ParamAttr.StrideOrArg != 1)
  10912. Out << ParamAttr.StrideOrArg;
  10913. break;
  10914. case Uniform:
  10915. Out << 'u';
  10916. break;
  10917. case Vector:
  10918. Out << 'v';
  10919. break;
  10920. }
  10921. if (!!ParamAttr.Alignment)
  10922. Out << 'a' << ParamAttr.Alignment;
  10923. }
  10924. Out << '_' << Fn->getName();
  10925. Fn->addFnAttr(Out.str());
  10926. }
  10927. }
  10928. }
// These are the functions needed to mangle the names of the vector functions
// generated by the compiler, according to the rules defined in the
// "Vector Function ABI specifications for AArch64", available at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
  10934. /// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI.
  10935. ///
  10936. /// TODO: Need to implement the behavior for reference marked with a
  10937. /// var or no linear modifiers (1.b in the section). For this, we
  10938. /// need to extend ParamKindTy to support the linear modifiers.
  10939. static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
  10940. QT = QT.getCanonicalType();
  10941. if (QT->isVoidType())
  10942. return false;
  10943. if (Kind == ParamKindTy::Uniform)
  10944. return false;
  10945. if (Kind == ParamKindTy::Linear)
  10946. return false;
  10947. // TODO: Handle linear references with modifiers
  10948. if (Kind == ParamKindTy::LinearWithVarStride)
  10949. return false;
  10950. return true;
  10951. }
  10952. /// Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI.
  10953. static bool getAArch64PBV(QualType QT, ASTContext &C) {
  10954. QT = QT.getCanonicalType();
  10955. unsigned Size = C.getTypeSize(QT);
// Only scalars and complex types at most 16 bytes wide set PBV to true.
  10957. if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
  10958. return false;
  10959. if (QT->isFloatingType())
  10960. return true;
  10961. if (QT->isIntegerType())
  10962. return true;
  10963. if (QT->isPointerType())
  10964. return true;
  10965. // TODO: Add support for complex types (section 3.1.2, item 2).
  10966. return false;
  10967. }
  10968. /// Computes the lane size (LS) of a return type or of an input parameter,
  10969. /// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
  10970. /// TODO: Add support for references, section 3.2.1, item 1.
  10971. static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
  10972. if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
  10973. QualType PTy = QT.getCanonicalType()->getPointeeType();
  10974. if (getAArch64PBV(PTy, C))
  10975. return C.getTypeSize(PTy);
  10976. }
  10977. if (getAArch64PBV(QT, C))
  10978. return C.getTypeSize(QT);
  10979. return C.getTypeSize(C.getUIntPtrType());
  10980. }
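// For example (illustrative): a 'linear' parameter of type "int32_t *" is not
// MTV, so its lane size is taken from the pointee and LS = 32; a 'vector'
// parameter of the same type is MTV and PBV, so LS is the 64-bit pointer size.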
  10981. // Get Narrowest Data Size (NDS) and Widest Data Size (WDS) from the
  10982. // signature of the scalar function, as defined in 3.2.2 of the
  10983. // AAVFABI.
  10984. static std::tuple<unsigned, unsigned, bool>
  10985. getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
  10986. QualType RetType = FD->getReturnType().getCanonicalType();
  10987. ASTContext &C = FD->getASTContext();
  10988. bool OutputBecomesInput = false;
  10989. llvm::SmallVector<unsigned, 8> Sizes;
  10990. if (!RetType->isVoidType()) {
  10991. Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C));
  10992. if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {}))
  10993. OutputBecomesInput = true;
  10994. }
  10995. for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
  10996. QualType QT = FD->getParamDecl(I)->getType().getCanonicalType();
  10997. Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C));
  10998. }
  10999. assert(!Sizes.empty() && "Unable to determine NDS and WDS.");
  11000. // The LS of a function parameter / return value can only be a power
  11001. // of 2, starting from 8 bits, up to 128.
  11002. assert(llvm::all_of(Sizes,
  11003. [](unsigned Size) {
  11004. return Size == 8 || Size == 16 || Size == 32 ||
  11005. Size == 64 || Size == 128;
  11006. }) &&
  11007. "Invalid size");
  11008. return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
  11009. *std::max_element(std::begin(Sizes), std::end(Sizes)),
  11010. OutputBecomesInput);
  11011. }
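// Worked example (illustrative): for "double foo(float x)" with the parameter
// classified as Vector, the lane sizes are {64, 32}, so NDS = 32, WDS = 64,
// and OutputBecomesInput is false because the return type is pass-by-value.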
  11012. /// Mangle the parameter part of the vector function name according to
  11013. /// their OpenMP classification. The mangling function is defined in
  11014. /// section 3.5 of the AAVFABI.
  11015. static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
  11016. SmallString<256> Buffer;
  11017. llvm::raw_svector_ostream Out(Buffer);
  11018. for (const auto &ParamAttr : ParamAttrs) {
  11019. switch (ParamAttr.Kind) {
  11020. case LinearWithVarStride:
  11021. Out << "ls" << ParamAttr.StrideOrArg;
  11022. break;
  11023. case Linear:
  11024. Out << 'l';
  11025. // Don't print the step value if it is not present or if it is
  11026. // equal to 1.
  11027. if (ParamAttr.StrideOrArg != 1)
  11028. Out << ParamAttr.StrideOrArg;
  11029. break;
  11030. case Uniform:
  11031. Out << 'u';
  11032. break;
  11033. case Vector:
  11034. Out << 'v';
  11035. break;
  11036. }
  11037. if (!!ParamAttr.Alignment)
  11038. Out << 'a' << ParamAttr.Alignment;
  11039. }
  11040. return std::string(Out.str());
  11041. }
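// For example (illustrative): parameters classified as {Linear with stride 1,
// Uniform aligned to 16, Vector} mangle to "l" + "ua16" + "v", i.e. "lua16v".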
  11042. // Function used to add the attribute. The parameter `VLEN` is
  11043. // templated to allow the use of "x" when targeting scalable functions
  11044. // for SVE.
  11045. template <typename T>
  11046. static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix,
  11047. char ISA, StringRef ParSeq,
  11048. StringRef MangledName, bool OutputBecomesInput,
  11049. llvm::Function *Fn) {
  11050. SmallString<256> Buffer;
  11051. llvm::raw_svector_ostream Out(Buffer);
  11052. Out << Prefix << ISA << LMask << VLEN;
  11053. if (OutputBecomesInput)
  11054. Out << "v";
  11055. Out << ParSeq << "_" << MangledName;
  11056. Fn->addFnAttr(Out.str());
  11057. }
  11058. // Helper function to generate the Advanced SIMD names depending on
  11059. // the value of the NDS when simdlen is not present.
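// For example, NDS == 32 produces both a 2-lane (64-bit) and a 4-lane
// (128-bit) variant, while NDS == 64 or 128 produces only the 2-lane one.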
  11060. static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask,
  11061. StringRef Prefix, char ISA,
  11062. StringRef ParSeq, StringRef MangledName,
  11063. bool OutputBecomesInput,
  11064. llvm::Function *Fn) {
  11065. switch (NDS) {
  11066. case 8:
  11067. addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
  11068. OutputBecomesInput, Fn);
  11069. addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName,
  11070. OutputBecomesInput, Fn);
  11071. break;
  11072. case 16:
  11073. addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
  11074. OutputBecomesInput, Fn);
  11075. addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
  11076. OutputBecomesInput, Fn);
  11077. break;
  11078. case 32:
  11079. addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
  11080. OutputBecomesInput, Fn);
  11081. addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
  11082. OutputBecomesInput, Fn);
  11083. break;
  11084. case 64:
  11085. case 128:
  11086. addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
  11087. OutputBecomesInput, Fn);
  11088. break;
  11089. default:
  11090. llvm_unreachable("Scalar type is too wide.");
  11091. }
  11092. }
  11093. /// Emit vector function attributes for AArch64, as defined in the AAVFABI.
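/// As an illustration (assuming a function whose mangled name is "foo"):
/// `#pragma omp declare simd simdlen(2) notinbranch` on `double foo(double)`
/// emits the Advanced SIMD attribute "_ZGVnN2v_foo".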
  11094. static void emitAArch64DeclareSimdFunction(
  11095. CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN,
  11096. ArrayRef<ParamAttrTy> ParamAttrs,
  11097. OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
  11098. char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) {
  11099. // Get basic data for building the vector signature.
  11100. const auto Data = getNDSWDS(FD, ParamAttrs);
  11101. const unsigned NDS = std::get<0>(Data);
  11102. const unsigned WDS = std::get<1>(Data);
  11103. const bool OutputBecomesInput = std::get<2>(Data);
  11104. // Check the values provided via `simdlen` by the user.
  11105. // 1. A `simdlen(1)` doesn't produce vector signatures,
  11106. if (UserVLEN == 1) {
  11107. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  11108. DiagnosticsEngine::Warning,
  11109. "The clause simdlen(1) has no effect when targeting aarch64.");
  11110. CGM.getDiags().Report(SLoc, DiagID);
  11111. return;
  11112. }
  11113. // 2. Section 3.3.1, item 1: user input must be a power of 2 for
  11114. // Advanced SIMD output.
  11115. if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
  11116. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  11117. DiagnosticsEngine::Warning, "The value specified in simdlen must be a "
  11118. "power of 2 when targeting Advanced SIMD.");
  11119. CGM.getDiags().Report(SLoc, DiagID);
  11120. return;
  11121. }
  // 3. Section 3.4.1. SVE fixed length must obey the architectural
  // limits.
  11124. if (ISA == 's' && UserVLEN != 0) {
  11125. if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
  11126. unsigned DiagID = CGM.getDiags().getCustomDiagID(
  11127. DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit "
  11128. "lanes in the architectural constraints "
  11129. "for SVE (min is 128-bit, max is "
  11130. "2048-bit, by steps of 128-bit)");
  11131. CGM.getDiags().Report(SLoc, DiagID) << WDS;
  11132. return;
  11133. }
  11134. }
  11135. // Sort out parameter sequence.
  11136. const std::string ParSeq = mangleVectorParameters(ParamAttrs);
  11137. StringRef Prefix = "_ZGV";
  11138. // Generate simdlen from user input (if any).
  11139. if (UserVLEN) {
  11140. if (ISA == 's') {
  11141. // SVE generates only a masked function.
  11142. addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
  11143. OutputBecomesInput, Fn);
  11144. } else {
  11145. assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
  11146. // Advanced SIMD generates one or two functions, depending on
  11147. // the `[not]inbranch` clause.
  11148. switch (State) {
  11149. case OMPDeclareSimdDeclAttr::BS_Undefined:
  11150. addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
  11151. OutputBecomesInput, Fn);
  11152. addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
  11153. OutputBecomesInput, Fn);
  11154. break;
  11155. case OMPDeclareSimdDeclAttr::BS_Notinbranch:
  11156. addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
  11157. OutputBecomesInput, Fn);
  11158. break;
  11159. case OMPDeclareSimdDeclAttr::BS_Inbranch:
  11160. addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
  11161. OutputBecomesInput, Fn);
  11162. break;
  11163. }
  11164. }
  11165. } else {
  11166. // If no user simdlen is provided, follow the AAVFABI rules for
  11167. // generating the vector length.
  11168. if (ISA == 's') {
  11169. // SVE, section 3.4.1, item 1.
  11170. addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName,
  11171. OutputBecomesInput, Fn);
  11172. } else {
  11173. assert(ISA == 'n' && "Expected ISA either 's' or 'n'.");
  11174. // Advanced SIMD, Section 3.3.1 of the AAVFABI, generates one or
  11175. // two vector names depending on the use of the clause
  11176. // `[not]inbranch`.
  11177. switch (State) {
  11178. case OMPDeclareSimdDeclAttr::BS_Undefined:
  11179. addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
  11180. OutputBecomesInput, Fn);
  11181. addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
  11182. OutputBecomesInput, Fn);
  11183. break;
  11184. case OMPDeclareSimdDeclAttr::BS_Notinbranch:
  11185. addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
  11186. OutputBecomesInput, Fn);
  11187. break;
  11188. case OMPDeclareSimdDeclAttr::BS_Inbranch:
  11189. addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
  11190. OutputBecomesInput, Fn);
  11191. break;
  11192. }
  11193. }
  11194. }
  11195. }
  11196. void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
  11197. llvm::Function *Fn) {
  11198. ASTContext &C = CGM.getContext();
  11199. FD = FD->getMostRecentDecl();
  11200. // Map params to their positions in function decl.
  11201. llvm::DenseMap<const Decl *, unsigned> ParamPositions;
  11202. if (isa<CXXMethodDecl>(FD))
  11203. ParamPositions.try_emplace(FD, 0);
  11204. unsigned ParamPos = ParamPositions.size();
  11205. for (const ParmVarDecl *P : FD->parameters()) {
  11206. ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
  11207. ++ParamPos;
  11208. }
  11209. while (FD) {
  11210. for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
  11211. llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
  11212. // Mark uniform parameters.
  11213. for (const Expr *E : Attr->uniforms()) {
  11214. E = E->IgnoreParenImpCasts();
  11215. unsigned Pos;
  11216. if (isa<CXXThisExpr>(E)) {
  11217. Pos = ParamPositions[FD];
  11218. } else {
  11219. const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
  11220. ->getCanonicalDecl();
  11221. Pos = ParamPositions[PVD];
  11222. }
  11223. ParamAttrs[Pos].Kind = Uniform;
  11224. }
  11225. // Get alignment info.
  11226. auto NI = Attr->alignments_begin();
  11227. for (const Expr *E : Attr->aligneds()) {
  11228. E = E->IgnoreParenImpCasts();
  11229. unsigned Pos;
  11230. QualType ParmTy;
  11231. if (isa<CXXThisExpr>(E)) {
  11232. Pos = ParamPositions[FD];
  11233. ParmTy = E->getType();
  11234. } else {
  11235. const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
  11236. ->getCanonicalDecl();
  11237. Pos = ParamPositions[PVD];
  11238. ParmTy = PVD->getType();
  11239. }
  11240. ParamAttrs[Pos].Alignment =
  11241. (*NI)
  11242. ? (*NI)->EvaluateKnownConstInt(C)
  11243. : llvm::APSInt::getUnsigned(
  11244. C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
  11245. .getQuantity());
  11246. ++NI;
  11247. }
  11248. // Mark linear parameters.
  11249. auto SI = Attr->steps_begin();
  11250. auto MI = Attr->modifiers_begin();
  11251. for (const Expr *E : Attr->linears()) {
  11252. E = E->IgnoreParenImpCasts();
  11253. unsigned Pos;
  11254. // Rescaling factor needed to compute the linear parameter
  11255. // value in the mangled name.
  11256. unsigned PtrRescalingFactor = 1;
  11257. if (isa<CXXThisExpr>(E)) {
  11258. Pos = ParamPositions[FD];
  11259. } else {
  11260. const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
  11261. ->getCanonicalDecl();
  11262. Pos = ParamPositions[PVD];
  11263. if (auto *P = dyn_cast<PointerType>(PVD->getType()))
  11264. PtrRescalingFactor = CGM.getContext()
  11265. .getTypeSizeInChars(P->getPointeeType())
  11266. .getQuantity();
  11267. }
  11268. ParamAttrTy &ParamAttr = ParamAttrs[Pos];
  11269. ParamAttr.Kind = Linear;
  11270. // Assuming a stride of 1, for `linear` without modifiers.
  11271. ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
  11272. if (*SI) {
  11273. Expr::EvalResult Result;
  11274. if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
  11275. if (const auto *DRE =
  11276. cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
  11277. if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
  11278. ParamAttr.Kind = LinearWithVarStride;
  11279. ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
  11280. ParamPositions[StridePVD->getCanonicalDecl()]);
  11281. }
  11282. }
  11283. } else {
  11284. ParamAttr.StrideOrArg = Result.Val.getInt();
  11285. }
  11286. }
  11287. // If we are using a linear clause on a pointer, we need to
  11288. // rescale the value of linear_step with the byte size of the
  11289. // pointee type.
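        // For example (hypothetical), `linear(p:2)` on a `float *p` yields
        // StrideOrArg = 2 * sizeof(float) = 8, later mangled as "l8".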
  11290. if (Linear == ParamAttr.Kind)
  11291. ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
  11292. ++SI;
  11293. ++MI;
  11294. }
  11295. llvm::APSInt VLENVal;
  11296. SourceLocation ExprLoc;
  11297. const Expr *VLENExpr = Attr->getSimdlen();
  11298. if (VLENExpr) {
  11299. VLENVal = VLENExpr->EvaluateKnownConstInt(C);
  11300. ExprLoc = VLENExpr->getExprLoc();
  11301. }
  11302. OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
  11303. if (CGM.getTriple().isX86()) {
  11304. emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
  11305. } else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) {
  11306. unsigned VLEN = VLENVal.getExtValue();
  11307. StringRef MangledName = Fn->getName();
  11308. if (CGM.getTarget().hasFeature("sve"))
  11309. emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
  11310. MangledName, 's', 128, Fn, ExprLoc);
  11311. if (CGM.getTarget().hasFeature("neon"))
  11312. emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
  11313. MangledName, 'n', 128, Fn, ExprLoc);
  11314. }
  11315. }
  11316. FD = FD->getPreviousDecl();
  11317. }
  11318. }
  11319. namespace {
  11320. /// Cleanup action for doacross support.
  11321. class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
  11322. public:
  11323. static const int DoacrossFinArgs = 2;
  11324. private:
  11325. llvm::FunctionCallee RTLFn;
  11326. llvm::Value *Args[DoacrossFinArgs];
  11327. public:
  11328. DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
  11329. ArrayRef<llvm::Value *> CallArgs)
  11330. : RTLFn(RTLFn) {
  11331. assert(CallArgs.size() == DoacrossFinArgs);
  11332. std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
  11333. }
  11334. void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
  11335. if (!CGF.HaveInsertPoint())
  11336. return;
  11337. CGF.EmitRuntimeCall(RTLFn, Args);
  11338. }
  11339. };
  11340. } // namespace
  11341. void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
  11342. const OMPLoopDirective &D,
  11343. ArrayRef<Expr *> NumIterations) {
  11344. if (!CGF.HaveInsertPoint())
  11345. return;
  11346. ASTContext &C = CGM.getContext();
  11347. QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  11348. RecordDecl *RD;
  11349. if (KmpDimTy.isNull()) {
  11350. // Build struct kmp_dim { // loop bounds info casted to kmp_int64
  11351. // kmp_int64 lo; // lower
  11352. // kmp_int64 up; // upper
  11353. // kmp_int64 st; // stride
  11354. // };
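    // For an `ordered(2)` loop nest, for instance, two kmp_dim entries are
    // emitted: `lo` stays zero-initialized, `up` receives each loop's trip
    // count, and `st` is set to 1 below.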
  11355. RD = C.buildImplicitRecord("kmp_dim");
  11356. RD->startDefinition();
  11357. addFieldToRecordDecl(C, RD, Int64Ty);
  11358. addFieldToRecordDecl(C, RD, Int64Ty);
  11359. addFieldToRecordDecl(C, RD, Int64Ty);
  11360. RD->completeDefinition();
  11361. KmpDimTy = C.getRecordType(RD);
  11362. } else {
  11363. RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
  11364. }
  11365. llvm::APInt Size(/*numBits=*/32, NumIterations.size());
  11366. QualType ArrayTy =
  11367. C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0);
  11368. Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
  11369. CGF.EmitNullInitialization(DimsAddr, ArrayTy);
  11370. enum { LowerFD = 0, UpperFD, StrideFD };
  11371. // Fill dims with data.
  11372. for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
  11373. LValue DimsLVal = CGF.MakeAddrLValue(
  11374. CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy);
  11375. // dims.upper = num_iterations;
  11376. LValue UpperLVal = CGF.EmitLValueForField(
  11377. DimsLVal, *std::next(RD->field_begin(), UpperFD));
  11378. llvm::Value *NumIterVal = CGF.EmitScalarConversion(
  11379. CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
  11380. Int64Ty, NumIterations[I]->getExprLoc());
  11381. CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
  11382. // dims.stride = 1;
  11383. LValue StrideLVal = CGF.EmitLValueForField(
  11384. DimsLVal, *std::next(RD->field_begin(), StrideFD));
  11385. CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
  11386. StrideLVal);
  11387. }
  11388. // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
  11389. // kmp_int32 num_dims, struct kmp_dim * dims);
  11390. llvm::Value *Args[] = {
  11391. emitUpdateLocation(CGF, D.getBeginLoc()),
  11392. getThreadID(CGF, D.getBeginLoc()),
  11393. llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
  11394. CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  11395. CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
  11396. CGM.VoidPtrTy)};
  11397. llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
  11398. CGM.getModule(), OMPRTL___kmpc_doacross_init);
  11399. CGF.EmitRuntimeCall(RTLFn, Args);
  11400. llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
  11401. emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
  11402. llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
  11403. CGM.getModule(), OMPRTL___kmpc_doacross_fini);
  11404. CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
  11405. llvm::makeArrayRef(FiniArgs));
  11406. }
  11407. void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
  11408. const OMPDependClause *C) {
  11409. QualType Int64Ty =
  11410. CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  11411. llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
  11412. QualType ArrayTy = CGM.getContext().getConstantArrayType(
  11413. Int64Ty, Size, nullptr, ArrayType::Normal, 0);
  11414. Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
  11415. for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
  11416. const Expr *CounterVal = C->getLoopData(I);
  11417. assert(CounterVal);
  11418. llvm::Value *CntVal = CGF.EmitScalarConversion(
  11419. CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
  11420. CounterVal->getExprLoc());
  11421. CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I),
  11422. /*Volatile=*/false, Int64Ty);
  11423. }
  11424. llvm::Value *Args[] = {
  11425. emitUpdateLocation(CGF, C->getBeginLoc()),
  11426. getThreadID(CGF, C->getBeginLoc()),
  11427. CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
  11428. llvm::FunctionCallee RTLFn;
  11429. if (C->getDependencyKind() == OMPC_DEPEND_source) {
  11430. RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  11431. OMPRTL___kmpc_doacross_post);
  11432. } else {
  11433. assert(C->getDependencyKind() == OMPC_DEPEND_sink);
  11434. RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
  11435. OMPRTL___kmpc_doacross_wait);
  11436. }
  11437. CGF.EmitRuntimeCall(RTLFn, Args);
  11438. }
  11439. void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
  11440. llvm::FunctionCallee Callee,
  11441. ArrayRef<llvm::Value *> Args) const {
  11442. assert(Loc.isValid() && "Outlined function call location must be valid.");
  11443. auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  11444. if (auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
  11445. if (Fn->doesNotThrow()) {
  11446. CGF.EmitNounwindRuntimeCall(Fn, Args);
  11447. return;
  11448. }
  11449. }
  11450. CGF.EmitRuntimeCall(Callee, Args);
  11451. }
  11452. void CGOpenMPRuntime::emitOutlinedFunctionCall(
  11453. CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
  11454. ArrayRef<llvm::Value *> Args) const {
  11455. emitCall(CGF, Loc, OutlinedFn, Args);
  11456. }
  11457. void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {
  11458. if (const auto *FD = dyn_cast<FunctionDecl>(D))
  11459. if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
  11460. HasEmittedDeclareTargetRegion = true;
  11461. }
  11462. Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
  11463. const VarDecl *NativeParam,
  11464. const VarDecl *TargetParam) const {
  11465. return CGF.GetAddrOfLocalVar(NativeParam);
  11466. }
  11467. /// Return allocator value from expression, or return a null allocator (default
  11468. /// when no allocator specified).
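/// When present, the allocator expression evaluates to one of the OpenMP
/// allocator handles (e.g. `omp_default_mem_alloc`), an integer-like value
/// that is converted to `void *` below.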
  11469. static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
  11470. const Expr *Allocator) {
  11471. llvm::Value *AllocVal;
  11472. if (Allocator) {
  11473. AllocVal = CGF.EmitScalarExpr(Allocator);
    // According to the standard, the original allocator type is an enum
    // (integer). Convert to pointer type, if required.
  11476. AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
  11477. CGF.getContext().VoidPtrTy,
  11478. Allocator->getExprLoc());
  11479. } else {
  11480. // If no allocator specified, it defaults to the null allocator.
  11481. AllocVal = llvm::Constant::getNullValue(
  11482. CGF.CGM.getTypes().ConvertType(CGF.getContext().VoidPtrTy));
  11483. }
  11484. return AllocVal;
  11485. }
  11486. Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
  11487. const VarDecl *VD) {
  11488. if (!VD)
  11489. return Address::invalid();
  11490. Address UntiedAddr = Address::invalid();
  11491. Address UntiedRealAddr = Address::invalid();
  11492. auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
  11493. if (It != FunctionToUntiedTaskStackMap.end()) {
  11494. const UntiedLocalVarsAddressesMap &UntiedData =
  11495. UntiedLocalVarsStack[It->second];
  11496. auto I = UntiedData.find(VD);
  11497. if (I != UntiedData.end()) {
  11498. UntiedAddr = I->second.first;
  11499. UntiedRealAddr = I->second.second;
  11500. }
  11501. }
  11502. const VarDecl *CVD = VD->getCanonicalDecl();
  11503. if (CVD->hasAttr<OMPAllocateDeclAttr>()) {
  11504. // Use the default allocation.
  11505. if (!isAllocatableDecl(VD))
  11506. return UntiedAddr;
  11507. llvm::Value *Size;
  11508. CharUnits Align = CGM.getContext().getDeclAlign(CVD);
  11509. if (CVD->getType()->isVariablyModifiedType()) {
  11510. Size = CGF.getTypeSize(CVD->getType());
  11511. // Align the size: ((size + align - 1) / align) * align
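      // E.g. size = 10 with align = 8 becomes ((10 + 7) / 8) * 8 == 16.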
  11512. Size = CGF.Builder.CreateNUWAdd(
  11513. Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
  11514. Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
  11515. Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
  11516. } else {
  11517. CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
  11518. Size = CGM.getSize(Sz.alignTo(Align));
  11519. }
  11520. llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
  11521. const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  11522. const Expr *Allocator = AA->getAllocator();
  11523. llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
  11524. llvm::Value *Alignment =
  11525. AA->getAlignment()
  11526. ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(AA->getAlignment()),
  11527. CGM.SizeTy, /*isSigned=*/false)
  11528. : nullptr;
  11529. SmallVector<llvm::Value *, 4> Args;
  11530. Args.push_back(ThreadID);
  11531. if (Alignment)
  11532. Args.push_back(Alignment);
  11533. Args.push_back(Size);
  11534. Args.push_back(AllocVal);
  11535. llvm::omp::RuntimeFunction FnID =
  11536. Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
  11537. llvm::Value *Addr = CGF.EmitRuntimeCall(
  11538. OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args,
  11539. getName({CVD->getName(), ".void.addr"}));
  11540. llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
  11541. CGM.getModule(), OMPRTL___kmpc_free);
  11542. QualType Ty = CGM.getContext().getPointerType(CVD->getType());
  11543. Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  11544. Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"}));
  11545. if (UntiedAddr.isValid())
  11546. CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty);
  11547. // Cleanup action for allocate support.
  11548. class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
  11549. llvm::FunctionCallee RTLFn;
  11550. SourceLocation::UIntTy LocEncoding;
  11551. Address Addr;
  11552. const Expr *AllocExpr;
  11553. public:
  11554. OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
  11555. SourceLocation::UIntTy LocEncoding, Address Addr,
  11556. const Expr *AllocExpr)
  11557. : RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
  11558. AllocExpr(AllocExpr) {}
  11559. void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
  11560. if (!CGF.HaveInsertPoint())
  11561. return;
  11562. llvm::Value *Args[3];
  11563. Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
  11564. CGF, SourceLocation::getFromRawEncoding(LocEncoding));
  11565. Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  11566. Addr.getPointer(), CGF.VoidPtrTy);
  11567. llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
  11568. Args[2] = AllocVal;
  11569. CGF.EmitRuntimeCall(RTLFn, Args);
  11570. }
  11571. };
  11572. Address VDAddr =
  11573. UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
  11574. CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
  11575. NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
  11576. VDAddr, Allocator);
  11577. if (UntiedRealAddr.isValid())
  11578. if (auto *Region =
  11579. dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
  11580. Region->emitUntiedSwitch(CGF);
  11581. return VDAddr;
  11582. }
  11583. return UntiedAddr;
  11584. }
  11585. bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF,
  11586. const VarDecl *VD) const {
  11587. auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
  11588. if (It == FunctionToUntiedTaskStackMap.end())
  11589. return false;
  11590. return UntiedLocalVarsStack[It->second].count(VD) > 0;
  11591. }
  11592. CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
  11593. CodeGenModule &CGM, const OMPLoopDirective &S)
  11594. : CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
  11595. assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
  11596. if (!NeedToPush)
  11597. return;
  11598. NontemporalDeclsSet &DS =
  11599. CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back();
  11600. for (const auto *C : S.getClausesOfKind<OMPNontemporalClause>()) {
  11601. for (const Stmt *Ref : C->private_refs()) {
  11602. const auto *SimpleRefExpr = cast<Expr>(Ref)->IgnoreParenImpCasts();
  11603. const ValueDecl *VD;
  11604. if (const auto *DRE = dyn_cast<DeclRefExpr>(SimpleRefExpr)) {
  11605. VD = DRE->getDecl();
  11606. } else {
  11607. const auto *ME = cast<MemberExpr>(SimpleRefExpr);
  11608. assert((ME->isImplicitCXXThis() ||
  11609. isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) &&
  11610. "Expected member of current class.");
  11611. VD = ME->getMemberDecl();
  11612. }
  11613. DS.insert(VD);
  11614. }
  11615. }
  11616. }
  11617. CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
  11618. if (!NeedToPush)
  11619. return;
  11620. CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
  11621. }
  11622. CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
  11623. CodeGenFunction &CGF,
  11624. const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
  11625. std::pair<Address, Address>> &LocalVars)
  11626. : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
  11627. if (!NeedToPush)
  11628. return;
  11629. CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace(
  11630. CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
  11631. CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
  11632. }
  11633. CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() {
  11634. if (!NeedToPush)
  11635. return;
  11636. CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
  11637. }
  11638. bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
  11639. assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
  11640. return llvm::any_of(
  11641. CGM.getOpenMPRuntime().NontemporalDeclsStack,
  11642. [VD](const NontemporalDeclsSet &Set) { return Set.contains(VD); });
  11643. }
  11644. void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
  11645. const OMPExecutableDirective &S,
  11646. llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
  11647. const {
  11648. llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
  11649. // Vars in target/task regions must be excluded completely.
  11650. if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
  11651. isOpenMPTaskingDirective(S.getDirectiveKind())) {
  11652. SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  11653. getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
  11654. const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
  11655. for (const CapturedStmt::Capture &Cap : CS->captures()) {
  11656. if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
  11657. NeedToCheckForLPCs.insert(Cap.getCapturedVar());
  11658. }
  11659. }
  11660. // Exclude vars in private clauses.
  11661. for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
  11662. for (const Expr *Ref : C->varlists()) {
  11663. if (!Ref->getType()->isScalarType())
  11664. continue;
  11665. const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
  11666. if (!DRE)
  11667. continue;
  11668. NeedToCheckForLPCs.insert(DRE->getDecl());
  11669. }
  11670. }
  11671. for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
  11672. for (const Expr *Ref : C->varlists()) {
  11673. if (!Ref->getType()->isScalarType())
  11674. continue;
  11675. const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
  11676. if (!DRE)
  11677. continue;
  11678. NeedToCheckForLPCs.insert(DRE->getDecl());
  11679. }
  11680. }
  11681. for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
  11682. for (const Expr *Ref : C->varlists()) {
  11683. if (!Ref->getType()->isScalarType())
  11684. continue;
  11685. const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
  11686. if (!DRE)
  11687. continue;
  11688. NeedToCheckForLPCs.insert(DRE->getDecl());
  11689. }
  11690. }
  11691. for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
  11692. for (const Expr *Ref : C->varlists()) {
  11693. if (!Ref->getType()->isScalarType())
  11694. continue;
  11695. const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
  11696. if (!DRE)
  11697. continue;
  11698. NeedToCheckForLPCs.insert(DRE->getDecl());
  11699. }
  11700. }
  11701. for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
  11702. for (const Expr *Ref : C->varlists()) {
  11703. if (!Ref->getType()->isScalarType())
  11704. continue;
  11705. const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
  11706. if (!DRE)
  11707. continue;
  11708. NeedToCheckForLPCs.insert(DRE->getDecl());
  11709. }
  11710. }
  11711. for (const Decl *VD : NeedToCheckForLPCs) {
  11712. for (const LastprivateConditionalData &Data :
  11713. llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
  11714. if (Data.DeclToUniqueName.count(VD) > 0) {
  11715. if (!Data.Disabled)
  11716. NeedToAddForLPCsAsDisabled.insert(VD);
  11717. break;
  11718. }
  11719. }
  11720. }
  11721. }
  11722. CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
  11723. CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
  11724. : CGM(CGF.CGM),
  11725. Action((CGM.getLangOpts().OpenMP >= 50 &&
  11726. llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
  11727. [](const OMPLastprivateClause *C) {
  11728. return C->getKind() ==
  11729. OMPC_LASTPRIVATE_conditional;
  11730. }))
  11731. ? ActionToDo::PushAsLastprivateConditional
  11732. : ActionToDo::DoNotPush) {
  11733. assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
  11734. if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
  11735. return;
  11736. assert(Action == ActionToDo::PushAsLastprivateConditional &&
  11737. "Expected a push action.");
  11738. LastprivateConditionalData &Data =
  11739. CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
  11740. for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
  11741. if (C->getKind() != OMPC_LASTPRIVATE_conditional)
  11742. continue;
  11743. for (const Expr *Ref : C->varlists()) {
  11744. Data.DeclToUniqueName.insert(std::make_pair(
  11745. cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
  11746. SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
  11747. }
  11748. }
  11749. Data.IVLVal = IVLVal;
  11750. Data.Fn = CGF.CurFn;
  11751. }
  11752. CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
  11753. CodeGenFunction &CGF, const OMPExecutableDirective &S)
  11754. : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
  11755. assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
  11756. if (CGM.getLangOpts().OpenMP < 50)
  11757. return;
  11758. llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
  11759. tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
  11760. if (!NeedToAddForLPCsAsDisabled.empty()) {
  11761. Action = ActionToDo::DisableLastprivateConditional;
  11762. LastprivateConditionalData &Data =
  11763. CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
  11764. for (const Decl *VD : NeedToAddForLPCsAsDisabled)
  11765. Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
  11766. Data.Fn = CGF.CurFn;
  11767. Data.Disabled = true;
  11768. }
  11769. }
  11770. CGOpenMPRuntime::LastprivateConditionalRAII
  11771. CGOpenMPRuntime::LastprivateConditionalRAII::disable(
  11772. CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  11773. return LastprivateConditionalRAII(CGF, S);
  11774. }
  11775. CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
  11776. if (CGM.getLangOpts().OpenMP < 50)
  11777. return;
  11778. if (Action == ActionToDo::DisableLastprivateConditional) {
  11779. assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
  11780. "Expected list of disabled private vars.");
  11781. CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
  11782. }
  11783. if (Action == ActionToDo::PushAsLastprivateConditional) {
  11784. assert(
  11785. !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
  11786. "Expected list of lastprivate conditional vars.");
  11787. CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
  11788. }
  11789. }
  11790. Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
  11791. const VarDecl *VD) {
  11792. ASTContext &C = CGM.getContext();
  11793. auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
  11794. if (I == LastprivateConditionalToTypes.end())
  11795. I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
  11796. QualType NewType;
  11797. const FieldDecl *VDField;
  11798. const FieldDecl *FiredField;
  11799. LValue BaseLVal;
  11800. auto VI = I->getSecond().find(VD);
  11801. if (VI == I->getSecond().end()) {
  11802. RecordDecl *RD = C.buildImplicitRecord("lasprivate.conditional");
  11803. RD->startDefinition();
  11804. VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
  11805. FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
  11806. RD->completeDefinition();
  11807. NewType = C.getRecordType(RD);
  11808. Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
  11809. BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
  11810. I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
  11811. } else {
  11812. NewType = std::get<0>(VI->getSecond());
  11813. VDField = std::get<1>(VI->getSecond());
  11814. FiredField = std::get<2>(VI->getSecond());
  11815. BaseLVal = std::get<3>(VI->getSecond());
  11816. }
  11817. LValue FiredLVal =
  11818. CGF.EmitLValueForField(BaseLVal, FiredField);
  11819. CGF.EmitStoreOfScalar(
  11820. llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
  11821. FiredLVal);
  11822. return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
  11823. }
  11824. namespace {
  11825. /// Checks if the lastprivate conditional variable is referenced in LHS.
  11826. class LastprivateConditionalRefChecker final
  11827. : public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
  11828. ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
  11829. const Expr *FoundE = nullptr;
  11830. const Decl *FoundD = nullptr;
  11831. StringRef UniqueDeclName;
  11832. LValue IVLVal;
  11833. llvm::Function *FoundFn = nullptr;
  11834. SourceLocation Loc;
  11835. public:
  11836. bool VisitDeclRefExpr(const DeclRefExpr *E) {
  11837. for (const CGOpenMPRuntime::LastprivateConditionalData &D :
  11838. llvm::reverse(LPM)) {
  11839. auto It = D.DeclToUniqueName.find(E->getDecl());
  11840. if (It == D.DeclToUniqueName.end())
  11841. continue;
  11842. if (D.Disabled)
  11843. return false;
  11844. FoundE = E;
  11845. FoundD = E->getDecl()->getCanonicalDecl();
  11846. UniqueDeclName = It->second;
  11847. IVLVal = D.IVLVal;
  11848. FoundFn = D.Fn;
  11849. break;
  11850. }
  11851. return FoundE == E;
  11852. }
  11853. bool VisitMemberExpr(const MemberExpr *E) {
  11854. if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
  11855. return false;
  11856. for (const CGOpenMPRuntime::LastprivateConditionalData &D :
  11857. llvm::reverse(LPM)) {
  11858. auto It = D.DeclToUniqueName.find(E->getMemberDecl());
  11859. if (It == D.DeclToUniqueName.end())
  11860. continue;
  11861. if (D.Disabled)
  11862. return false;
  11863. FoundE = E;
  11864. FoundD = E->getMemberDecl()->getCanonicalDecl();
  11865. UniqueDeclName = It->second;
  11866. IVLVal = D.IVLVal;
  11867. FoundFn = D.Fn;
  11868. break;
  11869. }
  11870. return FoundE == E;
  11871. }
  11872. bool VisitStmt(const Stmt *S) {
  11873. for (const Stmt *Child : S->children()) {
  11874. if (!Child)
  11875. continue;
  11876. if (const auto *E = dyn_cast<Expr>(Child))
  11877. if (!E->isGLValue())
  11878. continue;
  11879. if (Visit(Child))
  11880. return true;
  11881. }
  11882. return false;
  11883. }
  11884. explicit LastprivateConditionalRefChecker(
  11885. ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
  11886. : LPM(LPM) {}
  11887. std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
  11888. getFoundData() const {
  11889. return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
  11890. }
  11891. };
  11892. } // namespace
  11893. void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
  11894. LValue IVLVal,
  11895. StringRef UniqueDeclName,
  11896. LValue LVal,
  11897. SourceLocation Loc) {
  11898. // Last updated loop counter for the lastprivate conditional var.
  11899. // int<xx> last_iv = 0;
  11900. llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
  11901. llvm::Constant *LastIV =
  11902. getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
  11903. cast<llvm::GlobalVariable>(LastIV)->setAlignment(
  11904. IVLVal.getAlignment().getAsAlign());
  11905. LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
  11906. // Last value of the lastprivate conditional.
  11907. // decltype(priv_a) last_a;
  11908. llvm::GlobalVariable *Last = getOrCreateInternalVariable(
  11909. CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
  11910. Last->setAlignment(LVal.getAlignment().getAsAlign());
  11911. LValue LastLVal = CGF.MakeAddrLValue(
  11912. Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
  11913. // Global loop counter. Required to handle inner parallel-for regions.
  11914. // iv
  11915. llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
  11916. // #pragma omp critical(a)
  11917. // if (last_iv <= iv) {
  11918. // last_iv = iv;
  11919. // last_a = priv_a;
  11920. // }
  11921. auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
  11922. Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
  11923. Action.Enter(CGF);
  11924. llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
  11925. // (last_iv <= iv) ? Check if the variable is updated and store new
  11926. // value in global var.
  11927. llvm::Value *CmpRes;
  11928. if (IVLVal.getType()->isSignedIntegerType()) {
  11929. CmpRes = CGF.Builder.CreateICmpSLE(LastIVVal, IVVal);
  11930. } else {
  11931. assert(IVLVal.getType()->isUnsignedIntegerType() &&
  11932. "Loop iteration variable must be integer.");
  11933. CmpRes = CGF.Builder.CreateICmpULE(LastIVVal, IVVal);
  11934. }
  11935. llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lp_cond_then");
  11936. llvm::BasicBlock *ExitBB = CGF.createBasicBlock("lp_cond_exit");
  11937. CGF.Builder.CreateCondBr(CmpRes, ThenBB, ExitBB);
  11938. // {
  11939. CGF.EmitBlock(ThenBB);
  11940. // last_iv = iv;
  11941. CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
  11942. // last_a = priv_a;
  11943. switch (CGF.getEvaluationKind(LVal.getType())) {
  11944. case TEK_Scalar: {
  11945. llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
  11946. CGF.EmitStoreOfScalar(PrivVal, LastLVal);
  11947. break;
  11948. }
  11949. case TEK_Complex: {
  11950. CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
  11951. CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
  11952. break;
  11953. }
  11954. case TEK_Aggregate:
  11955. llvm_unreachable(
  11956. "Aggregates are not supported in lastprivate conditional.");
  11957. }
  11958. // }
  11959. CGF.EmitBranch(ExitBB);
  11960. // There is no need to emit line number for unconditional branch.
  11961. (void)ApplyDebugLocation::CreateEmpty(CGF);
  11962. CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
  11963. };
  11964. if (CGM.getLangOpts().OpenMPSimd) {
  11965. // Do not emit as a critical region as no parallel region could be emitted.
  11966. RegionCodeGenTy ThenRCG(CodeGen);
  11967. ThenRCG(CGF);
  11968. } else {
  11969. emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
  11970. }
  11971. }
  11972. void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
  11973. const Expr *LHS) {
  11974. if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
  11975. return;
  11976. LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
  11977. if (!Checker.Visit(LHS))
  11978. return;
  11979. const Expr *FoundE;
  11980. const Decl *FoundD;
  11981. StringRef UniqueDeclName;
  11982. LValue IVLVal;
  11983. llvm::Function *FoundFn;
  11984. std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
  11985. Checker.getFoundData();
  11986. if (FoundFn != CGF.CurFn) {
  11987. // Special codegen for inner parallel regions.
  11988. // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1;
  11989. auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
  11990. assert(It != LastprivateConditionalToTypes[FoundFn].end() &&
  11991. "Lastprivate conditional is not found in outer region.");
  11992. QualType StructTy = std::get<0>(It->getSecond());
  11993. const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
  11994. LValue PrivLVal = CGF.EmitLValue(FoundE);
  11995. Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  11996. PrivLVal.getAddress(CGF),
  11997. CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
  11998. LValue BaseLVal =
  11999. CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
  12000. LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
  12001. CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
  12002. CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
  12003. FiredLVal, llvm::AtomicOrdering::Unordered,
  12004. /*IsVolatile=*/true, /*isInit=*/false);
  12005. return;
  12006. }
  12007. // Private address of the lastprivate conditional in the current context.
  12008. // priv_a
  12009. LValue LVal = CGF.EmitLValue(FoundE);
  12010. emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
  12011. FoundE->getExprLoc());
  12012. }
  12013. void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
  12014. CodeGenFunction &CGF, const OMPExecutableDirective &D,
  12015. const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
  12016. if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
  12017. return;
  12018. auto Range = llvm::reverse(LastprivateConditionalStack);
  12019. auto It = llvm::find_if(
  12020. Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
  12021. if (It == Range.end() || It->Fn != CGF.CurFn)
  12022. return;
  12023. auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
  12024. assert(LPCI != LastprivateConditionalToTypes.end() &&
  12025. "Lastprivates must be registered already.");
  12026. SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  12027. getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  12028. const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
  12029. for (const auto &Pair : It->DeclToUniqueName) {
  12030. const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
  12031. if (!CS->capturesVariable(VD) || IgnoredDecls.contains(VD))
  12032. continue;
  12033. auto I = LPCI->getSecond().find(Pair.first);
    assert(I != LPCI->getSecond().end() &&
           "Lastprivate must be registered already.");
  12036. // bool Cmp = priv_a.Fired != 0;
  12037. LValue BaseLVal = std::get<3>(I->getSecond());
  12038. LValue FiredLVal =
  12039. CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
  12040. llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
  12041. llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
  12042. llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
  12043. llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
  12044. // if (Cmp) {
  12045. CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
  12046. CGF.EmitBlock(ThenBB);
  12047. Address Addr = CGF.GetAddrOfLocalVar(VD);
  12048. LValue LVal;
  12049. if (VD->getType()->isReferenceType())
  12050. LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
  12051. AlignmentSource::Decl);
  12052. else
  12053. LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
  12054. AlignmentSource::Decl);
  12055. emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
  12056. D.getBeginLoc());
  12057. auto AL = ApplyDebugLocation::CreateArtificial(CGF);
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  12059. // }
  12060. }
  12061. }
  12062. void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
  12063. CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD,
  12064. SourceLocation Loc) {
  12065. if (CGF.getLangOpts().OpenMP < 50)
  12066. return;
  12067. auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
  12068. assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&
  12069. "Unknown lastprivate conditional variable.");
  12070. StringRef UniqueName = It->second;
  12071. llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
  12072. // The variable was not updated in the region - exit.
  12073. if (!GV)
  12074. return;
  12075. LValue LPLVal = CGF.MakeAddrLValue(
  12076. Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
  12077. PrivLVal.getType().getNonReferenceType());
  12078. llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
  12079. CGF.EmitStoreOfScalar(Res, PrivLVal);
  12080. }
  12081. llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
  12082. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  12083. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  12084. llvm_unreachable("Not supported in SIMD-only mode");
  12085. }
  12086. llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
  12087. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  12088. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  12089. llvm_unreachable("Not supported in SIMD-only mode");
  12090. }
  12091. llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
  12092. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  12093. const VarDecl *PartIDVar, const VarDecl *TaskTVar,
  12094. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
  12095. bool Tied, unsigned &NumberOfParts) {
  12096. llvm_unreachable("Not supported in SIMD-only mode");
  12097. }
  12098. void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
  12099. SourceLocation Loc,
  12100. llvm::Function *OutlinedFn,
  12101. ArrayRef<llvm::Value *> CapturedVars,
  12102. const Expr *IfCond,
  12103. llvm::Value *NumThreads) {
  12104. llvm_unreachable("Not supported in SIMD-only mode");
  12105. }
  12106. void CGOpenMPSIMDRuntime::emitCriticalRegion(
  12107. CodeGenFunction &CGF, StringRef CriticalName,
  12108. const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
  12109. const Expr *Hint) {
  12110. llvm_unreachable("Not supported in SIMD-only mode");
  12111. }
  12112. void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
  12113. const RegionCodeGenTy &MasterOpGen,
  12114. SourceLocation Loc) {
  12115. llvm_unreachable("Not supported in SIMD-only mode");
  12116. }
  12117. void CGOpenMPSIMDRuntime::emitMaskedRegion(CodeGenFunction &CGF,
  12118. const RegionCodeGenTy &MasterOpGen,
  12119. SourceLocation Loc,
  12120. const Expr *Filter) {
  12121. llvm_unreachable("Not supported in SIMD-only mode");
  12122. }
  12123. void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
  12124. SourceLocation Loc) {
  12125. llvm_unreachable("Not supported in SIMD-only mode");
  12126. }
  12127. void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
  12128. CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
  12129. SourceLocation Loc) {
  12130. llvm_unreachable("Not supported in SIMD-only mode");
  12131. }
  12132. void CGOpenMPSIMDRuntime::emitSingleRegion(
  12133. CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
  12134. SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
  12135. ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
  12136. ArrayRef<const Expr *> AssignmentOps) {
  12137. llvm_unreachable("Not supported in SIMD-only mode");
  12138. }
  12139. void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
  12140. const RegionCodeGenTy &OrderedOpGen,
  12141. SourceLocation Loc,
  12142. bool IsThreads) {
  12143. llvm_unreachable("Not supported in SIMD-only mode");
  12144. }
  12145. void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
  12146. SourceLocation Loc,
  12147. OpenMPDirectiveKind Kind,
  12148. bool EmitChecks,
  12149. bool ForceSimpleCall) {
  12150. llvm_unreachable("Not supported in SIMD-only mode");
  12151. }
  12152. void CGOpenMPSIMDRuntime::emitForDispatchInit(
  12153. CodeGenFunction &CGF, SourceLocation Loc,
  12154. const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
  12155. bool Ordered, const DispatchRTInput &DispatchValues) {
  12156. llvm_unreachable("Not supported in SIMD-only mode");
  12157. }
  12158. void CGOpenMPSIMDRuntime::emitForStaticInit(
  12159. CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
  12160. const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
  12161. llvm_unreachable("Not supported in SIMD-only mode");
  12162. }
  12163. void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
  12164. CodeGenFunction &CGF, SourceLocation Loc,
  12165. OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
  12166. llvm_unreachable("Not supported in SIMD-only mode");
  12167. }
  12168. void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
  12169. SourceLocation Loc,
  12170. unsigned IVSize,
  12171. bool IVSigned) {
  12172. llvm_unreachable("Not supported in SIMD-only mode");
  12173. }
  12174. void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
  12175. SourceLocation Loc,
  12176. OpenMPDirectiveKind DKind) {
  12177. llvm_unreachable("Not supported in SIMD-only mode");
  12178. }
  12179. llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
  12180. SourceLocation Loc,
  12181. unsigned IVSize, bool IVSigned,
  12182. Address IL, Address LB,
  12183. Address UB, Address ST) {
  12184. llvm_unreachable("Not supported in SIMD-only mode");
  12185. }
  12186. void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
  12187. llvm::Value *NumThreads,
  12188. SourceLocation Loc) {
  12189. llvm_unreachable("Not supported in SIMD-only mode");
  12190. }
  12191. void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
  12192. ProcBindKind ProcBind,
  12193. SourceLocation Loc) {
  12194. llvm_unreachable("Not supported in SIMD-only mode");
  12195. }
  12196. Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
  12197. const VarDecl *VD,
  12198. Address VDAddr,
  12199. SourceLocation Loc) {
  12200. llvm_unreachable("Not supported in SIMD-only mode");
  12201. }
  12202. llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
  12203. const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
  12204. CodeGenFunction *CGF) {
  12205. llvm_unreachable("Not supported in SIMD-only mode");
  12206. }
  12207. Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
  12208. CodeGenFunction &CGF, QualType VarType, StringRef Name) {
  12209. llvm_unreachable("Not supported in SIMD-only mode");
  12210. }
  12211. void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
  12212. ArrayRef<const Expr *> Vars,
  12213. SourceLocation Loc,
  12214. llvm::AtomicOrdering AO) {
  12215. llvm_unreachable("Not supported in SIMD-only mode");
  12216. }
  12217. void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
  12218. const OMPExecutableDirective &D,
  12219. llvm::Function *TaskFunction,
  12220. QualType SharedsTy, Address Shareds,
  12221. const Expr *IfCond,
  12222. const OMPTaskDataTy &Data) {
  12223. llvm_unreachable("Not supported in SIMD-only mode");
  12224. }
  12225. void CGOpenMPSIMDRuntime::emitTaskLoopCall(
  12226. CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
  12227. llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds,
  12228. const Expr *IfCond, const OMPTaskDataTy &Data) {
  12229. llvm_unreachable("Not supported in SIMD-only mode");
  12230. }
  12231. void CGOpenMPSIMDRuntime::emitReduction(
  12232. CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
  12233. ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
  12234. ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  12235. assert(Options.SimpleReduction && "Only simple reduction is expected.");
  12236. CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
  12237. ReductionOps, Options);
  12238. }
  12239. llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
  12240. CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
  12241. ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  12242. llvm_unreachable("Not supported in SIMD-only mode");
  12243. }
  12244. void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
  12245. SourceLocation Loc,
  12246. bool IsWorksharingReduction) {
  12247. llvm_unreachable("Not supported in SIMD-only mode");
  12248. }
  12249. void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
  12250. SourceLocation Loc,
  12251. ReductionCodeGen &RCG,
  12252. unsigned N) {
  12253. llvm_unreachable("Not supported in SIMD-only mode");
  12254. }
  12255. Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
  12256. SourceLocation Loc,
  12257. llvm::Value *ReductionsPtr,
  12258. LValue SharedLVal) {
  12259. llvm_unreachable("Not supported in SIMD-only mode");
  12260. }
  12261. void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
  12262. SourceLocation Loc,
  12263. const OMPTaskDataTy &Data) {
  12264. llvm_unreachable("Not supported in SIMD-only mode");
  12265. }
  12266. void CGOpenMPSIMDRuntime::emitCancellationPointCall(
  12267. CodeGenFunction &CGF, SourceLocation Loc,
  12268. OpenMPDirectiveKind CancelRegion) {
  12269. llvm_unreachable("Not supported in SIMD-only mode");
  12270. }
  12271. void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
  12272. SourceLocation Loc, const Expr *IfCond,
  12273. OpenMPDirectiveKind CancelRegion) {
  12274. llvm_unreachable("Not supported in SIMD-only mode");
  12275. }
  12276. void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
  12277. const OMPExecutableDirective &D, StringRef ParentName,
  12278. llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
  12279. bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  12280. llvm_unreachable("Not supported in SIMD-only mode");
  12281. }
  12282. void CGOpenMPSIMDRuntime::emitTargetCall(
  12283. CodeGenFunction &CGF, const OMPExecutableDirective &D,
  12284. llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
  12285. llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
  12286. llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
  12287. const OMPLoopDirective &D)>
  12288. SizeEmitter) {
  12289. llvm_unreachable("Not supported in SIMD-only mode");
  12290. }
  12291. bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
  12292. llvm_unreachable("Not supported in SIMD-only mode");
  12293. }
  12294. bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  12295. llvm_unreachable("Not supported in SIMD-only mode");
  12296. }
  12297. bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  12298. return false;
  12299. }
  12300. void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
  12301. const OMPExecutableDirective &D,
  12302. SourceLocation Loc,
  12303. llvm::Function *OutlinedFn,
  12304. ArrayRef<llvm::Value *> CapturedVars) {
  12305. llvm_unreachable("Not supported in SIMD-only mode");
  12306. }
  12307. void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
  12308. const Expr *NumTeams,
  12309. const Expr *ThreadLimit,
  12310. SourceLocation Loc) {
  12311. llvm_unreachable("Not supported in SIMD-only mode");
  12312. }
  12313. void CGOpenMPSIMDRuntime::emitTargetDataCalls(
  12314. CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
  12315. const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  12316. llvm_unreachable("Not supported in SIMD-only mode");
  12317. }
  12318. void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
  12319. CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
  12320. const Expr *Device) {
  12321. llvm_unreachable("Not supported in SIMD-only mode");
  12322. }
  12323. void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
  12324. const OMPLoopDirective &D,
  12325. ArrayRef<Expr *> NumIterations) {
  12326. llvm_unreachable("Not supported in SIMD-only mode");
  12327. }
  12328. void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
  12329. const OMPDependClause *C) {
  12330. llvm_unreachable("Not supported in SIMD-only mode");
  12331. }
  12332. const VarDecl *
  12333. CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
  12334. const VarDecl *NativeParam) const {
  12335. llvm_unreachable("Not supported in SIMD-only mode");
  12336. }
  12337. Address
  12338. CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
  12339. const VarDecl *NativeParam,
  12340. const VarDecl *TargetParam) const {
  12341. llvm_unreachable("Not supported in SIMD-only mode");
  12342. }