  1. //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the code for emitting atomic operations.
  10. //
  11. //===----------------------------------------------------------------------===//
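//
// For orientation: roughly speaking, a C11 expression such as
//   _Atomic(int) Counter; /* ... */ atomic_fetch_add(&Counter, 1);
// is lowered here either to a native LLVM 'atomicrmw add' instruction or, when
// the target cannot handle the size and alignment inline, to a libatomic call
// such as __atomic_fetch_add_4 (illustrative example; the exact lowering is
// target-dependent).
//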
  12. #include "CGCall.h"
  13. #include "CGRecordLayout.h"
  14. #include "CodeGenFunction.h"
  15. #include "CodeGenModule.h"
  16. #include "TargetInfo.h"
  17. #include "clang/AST/ASTContext.h"
  18. #include "clang/CodeGen/CGFunctionInfo.h"
  19. #include "clang/Frontend/FrontendDiagnostic.h"
  20. #include "llvm/ADT/DenseMap.h"
  21. #include "llvm/IR/DataLayout.h"
  22. #include "llvm/IR/Intrinsics.h"
  23. #include "llvm/IR/Operator.h"
  24. using namespace clang;
  25. using namespace CodeGen;
  26. namespace {
  27. class AtomicInfo {
  28. CodeGenFunction &CGF;
  29. QualType AtomicTy;
  30. QualType ValueTy;
  31. uint64_t AtomicSizeInBits;
  32. uint64_t ValueSizeInBits;
  33. CharUnits AtomicAlign;
  34. CharUnits ValueAlign;
  35. TypeEvaluationKind EvaluationKind;
  36. bool UseLibcall;
  37. LValue LVal;
  38. CGBitFieldInfo BFI;
  39. public:
  40. AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
  41. : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
  42. EvaluationKind(TEK_Scalar), UseLibcall(true) {
  43. assert(!lvalue.isGlobalReg());
  44. ASTContext &C = CGF.getContext();
  45. if (lvalue.isSimple()) {
  46. AtomicTy = lvalue.getType();
  47. if (auto *ATy = AtomicTy->getAs<AtomicType>())
  48. ValueTy = ATy->getValueType();
  49. else
  50. ValueTy = AtomicTy;
  51. EvaluationKind = CGF.getEvaluationKind(ValueTy);
  52. uint64_t ValueAlignInBits;
  53. uint64_t AtomicAlignInBits;
  54. TypeInfo ValueTI = C.getTypeInfo(ValueTy);
  55. ValueSizeInBits = ValueTI.Width;
  56. ValueAlignInBits = ValueTI.Align;
  57. TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
  58. AtomicSizeInBits = AtomicTI.Width;
  59. AtomicAlignInBits = AtomicTI.Align;
  60. assert(ValueSizeInBits <= AtomicSizeInBits);
  61. assert(ValueAlignInBits <= AtomicAlignInBits);
  62. AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
  63. ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
  64. if (lvalue.getAlignment().isZero())
  65. lvalue.setAlignment(AtomicAlign);
  66. LVal = lvalue;
  67. } else if (lvalue.isBitField()) {
  68. ValueTy = lvalue.getType();
  69. ValueSizeInBits = C.getTypeSize(ValueTy);
  70. auto &OrigBFI = lvalue.getBitFieldInfo();
  71. auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
  72. AtomicSizeInBits = C.toBits(
  73. C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
  74. .alignTo(lvalue.getAlignment()));
  75. auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
  76. auto OffsetInChars =
  77. (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
  78. lvalue.getAlignment();
  79. VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
  80. CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
  81. llvm::Type *IntTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
  82. auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  83. VoidPtrAddr, IntTy->getPointerTo(), "atomic_bitfield_base");
  84. BFI = OrigBFI;
  85. BFI.Offset = Offset;
  86. BFI.StorageSize = AtomicSizeInBits;
  87. BFI.StorageOffset += OffsetInChars;
  88. LVal = LValue::MakeBitfield(Address(Addr, IntTy, lvalue.getAlignment()),
  89. BFI, lvalue.getType(), lvalue.getBaseInfo(),
  90. lvalue.getTBAAInfo());
  91. AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
  92. if (AtomicTy.isNull()) {
  93. llvm::APInt Size(
  94. /*numBits=*/32,
  95. C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
  96. AtomicTy =
  97. C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
  98. /*IndexTypeQuals=*/0);
  99. }
  100. AtomicAlign = ValueAlign = lvalue.getAlignment();
  101. } else if (lvalue.isVectorElt()) {
  102. ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
  103. ValueSizeInBits = C.getTypeSize(ValueTy);
  104. AtomicTy = lvalue.getType();
  105. AtomicSizeInBits = C.getTypeSize(AtomicTy);
  106. AtomicAlign = ValueAlign = lvalue.getAlignment();
  107. LVal = lvalue;
  108. } else {
  109. assert(lvalue.isExtVectorElt());
  110. ValueTy = lvalue.getType();
  111. ValueSizeInBits = C.getTypeSize(ValueTy);
  112. AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
  113. lvalue.getType(), cast<llvm::FixedVectorType>(
  114. lvalue.getExtVectorAddress().getElementType())
  115. ->getNumElements());
  116. AtomicSizeInBits = C.getTypeSize(AtomicTy);
  117. AtomicAlign = ValueAlign = lvalue.getAlignment();
  118. LVal = lvalue;
  119. }
  120. UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
  121. AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  122. }
  123. QualType getAtomicType() const { return AtomicTy; }
  124. QualType getValueType() const { return ValueTy; }
  125. CharUnits getAtomicAlignment() const { return AtomicAlign; }
  126. uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  127. uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  128. TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
  129. bool shouldUseLibcall() const { return UseLibcall; }
  130. const LValue &getAtomicLValue() const { return LVal; }
  131. llvm::Value *getAtomicPointer() const {
  132. if (LVal.isSimple())
  133. return LVal.getPointer(CGF);
  134. else if (LVal.isBitField())
  135. return LVal.getBitFieldPointer();
  136. else if (LVal.isVectorElt())
  137. return LVal.getVectorPointer();
  138. assert(LVal.isExtVectorElt());
  139. return LVal.getExtVectorPointer();
  140. }
  141. Address getAtomicAddress() const {
  142. llvm::Type *ElTy;
  143. if (LVal.isSimple())
  144. ElTy = LVal.getAddress(CGF).getElementType();
  145. else if (LVal.isBitField())
  146. ElTy = LVal.getBitFieldAddress().getElementType();
  147. else if (LVal.isVectorElt())
  148. ElTy = LVal.getVectorAddress().getElementType();
  149. else
  150. ElTy = LVal.getExtVectorAddress().getElementType();
  151. return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  152. }
  153. Address getAtomicAddressAsAtomicIntPointer() const {
  154. return emitCastToAtomicIntPointer(getAtomicAddress());
  155. }
  156. /// Is the atomic size larger than the underlying value type?
  157. ///
  158. /// Note that the absence of padding does not mean that atomic
  159. /// objects are completely interchangeable with non-atomic
  160. /// objects: we might have promoted the alignment of a type
  161. /// without making it bigger.
  162. bool hasPadding() const {
  163. return (ValueSizeInBits != AtomicSizeInBits);
  164. }
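// Illustrative, target-dependent example: for _Atomic of a 3-byte struct, the
// atomic representation is typically widened to 4 bytes, so ValueSizeInBits
// is 24 while AtomicSizeInBits is 32 and hasPadding() is true; the extra byte
// must then be zeroed so that bytewise compares (e.g. cmpxchg) behave
// predictably.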
  165. bool emitMemSetZeroIfNecessary() const;
  166. llvm::Value *getAtomicSizeValue() const {
  167. CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
  168. return CGF.CGM.getSize(size);
  169. }
  170. /// Cast the given pointer to an integer pointer suitable for atomic
  171. /// operations.
  172. Address emitCastToAtomicIntPointer(Address Addr) const;
  173. /// If Addr is compatible with the iN that will be used for an atomic
  174. /// operation, bitcast it. Otherwise, create a temporary that is suitable
  175. /// and copy the value across.
  176. Address convertToAtomicIntPointer(Address Addr) const;
  177. /// Turn an atomic-layout object into an r-value.
  178. RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
  179. SourceLocation loc, bool AsValue) const;
  180. /// Converts an r-value to an integer value.
  181. llvm::Value *convertRValueToInt(RValue RVal) const;
  182. RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
  183. AggValueSlot ResultSlot,
  184. SourceLocation Loc, bool AsValue) const;
  185. /// Copy an atomic r-value into atomic-layout memory.
  186. void emitCopyIntoMemory(RValue rvalue) const;
  187. /// Project an l-value down to the value field.
  188. LValue projectValue() const {
  189. assert(LVal.isSimple());
  190. Address addr = getAtomicAddress();
  191. if (hasPadding())
  192. addr = CGF.Builder.CreateStructGEP(addr, 0);
  193. return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
  194. LVal.getBaseInfo(), LVal.getTBAAInfo());
  195. }
  196. /// Emits atomic load.
  197. /// \returns Loaded value.
  198. RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
  199. bool AsValue, llvm::AtomicOrdering AO,
  200. bool IsVolatile);
  201. /// Emits atomic compare-and-exchange sequence.
  202. /// \param Expected Expected value.
  203. /// \param Desired Desired value.
  204. /// \param Success Atomic ordering for success operation.
  205. /// \param Failure Atomic ordering for failed operation.
  206. /// \param IsWeak true if atomic operation is weak, false otherwise.
  207. /// \returns Pair of values: the previous value from storage (value type) and
  208. /// a boolean flag (i1 type) that is true on success and false otherwise.
  209. std::pair<RValue, llvm::Value *>
  210. EmitAtomicCompareExchange(RValue Expected, RValue Desired,
  211. llvm::AtomicOrdering Success =
  212. llvm::AtomicOrdering::SequentiallyConsistent,
  213. llvm::AtomicOrdering Failure =
  214. llvm::AtomicOrdering::SequentiallyConsistent,
  215. bool IsWeak = false);
  216. /// Emits atomic update.
  217. /// \param AO Atomic ordering.
  218. /// \param UpdateOp Update operation for the current lvalue.
  219. void EmitAtomicUpdate(llvm::AtomicOrdering AO,
  220. const llvm::function_ref<RValue(RValue)> &UpdateOp,
  221. bool IsVolatile);
  222. /// Emits atomic update.
  223. /// \param AO Atomic ordering.
  224. void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
  225. bool IsVolatile);
  226. /// Materialize an atomic r-value in atomic-layout memory.
  227. Address materializeRValue(RValue rvalue) const;
  228. /// Creates temp alloca for intermediate operations on atomic value.
  229. Address CreateTempAlloca() const;
  230. private:
  231. bool requiresMemSetZero(llvm::Type *type) const;
  232. /// Emits atomic load as a libcall.
  233. void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
  234. llvm::AtomicOrdering AO, bool IsVolatile);
  235. /// Emits atomic load as LLVM instruction.
  236. llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
  237. /// Emits atomic compare-and-exchange op as a libcall.
  238. llvm::Value *EmitAtomicCompareExchangeLibcall(
  239. llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
  240. llvm::AtomicOrdering Success =
  241. llvm::AtomicOrdering::SequentiallyConsistent,
  242. llvm::AtomicOrdering Failure =
  243. llvm::AtomicOrdering::SequentiallyConsistent);
  244. /// Emits atomic compare-and-exchange op as LLVM instruction.
  245. std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
  246. llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
  247. llvm::AtomicOrdering Success =
  248. llvm::AtomicOrdering::SequentiallyConsistent,
  249. llvm::AtomicOrdering Failure =
  250. llvm::AtomicOrdering::SequentiallyConsistent,
  251. bool IsWeak = false);
  252. /// Emit atomic update as libcalls.
  253. void
  254. EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
  255. const llvm::function_ref<RValue(RValue)> &UpdateOp,
  256. bool IsVolatile);
  257. /// Emit atomic update as LLVM instructions.
  258. void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
  259. const llvm::function_ref<RValue(RValue)> &UpdateOp,
  260. bool IsVolatile);
  261. /// Emit atomic update as libcalls.
  262. void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
  263. bool IsVolatile);
  264. /// Emit atomic update as LLVM instructions.
  265. void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
  266. bool IsVolatile);
  267. };
  268. }
  269. Address AtomicInfo::CreateTempAlloca() const {
  270. Address TempAlloca = CGF.CreateMemTemp(
  271. (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
  272. : AtomicTy,
  273. getAtomicAlignment(),
  274. "atomic-temp");
  275. // Cast to pointer to value type for bitfields.
  276. if (LVal.isBitField())
  277. return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
  278. TempAlloca, getAtomicAddress().getType(),
  279. getAtomicAddress().getElementType());
  280. return TempAlloca;
  281. }
  282. static RValue emitAtomicLibcall(CodeGenFunction &CGF,
  283. StringRef fnName,
  284. QualType resultType,
  285. CallArgList &args) {
  286. const CGFunctionInfo &fnInfo =
  287. CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  288. llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  289. llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  290. fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  291. fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  292. llvm::AttributeList fnAttrs = llvm::AttributeList::get(
  293. CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
  294. llvm::FunctionCallee fn =
  295. CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  296. auto callee = CGCallee::forDirect(fn);
  297. return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
  298. }
  299. /// Does a store of the given IR type modify the full expected width?
  300. static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
  301. uint64_t expectedSize) {
  302. return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
  303. }
  304. /// Does the atomic type require memsetting to zero before initialization?
  305. ///
  306. /// The IR type is provided as a way of making certain queries faster.
  307. bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  308. // If the atomic type has size padding, we definitely need a memset.
  309. if (hasPadding()) return true;
  310. // Otherwise, do some simple heuristics to try to avoid it:
  311. switch (getEvaluationKind()) {
  312. // For scalars and complexes, check whether the store size of the
  313. // type uses the full size.
  314. case TEK_Scalar:
  315. return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  316. case TEK_Complex:
  317. return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
  318. AtomicSizeInBits / 2);
  319. // Padding in structs has an undefined bit pattern. User beware.
  320. case TEK_Aggregate:
  321. return false;
  322. }
  323. llvm_unreachable("bad evaluation kind");
  324. }
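// Illustrative example of the heuristic above (assuming an x86-64 layout): an
// _Atomic long double occupies 16 bytes, but a store of the underlying
// x86_fp80 writes only 10 bytes, so isFullSizeType() is false and
// requiresMemSetZero() returns true so the remaining bytes are zeroed first.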
  325. bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  326. assert(LVal.isSimple());
  327. Address addr = LVal.getAddress(CGF);
  328. if (!requiresMemSetZero(addr.getElementType()))
  329. return false;
  330. CGF.Builder.CreateMemSet(
  331. addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
  332. CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
  333. LVal.getAlignment().getAsAlign());
  334. return true;
  335. }
  336. static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
  337. Address Dest, Address Ptr,
  338. Address Val1, Address Val2,
  339. uint64_t Size,
  340. llvm::AtomicOrdering SuccessOrder,
  341. llvm::AtomicOrdering FailureOrder,
  342. llvm::SyncScope::ID Scope) {
  343. // The weak flag of the resulting cmpxchg is taken from IsWeak and applied below.
  344. llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  345. llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
  346. llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
  347. Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
  348. Scope);
  349. Pair->setVolatile(E->isVolatile());
  350. Pair->setWeak(IsWeak);
  351. // Cmp holds the result of the compare-exchange operation: true on success,
  352. // false on failure.
  353. llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  354. llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
  355. // This basic block is used to hold the store instruction if the operation
  356. // failed.
  357. llvm::BasicBlock *StoreExpectedBB =
  358. CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  360. // This basic block is the exit point of the operation; we should end up
  361. // here regardless of whether or not the operation succeeded.
  361. llvm::BasicBlock *ContinueBB =
  362. CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
  363. // Update Expected if Expected isn't equal to Old, otherwise branch to the
  364. // exit point.
  365. CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
  366. CGF.Builder.SetInsertPoint(StoreExpectedBB);
  367. // Update the memory at Expected with Old's value.
  368. CGF.Builder.CreateStore(Old, Val1);
  369. // Finally, branch to the exit point.
  370. CGF.Builder.CreateBr(ContinueBB);
  371. CGF.Builder.SetInsertPoint(ContinueBB);
  372. // Update the memory at Dest with Cmp's value.
  373. CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  374. }
  375. /// Given an ordering required on success, emit all possible cmpxchg
  376. /// instructions to cope with the provided (but possibly only dynamically known)
  377. /// FailureOrder.
  378. static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
  379. bool IsWeak, Address Dest, Address Ptr,
  380. Address Val1, Address Val2,
  381. llvm::Value *FailureOrderVal,
  382. uint64_t Size,
  383. llvm::AtomicOrdering SuccessOrder,
  384. llvm::SyncScope::ID Scope) {
  385. llvm::AtomicOrdering FailureOrder;
  386. if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
  387. auto FOS = FO->getSExtValue();
  388. if (!llvm::isValidAtomicOrderingCABI(FOS))
  389. FailureOrder = llvm::AtomicOrdering::Monotonic;
  390. else
  391. switch ((llvm::AtomicOrderingCABI)FOS) {
  392. case llvm::AtomicOrderingCABI::relaxed:
  393. // 31.7.2.18: "The failure argument shall not be memory_order_release
  394. // nor memory_order_acq_rel". Fallback to monotonic.
  395. case llvm::AtomicOrderingCABI::release:
  396. case llvm::AtomicOrderingCABI::acq_rel:
  397. FailureOrder = llvm::AtomicOrdering::Monotonic;
  398. break;
  399. case llvm::AtomicOrderingCABI::consume:
  400. case llvm::AtomicOrderingCABI::acquire:
  401. FailureOrder = llvm::AtomicOrdering::Acquire;
  402. break;
  403. case llvm::AtomicOrderingCABI::seq_cst:
  404. FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
  405. break;
  406. }
  407. // Prior to C++17, "the failure argument shall be no stronger than the
  408. // success argument". This condition has been lifted and the only
  409. // precondition is 31.7.2.18. Effectively treat this as a DR and skip
  410. // language version checks.
  411. emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
  412. FailureOrder, Scope);
  413. return;
  414. }
  415. // Create all the relevant BB's
  416. auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  417. auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  418. auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  419. auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
  420. // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  421. // doesn't matter unless someone is crazy enough to use something that
  422. // doesn't fold to a constant for the ordering.
  423. llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  424. // memory_order_consume is implemented as acquire, since that is the closest ordering in LLVM.
  425. SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
  426. AcquireBB);
  427. SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
  428. AcquireBB);
  429. SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
  430. SeqCstBB);
  431. // Emit all the different atomics
  432. CGF.Builder.SetInsertPoint(MonotonicBB);
  433. emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
  434. Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  435. CGF.Builder.CreateBr(ContBB);
  436. CGF.Builder.SetInsertPoint(AcquireBB);
  437. emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
  438. llvm::AtomicOrdering::Acquire, Scope);
  439. CGF.Builder.CreateBr(ContBB);
  440. CGF.Builder.SetInsertPoint(SeqCstBB);
  441. emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
  442. llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  443. CGF.Builder.CreateBr(ContBB);
  444. CGF.Builder.SetInsertPoint(ContBB);
  445. }
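// Illustrative example: for a call such as
//   atomic_compare_exchange_strong_explicit(&A, &E, D, success, failure)
// where 'failure' is not a compile-time constant, the helper above emits a
// switch over the runtime failure ordering with monotonic, acquire, and
// seq_cst destinations, each containing its own cmpxchg, and defaults to
// monotonic for unexpected values.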
  446. /// Duplicate the atomic min/max operation in conventional IR for the builtin
  447. /// variants that return the new rather than the original value.
  448. static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
  449. AtomicExpr::AtomicOp Op,
  450. bool IsSigned,
  451. llvm::Value *OldVal,
  452. llvm::Value *RHS) {
  453. llvm::CmpInst::Predicate Pred;
  454. switch (Op) {
  455. default:
  456. llvm_unreachable("Unexpected min/max operation");
  457. case AtomicExpr::AO__atomic_max_fetch:
  458. Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
  459. break;
  460. case AtomicExpr::AO__atomic_min_fetch:
  461. Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
  462. break;
  463. }
  464. llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  465. return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
  466. }
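// Illustrative example: __atomic_max_fetch(&X, R, order) must return the new
// value max(old, R), but 'atomicrmw max' yields the old value, so the helper
// above recomputes the maximum from the returned old value with icmp + select.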
  467. static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
  468. Address Ptr, Address Val1, Address Val2,
  469. llvm::Value *IsWeak, llvm::Value *FailureOrder,
  470. uint64_t Size, llvm::AtomicOrdering Order,
  471. llvm::SyncScope::ID Scope) {
  472. llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  473. bool PostOpMinMax = false;
  474. unsigned PostOp = 0;
  475. switch (E->getOp()) {
  476. case AtomicExpr::AO__c11_atomic_init:
  477. case AtomicExpr::AO__opencl_atomic_init:
  478. llvm_unreachable("Already handled!");
  479. case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  480. case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  481. case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  482. emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
  483. FailureOrder, Size, Order, Scope);
  484. return;
  485. case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  486. case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  487. case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  488. emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
  489. FailureOrder, Size, Order, Scope);
  490. return;
  491. case AtomicExpr::AO__atomic_compare_exchange:
  492. case AtomicExpr::AO__atomic_compare_exchange_n: {
  493. if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
  494. emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
  495. Val1, Val2, FailureOrder, Size, Order, Scope);
  496. } else {
  497. // Create all the relevant BB's
  498. llvm::BasicBlock *StrongBB =
  499. CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
  500. llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
  501. llvm::BasicBlock *ContBB =
  502. CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
  503. llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
  504. SI->addCase(CGF.Builder.getInt1(false), StrongBB);
  505. CGF.Builder.SetInsertPoint(StrongBB);
  506. emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
  507. FailureOrder, Size, Order, Scope);
  508. CGF.Builder.CreateBr(ContBB);
  509. CGF.Builder.SetInsertPoint(WeakBB);
  510. emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
  511. FailureOrder, Size, Order, Scope);
  512. CGF.Builder.CreateBr(ContBB);
  513. CGF.Builder.SetInsertPoint(ContBB);
  514. }
  515. return;
  516. }
  517. case AtomicExpr::AO__c11_atomic_load:
  518. case AtomicExpr::AO__opencl_atomic_load:
  519. case AtomicExpr::AO__hip_atomic_load:
  520. case AtomicExpr::AO__atomic_load_n:
  521. case AtomicExpr::AO__atomic_load: {
  522. llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
  523. Load->setAtomic(Order, Scope);
  524. Load->setVolatile(E->isVolatile());
  525. CGF.Builder.CreateStore(Load, Dest);
  526. return;
  527. }
  528. case AtomicExpr::AO__c11_atomic_store:
  529. case AtomicExpr::AO__opencl_atomic_store:
  530. case AtomicExpr::AO__hip_atomic_store:
  531. case AtomicExpr::AO__atomic_store:
  532. case AtomicExpr::AO__atomic_store_n: {
  533. llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  534. llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
  535. Store->setAtomic(Order, Scope);
  536. Store->setVolatile(E->isVolatile());
  537. return;
  538. }
  539. case AtomicExpr::AO__c11_atomic_exchange:
  540. case AtomicExpr::AO__hip_atomic_exchange:
  541. case AtomicExpr::AO__opencl_atomic_exchange:
  542. case AtomicExpr::AO__atomic_exchange_n:
  543. case AtomicExpr::AO__atomic_exchange:
  544. Op = llvm::AtomicRMWInst::Xchg;
  545. break;
  546. case AtomicExpr::AO__atomic_add_fetch:
  547. PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
  548. : llvm::Instruction::Add;
  549. [[fallthrough]];
  550. case AtomicExpr::AO__c11_atomic_fetch_add:
  551. case AtomicExpr::AO__hip_atomic_fetch_add:
  552. case AtomicExpr::AO__opencl_atomic_fetch_add:
  553. case AtomicExpr::AO__atomic_fetch_add:
  554. Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
  555. : llvm::AtomicRMWInst::Add;
  556. break;
  557. case AtomicExpr::AO__atomic_sub_fetch:
  558. PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
  559. : llvm::Instruction::Sub;
  560. [[fallthrough]];
  561. case AtomicExpr::AO__c11_atomic_fetch_sub:
  562. case AtomicExpr::AO__opencl_atomic_fetch_sub:
  563. case AtomicExpr::AO__atomic_fetch_sub:
  564. Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
  565. : llvm::AtomicRMWInst::Sub;
  566. break;
  567. case AtomicExpr::AO__atomic_min_fetch:
  568. PostOpMinMax = true;
  569. [[fallthrough]];
  570. case AtomicExpr::AO__c11_atomic_fetch_min:
  571. case AtomicExpr::AO__hip_atomic_fetch_min:
  572. case AtomicExpr::AO__opencl_atomic_fetch_min:
  573. case AtomicExpr::AO__atomic_fetch_min:
  574. Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
  575. : llvm::AtomicRMWInst::UMin;
  576. break;
  577. case AtomicExpr::AO__atomic_max_fetch:
  578. PostOpMinMax = true;
  579. [[fallthrough]];
  580. case AtomicExpr::AO__c11_atomic_fetch_max:
  581. case AtomicExpr::AO__hip_atomic_fetch_max:
  582. case AtomicExpr::AO__opencl_atomic_fetch_max:
  583. case AtomicExpr::AO__atomic_fetch_max:
  584. Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
  585. : llvm::AtomicRMWInst::UMax;
  586. break;
  587. case AtomicExpr::AO__atomic_and_fetch:
  588. PostOp = llvm::Instruction::And;
  589. [[fallthrough]];
  590. case AtomicExpr::AO__c11_atomic_fetch_and:
  591. case AtomicExpr::AO__hip_atomic_fetch_and:
  592. case AtomicExpr::AO__opencl_atomic_fetch_and:
  593. case AtomicExpr::AO__atomic_fetch_and:
  594. Op = llvm::AtomicRMWInst::And;
  595. break;
  596. case AtomicExpr::AO__atomic_or_fetch:
  597. PostOp = llvm::Instruction::Or;
  598. [[fallthrough]];
  599. case AtomicExpr::AO__c11_atomic_fetch_or:
  600. case AtomicExpr::AO__hip_atomic_fetch_or:
  601. case AtomicExpr::AO__opencl_atomic_fetch_or:
  602. case AtomicExpr::AO__atomic_fetch_or:
  603. Op = llvm::AtomicRMWInst::Or;
  604. break;
  605. case AtomicExpr::AO__atomic_xor_fetch:
  606. PostOp = llvm::Instruction::Xor;
  607. [[fallthrough]];
  608. case AtomicExpr::AO__c11_atomic_fetch_xor:
  609. case AtomicExpr::AO__hip_atomic_fetch_xor:
  610. case AtomicExpr::AO__opencl_atomic_fetch_xor:
  611. case AtomicExpr::AO__atomic_fetch_xor:
  612. Op = llvm::AtomicRMWInst::Xor;
  613. break;
  614. case AtomicExpr::AO__atomic_nand_fetch:
  615. PostOp = llvm::Instruction::And; // the NOT is special cased below
  616. [[fallthrough]];
  617. case AtomicExpr::AO__c11_atomic_fetch_nand:
  618. case AtomicExpr::AO__atomic_fetch_nand:
  619. Op = llvm::AtomicRMWInst::Nand;
  620. break;
  621. }
  622. llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  623. llvm::AtomicRMWInst *RMWI =
  624. CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  625. RMWI->setVolatile(E->isVolatile());
  626. // For __atomic_*_fetch operations, perform the operation again to
  627. // determine the value which was written.
  628. llvm::Value *Result = RMWI;
  629. if (PostOpMinMax)
  630. Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
  631. E->getValueType()->isSignedIntegerType(),
  632. RMWI, LoadVal1);
  633. else if (PostOp)
  634. Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
  635. LoadVal1);
  636. if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
  637. Result = CGF.Builder.CreateNot(Result);
  638. CGF.Builder.CreateStore(Result, Dest);
  639. }
  640. // This function emits any expression (scalar, complex, or aggregate)
  641. // into a temporary alloca.
  642. static Address
  643. EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  644. Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  645. CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
  646. /*Init*/ true);
  647. return DeclPtr;
  648. }
  649. static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
  650. Address Ptr, Address Val1, Address Val2,
  651. llvm::Value *IsWeak, llvm::Value *FailureOrder,
  652. uint64_t Size, llvm::AtomicOrdering Order,
  653. llvm::Value *Scope) {
  654. auto ScopeModel = Expr->getScopeModel();
  655. // LLVM atomic instructions always have a synch scope. If the clang atomic
  656. // expression has no scope operand, use the default LLVM synch scope.
  657. if (!ScopeModel) {
  658. EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
  659. Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
  660. return;
  661. }
  662. // Handle constant scope.
  663. if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
  664. auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
  665. CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
  666. Order, CGF.CGM.getLLVMContext());
  667. EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
  668. Order, SCID);
  669. return;
  670. }
  671. // Handle non-constant scope.
  672. auto &Builder = CGF.Builder;
  673. auto Scopes = ScopeModel->getRuntimeValues();
  674. llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  675. for (auto S : Scopes)
  676. BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
  677. llvm::BasicBlock *ContBB =
  678. CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
  679. auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  680. // If an unsupported synch scope is encountered at run time, assume a fallback
  681. // synch scope value.
  682. auto FallBack = ScopeModel->getFallBackValue();
  683. llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  684. for (auto S : Scopes) {
  685. auto *B = BB[S];
  686. if (S != FallBack)
  687. SI->addCase(Builder.getInt32(S), B);
  688. Builder.SetInsertPoint(B);
  689. EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
  690. Order,
  691. CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
  692. ScopeModel->map(S),
  693. Order,
  694. CGF.getLLVMContext()));
  695. Builder.CreateBr(ContBB);
  696. }
  697. Builder.SetInsertPoint(ContBB);
  698. }
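// Illustrative example: for OpenCL code such as
//   atomic_fetch_add_explicit(p, 1, memory_order_seq_cst, scope)
// where 'scope' is not a compile-time constant, the wrapper above emits one
// basic block per supported synch scope, switches on the runtime scope value,
// and falls back to the scope model's default for unrecognized values.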
  699. static void
  700. AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
  701. bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
  702. SourceLocation Loc, CharUnits SizeInChars) {
  703. if (UseOptimizedLibcall) {
  704. // Load value and pass it to the function directly.
  705. CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
  706. int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
  707. ValTy =
  708. CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
  709. llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
  710. Address Ptr = Address(CGF.Builder.CreateBitCast(Val, ITy->getPointerTo()),
  711. ITy, Align);
  712. Val = CGF.EmitLoadOfScalar(Ptr, false,
  713. CGF.getContext().getPointerType(ValTy),
  714. Loc);
  715. // Coerce the value into an appropriately sized integer type.
  716. Args.add(RValue::get(Val), ValTy);
  717. } else {
  718. // Non-optimized functions always take a reference.
  719. Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
  720. CGF.getContext().VoidPtrTy);
  721. }
  722. }
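// Illustrative example of the two conventions above: a 4-byte operand is
// passed to an optimized libcall such as __atomic_fetch_add_4 directly as an
// i32, whereas the generic entry points (__atomic_exchange, __atomic_store,
// ...) receive a void* to the value, with the size passed separately by the
// caller.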
  723. RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  724. QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  725. QualType MemTy = AtomicTy;
  726. if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
  727. MemTy = AT->getValueType();
  728. llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
  729. Address Val1 = Address::invalid();
  730. Address Val2 = Address::invalid();
  731. Address Dest = Address::invalid();
  732. Address Ptr = EmitPointerWithAlignment(E->getPtr());
  733. if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
  734. E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
  735. LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
  736. EmitAtomicInit(E->getVal1(), lvalue);
  737. return RValue::get(nullptr);
  738. }
  739. auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  740. uint64_t Size = TInfo.Width.getQuantity();
  741. unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  742. bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  743. bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  744. bool UseLibcall = Misaligned | Oversized;
  745. bool ShouldCastToIntPtrTy = true;
  746. CharUnits MaxInlineWidth =
  747. getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  748. DiagnosticsEngine &Diags = CGM.getDiags();
  749. if (Misaligned) {
  750. Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
  751. << (int)TInfo.Width.getQuantity()
  752. << (int)Ptr.getAlignment().getQuantity();
  753. }
  754. if (Oversized) {
  755. Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
  756. << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  757. }
  758. llvm::Value *Order = EmitScalarExpr(E->getOrder());
  759. llvm::Value *Scope =
  760. E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
  761. switch (E->getOp()) {
  762. case AtomicExpr::AO__c11_atomic_init:
  763. case AtomicExpr::AO__opencl_atomic_init:
  764. llvm_unreachable("Already handled above with EmitAtomicInit!");
  765. case AtomicExpr::AO__c11_atomic_load:
  766. case AtomicExpr::AO__opencl_atomic_load:
  767. case AtomicExpr::AO__hip_atomic_load:
  768. case AtomicExpr::AO__atomic_load_n:
  769. break;
  770. case AtomicExpr::AO__atomic_load:
  771. Dest = EmitPointerWithAlignment(E->getVal1());
  772. break;
  773. case AtomicExpr::AO__atomic_store:
  774. Val1 = EmitPointerWithAlignment(E->getVal1());
  775. break;
  776. case AtomicExpr::AO__atomic_exchange:
  777. Val1 = EmitPointerWithAlignment(E->getVal1());
  778. Dest = EmitPointerWithAlignment(E->getVal2());
  779. break;
  780. case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  781. case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  782. case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  783. case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  784. case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  785. case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  786. case AtomicExpr::AO__atomic_compare_exchange_n:
  787. case AtomicExpr::AO__atomic_compare_exchange:
  788. Val1 = EmitPointerWithAlignment(E->getVal1());
  789. if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
  790. Val2 = EmitPointerWithAlignment(E->getVal2());
  791. else
  792. Val2 = EmitValToTemp(*this, E->getVal2());
  793. OrderFail = EmitScalarExpr(E->getOrderFail());
  794. if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
  795. E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
  796. IsWeak = EmitScalarExpr(E->getWeak());
  797. break;
  798. case AtomicExpr::AO__c11_atomic_fetch_add:
  799. case AtomicExpr::AO__c11_atomic_fetch_sub:
  800. case AtomicExpr::AO__hip_atomic_fetch_add:
  801. case AtomicExpr::AO__opencl_atomic_fetch_add:
  802. case AtomicExpr::AO__opencl_atomic_fetch_sub:
  803. if (MemTy->isPointerType()) {
  804. // For pointer arithmetic, we're required to do a bit of math:
  805. // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
  806. // ... but only for the C11 builtins. The GNU builtins expect the
  807. // user to multiply by sizeof(T).
  808. QualType Val1Ty = E->getVal1()->getType();
  809. llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
  810. CharUnits PointeeIncAmt =
  811. getContext().getTypeSizeInChars(MemTy->getPointeeType());
  812. Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
  813. auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
  814. Val1 = Temp;
  815. EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
  816. break;
  817. }
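// Illustrative example of the scaling above: with _Atomic(int *) P,
//   atomic_fetch_add(&P, 2)           // C11: advances P by 2 ints
// multiplies the operand by sizeof(int), whereas the GNU builtin
//   __atomic_fetch_add(&P, 2, order)  // adds 2 bytes; the caller scales
// passes the operand through unchanged.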
  818. [[fallthrough]];
  819. case AtomicExpr::AO__atomic_fetch_add:
  820. case AtomicExpr::AO__atomic_fetch_sub:
  821. case AtomicExpr::AO__atomic_add_fetch:
  822. case AtomicExpr::AO__atomic_sub_fetch:
  823. ShouldCastToIntPtrTy = !MemTy->isFloatingType();
  824. [[fallthrough]];
  825. case AtomicExpr::AO__c11_atomic_store:
  826. case AtomicExpr::AO__c11_atomic_exchange:
  827. case AtomicExpr::AO__opencl_atomic_store:
  828. case AtomicExpr::AO__hip_atomic_store:
  829. case AtomicExpr::AO__opencl_atomic_exchange:
  830. case AtomicExpr::AO__hip_atomic_exchange:
  831. case AtomicExpr::AO__atomic_store_n:
  832. case AtomicExpr::AO__atomic_exchange_n:
  833. case AtomicExpr::AO__c11_atomic_fetch_and:
  834. case AtomicExpr::AO__c11_atomic_fetch_or:
  835. case AtomicExpr::AO__c11_atomic_fetch_xor:
  836. case AtomicExpr::AO__c11_atomic_fetch_nand:
  837. case AtomicExpr::AO__c11_atomic_fetch_max:
  838. case AtomicExpr::AO__c11_atomic_fetch_min:
  839. case AtomicExpr::AO__opencl_atomic_fetch_and:
  840. case AtomicExpr::AO__opencl_atomic_fetch_or:
  841. case AtomicExpr::AO__opencl_atomic_fetch_xor:
  842. case AtomicExpr::AO__opencl_atomic_fetch_min:
  843. case AtomicExpr::AO__opencl_atomic_fetch_max:
  844. case AtomicExpr::AO__atomic_fetch_and:
  845. case AtomicExpr::AO__hip_atomic_fetch_and:
  846. case AtomicExpr::AO__atomic_fetch_or:
  847. case AtomicExpr::AO__hip_atomic_fetch_or:
  848. case AtomicExpr::AO__atomic_fetch_xor:
  849. case AtomicExpr::AO__hip_atomic_fetch_xor:
  850. case AtomicExpr::AO__atomic_fetch_nand:
  851. case AtomicExpr::AO__atomic_and_fetch:
  852. case AtomicExpr::AO__atomic_or_fetch:
  853. case AtomicExpr::AO__atomic_xor_fetch:
  854. case AtomicExpr::AO__atomic_nand_fetch:
  855. case AtomicExpr::AO__atomic_max_fetch:
  856. case AtomicExpr::AO__atomic_min_fetch:
  857. case AtomicExpr::AO__atomic_fetch_max:
  858. case AtomicExpr::AO__hip_atomic_fetch_max:
  859. case AtomicExpr::AO__atomic_fetch_min:
  860. case AtomicExpr::AO__hip_atomic_fetch_min:
  861. Val1 = EmitValToTemp(*this, E->getVal1());
  862. break;
  863. }
  864. QualType RValTy = E->getType().getUnqualifiedType();
  865. // The inlined atomics only function on iN types, where N is a power of 2. We
  866. // need to make sure (via temporaries if necessary) that all incoming values
  867. // are compatible.
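// (Illustrative example: a 16-byte _Atomic aggregate is accessed through an
// i128 pointer, and an operand not already laid out as an i128 is first
// copied into a suitably aligned temporary by convertToAtomicIntPointer.)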
  868. LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  869. AtomicInfo Atomics(*this, AtomicVal);
  870. if (ShouldCastToIntPtrTy) {
  871. Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  872. if (Val1.isValid())
  873. Val1 = Atomics.convertToAtomicIntPointer(Val1);
  874. if (Val2.isValid())
  875. Val2 = Atomics.convertToAtomicIntPointer(Val2);
  876. }
  877. if (Dest.isValid()) {
  878. if (ShouldCastToIntPtrTy)
  879. Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  880. } else if (E->isCmpXChg())
  881. Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  882. else if (!RValTy->isVoidType()) {
  883. Dest = Atomics.CreateTempAlloca();
  884. if (ShouldCastToIntPtrTy)
  885. Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  886. }
  887. // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  888. if (UseLibcall) {
  889. bool UseOptimizedLibcall = false;
  890. switch (E->getOp()) {
  891. case AtomicExpr::AO__c11_atomic_init:
  892. case AtomicExpr::AO__opencl_atomic_init:
  893. llvm_unreachable("Already handled above with EmitAtomicInit!");
  894. case AtomicExpr::AO__c11_atomic_fetch_add:
  895. case AtomicExpr::AO__opencl_atomic_fetch_add:
  896. case AtomicExpr::AO__atomic_fetch_add:
  897. case AtomicExpr::AO__hip_atomic_fetch_add:
  898. case AtomicExpr::AO__c11_atomic_fetch_and:
  899. case AtomicExpr::AO__opencl_atomic_fetch_and:
  900. case AtomicExpr::AO__hip_atomic_fetch_and:
  901. case AtomicExpr::AO__atomic_fetch_and:
  902. case AtomicExpr::AO__c11_atomic_fetch_or:
  903. case AtomicExpr::AO__opencl_atomic_fetch_or:
  904. case AtomicExpr::AO__hip_atomic_fetch_or:
  905. case AtomicExpr::AO__atomic_fetch_or:
  906. case AtomicExpr::AO__c11_atomic_fetch_nand:
  907. case AtomicExpr::AO__atomic_fetch_nand:
  908. case AtomicExpr::AO__c11_atomic_fetch_sub:
  909. case AtomicExpr::AO__opencl_atomic_fetch_sub:
  910. case AtomicExpr::AO__atomic_fetch_sub:
  911. case AtomicExpr::AO__c11_atomic_fetch_xor:
  912. case AtomicExpr::AO__opencl_atomic_fetch_xor:
  913. case AtomicExpr::AO__opencl_atomic_fetch_min:
  914. case AtomicExpr::AO__opencl_atomic_fetch_max:
  915. case AtomicExpr::AO__atomic_fetch_xor:
  916. case AtomicExpr::AO__hip_atomic_fetch_xor:
  917. case AtomicExpr::AO__c11_atomic_fetch_max:
  918. case AtomicExpr::AO__c11_atomic_fetch_min:
  919. case AtomicExpr::AO__atomic_add_fetch:
  920. case AtomicExpr::AO__atomic_and_fetch:
  921. case AtomicExpr::AO__atomic_nand_fetch:
  922. case AtomicExpr::AO__atomic_or_fetch:
  923. case AtomicExpr::AO__atomic_sub_fetch:
  924. case AtomicExpr::AO__atomic_xor_fetch:
  925. case AtomicExpr::AO__atomic_fetch_max:
  926. case AtomicExpr::AO__hip_atomic_fetch_max:
  927. case AtomicExpr::AO__atomic_fetch_min:
  928. case AtomicExpr::AO__hip_atomic_fetch_min:
  929. case AtomicExpr::AO__atomic_max_fetch:
  930. case AtomicExpr::AO__atomic_min_fetch:
  931. // For these, only library calls for certain sizes exist.
  932. UseOptimizedLibcall = true;
  933. break;
  934. case AtomicExpr::AO__atomic_load:
  935. case AtomicExpr::AO__atomic_store:
  936. case AtomicExpr::AO__atomic_exchange:
  937. case AtomicExpr::AO__atomic_compare_exchange:
  938. // Use the generic version if we don't know that the operand will be
  939. // suitably aligned for the optimized version.
  940. if (Misaligned)
  941. break;
  942. [[fallthrough]];
  943. case AtomicExpr::AO__c11_atomic_load:
  944. case AtomicExpr::AO__c11_atomic_store:
  945. case AtomicExpr::AO__c11_atomic_exchange:
  946. case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  947. case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  948. case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  949. case AtomicExpr::AO__opencl_atomic_load:
  950. case AtomicExpr::AO__hip_atomic_load:
  951. case AtomicExpr::AO__opencl_atomic_store:
  952. case AtomicExpr::AO__hip_atomic_store:
  953. case AtomicExpr::AO__opencl_atomic_exchange:
  954. case AtomicExpr::AO__hip_atomic_exchange:
  955. case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  956. case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  957. case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  958. case AtomicExpr::AO__atomic_load_n:
  959. case AtomicExpr::AO__atomic_store_n:
  960. case AtomicExpr::AO__atomic_exchange_n:
  961. case AtomicExpr::AO__atomic_compare_exchange_n:
  962. // Only use optimized library calls for sizes for which they exist.
  963. // FIXME: Size == 16 optimized library functions exist too.
  964. if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
  965. UseOptimizedLibcall = true;
  966. break;
  967. }
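// Illustrative consequence of the switch above: an 8-byte __atomic_load_n
// uses the optimized form T __atomic_load_8(T *mem, int order), while a
// 3-byte __atomic_load must use the generic
// void __atomic_load(size_t size, void *mem, void *return, int order) form
// with an explicit size argument.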
  968. CallArgList Args;
  969. if (!UseOptimizedLibcall) {
  970. // For non-optimized library calls, the size is the first parameter
  971. Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
  972. getContext().getSizeType());
  973. }
  974. // The atomic address is the first or second parameter.
  975. // The OpenCL atomic library functions only accept pointer arguments to the
  976. // generic address space.
  977. auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
  978. if (!E->isOpenCL())
  979. return V;
  980. auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
  981. if (AS == LangAS::opencl_generic)
  982. return V;
  983. auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
  984. auto T = llvm::cast<llvm::PointerType>(V->getType());
  985. auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
  986. return getTargetHooks().performAddrSpaceCast(
  987. *this, V, AS, LangAS::opencl_generic, DestType, false);
  988. };
  989. Args.add(RValue::get(CastToGenericAddrSpace(
  990. EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
  991. getContext().VoidPtrTy);
  992. std::string LibCallName;
  993. QualType LoweredMemTy =
  994. MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
  995. QualType RetTy;
  996. bool HaveRetTy = false;
  997. llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
  998. bool PostOpMinMax = false;
  999. switch (E->getOp()) {
  1000. case AtomicExpr::AO__c11_atomic_init:
  1001. case AtomicExpr::AO__opencl_atomic_init:
  1002. llvm_unreachable("Already handled!");
  1003. // There is only one libcall for compare and exchange, because there is no
  1004. // optimisation benefit possible from a libcall version of a weak compare
  1005. // and exchange.
  1006. // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
  1007. // void *desired, int success, int failure)
  1008. // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
  1009. // int success, int failure)
  1010. case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  1011. case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  1012. case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  1013. case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  1014. case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  1015. case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  1016. case AtomicExpr::AO__atomic_compare_exchange:
  1017. case AtomicExpr::AO__atomic_compare_exchange_n:
  1018. LibCallName = "__atomic_compare_exchange";
  1019. RetTy = getContext().BoolTy;
  1020. HaveRetTy = true;
  1021. Args.add(
  1022. RValue::get(CastToGenericAddrSpace(
  1023. EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
  1024. getContext().VoidPtrTy);
  1025. AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
  1026. MemTy, E->getExprLoc(), TInfo.Width);
  1027. Args.add(RValue::get(Order), getContext().IntTy);
  1028. Order = OrderFail;
  1029. break;

    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;

    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;
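
      // Note on LoweredMemTy (illustrative only): when the atomic value is a
      // pointer, e.g.
      //   int *_Atomic p;
      //   __c11_atomic_fetch_add(&p, 1, __ATOMIC_RELAXED);
      // the operand and result are passed to the libcall as an integer of
      // pointer width, which is what the getIntPtrType() lowering above
      // selects.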

    // T __atomic_and_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_or_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_sub_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_xor_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;

    case AtomicExpr::AO__atomic_min_fetch:
      PostOpMinMax = true;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_min"
                        : "__atomic_fetch_umin";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;
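
      // Illustrative only: the signedness of the value type picks the name,
      // e.g.
      //   int s;      __atomic_fetch_min(&s, 3, __ATOMIC_RELAXED); // ..._min
      //   unsigned u; __atomic_fetch_min(&u, 3, __ATOMIC_RELAXED); // ..._umin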

    case AtomicExpr::AO__atomic_max_fetch:
      PostOpMinMax = true;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_max"
                        : "__atomic_fetch_umax";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), TInfo.Width);
      break;
    }

    if (E->isOpenCL()) {
      LibCallName = std::string("__opencl") +
                    StringRef(LibCallName).drop_front(1).str();
    }
    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
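
    // Illustrative only: for a 4-byte fetch-add this yields
    // "__atomic_fetch_add_4"; for OpenCL the rename above first produces
    // "__opencl_atomic_fetch_add", giving "__opencl_atomic_fetch_add_4".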

    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(TInfo.Width), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order), getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);

    // PostOp is only needed for the atomic_*_fetch operations, and
    // thus is only needed for and implemented in the
    // UseOptimizedLibcall codepath.
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    // The value is returned directly for optimized libcalls but the expr
    // provided an out-param.
    if (UseOptimizedLibcall && Res.getScalarVal()) {
      llvm::Value *ResVal = Res.getScalarVal();
      if (PostOpMinMax) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
                                      E->getValueType()->isSignedIntegerType(),
                                      ResVal, LoadVal1);
      } else if (PostOp) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
      }
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
        ResVal = Builder.CreateNot(ResVal);
      Builder.CreateStore(
          ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
    }
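
    // Illustrative only: this is how the *_fetch forms are built from the
    // fetch_* libcalls. For
    //   int x; __atomic_add_fetch(&x, 5, __ATOMIC_SEQ_CST);
    // the libcall returns the old value and the Add above recomputes
    // old + 5; __atomic_nand_fetch additionally applies the CreateNot.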

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
        RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
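
    // Illustrative only: an ordering that is invalid for the operation, e.g.
    //   __c11_atomic_store(&x, 1, memory_order_acquire);
    // is undefined behavior; the IsStore/IsLoad guards above simply emit no
    // atomic operation for it instead of crashing.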

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
        RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
      RValTy, E->getExprLoc());
}
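
// Illustrative only: the non-constant-order path above is what a call like
// (assuming <stdatomic.h>)
//
//   int load_with(const _Atomic int *p, memory_order order) {
//     return atomic_load_explicit(p, order);
//   }
//
// lowers to when 'order' is a runtime value: a switch over the C ABI ordering
// values that branches to one block per legal ordering, with monotonic as the
// default.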

Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateElementBitCast(addr, ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}
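
// Illustrative only: this path matters when the in-memory value is narrower
// than the padded atomic representation, e.g. (on typical targets) a
//   struct S { char c[3]; };
// whose _Atomic form is rounded up to 4 bytes; the 3-byte value is first
// memcpy'd into a full-size temporary before being reinterpreted as an i32.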

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getBaseInfo(), TBAAAccessInfo()));
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try to avoid going through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
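
// Illustrative only: assuming a type too large to be lock-free on the target,
// e.g.
//   struct Big { char bytes[32]; };
//   _Atomic struct Big g;
//   struct Big local = g;   // atomic read of the whole object
// the load above is emitted as roughly
//   __atomic_load(sizeof(g), &g, &local, __ATOMIC_SEQ_CST);
// with the result written through the out-pointer, matching the signature in
// the comment.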

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}
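
// Illustrative only: under -fms-volatile (cl.exe /volatile:ms), a plain
// volatile access such as
//   volatile int g;
//   int observe() { return g; }   // emitted as an acquire atomic load
//   void publish() { g = 1; }     // emitted as a release atomic store
// is routed through the EmitAtomicLoad/EmitAtomicStore overloads in this
// file, provided the predicate above holds (the type is no wider than a
// pointer and needs no libcall).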

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress(CGF);
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}
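
// Illustrative only: the libcall path above relies on the convention (the
// same one the user-level builtin exposes) that a failed compare-exchange
// writes the observed value back through the 'expected' pointer:
//   int g;
//   int expected = 0;
//   bool ok = __atomic_compare_exchange_n(&g, &expected, 1, /*weak=*/false,
//                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
//   // on failure, 'expected' now holds the value that was actually in 'g'
// That is why ExpectedAddr is re-read to produce the "previous value" half of
// the returned pair.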

static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
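
// Illustrative only: both update paths above emit the moral equivalent of
// this compare-and-swap loop (sketch, ignoring bit-field/vector projection
// and the exact orderings chosen):
//   T old = __atomic_load_n(obj, order);
//   T desired;
//   do {
//     desired = UpdateOp(old);
//   } while (!__atomic_compare_exchange_n(obj, &old, desired, /*weak=*/false,
//                                         order, failure_order));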

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}
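
// Illustrative only: for a genuine atomic l-value, e.g.
//   _Atomic int x;
//   void set() { x = 1; }   // sequentially consistent store
// the default ordering chosen above is seq_cst, while the non-atomic case
// (the MS-volatile accesses described earlier) defaults to a release store,
// mirroring the Acquire default in the corresponding EmitAtomicLoad overload.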

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress(*this).getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for atomic type.
///
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, *this, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
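
// Illustrative only: an initialization such as
//   struct S { char c[3]; };
//   void f(void) { _Atomic struct S s = (struct S){{1, 2, 3}}; }
// takes the TEK_Aggregate path above: the initializer is not of atomic type,
// so any padding in the atomic representation is zeroed first and the
// aggregate is then evaluated directly into the projected value slot.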