
//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way all of the
// merged globals can be addressed using offsets from the same base pointer
// (no need for a separate base pointer for each global). Such a transformation
// can significantly reduce the register pressure when many globals are
// involved.
//
// For example, consider the code which touches several global variables at
// once:
//
//   static int foo[N], bar[N], baz[N];
//
//   for (i = 0; i < N; ++i) {
//     foo[i] = bar[i] * baz[i];
//   }
//
// On ARM the addresses of all 3 arrays have to be kept in registers, so this
// code has quite high register pressure (loop body):
//
//   ldr r1, [r5], #4
//   ldr r2, [r6], #4
//   mul r1, r2, r1
//   str r1, [r0], #4
//
// The pass converts the code to something like:
//
//   static struct {
//     int foo[N];
//     int bar[N];
//     int baz[N];
//   } merged;
//
//   for (i = 0; i < N; ++i) {
//     merged.foo[i] = merged.bar[i] * merged.baz[i];
//   }
//
// and in ARM code this becomes:
//
//   ldr r0, [r5, #40]
//   ldr r1, [r5, #80]
//   mul r0, r1, r0
//   str r0, [r5], #4
//
// Note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
//
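// For illustration, at the IR level the rewrite is roughly the following
// (hypothetical sketch with N = 4; the actual names, padding, and layout are
// computed by doMerge below):
//
//   @foo = internal global [4 x i32] zeroinitializer
//   @bar = internal global [4 x i32] zeroinitializer
//   @baz = internal global [4 x i32] zeroinitializer
//
// becomes a single packed struct whose slots the old uses address through
// constant inbounds GEPs:
//
//   @_MergedGlobals = private global <{ [4 x i32], [4 x i32], [4 x i32] }>
//       zeroinitializer
//   ; every use of @bar is replaced by
//   ;   getelementptr inbounds (<{ [4 x i32], [4 x i32], [4 x i32] }>,
//   ;       <{ [4 x i32], [4 x i32], [4 x i32] }>* @_MergedGlobals, i32 0, i32 1)
//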
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

static cl::opt<unsigned>
GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                     cl::desc("Set maximum offset for global merge pass"),
                     cl::init(0));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: this could be a transitional option, and we can probably remove it
// once we are sure this optimization always benefits all targets.
static cl::opt<cl::boolOrDefault>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
    cl::desc("Enable global merge pass on external linkage"));
STATISTIC(NumMerged, "Number of globals merged");

namespace {

class GlobalMerge : public FunctionPass {
  const TargetMachine *TM = nullptr;

  // FIXME: Infer the maximum possible offset depending on the actual users
  // (these max offsets are different for the users inside Thumb or ARM
  // functions), see the code that passes in the offset in the ARM backend
  // for more information.
  unsigned MaxOffset;

  /// Whether we should try to optimize for size only.
  /// Currently, this applies a dead simple heuristic: only consider globals
  /// used in minsize functions for merging.
  /// FIXME: This could learn about optsize, and be used in the cost model.
  bool OnlyOptimizeForSize = false;

  /// Whether we should merge global variables that have external linkage.
  bool MergeExternalGlobals = false;

  bool IsMachO;

  bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
               Module &M, bool isConst, unsigned AddrSpace) const;

  /// Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// Check if the given variable has been identified as must-keep.
  /// \pre setMustKeepGlobalVariables must have been called on the Module that
  /// contains GV.
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used".
  void collectUsedGlobalVariables(Module &M, StringRef Name);

  /// Keep track of the GlobalVariables that must not be merged away.
  SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GlobalMerge()
      : FunctionPass(ID), MaxOffset(GlobalMergeMaxOffset) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                       bool OnlyOptimizeForSize, bool MergeExternalGlobals)
      : FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
        OnlyOptimizeForSize(OnlyOptimizeForSize),
        MergeExternalGlobals(MergeExternalGlobals) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  bool doFinalization(Module &M) override;

  StringRef getPassName() const override { return "Merge internal globals"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)

bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();

  // FIXME: Find better heuristics
  llvm::stable_sort(
      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        // We don't support scalable global variables.
        return DL.getTypeAllocSize(GV1->getValueType()).getFixedSize() <
               DL.getTypeAllocSize(GV2->getValueType()).getFixedSize();
      });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSets), and mapping each "together-ness" unit
  // of code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.
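  //
  // Illustrative example (globals and functions are hypothetical): with
  // globals G0, G1, G2, a function F using G0 and G2, and a function H using
  // only G2, the scan below proceeds as follows:
  //  - visiting G0: F gets mapped to the new singleton set {G0} (count 1);
  //  - visiting G2: F's set {G0} doesn't contain G2, so it is expanded to
  //    {G0,G2} (count 1) and {G0}'s count drops back to 0; H gets mapped to
  //    the singleton {G2} (count 1).
  // Only sets that actually occur are ever materialized.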
  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr. We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize && !ParentFn->hasMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global this function uses, map it to the set
        // consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's set already contains this global, just increment
        // the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global. Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // Now we found a bunch of sets of globals used together. We accumulated
  // the number of times we encountered the sets (i.e., the number of functions
  // that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
  llvm::stable_sort(UsedGlobalSets,
                    [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
                      return UGS1.Globals.count() * UGS1.UsageCount <
                             UGS2.Globals.count() * UGS2.UsageCount;
                    });
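  // For example (hypothetical counts): a set {G0,G1,G2} used together in 2
  // functions scores 3 * 2 = 6, while a pair {G3,G4} used together in 4
  // functions scores 2 * 4 = 8 and is therefore considered more profitable.
  // The loops below walk the sorted list from the back, i.e. best-first.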
  // We can choose to merge all globals together, but ignore globals never used
  // with another global. This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    Align MaxAlign;
    unsigned CurIdx = 0;
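    // Lay out the candidate globals back to back, inserting [N x i8] padding
    // members so each global keeps the alignment AsmPrinter would give it.
    // E.g. (illustrative) an i8 global followed by an i32 global preferring
    // 4-byte alignment gets a [3 x i8] padding member between them;
    // StructIdxs remembers the struct field index of each real global.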
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();

      // Make sure we use the same alignment AsmPrinter would use.
      Align Alignment = DL.getPreferredAlign(Globals[j]);
      unsigned Padding = alignTo(MergedSize, Alignment) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Alignment);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Exit early if there is only one global to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If none of the merged variables has external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables. If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of global symbol
    // name. This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
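    // E.g. (illustrative) merging external @counter with some internal
    // globals on Mach-O produces a symbol named "_MergedGlobals_counter";
    // on other platforms the plain "_MergedGlobals" name is used with
    // private linkage (see MergedLinkage below).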
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);
    MergedGV->setSection(Globals[i]->getSection());

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name(Globals[k]->getName());
      GlobalValue::VisibilityTypes Visibility = Globals[k]->getVisibility();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
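      // The replacement is a constant expression of the form (illustrative):
      //   getelementptr inbounds (%MergedTy, %MergedTy* @_MergedGlobals,
      //                           i32 0, i32 StructIdxs[idx])
      // so every former use of Globals[k] now addresses its slot inside the
      // merged struct.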
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage || !IsMachO) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setVisibility(Visibility);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMerge::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from the llvm.used-style array named \p Name.
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
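  // E.g. (illustrative) a typical entry looks like:
  //   @llvm.used = appending global [1 x i8*]
  //       [i8* bitcast (i32* @must_keep to i8*)], section "llvm.metadata"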
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G =
            dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      Instruction *Pad = BB.getFirstNonPHI();
      if (!Pad->isEHPad())
        continue;

      // Keep globals used by landingpads and catchpads.
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
      }
    }
  }
}

bool GlobalMerge::doInitialization(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
  DenseMap<std::pair<unsigned, StringRef>, SmallVector<GlobalVariable *, 16>>
      Globals, ConstGlobals, BSSGlobals;
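  // Candidates are bucketed by (address space, section) so only globals that
  // can legally share a base address end up in the same merge set; e.g.
  // (illustrative) globals in addrspace(1) or in an explicit "mysec" section
  // each form their own bucket.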
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  // Grab all non-const globals.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only.
    if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted.
    if (TM && !TM->shouldAssumeDSOLocal(M, &GV))
      continue;

    if (!(MergeExternalGlobals && GV.hasExternalLinkage()) &&
        !GV.hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();
    StringRef Section = GV.getSection();

    // Ignore all 'special' globals.
    if (GV.getName().startswith("llvm.") ||
        GV.getName().startswith(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(&GV))
      continue;

    Type *Ty = GV.getValueType();
    if (DL.getTypeAllocSize(Ty) < MaxOffset) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSS())
        BSSGlobals[{AddressSpace, Section}].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[{AddressSpace, Section}].push_back(&GV);
      else
        Globals[{AddressSpace, Section}].push_back(&GV);
    }
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  if (EnableGlobalMergeOnConst)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first.first);

  return Changed;
}

bool GlobalMerge::runOnFunction(Function &F) {
  return false;
}

bool GlobalMerge::doFinalization(Module &M) {
  MustKeepGlobalVariables.clear();
  return false;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET) ?
    MergeExternalByDefault : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal);
}
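
// Typical use (illustrative sketch, not part of this file): a target that
// wants this optimization schedules it from its TargetPassConfig, e.g. in
// addIRPasses():
//
//   addPass(createGlobalMergePass(TM, /*Offset=*/4095,
//                                 /*OnlyOptimizeForSize=*/false,
//                                 /*MergeExternalByDefault=*/true));
//
// where the maximum offset is chosen to match the target's addressing modes.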