// LegacyDivergenceAnalysis.cpp
//===- LegacyDivergenceAnalysis.cpp - Legacy Divergence Analysis Impl ----===//
  3. //
  4. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  5. // See https://llvm.org/LICENSE.txt for license information.
  6. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file implements divergence analysis which determines whether a branch
// in a GPU program is divergent. It can help branch optimizations such as jump
  12. // threading and loop unswitching to make better decisions.
  13. //
  14. // GPU programs typically use the SIMD execution model, where multiple threads
  15. // in the same execution group have to execute in lock-step. Therefore, if the
  16. // code contains divergent branches (i.e., threads in a group do not agree on
  17. // which path of the branch to take), the group of threads has to execute all
  18. // the paths from that branch with different subsets of threads enabled until
  19. // they converge at the immediately post-dominating BB of the paths.
  20. //
  21. // Due to this execution model, some optimizations such as jump
  22. // threading and loop unswitching can be unfortunately harmful when performed on
  23. // divergent branches. Therefore, an analysis that computes which branches in a
  24. // GPU program are divergent can help the compiler to selectively run these
  25. // optimizations.
  26. //
  27. // This file defines divergence analysis which computes a conservative but
  28. // non-trivial approximation of all divergent branches in a GPU program. It
  29. // partially implements the approach described in
  30. //
  31. // Divergence Analysis
  32. // Sampaio, Souza, Collange, Pereira
  33. // TOPLAS '13
  34. //
  35. // The divergence analysis identifies the sources of divergence (e.g., special
  36. // variables that hold the thread ID), and recursively marks variables that are
  37. // data or sync dependent on a source of divergence as divergent.
  38. //
  39. // While data dependency is a well-known concept, the notion of sync dependency
  40. // is worth more explanation. Sync dependence characterizes the control flow
  41. // aspect of the propagation of branch divergence. For example,
  42. //
  43. // %cond = icmp slt i32 %tid, 10
  44. // br i1 %cond, label %then, label %else
  45. // then:
  46. // br label %merge
  47. // else:
  48. // br label %merge
  49. // merge:
  50. // %a = phi i32 [ 0, %then ], [ 1, %else ]
  51. //
  52. // Suppose %tid holds the thread ID. Although %a is not data dependent on %tid
  53. // because %tid is not on its use-def chains, %a is sync dependent on %tid
  54. // because the branch "br i1 %cond" depends on %tid and affects which value %a
  55. // is assigned to.
  56. //
  57. // The current implementation has the following limitations:
  58. // 1. intra-procedural. It conservatively considers the arguments of a
  59. // non-kernel-entry function and the return value of a function call as
  60. // divergent.
  61. // 2. memory as black box. It conservatively considers values loaded from
  62. // generic or local address as divergent. This can be improved by leveraging
  63. // pointer analysis.
  64. //
  65. //===----------------------------------------------------------------------===//
  66. #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
  67. #include "llvm/ADT/PostOrderIterator.h"
  68. #include "llvm/Analysis/CFG.h"
  69. #include "llvm/Analysis/DivergenceAnalysis.h"
  70. #include "llvm/Analysis/Passes.h"
  71. #include "llvm/Analysis/PostDominators.h"
  72. #include "llvm/Analysis/TargetTransformInfo.h"
  73. #include "llvm/IR/Dominators.h"
  74. #include "llvm/IR/InstIterator.h"
  75. #include "llvm/IR/Instructions.h"
  76. #include "llvm/IR/Value.h"
  77. #include "llvm/InitializePasses.h"
  78. #include "llvm/Support/CommandLine.h"
  79. #include "llvm/Support/Debug.h"
  80. #include "llvm/Support/raw_ostream.h"
  81. #include <vector>
  82. using namespace llvm;
  83. #define DEBUG_TYPE "divergence"
  84. // transparently use the GPUDivergenceAnalysis
  85. static cl::opt<bool> UseGPUDA("use-gpu-divergence-analysis", cl::init(false),
  86. cl::Hidden,
  87. cl::desc("turn the LegacyDivergenceAnalysis into "
  88. "a wrapper for GPUDivergenceAnalysis"));
  89. namespace {
  90. class DivergencePropagator {
  91. public:
  92. DivergencePropagator(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
  93. PostDominatorTree &PDT, DenseSet<const Value *> &DV,
  94. DenseSet<const Use *> &DU)
  95. : F(F), TTI(TTI), DT(DT), PDT(PDT), DV(DV), DU(DU) {}
  96. void populateWithSourcesOfDivergence();
  97. void propagate();
  98. private:
  99. // A helper function that explores data dependents of V.
  100. void exploreDataDependency(Value *V);
  101. // A helper function that explores sync dependents of TI.
  102. void exploreSyncDependency(Instruction *TI);
  103. // Computes the influence region from Start to End. This region includes all
  104. // basic blocks on any simple path from Start to End.
  105. void computeInfluenceRegion(BasicBlock *Start, BasicBlock *End,
  106. DenseSet<BasicBlock *> &InfluenceRegion);
  107. // Finds all users of I that are outside the influence region, and add these
  108. // users to Worklist.
  109. void findUsersOutsideInfluenceRegion(
  110. Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion);
  111. Function &F;
  112. TargetTransformInfo &TTI;
  113. DominatorTree &DT;
  114. PostDominatorTree &PDT;
  115. std::vector<Value *> Worklist; // Stack for DFS.
  116. DenseSet<const Value *> &DV; // Stores all divergent values.
  117. DenseSet<const Use *> &DU; // Stores divergent uses of possibly uniform
  118. // values.
  119. };
  120. void DivergencePropagator::populateWithSourcesOfDivergence() {
  121. Worklist.clear();
  122. DV.clear();
  123. DU.clear();
  124. for (auto &I : instructions(F)) {
  125. if (TTI.isSourceOfDivergence(&I)) {
  126. Worklist.push_back(&I);
  127. DV.insert(&I);
  128. }
  129. }
  130. for (auto &Arg : F.args()) {
  131. if (TTI.isSourceOfDivergence(&Arg)) {
  132. Worklist.push_back(&Arg);
  133. DV.insert(&Arg);
  134. }
  135. }
  136. }
  137. void DivergencePropagator::exploreSyncDependency(Instruction *TI) {
  138. // Propagation rule 1: if branch TI is divergent, all PHINodes in TI's
  139. // immediate post dominator are divergent. This rule handles if-then-else
  140. // patterns. For example,
  141. //
  142. // if (tid < 5)
  143. // a1 = 1;
  144. // else
  145. // a2 = 2;
  146. // a = phi(a1, a2); // sync dependent on (tid < 5)
  147. BasicBlock *ThisBB = TI->getParent();
  148. // Unreachable blocks may not be in the dominator tree.
  149. if (!DT.isReachableFromEntry(ThisBB))
  150. return;
  151. // If the function has no exit blocks or doesn't reach any exit blocks, the
  152. // post dominator may be null.
  153. DomTreeNode *ThisNode = PDT.getNode(ThisBB);
  154. if (!ThisNode)
  155. return;
  156. BasicBlock *IPostDom = ThisNode->getIDom()->getBlock();
  157. if (IPostDom == nullptr)
  158. return;
  159. for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
  160. // A PHINode is uniform if it returns the same value no matter which path is
  161. // taken.
  162. if (!cast<PHINode>(I)->hasConstantOrUndefValue() && DV.insert(&*I).second)
  163. Worklist.push_back(&*I);
  164. }
  165. // Propagation rule 2: if a value defined in a loop is used outside, the user
  166. // is sync dependent on the condition of the loop exits that dominate the
  167. // user. For example,
  168. //
  169. // int i = 0;
  170. // do {
  171. // i++;
  172. // if (foo(i)) ... // uniform
  173. // } while (i < tid);
  174. // if (bar(i)) ... // divergent
  175. //
  176. // A program may contain unstructured loops. Therefore, we cannot leverage
  177. // LoopInfo, which only recognizes natural loops.
  178. //
  179. // The algorithm used here handles both natural and unstructured loops. Given
  180. // a branch TI, we first compute its influence region, the union of all simple
  181. // paths from TI to its immediate post dominator (IPostDom). Then, we search
  182. // for all the values defined in the influence region but used outside. All
  183. // these users are sync dependent on TI.
  184. DenseSet<BasicBlock *> InfluenceRegion;
  185. computeInfluenceRegion(ThisBB, IPostDom, InfluenceRegion);
  186. // An insight that can speed up the search process is that all the in-region
  187. // values that are used outside must dominate TI. Therefore, instead of
  188. // searching every basic blocks in the influence region, we search all the
  189. // dominators of TI until it is outside the influence region.
  190. BasicBlock *InfluencedBB = ThisBB;
  191. while (InfluenceRegion.count(InfluencedBB)) {
  192. for (auto &I : *InfluencedBB) {
  193. if (!DV.count(&I))
  194. findUsersOutsideInfluenceRegion(I, InfluenceRegion);
  195. }
  196. DomTreeNode *IDomNode = DT.getNode(InfluencedBB)->getIDom();
  197. if (IDomNode == nullptr)
  198. break;
  199. InfluencedBB = IDomNode->getBlock();
  200. }
  201. }
  202. void DivergencePropagator::findUsersOutsideInfluenceRegion(
  203. Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion) {
  204. for (Use &Use : I.uses()) {
  205. Instruction *UserInst = cast<Instruction>(Use.getUser());
  206. if (!InfluenceRegion.count(UserInst->getParent())) {
  207. DU.insert(&Use);
  208. if (DV.insert(UserInst).second)
  209. Worklist.push_back(UserInst);
  210. }
  211. }
  212. }
  213. // A helper function for computeInfluenceRegion that adds successors of "ThisBB"
  214. // to the influence region.
  215. static void
  216. addSuccessorsToInfluenceRegion(BasicBlock *ThisBB, BasicBlock *End,
  217. DenseSet<BasicBlock *> &InfluenceRegion,
  218. std::vector<BasicBlock *> &InfluenceStack) {
  219. for (BasicBlock *Succ : successors(ThisBB)) {
  220. if (Succ != End && InfluenceRegion.insert(Succ).second)
  221. InfluenceStack.push_back(Succ);
  222. }
  223. }
  224. void DivergencePropagator::computeInfluenceRegion(
  225. BasicBlock *Start, BasicBlock *End,
  226. DenseSet<BasicBlock *> &InfluenceRegion) {
  227. assert(PDT.properlyDominates(End, Start) &&
  228. "End does not properly dominate Start");
  229. // The influence region starts from the end of "Start" to the beginning of
  230. // "End". Therefore, "Start" should not be in the region unless "Start" is in
  231. // a loop that doesn't contain "End".
  232. std::vector<BasicBlock *> InfluenceStack;
  233. addSuccessorsToInfluenceRegion(Start, End, InfluenceRegion, InfluenceStack);
  234. while (!InfluenceStack.empty()) {
  235. BasicBlock *BB = InfluenceStack.back();
  236. InfluenceStack.pop_back();
  237. addSuccessorsToInfluenceRegion(BB, End, InfluenceRegion, InfluenceStack);
  238. }
  239. }
  240. void DivergencePropagator::exploreDataDependency(Value *V) {
  241. // Follow def-use chains of V.
  242. for (User *U : V->users()) {
  243. if (!TTI.isAlwaysUniform(U) && DV.insert(U).second)
  244. Worklist.push_back(U);
  245. }
  246. }
  247. void DivergencePropagator::propagate() {
  248. // Traverse the dependency graph using DFS.
  249. while (!Worklist.empty()) {
  250. Value *V = Worklist.back();
  251. Worklist.pop_back();
  252. if (Instruction *I = dyn_cast<Instruction>(V)) {
  253. // Terminators with less than two successors won't introduce sync
  254. // dependency. Ignore them.
  255. if (I->isTerminator() && I->getNumSuccessors() > 1)
  256. exploreSyncDependency(I);
  257. }
  258. exploreDataDependency(V);
  259. }
  260. }
  261. } // namespace
  262. // Register this pass.
  263. char LegacyDivergenceAnalysis::ID = 0;
  264. LegacyDivergenceAnalysis::LegacyDivergenceAnalysis() : FunctionPass(ID) {
  265. initializeLegacyDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
  266. }
  267. INITIALIZE_PASS_BEGIN(LegacyDivergenceAnalysis, "divergence",
  268. "Legacy Divergence Analysis", false, true)
  269. INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
  270. INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
  271. INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
  272. INITIALIZE_PASS_END(LegacyDivergenceAnalysis, "divergence",
  273. "Legacy Divergence Analysis", false, true)
  274. FunctionPass *llvm::createLegacyDivergenceAnalysisPass() {
  275. return new LegacyDivergenceAnalysis();
  276. }
  277. void LegacyDivergenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  278. AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  279. AU.addRequiredTransitive<PostDominatorTreeWrapperPass>();
  280. AU.addRequiredTransitive<LoopInfoWrapperPass>();
  281. AU.setPreservesAll();
  282. }
  283. bool LegacyDivergenceAnalysis::shouldUseGPUDivergenceAnalysis(
  284. const Function &F, const TargetTransformInfo &TTI) const {
  285. if (!(UseGPUDA || TTI.useGPUDivergenceAnalysis()))
  286. return false;
  287. // GPUDivergenceAnalysis requires a reducible CFG.
  288. auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  289. using RPOTraversal = ReversePostOrderTraversal<const Function *>;
  290. RPOTraversal FuncRPOT(&F);
  291. return !containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
  292. const LoopInfo>(FuncRPOT, LI);
  293. }
  294. bool LegacyDivergenceAnalysis::runOnFunction(Function &F) {
  295. auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
  296. if (TTIWP == nullptr)
  297. return false;
  298. TargetTransformInfo &TTI = TTIWP->getTTI(F);
  299. // Fast path: if the target does not have branch divergence, we do not mark
  300. // any branch as divergent.
  301. if (!TTI.hasBranchDivergence())
  302. return false;
  303. DivergentValues.clear();
  304. DivergentUses.clear();
  305. gpuDA = nullptr;
  306. auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  307. auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  308. if (shouldUseGPUDivergenceAnalysis(F, TTI)) {
  309. // run the new GPU divergence analysis
  310. auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  311. gpuDA = std::make_unique<DivergenceInfo>(F, DT, PDT, LI, TTI,
  312. /* KnownReducible = */ true);
  313. } else {
  314. // run LLVM's existing DivergenceAnalysis
  315. DivergencePropagator DP(F, TTI, DT, PDT, DivergentValues, DivergentUses);
  316. DP.populateWithSourcesOfDivergence();
  317. DP.propagate();
  318. }
  319. LLVM_DEBUG(dbgs() << "\nAfter divergence analysis on " << F.getName()
  320. << ":\n";
  321. print(dbgs(), F.getParent()));
  322. return false;
  323. }
  324. bool LegacyDivergenceAnalysis::isDivergent(const Value *V) const {
  325. if (gpuDA) {
  326. return gpuDA->isDivergent(*V);
  327. }
  328. return DivergentValues.count(V);
  329. }
  330. bool LegacyDivergenceAnalysis::isDivergentUse(const Use *U) const {
  331. if (gpuDA) {
  332. return gpuDA->isDivergentUse(*U);
  333. }
  334. return DivergentValues.count(U->get()) || DivergentUses.count(U);
  335. }
  336. void LegacyDivergenceAnalysis::print(raw_ostream &OS, const Module *) const {
  337. if ((!gpuDA || !gpuDA->hasDivergence()) && DivergentValues.empty())
  338. return;
  339. const Function *F = nullptr;
  340. if (!DivergentValues.empty()) {
  341. const Value *FirstDivergentValue = *DivergentValues.begin();
  342. if (const Argument *Arg = dyn_cast<Argument>(FirstDivergentValue)) {
  343. F = Arg->getParent();
  344. } else if (const Instruction *I =
  345. dyn_cast<Instruction>(FirstDivergentValue)) {
  346. F = I->getParent()->getParent();
  347. } else {
  348. llvm_unreachable("Only arguments and instructions can be divergent");
  349. }
  350. } else if (gpuDA) {
  351. F = &gpuDA->getFunction();
  352. }
  353. if (!F)
  354. return;
  355. // Dumps all divergent values in F, arguments and then instructions.
  356. for (auto &Arg : F->args()) {
  357. OS << (isDivergent(&Arg) ? "DIVERGENT: " : " ");
  358. OS << Arg << "\n";
  359. }
  360. // Iterate instructions using instructions() to ensure a deterministic order.
  361. for (const BasicBlock &BB : *F) {
  362. OS << "\n " << BB.getName() << ":\n";
  363. for (auto &I : BB.instructionsWithoutDebug()) {
  364. OS << (isDivergent(&I) ? "DIVERGENT: " : " ");
  365. OS << I << "\n";
  366. }
  367. }
  368. OS << "\n";
  369. }