ThreadSafetyTIL.cpp 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332
  1. //===- ThreadSafetyTIL.cpp ------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
  9. #include "clang/Basic/LLVM.h"
  10. #include "llvm/Support/Casting.h"
  11. #include <cassert>
  12. #include <cstddef>
  13. using namespace clang;
  14. using namespace threadSafety;
  15. using namespace til;
  16. StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
  17. switch (Op) {
  18. case UOP_Minus: return "-";
  19. case UOP_BitNot: return "~";
  20. case UOP_LogicNot: return "!";
  21. }
  22. return {};
  23. }
  24. StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
  25. switch (Op) {
  26. case BOP_Mul: return "*";
  27. case BOP_Div: return "/";
  28. case BOP_Rem: return "%";
  29. case BOP_Add: return "+";
  30. case BOP_Sub: return "-";
  31. case BOP_Shl: return "<<";
  32. case BOP_Shr: return ">>";
  33. case BOP_BitAnd: return "&";
  34. case BOP_BitXor: return "^";
  35. case BOP_BitOr: return "|";
  36. case BOP_Eq: return "==";
  37. case BOP_Neq: return "!=";
  38. case BOP_Lt: return "<";
  39. case BOP_Leq: return "<=";
  40. case BOP_Cmp: return "<=>";
  41. case BOP_LogicAnd: return "&&";
  42. case BOP_LogicOr: return "||";
  43. }
  44. return {};
  45. }
// Force evaluation of this Future, caching and returning the computed result.
SExpr* Future::force() {
  // Mark the future as in-progress while compute() runs.
  Status = FS_evaluating;
  // NOTE(review): compute() is presumably a virtual hook implemented by
  // subclasses -- confirm against the declaration in ThreadSafetyTIL.h.
  Result = compute();
  Status = FS_done;
  return Result;
}
  52. unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
  53. unsigned Idx = Predecessors.size();
  54. Predecessors.reserveCheck(1, Arena);
  55. Predecessors.push_back(Pred);
  56. for (auto *E : Args) {
  57. if (auto *Ph = dyn_cast<Phi>(E)) {
  58. Ph->values().reserveCheck(1, Arena);
  59. Ph->values().push_back(nullptr);
  60. }
  61. }
  62. return Idx;
  63. }
  64. void BasicBlock::reservePredecessors(unsigned NumPreds) {
  65. Predecessors.reserve(NumPreds, Arena);
  66. for (auto *E : Args) {
  67. if (auto *Ph = dyn_cast<Phi>(E)) {
  68. Ph->values().reserve(NumPreds, Arena);
  69. }
  70. }
  71. }
  72. // If E is a variable, then trace back through any aliases or redundant
  73. // Phi nodes to find the canonical definition.
  74. const SExpr *til::getCanonicalVal(const SExpr *E) {
  75. while (true) {
  76. if (const auto *V = dyn_cast<Variable>(E)) {
  77. if (V->kind() == Variable::VK_Let) {
  78. E = V->definition();
  79. continue;
  80. }
  81. }
  82. if (const auto *Ph = dyn_cast<Phi>(E)) {
  83. if (Ph->status() == Phi::PH_SingleVal) {
  84. E = Ph->values()[0];
  85. continue;
  86. }
  87. }
  88. break;
  89. }
  90. return E;
  91. }
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
SExpr *til::simplifyToCanonicalVal(SExpr *E) {
  while (true) {
    if (auto *V = dyn_cast<Variable>(E)) {
      // Variables that are not let-bound are already canonical.
      if (V->kind() != Variable::VK_Let)
        return V;
      // Eliminate redundant variables, e.g. x = y, or x = 5,
      // but keep anything more complicated.
      if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
        E = V->definition();
        continue;
      }
      return V;
    }
    if (auto *Ph = dyn_cast<Phi>(E)) {
      // Resolve incomplete Phi nodes *before* inspecting the status below:
      // simplifyIncompleteArg() may upgrade the status to PH_SingleVal.
      if (Ph->status() == Phi::PH_Incomplete)
        simplifyIncompleteArg(Ph);
      // Eliminate redundant Phi nodes.
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    // No further simplification possible.
    return E;
  }
}
// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// simplifyToCanonicalVal() will recursively call simplifyIncompleteArg().
void til::simplifyIncompleteArg(til::Phi *Ph) {
  assert(Ph && Ph->status() == Phi::PH_Incomplete);
  // eliminate infinite recursion -- assume that this node is not redundant.
  Ph->setStatus(Phi::PH_MultiVal);
  // Compare the canonical value of every argument against the first one.
  SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
  for (unsigned i = 1, n = Ph->values().size(); i < n; ++i) {
    SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
    if (Ei == Ph)
      continue; // Recursive reference to itself. Don't count.
    if (Ei != E0) {
      return; // Status is already set to MultiVal.
    }
  }
  // All (non-self-referential) arguments agree on one canonical value.
  Ph->setStatus(Phi::PH_SingleVal);
}
  138. // Renumbers the arguments and instructions to have unique, sequential IDs.
  139. unsigned BasicBlock::renumberInstrs(unsigned ID) {
  140. for (auto *Arg : Args)
  141. Arg->setID(this, ID++);
  142. for (auto *Instr : Instrs)
  143. Instr->setID(this, ID++);
  144. TermInstr->setID(this, ID++);
  145. return ID;
  146. }
  147. // Sorts the CFGs blocks using a reverse post-order depth-first traversal.
  148. // Each block will be written into the Blocks array in order, and its BlockID
  149. // will be set to the index in the array. Sorting should start from the entry
  150. // block, and ID should be the total number of blocks.
  151. unsigned BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks,
  152. unsigned ID) {
  153. if (Visited) return ID;
  154. Visited = true;
  155. for (auto *Block : successors())
  156. ID = Block->topologicalSort(Blocks, ID);
  157. // set ID and update block array in place.
  158. // We may lose pointers to unreachable blocks.
  159. assert(ID > 0);
  160. BlockID = --ID;
  161. Blocks[BlockID] = this;
  162. return ID;
  163. }
// Performs a reverse topological traversal, starting from the exit block and
// following back-edges. The dominator is serialized before any predecessors,
// which guarantees that all blocks are serialized after their dominator and
// before their post-dominator (because it's a reverse topological traversal).
// ID should be initially set to 0.
//
// This sort assumes that (1) dominators have been computed, (2) there are no
// critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessible via traversal of back-edges from the exit that
// weren't accessible via forward edges from the entry.
unsigned BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock *> &Blocks,
                                          unsigned ID) {
  // Visited is assumed to have been set by the topologicalSort. This pass
  // clears the flag, so !Visited means that we've visited this node before.
  if (!Visited) return ID;
  Visited = false;
  // Serialize the immediate dominator first...
  if (DominatorNode.Parent)
    ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
  // ...then all predecessors...
  for (auto *Pred : Predecessors)
    ID = Pred->topologicalFinalSort(Blocks, ID);
  // ...then this block, numbering from the front of the array.
  assert(static_cast<size_t>(ID) < Blocks.size());
  BlockID = ID++;
  Blocks[BlockID] = this;
  return ID;
}
// Computes the immediate dominator of the current block. Assumes that all of
// its predecessors have already computed their dominators. This is achieved
// by visiting the nodes in topological order.
void BasicBlock::computeDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk backwards from each predecessor to find the common dominator node.
  for (auto *Pred : Predecessors) {
    // Skip back-edges: a predecessor with a BlockID >= ours comes later in
    // the topological order, so its dominator is not yet known.
    if (Pred->BlockID >= BlockID) continue;
    // If we don't yet have a candidate for dominator, take this one.
    if (Candidate == nullptr) {
      Candidate = Pred;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    // The walk with the larger BlockID is deeper in the dominator tree; step
    // it up to its parent until the two walks converge.
    auto *Alternate = Pred;
    while (Alternate != Candidate) {
      if (Candidate->BlockID > Alternate->BlockID)
        Candidate = Candidate->DominatorNode.Parent;
      else
        Alternate = Alternate->DominatorNode.Parent;
    }
  }
  DominatorNode.Parent = Candidate;
  // Subtree sizes are accumulated in a later pass (see computeNodeSize).
  DominatorNode.SizeOfSubTree = 1;
}
// Computes the immediate post-dominator of the current block. Assumes that all
// of its successors have already computed their post-dominators. This is
// achieved visiting the nodes in reverse topological order.
void BasicBlock::computePostDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk forward from each successor to find the common post-dominator node.
  for (auto *Succ : successors()) {
    // Skip back-edges: a successor with a BlockID <= ours comes earlier in
    // the topological order, so its post-dominator is not yet known.
    if (Succ->BlockID <= BlockID) continue;
    // If we don't yet have a candidate for post-dominator, take this one.
    if (Candidate == nullptr) {
      Candidate = Succ;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    // The walk with the smaller BlockID is deeper in the post-dominator tree;
    // step it up to its parent until the two walks converge.
    auto *Alternate = Succ;
    while (Alternate != Candidate) {
      if (Candidate->BlockID < Alternate->BlockID)
        Candidate = Candidate->PostDominatorNode.Parent;
      else
        Alternate = Alternate->PostDominatorNode.Parent;
    }
  }
  PostDominatorNode.Parent = Candidate;
  // Subtree sizes are accumulated in a later pass (see computeNodeSize).
  PostDominatorNode.SizeOfSubTree = 1;
}
  241. // Renumber instructions in all blocks
  242. void SCFG::renumberInstrs() {
  243. unsigned InstrID = 0;
  244. for (auto *Block : Blocks)
  245. InstrID = Block->renumberInstrs(InstrID);
  246. }
  247. static inline void computeNodeSize(BasicBlock *B,
  248. BasicBlock::TopologyNode BasicBlock::*TN) {
  249. BasicBlock::TopologyNode *N = &(B->*TN);
  250. if (N->Parent) {
  251. BasicBlock::TopologyNode *P = &(N->Parent->*TN);
  252. // Initially set ID relative to the (as yet uncomputed) parent ID
  253. N->NodeID = P->SizeOfSubTree;
  254. P->SizeOfSubTree += N->SizeOfSubTree;
  255. }
  256. }
  257. static inline void computeNodeID(BasicBlock *B,
  258. BasicBlock::TopologyNode BasicBlock::*TN) {
  259. BasicBlock::TopologyNode *N = &(B->*TN);
  260. if (N->Parent) {
  261. BasicBlock::TopologyNode *P = &(N->Parent->*TN);
  262. N->NodeID += P->NodeID; // Fix NodeIDs relative to starting node.
  263. }
  264. }
// Normalizes a CFG. Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators
// 3) Topologically sorting the blocks into the "Blocks" array.
void SCFG::computeNormalForm() {
  // Topologically sort the blocks starting from the entry block.
  // topologicalSort() fills the array from the back, so the returned count of
  // unfilled front slots equals the number of unreachable blocks.
  unsigned NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
  if (NumUnreachableBlocks > 0) {
    // If there were unreachable blocks shift everything down, and delete them.
    for (unsigned I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
      unsigned NI = I - NumUnreachableBlocks;
      Blocks[NI] = Blocks[I];
      Blocks[NI]->BlockID = NI;
      // FIXME: clean up predecessor pointers to unreachable blocks?
    }
    Blocks.drop(NumUnreachableBlocks);
  }
  // Compute dominators.
  for (auto *Block : Blocks)
    Block->computeDominator();
  // Once dominators have been computed, the final sort may be performed.
  unsigned NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
  assert(static_cast<size_t>(NumBlocks) == Blocks.size());
  (void) NumBlocks; // Only used by the assert above.
  // Renumber the instructions now that we have a final sort.
  renumberInstrs();
  // Compute post-dominators and compute the sizes of each node in the
  // dominator tree. Reverse order: post-dominators require successors done
  // first, and dominator subtree sizes require children before parents.
  for (auto *Block : Blocks.reverse()) {
    Block->computePostDominator();
    computeNodeSize(Block, &BasicBlock::DominatorNode);
  }
  // Compute the sizes of each node in the post-dominator tree and assign IDs in
  // the dominator tree. Forward order: IDs need parents before children.
  for (auto *Block : Blocks) {
    computeNodeID(Block, &BasicBlock::DominatorNode);
    computeNodeSize(Block, &BasicBlock::PostDominatorNode);
  }
  // Assign IDs in the post-dominator tree.
  for (auto *Block : Blocks.reverse()) {
    computeNodeID(Block, &BasicBlock::PostDominatorNode);
  }
}