// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// GraphCycles provides incremental cycle detection on a dynamic
// graph using the following algorithm:
//
// A dynamic topological sort algorithm for directed acyclic graphs
//   David J. Pearce, Paul H. J. Kelly
//   Journal of Experimental Algorithmics (JEA) JEA Homepage archive
//   Volume 11, 2006, Article No. 1.7
//
// Brief summary of the algorithm:
//
// (1) Maintain a rank for each node that is consistent
//     with the topological sort of the graph. I.e., path from x to y
//     implies rank[x] < rank[y].
// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
// (3) Otherwise: adjust ranks in the neighborhood of x and y.
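//
// Illustrative example (not from the paper): suppose rank[x] = 5 and
// rank[y] = 2. After inserting the edge x->y the existing ranks no longer
// satisfy (1), so step (3) reassigns ranks, but only among nodes whose
// current ranks lie in the range [rank[y], rank[x]] = [2, 5]; the rest of
// the graph is left untouched.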

#include "absl/base/attributes.h"
// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#include "absl/synchronization/internal/graphcycles.h"

#include <algorithm>
#include <array>
#include <cinttypes>
#include <limits>

#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

// Do not use STL. This module does not use standard memory allocation.

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {

namespace {

// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
// which people are doing things like acquiring Mutexes.
ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;

static void InitArenaIfNecessary() {
  arena_mu.Lock();
  if (arena == nullptr) {
    arena = base_internal::LowLevelAlloc::NewArena(0);
  }
  arena_mu.Unlock();
}

// Number of inlined elements in Vec. Hash table implementation
// relies on this being a power of two.
static const uint32_t kInline = 8;

// A simple LowLevelAlloc based resizable vector with inlined storage
// for a few elements. T must be a plain type since constructor
// and destructor are not run on elements of type T managed by Vec.
template <typename T>
class Vec {
 public:
  Vec() { Init(); }
  ~Vec() { Discard(); }

  void clear() {
    Discard();
    Init();
  }

  bool empty() const { return size_ == 0; }
  uint32_t size() const { return size_; }
  T* begin() { return ptr_; }
  T* end() { return ptr_ + size_; }
  const T& operator[](uint32_t i) const { return ptr_[i]; }
  T& operator[](uint32_t i) { return ptr_[i]; }
  const T& back() const { return ptr_[size_-1]; }
  void pop_back() { size_--; }

  void push_back(const T& v) {
    if (size_ == capacity_) Grow(size_ + 1);
    ptr_[size_] = v;
    size_++;
  }

  void resize(uint32_t n) {
    if (n > capacity_) Grow(n);
    size_ = n;
  }

  void fill(const T& val) {
    for (uint32_t i = 0; i < size(); i++) {
      ptr_[i] = val;
    }
  }

  // Guarantees src is empty at end.
  // Provided for the hash table resizing code below.
  void MoveFrom(Vec<T>* src) {
    if (src->ptr_ == src->space_) {
      // Need to actually copy
      resize(src->size_);
      std::copy_n(src->ptr_, src->size_, ptr_);
      src->size_ = 0;
    } else {
      Discard();
      ptr_ = src->ptr_;
      size_ = src->size_;
      capacity_ = src->capacity_;
      src->Init();
    }
  }

 private:
  T* ptr_;
  T space_[kInline];
  uint32_t size_;
  uint32_t capacity_;

  void Init() {
    ptr_ = space_;
    size_ = 0;
    capacity_ = kInline;
  }

  void Discard() {
    if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
  }

  void Grow(uint32_t n) {
    while (capacity_ < n) {
      capacity_ *= 2;
    }
    size_t request = static_cast<size_t>(capacity_) * sizeof(T);
    T* copy = static_cast<T*>(
        base_internal::LowLevelAlloc::AllocWithArena(request, arena));
    std::copy_n(ptr_, size_, copy);
    Discard();
    ptr_ = copy;
  }

  Vec(const Vec&) = delete;
  Vec& operator=(const Vec&) = delete;
};

// A hash set of non-negative int32_t that uses Vec for its underlying storage.
class NodeSet {
 public:
  NodeSet() { Init(); }

  void clear() { Init(); }
  bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }

  bool insert(int32_t v) {
    uint32_t i = FindIndex(v);
    if (table_[i] == v) {
      return false;
    }
    if (table_[i] == kEmpty) {
      // Only inserting over an empty cell increases the number of occupied
      // slots.
      occupied_++;
    }
    table_[i] = v;
    // Double when 75% full.
    if (occupied_ >= table_.size() - table_.size()/4) Grow();
    return true;
  }

  void erase(int32_t v) {
    uint32_t i = FindIndex(v);
    if (table_[i] == v) {
      table_[i] = kDel;
    }
  }

  // Iteration: is done via HASH_FOR_EACH
  // Example:
  //    HASH_FOR_EACH(elem, node->out) { ... }
#define HASH_FOR_EACH(elem, eset) \
  for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
  bool Next(int32_t* cursor, int32_t* elem) {
    while (static_cast<uint32_t>(*cursor) < table_.size()) {
      int32_t v = table_[static_cast<uint32_t>(*cursor)];
      (*cursor)++;
      if (v >= 0) {
        *elem = v;
        return true;
      }
    }
    return false;
  }

 private:
  enum : int32_t { kEmpty = -1, kDel = -2 };
  Vec<int32_t> table_;
  uint32_t occupied_;  // Count of non-empty slots (includes deleted slots)

  static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }

  // Return index for storing v. May return an empty index or deleted index
  uint32_t FindIndex(int32_t v) const {
    // Search starting at hash index.
    const uint32_t mask = table_.size() - 1;
    uint32_t i = Hash(v) & mask;
    uint32_t deleted_index = 0;  // index of first deleted element we see
    bool seen_deleted_element = false;
    while (true) {
      int32_t e = table_[i];
      if (v == e) {
        return i;
      } else if (e == kEmpty) {
        // Return any previously encountered deleted slot.
        return seen_deleted_element ? deleted_index : i;
      } else if (e == kDel && !seen_deleted_element) {
        // Keep searching since v might be present later.
        deleted_index = i;
        seen_deleted_element = true;
      }
      i = (i + 1) & mask;  // Linear probing; quadratic is slightly slower.
    }
  }

  void Init() {
    table_.clear();
    table_.resize(kInline);
    table_.fill(kEmpty);
    occupied_ = 0;
  }

  void Grow() {
    Vec<int32_t> copy;
    copy.MoveFrom(&table_);
    occupied_ = 0;
    table_.resize(copy.size() * 2);
    table_.fill(kEmpty);
    for (const auto& e : copy) {
      if (e >= 0) insert(e);
    }
  }

  NodeSet(const NodeSet&) = delete;
  NodeSet& operator=(const NodeSet&) = delete;
};

// We encode a node index and a node version in GraphId. The version
// number is incremented when the GraphId is freed which automatically
// invalidates all copies of the GraphId.

inline GraphId MakeId(int32_t index, uint32_t version) {
  GraphId g;
  g.handle =
      (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
  return g;
}

inline int32_t NodeIndex(GraphId id) {
  return static_cast<int32_t>(id.handle);
}

inline uint32_t NodeVersion(GraphId id) {
  return static_cast<uint32_t>(id.handle >> 32);
}

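// For example, MakeId(3, 2) produces handle 0x0000000200000003: NodeIndex()
// recovers index 3 from the low 32 bits and NodeVersion() recovers version 2
// from the high 32 bits.
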
struct Node {
  int32_t rank;          // rank number assigned by Pearce-Kelly algorithm
  uint32_t version;      // Current version number
  int32_t next_hash;     // Next entry in hash table
  bool visited;          // Temporary marker used by depth-first-search
  uintptr_t masked_ptr;  // User-supplied pointer
  NodeSet in;            // List of immediate predecessor nodes in graph
  NodeSet out;           // List of immediate successor nodes in graph
  int priority;          // Priority of recorded stack trace.
  int nstack;            // Depth of recorded stack trace.
  void* stack[40];       // stack[0,nstack-1] holds stack trace for node.
};

// Hash table for pointer to node index lookups.
class PointerMap {
 public:
  explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
    table_.fill(-1);
  }

  int32_t Find(void* ptr) {
    auto masked = base_internal::HidePtr(ptr);
    for (int32_t i = table_[Hash(ptr)]; i != -1;) {
      Node* n = (*nodes_)[static_cast<uint32_t>(i)];
      if (n->masked_ptr == masked) return i;
      i = n->next_hash;
    }
    return -1;
  }

  void Add(void* ptr, int32_t i) {
    int32_t* head = &table_[Hash(ptr)];
    (*nodes_)[static_cast<uint32_t>(i)]->next_hash = *head;
    *head = i;
  }

  int32_t Remove(void* ptr) {
    // Advance through linked list while keeping track of the
    // predecessor slot that points to the current entry.
    auto masked = base_internal::HidePtr(ptr);
    for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
      int32_t index = *slot;
      Node* n = (*nodes_)[static_cast<uint32_t>(index)];
      if (n->masked_ptr == masked) {
        *slot = n->next_hash;  // Remove n from linked list
        n->next_hash = -1;
        return index;
      }
      slot = &n->next_hash;
    }
    return -1;
  }

 private:
  // Number of buckets in hash table for pointer lookups.
  static constexpr uint32_t kHashTableSize = 8171;  // should be prime

  const Vec<Node*>* nodes_;
  std::array<int32_t, kHashTableSize> table_;

  static uint32_t Hash(void* ptr) {
    return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
  }
};

}  // namespace

struct GraphCycles::Rep {
  Vec<Node*> nodes_;
  Vec<int32_t> free_nodes_;  // Indices for unused entries in nodes_
  PointerMap ptrmap_;

  // Temporary state.
  Vec<int32_t> deltaf_;  // Results of forward DFS
  Vec<int32_t> deltab_;  // Results of backward DFS
  Vec<int32_t> list_;    // All nodes to reprocess
  Vec<int32_t> merged_;  // Rank values to assign to list_ entries
  Vec<int32_t> stack_;   // Emulates recursion stack for depth-first searches

  Rep() : ptrmap_(&nodes_) {}
};

static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
  Node* n = rep->nodes_[static_cast<uint32_t>(NodeIndex(id))];
  return (n->version == NodeVersion(id)) ? n : nullptr;
}

GraphCycles::GraphCycles() {
  InitArenaIfNecessary();
  rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
      Rep;
}

GraphCycles::~GraphCycles() {
  for (auto* node : rep_->nodes_) {
    node->Node::~Node();
    base_internal::LowLevelAlloc::Free(node);
  }
  rep_->Rep::~Rep();
  base_internal::LowLevelAlloc::Free(rep_);
}

bool GraphCycles::CheckInvariants() const {
  Rep* r = rep_;
  NodeSet ranks;  // Set of ranks seen so far.
  for (uint32_t x = 0; x < r->nodes_.size(); x++) {
    Node* nx = r->nodes_[x];
    void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
    if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
      ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %" PRIu32 " %p",
                   x, ptr);
    }
    if (nx->visited) {
      ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %" PRIu32, x);
    }
    if (!ranks.insert(nx->rank)) {
      ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %" PRId32, nx->rank);
    }
    HASH_FOR_EACH(y, nx->out) {
      Node* ny = r->nodes_[static_cast<uint32_t>(y)];
      if (nx->rank >= ny->rank) {
        ABSL_RAW_LOG(FATAL,
                     "Edge %" PRIu32 " ->%" PRId32
                     " has bad rank assignment %" PRId32 "->%" PRId32,
                     x, y, nx->rank, ny->rank);
      }
    }
  }
  return true;
}

GraphId GraphCycles::GetId(void* ptr) {
  int32_t i = rep_->ptrmap_.Find(ptr);
  if (i != -1) {
    return MakeId(i, rep_->nodes_[static_cast<uint32_t>(i)]->version);
  } else if (rep_->free_nodes_.empty()) {
    Node* n =
        new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
            Node;
    n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
    n->visited = false;
    n->rank = static_cast<int32_t>(rep_->nodes_.size());
    n->masked_ptr = base_internal::HidePtr(ptr);
    n->nstack = 0;
    n->priority = 0;
    rep_->nodes_.push_back(n);
    rep_->ptrmap_.Add(ptr, n->rank);
    return MakeId(n->rank, n->version);
  } else {
    // Preserve preceding rank since the set of ranks in use must be
    // a permutation of [0,rep_->nodes_.size()-1].
    int32_t r = rep_->free_nodes_.back();
    rep_->free_nodes_.pop_back();
    Node* n = rep_->nodes_[static_cast<uint32_t>(r)];
    n->masked_ptr = base_internal::HidePtr(ptr);
    n->nstack = 0;
    n->priority = 0;
    rep_->ptrmap_.Add(ptr, r);
    return MakeId(r, n->version);
  }
}

void GraphCycles::RemoveNode(void* ptr) {
  int32_t i = rep_->ptrmap_.Remove(ptr);
  if (i == -1) {
    return;
  }
  Node* x = rep_->nodes_[static_cast<uint32_t>(i)];
  HASH_FOR_EACH(y, x->out) {
    rep_->nodes_[static_cast<uint32_t>(y)]->in.erase(i);
  }
  HASH_FOR_EACH(y, x->in) {
    rep_->nodes_[static_cast<uint32_t>(y)]->out.erase(i);
  }
  x->in.clear();
  x->out.clear();
  x->masked_ptr = base_internal::HidePtr<void>(nullptr);
  if (x->version == std::numeric_limits<uint32_t>::max()) {
    // Cannot use x any more
  } else {
    x->version++;  // Invalidates all copies of node.
    rep_->free_nodes_.push_back(i);
  }
}

void* GraphCycles::Ptr(GraphId id) {
  Node* n = FindNode(rep_, id);
  return n == nullptr ? nullptr
                      : base_internal::UnhidePtr<void>(n->masked_ptr);
}

bool GraphCycles::HasNode(GraphId node) {
  return FindNode(rep_, node) != nullptr;
}

bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
  Node* xn = FindNode(rep_, x);
  return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
}

void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
  Node* xn = FindNode(rep_, x);
  Node* yn = FindNode(rep_, y);
  if (xn && yn) {
    xn->out.erase(NodeIndex(y));
    yn->in.erase(NodeIndex(x));
    // No need to update the rank assignment since a previous valid
    // rank assignment remains valid after an edge deletion.
  }
}

static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void MoveToList(
    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);

bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
  Rep* r = rep_;
  const int32_t x = NodeIndex(idx);
  const int32_t y = NodeIndex(idy);
  Node* nx = FindNode(r, idx);
  Node* ny = FindNode(r, idy);
  if (nx == nullptr || ny == nullptr) return true;  // Expired ids

  if (nx == ny) return false;  // Self edge
  if (!nx->out.insert(y)) {
    // Edge already exists.
    return true;
  }

  ny->in.insert(x);

  if (nx->rank <= ny->rank) {
    // New edge is consistent with existing rank assignment.
    return true;
  }

  // Current rank assignments are incompatible with the new edge. Recompute.
  // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
  if (!ForwardDFS(r, y, nx->rank)) {
    // Found a cycle. Undo the insertion and tell caller.
    nx->out.erase(y);
    ny->in.erase(x);
    // Since we do not call Reorder() on this path, clear any visited
    // markers left by ForwardDFS.
    for (const auto& d : r->deltaf_) {
      r->nodes_[static_cast<uint32_t>(d)]->visited = false;
    }
    return false;
  }
  BackwardDFS(r, x, ny->rank);
  Reorder(r);
  return true;
}

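// ForwardDFS explores the successors of n whose ranks are below upper_bound,
// marking each as visited and recording it in r->deltaf_. It returns false as
// soon as it reaches a node whose rank equals upper_bound (i.e. the source of
// the edge being inserted), which means the new edge would close a cycle.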
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
  // Avoid recursion since stack space might be limited.
  // We instead keep a stack of nodes to visit.
  r->deltaf_.clear();
  r->stack_.clear();
  r->stack_.push_back(n);

  while (!r->stack_.empty()) {
    n = r->stack_.back();
    r->stack_.pop_back();
    Node* nn = r->nodes_[static_cast<uint32_t>(n)];
    if (nn->visited) continue;

    nn->visited = true;
    r->deltaf_.push_back(n);

    HASH_FOR_EACH(w, nn->out) {
      Node* nw = r->nodes_[static_cast<uint32_t>(w)];
      if (nw->rank == upper_bound) {
        return false;  // Cycle
      }
      if (!nw->visited && nw->rank < upper_bound) {
        r->stack_.push_back(w);
      }
    }
  }
  return true;
}

static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
  r->deltab_.clear();
  r->stack_.clear();
  r->stack_.push_back(n);

  while (!r->stack_.empty()) {
    n = r->stack_.back();
    r->stack_.pop_back();
    Node* nn = r->nodes_[static_cast<uint32_t>(n)];
    if (nn->visited) continue;

    nn->visited = true;
    r->deltab_.push_back(n);

    HASH_FOR_EACH(w, nn->in) {
      Node* nw = r->nodes_[static_cast<uint32_t>(w)];
      if (!nw->visited && lower_bound < nw->rank) {
        r->stack_.push_back(w);
      }
    }
  }
}

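// Reorder restores invariant (1) after an edge insertion by reusing the ranks
// already held by the affected nodes: both delta lists are sorted by current
// rank and concatenated into list_ (backward set first), their existing ranks
// are merged into merged_ in ascending order, and merged_[i] is assigned to
// list_[i]. Every node found by BackwardDFS therefore ends up ranked before
// every node found by ForwardDFS.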
static void Reorder(GraphCycles::Rep* r) {
  Sort(r->nodes_, &r->deltab_);
  Sort(r->nodes_, &r->deltaf_);

  // Adds contents of delta lists to list_ (backwards deltas first).
  r->list_.clear();
  MoveToList(r, &r->deltab_, &r->list_);
  MoveToList(r, &r->deltaf_, &r->list_);

  // Produce sorted list of all ranks that will be reassigned.
  r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
  std::merge(r->deltab_.begin(), r->deltab_.end(),
             r->deltaf_.begin(), r->deltaf_.end(),
             r->merged_.begin());

  // Assign the ranks in order to the collected list.
  for (uint32_t i = 0; i < r->list_.size(); i++) {
    r->nodes_[static_cast<uint32_t>(r->list_[i])]->rank = r->merged_[i];
  }
}

static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
  struct ByRank {
    const Vec<Node*>* nodes;
    bool operator()(int32_t a, int32_t b) const {
      return (*nodes)[static_cast<uint32_t>(a)]->rank <
             (*nodes)[static_cast<uint32_t>(b)]->rank;
    }
  };
  ByRank cmp;
  cmp.nodes = &nodes;
  std::sort(delta->begin(), delta->end(), cmp);
}

static void MoveToList(
    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
  for (auto& v : *src) {
    int32_t w = v;
    // Replace v entry with its rank
    v = r->nodes_[static_cast<uint32_t>(w)]->rank;
    // Prepare for future DFS calls
    r->nodes_[static_cast<uint32_t>(w)]->visited = false;
    dst->push_back(w);
  }
}

int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
                          GraphId path[]) const {
  Rep* r = rep_;
  if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
  const int32_t x = NodeIndex(idx);
  const int32_t y = NodeIndex(idy);

  // Forward depth first search starting at x until we hit y.
  // As we descend into a node, we push it onto the path.
  // As we leave a node, we remove it from the path.
  int path_len = 0;

  NodeSet seen;
  r->stack_.clear();
  r->stack_.push_back(x);
  while (!r->stack_.empty()) {
    int32_t n = r->stack_.back();
    r->stack_.pop_back();
    if (n < 0) {
      // Marker to indicate that we are leaving a node
      path_len--;
      continue;
    }

    if (path_len < max_path_len) {
      path[path_len] =
          MakeId(n, rep_->nodes_[static_cast<uint32_t>(n)]->version);
    }
    path_len++;
    r->stack_.push_back(-1);  // Will remove tentative path entry

    if (n == y) {
      return path_len;
    }

    HASH_FOR_EACH(w, r->nodes_[static_cast<uint32_t>(n)]->out) {
      if (seen.insert(w)) {
        r->stack_.push_back(w);
      }
    }
  }

  return 0;
}

bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
  return FindPath(x, y, 0, nullptr) > 0;
}

void GraphCycles::UpdateStackTrace(GraphId id, int priority,
                                   int (*get_stack_trace)(void** stack, int)) {
  Node* n = FindNode(rep_, id);
  if (n == nullptr || n->priority >= priority) {
    return;
  }
  n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
  n->priority = priority;
}

int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
  Node* n = FindNode(rep_, id);
  if (n == nullptr) {
    *ptr = nullptr;
    return 0;
  } else {
    *ptr = n->stack;
    return n->nstack;
  }
}

}  // namespace synchronization_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING