//===-- Clustering.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Clustering.h"
#include "Error.h"
#include "SchedClassResolution.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <deque>
#include <string>
#include <vector>

namespace llvm {
namespace exegesis {

// The clustering problem has the following characteristics:
// (A) - Low dimension (dimensions are typically proc resource units,
//    typically < 10).
// (B) - Number of points: ~thousands (points are measurements of an MCInst).
// (C) - Number of clusters: ~tens.
// (D) - The number of clusters is not known /a priori/.
// (E) - The amount of noise is relatively small.
// The problem is rather small. In terms of algorithms, (D) disqualifies
// k-means and makes algorithms such as DBSCAN[1] or OPTICS[2] more applicable.
//
// We've used DBSCAN here because it's simple to implement. This is a pretty
// straightforward and inefficient implementation of the pseudocode in [1].
//
// [1] https://en.wikipedia.org/wiki/DBSCAN
// [2] https://en.wikipedia.org/wiki/OPTICS_algorithm

// Finds the points at distance less than sqrt(EpsilonSquared) of Q (not
// including Q).
void InstructionBenchmarkClustering::rangeQuery(
    const size_t Q, std::vector<size_t> &Neighbors) const {
  Neighbors.clear();
  Neighbors.reserve(Points_.size() - 1); // The Q itself isn't a neighbor.
  const auto &QMeasurements = Points_[Q].Measurements;
  for (size_t P = 0, NumPoints = Points_.size(); P < NumPoints; ++P) {
    if (P == Q)
      continue;
    const auto &PMeasurements = Points_[P].Measurements;
    if (PMeasurements.empty()) // Error point.
      continue;
    if (isNeighbour(PMeasurements, QMeasurements,
                    AnalysisClusteringEpsilonSquared_)) {
      Neighbors.push_back(P);
    }
  }
}
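
// Illustrative example: the epsilon is kept pre-squared so that neighbour
// checks need no sqrt() per pair. Assuming isNeighbour() (declared in
// Clustering.h) sums the squared per-dimension differences, then with
// AnalysisClusteringEpsilon = 0.1 (so EpsilonSquared = 0.01) a point P counts
// as a neighbour of Q when
//   (P[0] - Q[0])^2 + ... + (P[D-1] - Q[D-1])^2
// is within 0.01, where D is the number of measurement dimensions.
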
// Given a set of points, checks that all the points are neighbours
// up to AnalysisClusteringEpsilon. This is O(2*N).
bool InstructionBenchmarkClustering::areAllNeighbours(
    ArrayRef<size_t> Pts) const {
  // First, get the centroid of this group of points. This is O(N).
  SchedClassClusterCentroid G;
  for (size_t P : Pts) {
    assert(P < Points_.size());
    ArrayRef<BenchmarkMeasure> Measurements = Points_[P].Measurements;
    if (Measurements.empty()) // Error point.
      continue;
    G.addPoint(Measurements);
  }
  const std::vector<BenchmarkMeasure> Centroid = G.getAsPoint();

  // Since we will be comparing with the centroid, we need to halve the epsilon:
  // by the triangle inequality, if every point is within epsilon/2 of the
  // centroid, then any two points are within epsilon of each other.
  double AnalysisClusteringEpsilonHalvedSquared =
      AnalysisClusteringEpsilonSquared_ / 4.0;

  // And now check that every point is a neighbour of the centroid. Also O(N).
  return all_of(
      Pts, [this, &Centroid, AnalysisClusteringEpsilonHalvedSquared](size_t P) {
        assert(P < Points_.size());
        const auto &PMeasurements = Points_[P].Measurements;
        if (PMeasurements.empty()) // Error point.
          return true;             // Pretend that error point is a neighbour.
        return isNeighbour(PMeasurements, Centroid,
                           AnalysisClusteringEpsilonHalvedSquared);
      });
}

InstructionBenchmarkClustering::InstructionBenchmarkClustering(
    const std::vector<InstructionBenchmark> &Points,
    const double AnalysisClusteringEpsilonSquared)
    : Points_(Points),
      AnalysisClusteringEpsilonSquared_(AnalysisClusteringEpsilonSquared),
      NoiseCluster_(ClusterId::noise()), ErrorCluster_(ClusterId::error()) {}

Error InstructionBenchmarkClustering::validateAndSetup() {
  ClusterIdForPoint_.resize(Points_.size());
  // Mark erroneous measurements out.
  // All points must have the same number of dimensions, in the same order.
  const std::vector<BenchmarkMeasure> *LastMeasurement = nullptr;
  for (size_t P = 0, NumPoints = Points_.size(); P < NumPoints; ++P) {
    const auto &Point = Points_[P];
    if (!Point.Error.empty()) {
      ClusterIdForPoint_[P] = ClusterId::error();
      ErrorCluster_.PointIndices.push_back(P);
      continue;
    }
    const auto *CurMeasurement = &Point.Measurements;
    if (LastMeasurement) {
      if (LastMeasurement->size() != CurMeasurement->size()) {
        return make_error<ClusteringError>(
            "inconsistent measurement dimensions");
      }
      for (size_t I = 0, E = LastMeasurement->size(); I < E; ++I) {
        if (LastMeasurement->at(I).Key != CurMeasurement->at(I).Key) {
          return make_error<ClusteringError>(
              "inconsistent measurement dimensions keys");
        }
      }
    }
    LastMeasurement = CurMeasurement;
  }
  if (LastMeasurement) {
    NumDimensions_ = LastMeasurement->size();
  }
  return Error::success();
}

void InstructionBenchmarkClustering::clusterizeDbScan(const size_t MinPts) {
  std::vector<size_t> Neighbors; // Persistent buffer to avoid allocs.
  for (size_t P = 0, NumPoints = Points_.size(); P < NumPoints; ++P) {
    if (!ClusterIdForPoint_[P].isUndef())
      continue; // Previously processed in inner loop.
    rangeQuery(P, Neighbors);
    if (Neighbors.size() + 1 < MinPts) { // Density check.
      // The region around P is not dense enough to create a new cluster, mark
      // as noise for now.
      ClusterIdForPoint_[P] = ClusterId::noise();
      continue;
    }

    // Create a new cluster, add P.
    Clusters_.emplace_back(ClusterId::makeValid(Clusters_.size()));
    Cluster &CurrentCluster = Clusters_.back();
    ClusterIdForPoint_[P] = CurrentCluster.Id; /* Label initial point */
    CurrentCluster.PointIndices.push_back(P);

    // Process P's neighbors.
    SetVector<size_t, std::deque<size_t>> ToProcess;
    ToProcess.insert(Neighbors.begin(), Neighbors.end());
    while (!ToProcess.empty()) {
      // Retrieve a point from the set.
      const size_t Q = *ToProcess.begin();
      ToProcess.erase(ToProcess.begin());

      if (ClusterIdForPoint_[Q].isNoise()) {
        // Change noise point to border point.
        ClusterIdForPoint_[Q] = CurrentCluster.Id;
        CurrentCluster.PointIndices.push_back(Q);
        continue;
      }
      if (!ClusterIdForPoint_[Q].isUndef()) {
        continue; // Previously processed.
      }
      // Add Q to the current cluster.
      ClusterIdForPoint_[Q] = CurrentCluster.Id;
      CurrentCluster.PointIndices.push_back(Q);
      // And extend to the neighbors of Q if the region is dense enough.
      rangeQuery(Q, Neighbors);
      if (Neighbors.size() + 1 >= MinPts) {
        ToProcess.insert(Neighbors.begin(), Neighbors.end());
      }
    }
  }
  // assert(Neighbors.capacity() == (Points_.size() - 1));
  // ^ True, but it is not guaranteed to be true in all the cases.

  // Add noisy points to noise cluster.
  for (size_t P = 0, NumPoints = Points_.size(); P < NumPoints; ++P) {
    if (ClusterIdForPoint_[P].isNoise()) {
      NoiseCluster_.PointIndices.push_back(P);
    }
  }
}

void InstructionBenchmarkClustering::clusterizeNaive(
    const MCSubtargetInfo &SubtargetInfo, const MCInstrInfo &InstrInfo) {
  // Given an instruction Opcode, which sched class id's are represented,
  // and which are the benchmarks for each sched class?
  std::vector<SmallMapVector<unsigned, SmallVector<size_t, 1>, 1>>
      OpcodeToSchedClassesToPoints;
  const unsigned NumOpcodes = InstrInfo.getNumOpcodes();
  OpcodeToSchedClassesToPoints.resize(NumOpcodes);

  size_t NumClusters = 0;
  for (size_t P = 0, NumPoints = Points_.size(); P < NumPoints; ++P) {
    const InstructionBenchmark &Point = Points_[P];
    const MCInst &MCI = Point.keyInstruction();
    unsigned SchedClassId;
    std::tie(SchedClassId, std::ignore) =
        ResolvedSchedClass::resolveSchedClassId(SubtargetInfo, InstrInfo, MCI);
    const unsigned Opcode = MCI.getOpcode();
    assert(Opcode < NumOpcodes && "NumOpcodes is incorrect (too small)");
    auto &Points = OpcodeToSchedClassesToPoints[Opcode][SchedClassId];
    if (Points.empty()) // If we previously have not seen any points of
      ++NumClusters;    // this opcode's sched class, then a new cluster begins.
    Points.emplace_back(P);
  }

  assert(NumClusters <= NumOpcodes &&
         "can't have more clusters than there are total opcodes");
  assert(NumClusters <= Points_.size() &&
         "can't have more clusters than there are total points");

  Clusters_.reserve(NumClusters); // We already know how many clusters there are.
  for (const auto &SchedClassesOfOpcode : OpcodeToSchedClassesToPoints) {
    if (SchedClassesOfOpcode.empty())
      continue;
    for (ArrayRef<size_t> PointsOfSchedClass :
         make_second_range(SchedClassesOfOpcode)) {
      if (PointsOfSchedClass.empty())
        continue;
      // Create a new cluster.
      Clusters_.emplace_back(ClusterId::makeValid(
          Clusters_.size(),
          /*IsUnstable=*/!areAllNeighbours(PointsOfSchedClass)));
      Cluster &CurrentCluster = Clusters_.back();
      // Mark points as belonging to the new cluster.
      for (size_t P : PointsOfSchedClass)
        ClusterIdForPoint_[P] = CurrentCluster.Id;
      // And add all the points of this opcode's sched class to the new cluster.
      CurrentCluster.PointIndices.reserve(PointsOfSchedClass.size());
      CurrentCluster.PointIndices.assign(PointsOfSchedClass.begin(),
                                         PointsOfSchedClass.end());
      assert(CurrentCluster.PointIndices.size() == PointsOfSchedClass.size());
    }
  }
  assert(Clusters_.size() == NumClusters);
}
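
// Illustrative example: under naive clustering each (opcode, resolved sched
// class) pair yields exactly one cluster. If a hypothetical opcode FOO32rr
// were benchmarked five times and all five runs resolved to the same sched
// class, those five points would form a single cluster, flagged unstable only
// if they are not all neighbours of their common centroid (areAllNeighbours).
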
// Given an instruction Opcode, we can make benchmarks (measurements) of the
// instruction characteristics/performance. Then, to facilitate further analysis
// we group the benchmarks with *similar* characteristics into clusters.
// Now, this is all not entirely deterministic. Some instructions have variable
// characteristics, depending on their arguments. And thus, if we do several
// benchmarks of the same instruction Opcode, we may end up with *different*
// performance characteristics measurements. And when we then do clustering,
// these several benchmarks of the same instruction Opcode may end up being
// clustered into *different* clusters. This is not great for further analysis.
// We shall find every opcode with benchmarks not in just one cluster, and move
// *all* the benchmarks of said Opcode into one new unstable cluster per Opcode.
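//
// Illustrative example: if the benchmarks of a hypothetical opcode FOO32rr
// (with identical Config) landed in clusters #1 and #4 after DBSCAN,
// stabilize() creates one new unstable cluster, moves every FOO32rr point out
// of #1 and #4 into it, and leaves the other points of #1 and #4 untouched.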
void InstructionBenchmarkClustering::stabilize(unsigned NumOpcodes) {
  // Given an instruction Opcode and Config, in which clusters do benchmarks of
  // this instruction lie? Normally, they all should be in the same cluster.
  struct OpcodeAndConfig {
    explicit OpcodeAndConfig(const InstructionBenchmark &IB)
        : Opcode(IB.keyInstruction().getOpcode()), Config(&IB.Key.Config) {}
    unsigned Opcode;
    const std::string *Config;

    auto Tie() const -> auto { return std::tie(Opcode, *Config); }

    bool operator<(const OpcodeAndConfig &O) const { return Tie() < O.Tie(); }
    bool operator!=(const OpcodeAndConfig &O) const { return Tie() != O.Tie(); }
  };
  std::map<OpcodeAndConfig, SmallSet<ClusterId, 1>> OpcodeConfigToClusterIDs;

  // Populate OpcodeConfigToClusterIDs and UnstableOpcodes data structures.
  assert(ClusterIdForPoint_.size() == Points_.size() && "size mismatch");
  for (auto Point : zip(Points_, ClusterIdForPoint_)) {
    const ClusterId &ClusterIdOfPoint = std::get<1>(Point);
    if (!ClusterIdOfPoint.isValid())
      continue; // Only process fully valid clusters.
    const OpcodeAndConfig Key(std::get<0>(Point));
    SmallSet<ClusterId, 1> &ClusterIDsOfOpcode = OpcodeConfigToClusterIDs[Key];
    ClusterIDsOfOpcode.insert(ClusterIdOfPoint);
  }

  for (const auto &OpcodeConfigToClusterID : OpcodeConfigToClusterIDs) {
    const SmallSet<ClusterId, 1> &ClusterIDs = OpcodeConfigToClusterID.second;
    const OpcodeAndConfig &Key = OpcodeConfigToClusterID.first;
    // We only care about unstable instructions.
    if (ClusterIDs.size() < 2)
      continue;

    // Create a new unstable cluster, one per Opcode.
    Clusters_.emplace_back(ClusterId::makeValidUnstable(Clusters_.size()));
    Cluster &UnstableCluster = Clusters_.back();
    // We will find *at least* one point in each of these clusters.
    UnstableCluster.PointIndices.reserve(ClusterIDs.size());

    // Go through every cluster which we recorded as containing benchmarks
    // of this UnstableOpcode. NOTE: we only recorded valid clusters.
    for (const ClusterId &CID : ClusterIDs) {
      assert(CID.isValid() &&
             "We only recorded valid clusters, not noise/error clusters.");
      Cluster &OldCluster = Clusters_[CID.getId()]; // Valid clusters storage.
      // Within each cluster, go through each point, and either move it to the
      // new unstable cluster, or 'keep' it.
      // In this case, we'll reshuffle OldCluster.PointIndices vector
      // so that all the points that are *not* for UnstableOpcode are first,
      // and the rest of the points are for the UnstableOpcode.
      const auto it = std::stable_partition(
          OldCluster.PointIndices.begin(), OldCluster.PointIndices.end(),
          [this, &Key](size_t P) {
            return OpcodeAndConfig(Points_[P]) != Key;
          });
      assert(std::distance(it, OldCluster.PointIndices.end()) > 0 &&
             "Should have found at least one bad point");
      // Mark to-be-moved points as belonging to the new cluster.
      std::for_each(it, OldCluster.PointIndices.end(),
                    [this, &UnstableCluster](size_t P) {
                      ClusterIdForPoint_[P] = UnstableCluster.Id;
                    });
      // Actually append to-be-moved points to the new cluster.
      UnstableCluster.PointIndices.insert(UnstableCluster.PointIndices.end(),
                                          it, OldCluster.PointIndices.end());
      // And finally, remove "to-be-moved" points from the old cluster.
      OldCluster.PointIndices.erase(it, OldCluster.PointIndices.end());

      // Now, the old cluster may end up being empty, but let's just keep it
      // in whatever state it ended up. Purging empty clusters isn't worth it.
    }
    assert(UnstableCluster.PointIndices.size() > 1 &&
           "New unstable cluster should end up with more than one point.");
    assert(UnstableCluster.PointIndices.size() >= ClusterIDs.size() &&
           "New unstable cluster should end up with no fewer points than there "
           "were clusters");
  }
}

Expected<InstructionBenchmarkClustering> InstructionBenchmarkClustering::create(
    const std::vector<InstructionBenchmark> &Points, const ModeE Mode,
    const size_t DbscanMinPts, const double AnalysisClusteringEpsilon,
    const MCSubtargetInfo *SubtargetInfo, const MCInstrInfo *InstrInfo) {
  InstructionBenchmarkClustering Clustering(
      Points, AnalysisClusteringEpsilon * AnalysisClusteringEpsilon);
  if (auto Error = Clustering.validateAndSetup()) {
    return std::move(Error);
  }
  if (Clustering.ErrorCluster_.PointIndices.size() == Points.size()) {
    return Clustering; // Nothing to cluster.
  }

  if (Mode == ModeE::Dbscan) {
    Clustering.clusterizeDbScan(DbscanMinPts);

    if (InstrInfo)
      Clustering.stabilize(InstrInfo->getNumOpcodes());
  } else /*if(Mode == ModeE::Naive)*/ {
    if (!SubtargetInfo || !InstrInfo)
      return make_error<Failure>("'naive' clustering mode requires "
                                 "SubtargetInfo and InstrInfo to be present");
    Clustering.clusterizeNaive(*SubtargetInfo, *InstrInfo);
  }

  return Clustering;
}
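
// Usage sketch (illustrative only; the surrounding variables are placeholders
// owned by the caller, and the parameter values are arbitrary):
//   Expected<InstructionBenchmarkClustering> Clustering =
//       InstructionBenchmarkClustering::create(
//           Points, InstructionBenchmarkClustering::ModeE::Dbscan,
//           /*DbscanMinPts=*/3, /*AnalysisClusteringEpsilon=*/0.1,
//           &SubtargetInfo, &InstrInfo);
//   if (!Clustering)
//     return Clustering.takeError(); // Propagate validation/setup errors.
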
void SchedClassClusterCentroid::addPoint(ArrayRef<BenchmarkMeasure> Point) {
  if (Representative.empty())
    Representative.resize(Point.size());
  assert(Representative.size() == Point.size() &&
         "All points should have identical dimensions.");

  for (auto I : zip(Representative, Point))
    std::get<0>(I).push(std::get<1>(I));
}

std::vector<BenchmarkMeasure> SchedClassClusterCentroid::getAsPoint() const {
  std::vector<BenchmarkMeasure> ClusterCenterPoint(Representative.size());
  for (auto I : zip(ClusterCenterPoint, Representative))
    std::get<0>(I).PerInstructionValue = std::get<1>(I).avg();
  return ClusterCenterPoint;
}
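
// Illustrative example: assuming each Representative entry accumulates pushed
// values and avg() returns their mean, adding the one-dimensional points
// {2.0} and {4.0} via addPoint() makes getAsPoint() return a single
// BenchmarkMeasure with PerInstructionValue == 3.0.
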
bool SchedClassClusterCentroid::validate(
    InstructionBenchmark::ModeE Mode) const {
  size_t NumMeasurements = Representative.size();
  switch (Mode) {
  case InstructionBenchmark::Latency:
    if (NumMeasurements != 1) {
      errs()
          << "invalid number of measurements in latency mode: expected 1, got "
          << NumMeasurements << "\n";
      return false;
    }
    break;
  case InstructionBenchmark::Uops:
    // Can have many measurements.
    break;
  case InstructionBenchmark::InverseThroughput:
    if (NumMeasurements != 1) {
      errs() << "invalid number of measurements in inverse throughput "
                "mode: expected 1, got "
             << NumMeasurements << "\n";
      return false;
    }
    break;
  default:
    llvm_unreachable("unimplemented measurement matching mode");
    return false;
  }

  return true; // All good.
}

} // namespace exegesis
} // namespace llvm