  1. //===- VPlanSLP.cpp - SLP Analysis based on VPlan -------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. /// This file implements SLP analysis based on VPlan. The analysis is based on
  9. /// the ideas described in
  10. ///
  11. /// Look-ahead SLP: auto-vectorization in the presence of commutative
  12. /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
  13. /// Luís F. W. Góes
  14. ///
  15. //===----------------------------------------------------------------------===//
  16. #include "VPlan.h"
  17. #include "VPlanValue.h"
  18. #include "llvm/ADT/DenseMap.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/Analysis/VectorUtils.h"
  21. #include "llvm/IR/Instruction.h"
  22. #include "llvm/IR/Instructions.h"
  23. #include "llvm/IR/Type.h"
  24. #include "llvm/IR/Value.h"
  25. #include "llvm/Support/Casting.h"
  26. #include "llvm/Support/Debug.h"
  27. #include "llvm/Support/ErrorHandling.h"
  28. #include "llvm/Support/raw_ostream.h"
  29. #include <algorithm>
  30. #include <cassert>
  31. #include <optional>
  32. #include <utility>
  33. using namespace llvm;
  34. #define DEBUG_TYPE "vplan-slp"
  35. // Number of levels to look ahead when re-ordering multi node operands.
  36. static unsigned LookaheadMaxDepth = 5;
  37. VPInstruction *VPlanSlp::markFailed() {
  38. // FIXME: Currently this is used to signal we hit instructions we cannot
  39. // trivially SLP'ize.
  40. CompletelySLP = false;
  41. return nullptr;
  42. }
  43. void VPlanSlp::addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New) {
  44. if (all_of(Operands, [](VPValue *V) {
  45. return cast<VPInstruction>(V)->getUnderlyingInstr();
  46. })) {
  47. unsigned BundleSize = 0;
  48. for (VPValue *V : Operands) {
  49. Type *T = cast<VPInstruction>(V)->getUnderlyingInstr()->getType();
  50. assert(!T->isVectorTy() && "Only scalar types supported for now");
  51. BundleSize += T->getScalarSizeInBits();
  52. }
  53. WidestBundleBits = std::max(WidestBundleBits, BundleSize);
  54. }
  55. auto Res = BundleToCombined.try_emplace(to_vector<4>(Operands), New);
  56. assert(Res.second &&
  57. "Already created a combined instruction for the operand bundle");
  58. (void)Res;
  59. }
  60. bool VPlanSlp::areVectorizable(ArrayRef<VPValue *> Operands) const {
  61. // Currently we only support VPInstructions.
  62. if (!all_of(Operands, [](VPValue *Op) {
  63. return Op && isa<VPInstruction>(Op) &&
  64. cast<VPInstruction>(Op)->getUnderlyingInstr();
  65. })) {
  66. LLVM_DEBUG(dbgs() << "VPSLP: not all operands are VPInstructions\n");
  67. return false;
  68. }
  69. // Check if opcodes and type width agree for all instructions in the bundle.
  70. // FIXME: Differing widths/opcodes can be handled by inserting additional
  71. // instructions.
  72. // FIXME: Deal with non-primitive types.
  73. const Instruction *OriginalInstr =
  74. cast<VPInstruction>(Operands[0])->getUnderlyingInstr();
  75. unsigned Opcode = OriginalInstr->getOpcode();
  76. unsigned Width = OriginalInstr->getType()->getPrimitiveSizeInBits();
  77. if (!all_of(Operands, [Opcode, Width](VPValue *Op) {
  78. const Instruction *I = cast<VPInstruction>(Op)->getUnderlyingInstr();
  79. return I->getOpcode() == Opcode &&
  80. I->getType()->getPrimitiveSizeInBits() == Width;
  81. })) {
  82. LLVM_DEBUG(dbgs() << "VPSLP: Opcodes do not agree \n");
  83. return false;
  84. }
  85. // For now, all operands must be defined in the same BB.
  86. if (any_of(Operands, [this](VPValue *Op) {
  87. return cast<VPInstruction>(Op)->getParent() != &this->BB;
  88. })) {
  89. LLVM_DEBUG(dbgs() << "VPSLP: operands in different BBs\n");
  90. return false;
  91. }
  92. if (any_of(Operands,
  93. [](VPValue *Op) { return Op->hasMoreThanOneUniqueUser(); })) {
  94. LLVM_DEBUG(dbgs() << "VPSLP: Some operands have multiple users.\n");
  95. return false;
  96. }
  97. // For loads, check that there are no instructions writing to memory in
  98. // between them.
  99. // TODO: we only have to forbid instructions writing to memory that could
  100. // interfere with any of the loads in the bundle
  101. if (Opcode == Instruction::Load) {
  102. unsigned LoadsSeen = 0;
  103. VPBasicBlock *Parent = cast<VPInstruction>(Operands[0])->getParent();
  104. for (auto &I : *Parent) {
  105. auto *VPI = dyn_cast<VPInstruction>(&I);
  106. if (!VPI)
  107. break;
  108. if (VPI->getOpcode() == Instruction::Load &&
  109. llvm::is_contained(Operands, VPI))
  110. LoadsSeen++;
  111. if (LoadsSeen == Operands.size())
  112. break;
  113. if (LoadsSeen > 0 && VPI->mayWriteToMemory()) {
  114. LLVM_DEBUG(
  115. dbgs() << "VPSLP: instruction modifying memory between loads\n");
  116. return false;
  117. }
  118. }
  119. if (!all_of(Operands, [](VPValue *Op) {
  120. return cast<LoadInst>(cast<VPInstruction>(Op)->getUnderlyingInstr())
  121. ->isSimple();
  122. })) {
  123. LLVM_DEBUG(dbgs() << "VPSLP: only simple loads are supported.\n");
  124. return false;
  125. }
  126. }
  127. if (Opcode == Instruction::Store)
  128. if (!all_of(Operands, [](VPValue *Op) {
  129. return cast<StoreInst>(cast<VPInstruction>(Op)->getUnderlyingInstr())
  130. ->isSimple();
  131. })) {
  132. LLVM_DEBUG(dbgs() << "VPSLP: only simple stores are supported.\n");
  133. return false;
  134. }
  135. return true;
  136. }
  137. static SmallVector<VPValue *, 4> getOperands(ArrayRef<VPValue *> Values,
  138. unsigned OperandIndex) {
  139. SmallVector<VPValue *, 4> Operands;
  140. for (VPValue *V : Values) {
  141. // Currently we only support VPInstructions.
  142. auto *U = cast<VPInstruction>(V);
  143. Operands.push_back(U->getOperand(OperandIndex));
  144. }
  145. return Operands;
  146. }
  147. static bool areCommutative(ArrayRef<VPValue *> Values) {
  148. return Instruction::isCommutative(
  149. cast<VPInstruction>(Values[0])->getOpcode());
  150. }
  151. static SmallVector<SmallVector<VPValue *, 4>, 4>
  152. getOperands(ArrayRef<VPValue *> Values) {
  153. SmallVector<SmallVector<VPValue *, 4>, 4> Result;
  154. auto *VPI = cast<VPInstruction>(Values[0]);
  155. switch (VPI->getOpcode()) {
  156. case Instruction::Load:
  157. llvm_unreachable("Loads terminate a tree, no need to get operands");
  158. case Instruction::Store:
  159. Result.push_back(getOperands(Values, 0));
  160. break;
  161. default:
  162. for (unsigned I = 0, NumOps = VPI->getNumOperands(); I < NumOps; ++I)
  163. Result.push_back(getOperands(Values, I));
  164. break;
  165. }
  166. return Result;
  167. }
  168. /// Returns the opcode of Values or ~0 if they do not all agree.
  169. static std::optional<unsigned> getOpcode(ArrayRef<VPValue *> Values) {
  170. unsigned Opcode = cast<VPInstruction>(Values[0])->getOpcode();
  171. if (any_of(Values, [Opcode](VPValue *V) {
  172. return cast<VPInstruction>(V)->getOpcode() != Opcode;
  173. }))
  174. return std::nullopt;
  175. return {Opcode};
  176. }
  177. /// Returns true if A and B access sequential memory if they are loads or
  178. /// stores or if they have identical opcodes otherwise.
  179. static bool areConsecutiveOrMatch(VPInstruction *A, VPInstruction *B,
  180. VPInterleavedAccessInfo &IAI) {
  181. if (A->getOpcode() != B->getOpcode())
  182. return false;
  183. if (A->getOpcode() != Instruction::Load &&
  184. A->getOpcode() != Instruction::Store)
  185. return true;
  186. auto *GA = IAI.getInterleaveGroup(A);
  187. auto *GB = IAI.getInterleaveGroup(B);
  188. return GA && GB && GA == GB && GA->getIndex(A) + 1 == GB->getIndex(B);
  189. }
  190. /// Implements getLAScore from Listing 7 in the paper.
  191. /// Traverses and compares operands of V1 and V2 to MaxLevel.
  192. static unsigned getLAScore(VPValue *V1, VPValue *V2, unsigned MaxLevel,
  193. VPInterleavedAccessInfo &IAI) {
  194. auto *I1 = dyn_cast<VPInstruction>(V1);
  195. auto *I2 = dyn_cast<VPInstruction>(V2);
  196. // Currently we only support VPInstructions.
  197. if (!I1 || !I2)
  198. return 0;
  199. if (MaxLevel == 0)
  200. return (unsigned)areConsecutiveOrMatch(I1, I2, IAI);
  201. unsigned Score = 0;
  202. for (unsigned I = 0, EV1 = I1->getNumOperands(); I < EV1; ++I)
  203. for (unsigned J = 0, EV2 = I2->getNumOperands(); J < EV2; ++J)
  204. Score +=
  205. getLAScore(I1->getOperand(I), I2->getOperand(J), MaxLevel - 1, IAI);
  206. return Score;
  207. }
  208. std::pair<VPlanSlp::OpMode, VPValue *>
  209. VPlanSlp::getBest(OpMode Mode, VPValue *Last,
  210. SmallPtrSetImpl<VPValue *> &Candidates,
  211. VPInterleavedAccessInfo &IAI) {
  212. assert((Mode == OpMode::Load || Mode == OpMode::Opcode) &&
  213. "Currently we only handle load and commutative opcodes");
  214. LLVM_DEBUG(dbgs() << " getBest\n");
  215. SmallVector<VPValue *, 4> BestCandidates;
  216. LLVM_DEBUG(dbgs() << " Candidates for "
  217. << *cast<VPInstruction>(Last)->getUnderlyingInstr() << " ");
  218. for (auto *Candidate : Candidates) {
  219. auto *LastI = cast<VPInstruction>(Last);
  220. auto *CandidateI = cast<VPInstruction>(Candidate);
  221. if (areConsecutiveOrMatch(LastI, CandidateI, IAI)) {
  222. LLVM_DEBUG(dbgs() << *cast<VPInstruction>(Candidate)->getUnderlyingInstr()
  223. << " ");
  224. BestCandidates.push_back(Candidate);
  225. }
  226. }
  227. LLVM_DEBUG(dbgs() << "\n");
  228. if (BestCandidates.empty())
  229. return {OpMode::Failed, nullptr};
  230. if (BestCandidates.size() == 1)
  231. return {Mode, BestCandidates[0]};
  232. VPValue *Best = nullptr;
  233. unsigned BestScore = 0;
  234. for (unsigned Depth = 1; Depth < LookaheadMaxDepth; Depth++) {
  235. unsigned PrevScore = ~0u;
  236. bool AllSame = true;
  237. // FIXME: Avoid visiting the same operands multiple times.
  238. for (auto *Candidate : BestCandidates) {
  239. unsigned Score = getLAScore(Last, Candidate, Depth, IAI);
  240. if (PrevScore == ~0u)
  241. PrevScore = Score;
  242. if (PrevScore != Score)
  243. AllSame = false;
  244. PrevScore = Score;
  245. if (Score > BestScore) {
  246. BestScore = Score;
  247. Best = Candidate;
  248. }
  249. }
  250. if (!AllSame)
  251. break;
  252. }
  253. LLVM_DEBUG(dbgs() << "Found best "
  254. << *cast<VPInstruction>(Best)->getUnderlyingInstr()
  255. << "\n");
  256. Candidates.erase(Best);
  257. return {Mode, Best};
  258. }
  259. SmallVector<VPlanSlp::MultiNodeOpTy, 4> VPlanSlp::reorderMultiNodeOps() {
  260. SmallVector<MultiNodeOpTy, 4> FinalOrder;
  261. SmallVector<OpMode, 4> Mode;
  262. FinalOrder.reserve(MultiNodeOps.size());
  263. Mode.reserve(MultiNodeOps.size());
  264. LLVM_DEBUG(dbgs() << "Reordering multinode\n");
  265. for (auto &Operands : MultiNodeOps) {
  266. FinalOrder.push_back({Operands.first, {Operands.second[0]}});
  267. if (cast<VPInstruction>(Operands.second[0])->getOpcode() ==
  268. Instruction::Load)
  269. Mode.push_back(OpMode::Load);
  270. else
  271. Mode.push_back(OpMode::Opcode);
  272. }
  273. for (unsigned Lane = 1, E = MultiNodeOps[0].second.size(); Lane < E; ++Lane) {
  274. LLVM_DEBUG(dbgs() << " Finding best value for lane " << Lane << "\n");
  275. SmallPtrSet<VPValue *, 4> Candidates;
  276. LLVM_DEBUG(dbgs() << " Candidates ");
  277. for (auto Ops : MultiNodeOps) {
  278. LLVM_DEBUG(
  279. dbgs() << *cast<VPInstruction>(Ops.second[Lane])->getUnderlyingInstr()
  280. << " ");
  281. Candidates.insert(Ops.second[Lane]);
  282. }
  283. LLVM_DEBUG(dbgs() << "\n");
  284. for (unsigned Op = 0, E = MultiNodeOps.size(); Op < E; ++Op) {
  285. LLVM_DEBUG(dbgs() << " Checking " << Op << "\n");
  286. if (Mode[Op] == OpMode::Failed)
  287. continue;
  288. VPValue *Last = FinalOrder[Op].second[Lane - 1];
  289. std::pair<OpMode, VPValue *> Res =
  290. getBest(Mode[Op], Last, Candidates, IAI);
  291. if (Res.second)
  292. FinalOrder[Op].second.push_back(Res.second);
  293. else
  294. // TODO: handle this case
  295. FinalOrder[Op].second.push_back(markFailed());
  296. }
  297. }
  298. return FinalOrder;
  299. }
  300. #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  301. void VPlanSlp::dumpBundle(ArrayRef<VPValue *> Values) {
  302. dbgs() << " Ops: ";
  303. for (auto *Op : Values) {
  304. if (auto *VPInstr = cast_or_null<VPInstruction>(Op))
  305. if (auto *Instr = VPInstr->getUnderlyingInstr()) {
  306. dbgs() << *Instr << " | ";
  307. continue;
  308. }
  309. dbgs() << " nullptr | ";
  310. }
  311. dbgs() << "\n";
  312. }
  313. #endif
  314. VPInstruction *VPlanSlp::buildGraph(ArrayRef<VPValue *> Values) {
  315. assert(!Values.empty() && "Need some operands!");
  316. // If we already visited this instruction bundle, re-use the existing node
  317. auto I = BundleToCombined.find(to_vector<4>(Values));
  318. if (I != BundleToCombined.end()) {
  319. #ifndef NDEBUG
  320. // Check that the resulting graph is a tree. If we re-use a node, this means
  321. // its values have multiple users. We only allow this, if all users of each
  322. // value are the same instruction.
  323. for (auto *V : Values) {
  324. auto UI = V->user_begin();
  325. auto *FirstUser = *UI++;
  326. while (UI != V->user_end()) {
  327. assert(*UI == FirstUser && "Currently we only support SLP trees.");
  328. UI++;
  329. }
  330. }
  331. #endif
  332. return I->second;
  333. }
  334. // Dump inputs
  335. LLVM_DEBUG({
  336. dbgs() << "buildGraph: ";
  337. dumpBundle(Values);
  338. });
  339. if (!areVectorizable(Values))
  340. return markFailed();
  341. assert(getOpcode(Values) && "Opcodes for all values must match");
  342. unsigned ValuesOpcode = *getOpcode(Values);
  343. SmallVector<VPValue *, 4> CombinedOperands;
  344. if (areCommutative(Values)) {
  345. bool MultiNodeRoot = !MultiNodeActive;
  346. MultiNodeActive = true;
  347. for (auto &Operands : getOperands(Values)) {
  348. LLVM_DEBUG({
  349. dbgs() << " Visiting Commutative";
  350. dumpBundle(Operands);
  351. });
  352. auto OperandsOpcode = getOpcode(Operands);
  353. if (OperandsOpcode && OperandsOpcode == getOpcode(Values)) {
  354. LLVM_DEBUG(dbgs() << " Same opcode, continue building\n");
  355. CombinedOperands.push_back(buildGraph(Operands));
  356. } else {
  357. LLVM_DEBUG(dbgs() << " Adding multinode Ops\n");
  358. // Create dummy VPInstruction, which will we replace later by the
  359. // re-ordered operand.
  360. VPInstruction *Op = new VPInstruction(0, {});
  361. CombinedOperands.push_back(Op);
  362. MultiNodeOps.emplace_back(Op, Operands);
  363. }
  364. }
  365. if (MultiNodeRoot) {
  366. LLVM_DEBUG(dbgs() << "Reorder \n");
  367. MultiNodeActive = false;
  368. auto FinalOrder = reorderMultiNodeOps();
  369. MultiNodeOps.clear();
  370. for (auto &Ops : FinalOrder) {
  371. VPInstruction *NewOp = buildGraph(Ops.second);
  372. Ops.first->replaceAllUsesWith(NewOp);
  373. for (unsigned i = 0; i < CombinedOperands.size(); i++)
  374. if (CombinedOperands[i] == Ops.first)
  375. CombinedOperands[i] = NewOp;
  376. delete Ops.first;
  377. Ops.first = NewOp;
  378. }
  379. LLVM_DEBUG(dbgs() << "Found final order\n");
  380. }
  381. } else {
  382. LLVM_DEBUG(dbgs() << " NonCommuntative\n");
  383. if (ValuesOpcode == Instruction::Load)
  384. for (VPValue *V : Values)
  385. CombinedOperands.push_back(cast<VPInstruction>(V)->getOperand(0));
  386. else
  387. for (auto &Operands : getOperands(Values))
  388. CombinedOperands.push_back(buildGraph(Operands));
  389. }
  390. unsigned Opcode;
  391. switch (ValuesOpcode) {
  392. case Instruction::Load:
  393. Opcode = VPInstruction::SLPLoad;
  394. break;
  395. case Instruction::Store:
  396. Opcode = VPInstruction::SLPStore;
  397. break;
  398. default:
  399. Opcode = ValuesOpcode;
  400. break;
  401. }
  402. if (!CompletelySLP)
  403. return markFailed();
  404. assert(CombinedOperands.size() > 0 && "Need more some operands");
  405. auto *Inst = cast<VPInstruction>(Values[0])->getUnderlyingInstr();
  406. auto *VPI = new VPInstruction(Opcode, CombinedOperands, Inst->getDebugLoc());
  407. VPI->setUnderlyingInstr(Inst);
  408. LLVM_DEBUG(dbgs() << "Create VPInstruction " << *VPI << " "
  409. << *cast<VPInstruction>(Values[0]) << "\n");
  410. addCombined(Values, VPI);
  411. return VPI;
  412. }