//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//
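
// The transformation, sketched on a hypothetical example: a vector add such as
//   %res = add <2 x i32> %a, %b
// is rewritten into per-lane scalar operations, e.g.
//   %a.i0 = extractelement <2 x i32> %a, i32 0
//   %b.i0 = extractelement <2 x i32> %b, i32 0
//   %res.i0 = add i32 %a.i0, %b.i0
// (and likewise for lane 1), with the full vector rebuilt from insertelements
// only where a vector result is still needed (see finish() below).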

#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

static cl::opt<bool> ClScalarizeVariableInsertExtract(
    "scalarize-variable-insert-extract", cl::init(true), cl::Hidden,
    cl::desc("Allow the scalarizer pass to scalarize "
             "insertelement/extractelement with variable index"));

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool> ClScalarizeLoadStore(
    "scalarize-load-store", cl::init(false), cl::Hidden,
    cl::desc("Allow the scalarizer pass to scalarize loads and stores"));

namespace {

BasicBlock::iterator skipPastPhiNodesAndDbg(BasicBlock::iterator Itr) {
  BasicBlock *BB = Itr->getParent();
  if (isa<PHINode>(Itr))
    Itr = BB->getFirstInsertionPt();
  if (Itr != BB->end())
    Itr = skipDebugIntrinsics(Itr);
  return Itr;
}

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value and associated type to its scattered form.
// The associated type is only non-null for pointer values that are "scattered"
// when used as pointer operands to load or store.
//
// We use std::map because we want iterators to persist across insertion and
// because the values are relatively large.
using ScatterMap = std::map<std::pair<Value *, Type *>, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, Type *PtrElemTy,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  Type *PtrElemTy;
  ValueVector *CachePtr;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  Align getElemAlign(unsigned I) {
    return commonAlignment(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  FixedVectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  Align VecAlign;

  // The size of each element.
  uint64_t ElemSize = 0;
};
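
// Returns the command-line value if the option was explicitly set, otherwise
// DefaultOverride if one was supplied, otherwise the option's built-in
// default.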
template <typename T>
T getWithDefaultOverride(const cl::opt<T> &ClOption,
                         const std::optional<T> &DefaultOverride) {
  return ClOption.getNumOccurrences() ? ClOption
                                      : DefaultOverride.value_or(ClOption);
}

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind, DominatorTree *DT,
                    ScalarizerPassOptions Options)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind), DT(DT),
        ScalarizeVariableInsertExtract(
            getWithDefaultOverride(ClScalarizeVariableInsertExtract,
                                   Options.ScalarizeVariableInsertExtract)),
        ScalarizeLoadStore(getWithDefaultOverride(
            ClScalarizeLoadStore, Options.ScalarizeLoadStore)) {}

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitInsertElementInst(InsertElementInst &IEI);
  bool visitExtractElementInst(ExtractElementInst &EEI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &ICI);

private:
  Scatterer scatter(Instruction *Point, Value *V, Type *PtrElemTy = nullptr);
  void gather(Instruction *Op, const ValueVector &CV);
  void replaceUses(Instruction *Op, Value *CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  std::optional<VectorLayout> getVectorLayout(Type *Ty, Align Alignment,
                                              const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;
  bool Scalarized;

  SmallVector<WeakTrackingVH, 32> PotentiallyDeadInstrs;

  unsigned ParallelLoopAccessMDKind;

  DominatorTree *DT;

  const bool ScalarizeVariableInsertExtract;
  const bool ScalarizeLoadStore;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     Type *PtrElemTy, ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), PtrElemTy(PtrElemTy), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  if (Ty->isPointerTy()) {
    assert(cast<PointerType>(Ty)->isOpaqueOrPointeeTypeMatches(PtrElemTy) &&
           "Pointer element type mismatch");
    Ty = PtrElemTy;
  }
  Size = cast<FixedVectorType>(Ty)->getNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
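// As an illustrative sketch (hypothetical names): if V is the result of
//   %v1 = insertelement <2 x i32> %v0, i32 %x, i32 0
// then component 0 is %x itself, so no extractelement is created for it;
// only components not found on the insertelement chain are materialized.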
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrElemTy) {
    Type *VectorElemTy = cast<VectorType>(PtrElemTy)->getElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(
          VectorElemTy, V->getType()->getPointerAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(VectorElemTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT, ScalarizerPassOptions());
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  Scalarized = false;

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
                                     Type *PtrElemTy) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, PtrElemTy, &Scattered[{V, PtrElemTy}]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // When scalarizing PHI nodes we might try to examine/rewrite InsertElement
    // nodes in predecessors. If those predecessors are unreachable from entry,
    // then the IR in those blocks could have unexpected properties resulting
    // in infinite loops in Scatterer::operator[]. By simply treating values
    // originating from instructions in unreachable blocks as undef we do not
    // need to analyse them further.
    if (!DT->isReachableFromEntry(VOp->getParent()))
      return Scatterer(Point->getParent(), Point->getIterator(),
                       PoisonValue::get(V->getType()), PtrElemTy);
    // Put the scattered form of an instruction directly after the
    // instruction, skipping over PHI nodes and debug intrinsics.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(
        BB, skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V,
        PtrElemTy, &Scattered[{V, PtrElemTy}]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V, PtrElemTy);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[{Op, nullptr}];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr || SV[I] == CV[I])
        continue;
      Instruction *Old = cast<Instruction>(V);
      if (isa<Instruction>(CV[I]))
        CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      PotentiallyDeadInstrs.emplace_back(Old);
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Replace Op with CV and collect Op as a potentially dead instruction.
void ScalarizerVisitor::replaceUses(Instruction *Op, Value *CV) {
  if (CV != Op) {
    Op->replaceAllUsesWith(CV);
    PotentiallyDeadInstrs.emplace_back(Op);
    Scalarized = true;
  }
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning the layout on success and
// std::nullopt on failure. Alignment is the alignment of the vector access.
std::optional<VectorLayout>
ScalarizerVisitor::getVectorLayout(Type *Ty, Align Alignment,
                                   const DataLayout &DL) {
  VectorLayout Layout;
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<FixedVectorType>(Ty);
  if (!Layout.VecTy)
    return std::nullopt;
  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return std::nullopt;
  Layout.VecAlign = Alignment;
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return Layout;
}

// Scalarize one-operand instruction I, using Split(Builder, X, Name)
// to create an instruction like I with operand X and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
  auto *VT = dyn_cast<FixedVectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op = scatter(&I, I.getOperand(0));
  assert(Op.size() == NumElems && "Mismatched unary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  auto *VT = dyn_cast<FixedVectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer VOp0 = scatter(&I, I.getOperand(0));
  Scatterer VOp1 = scatter(&I, I.getOperand(1));
  assert(VOp0.size() == NumElems && "Mismatched binary operation");
  assert(VOp1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    Value *Op0 = VOp0[Elem];
    Value *Op1 = VOp1[Elem];
    Res[Elem] = Split(Builder, Op0, Op1, I.getName() + ".i" + Twine(Elem));
  }
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               ArrayRef<Type*> Tys) {
  return Intrinsic::getDeclaration(M, ID, Tys);
}

/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element, if possible for that intrinsic.
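///
/// For example (an illustrative sketch, not from the original comments),
///   %r = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %x)
/// becomes two calls to @llvm.sqrt.f32, one per lane.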
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  auto *VT = dyn_cast<FixedVectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.arg_size();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);
  Scattered.resize(NumArgs);

  SmallVector<llvm::Type *, 3> Tys;
  Tys.push_back(VT->getScalarType());

  // Assumes that any vector type has the same number of elements as the return
  // vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
      if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
        Tys.push_back(OpI->getType()->getScalarType());
    } else {
      ScalarOperands[I] = OpI;
      if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
        Tys.push_back(OpI->getType());
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, Tys);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (isVectorIntrinsicWithScalarOpAtArg(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  auto *VT = dyn_cast<FixedVectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VOp1 = scatter(&SI, SI.getOperand(1));
  Scatterer VOp2 = scatter(&SI, SI.getOperand(2));
  assert(VOp1.size() == NumElems && "Mismatched select");
  assert(VOp2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer VOp0 = scatter(&SI, SI.getOperand(0));
    assert(VOp0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op0 = VOp0[I];
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  return splitUnary(UO, UnarySplitter(UO));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}
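
// Illustrative sketch of the GEP case below (hypothetical names): a vector
// GEP such as
//   %p = getelementptr i32, <2 x ptr> %bases, <2 x i64> %offsets
// is split into one scalar GEP per lane; a scalar base pointer or scalar
// index is first splatted so that each lane has its own copy to draw from.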
bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  auto *VT = dyn_cast<FixedVectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those
  // cases, splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  auto *VT = dyn_cast<FixedVectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}
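
// Illustrative sketch of the mismatched-element-count cases below
// (hypothetical types): bitcasting <2 x i64> to <4 x i32> converts each i64
// lane to a <2 x i32> mid-vector and copies its elements into the result, a
// "fan-out" of FanOut = 2; the opposite direction gathers FanIn source lanes
// into a mid-vector and bitcasts it to one destination element.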
bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  auto *DstVT = dyn_cast<FixedVectorType>(BCI.getDestTy());
  auto *SrcVT = dyn_cast<FixedVectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    auto *MidTy = FixedVectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    auto *MidTy = FixedVectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = PoisonValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}
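
// Sketch of the variable-index lowering used below (hypothetical names):
// inserting %val at a runtime index %idx into <2 x i32> %v becomes, per
// lane I,
//   %idx.is.I = icmp eq i32 %idx, I
//   %v.iI     = select i1 %idx.is.I, i32 %val, i32 <old lane I>
// so every lane conditionally takes the new value.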
bool ScalarizerVisitor::visitInsertElementInst(InsertElementInst &IEI) {
  auto *VT = dyn_cast<FixedVectorType>(IEI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&IEI);
  Scatterer Op0 = scatter(&IEI, IEI.getOperand(0));
  Value *NewElt = IEI.getOperand(1);
  Value *InsIdx = IEI.getOperand(2);

  ValueVector Res;
  Res.resize(NumElems);

  if (auto *CI = dyn_cast<ConstantInt>(InsIdx)) {
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = CI->getValue().getZExtValue() == I ? NewElt : Op0[I];
  } else {
    if (!ScalarizeVariableInsertExtract)
      return false;

    for (unsigned I = 0; I < NumElems; ++I) {
      Value *ShouldReplace =
          Builder.CreateICmpEQ(InsIdx, ConstantInt::get(InsIdx->getType(), I),
                               InsIdx->getName() + ".is." + Twine(I));
      Value *OldElt = Op0[I];
      Res[I] = Builder.CreateSelect(ShouldReplace, NewElt, OldElt,
                                    IEI.getName() + ".i" + Twine(I));
    }
  }

  gather(&IEI, Res);
  return true;
}

bool ScalarizerVisitor::visitExtractElementInst(ExtractElementInst &EEI) {
  auto *VT = dyn_cast<FixedVectorType>(EEI.getOperand(0)->getType());
  if (!VT)
    return false;

  unsigned NumSrcElems = VT->getNumElements();
  IRBuilder<> Builder(&EEI);
  Scatterer Op0 = scatter(&EEI, EEI.getOperand(0));
  Value *ExtIdx = EEI.getOperand(1);

  if (auto *CI = dyn_cast<ConstantInt>(ExtIdx)) {
    Value *Res = Op0[CI->getValue().getZExtValue()];
    replaceUses(&EEI, Res);
    return true;
  }

  if (!ScalarizeVariableInsertExtract)
    return false;

  Value *Res = PoisonValue::get(VT->getElementType());
  for (unsigned I = 0; I < NumSrcElems; ++I) {
    Value *ShouldExtract =
        Builder.CreateICmpEQ(ExtIdx, ConstantInt::get(ExtIdx->getType(), I),
                             ExtIdx->getName() + ".is." + Twine(I));
    Value *Elt = Op0[I];
    Res = Builder.CreateSelect(ShouldExtract, Elt, Res,
                               EEI.getName() + ".upto" + Twine(I));
  }
  replaceUses(&EEI, Res);
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  auto *VT = dyn_cast<FixedVectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  auto *VT = dyn_cast<FixedVectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}
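
// Sketch of the load scalarization below (hypothetical names; only active
// when load/store scalarization is enabled): a simple
//   %v = load <2 x i32>, ptr %p, align 8
// becomes one scalar load per element at successive offsets from %p, with
// each lane's alignment derived from the vector alignment via
// VectorLayout::getElemAlign.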
bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  std::optional<VectorLayout> Layout = getVectorLayout(
      LI.getType(), LI.getAlign(), LI.getModule()->getDataLayout());
  if (!Layout)
    return false;

  unsigned NumElems = Layout->VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand(), LI.getType());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout->VecTy->getElementType(), Ptr[I],
                                       Layout->getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  Value *FullValue = SI.getValueOperand();
  std::optional<VectorLayout> Layout = getVectorLayout(
      FullValue->getType(), SI.getAlign(), SI.getModule()->getDataLayout());
  if (!Layout)
    return false;

  unsigned NumElems = Layout->VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VPtr = scatter(&SI, SI.getPointerOperand(), FullValue->getType());
  Scatterer VVal = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    Value *Val = VVal[I];
    Value *Ptr = VPtr[I];
    Stores[I] = Builder.CreateAlignedStore(Val, Ptr, Layout->getElemAlign(I));
  }
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty() && !Scalarized)
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Value *Res = PoisonValue::get(Op->getType());
      if (auto *Ty = dyn_cast<FixedVectorType>(Op->getType())) {
        BasicBlock *BB = Op->getParent();
        unsigned Count = Ty->getNumElements();
        IRBuilder<> Builder(Op);
        if (isa<PHINode>(Op))
          Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
        for (unsigned I = 0; I < Count; ++I)
          Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                            Op->getName() + ".upto" + Twine(I));
        Res->takeName(Op);
      } else {
        assert(CV.size() == 1 && Op->getType() == CV[0]->getType());
        Res = CV[0];
        if (Op == Res)
          continue;
      }
      Op->replaceAllUsesWith(Res);
    }
    PotentiallyDeadInstrs.emplace_back(Op);
  }
  Gathered.clear();
  Scattered.clear();
  Scalarized = false;

  RecursivelyDeleteTriviallyDeadInstructionsPermissive(PotentiallyDeadInstrs);

  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F, FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT, Options);
  bool Changed = Impl.visit(F);
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();
}