LoopVectorizationLegality.cpp
  1. //===- LoopVectorizationLegality.cpp --------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file provides loop vectorization legality analysis. Original code
  10. // resided in LoopVectorize.cpp for a long time.
  11. //
  12. // At this point, it is implemented as a utility class, not as an analysis
  13. // pass. It should be easy to create an analysis pass around it if there
  14. // is a need (but D45420 needs to happen first).
  15. //
  16. #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
  17. #include "llvm/Analysis/Loads.h"
  18. #include "llvm/Analysis/LoopInfo.h"
  19. #include "llvm/Analysis/TargetLibraryInfo.h"
  20. #include "llvm/Analysis/ValueTracking.h"
  21. #include "llvm/Analysis/VectorUtils.h"
  22. #include "llvm/IR/IntrinsicInst.h"
  23. #include "llvm/IR/PatternMatch.h"
  24. #include "llvm/Transforms/Utils/SizeOpts.h"
  25. #include "llvm/Transforms/Vectorize/LoopVectorize.h"
  26. using namespace llvm;
  27. using namespace PatternMatch;
  28. #define LV_NAME "loop-vectorize"
  29. #define DEBUG_TYPE LV_NAME
  30. extern cl::opt<bool> EnableVPlanPredication;
  31. static cl::opt<bool>
  32. EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
  33. cl::desc("Enable if-conversion during vectorization."));
  34. namespace llvm {
  35. cl::opt<bool>
  36. HintsAllowReordering("hints-allow-reordering", cl::init(true), cl::Hidden,
  37. cl::desc("Allow enabling loop hints to reorder "
  38. "FP operations during vectorization."));
  39. }
  40. // TODO: Move size-based thresholds out of legality checking, make cost-based
  41. // decisions instead of hard thresholds.
  42. static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
  43. "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
  44. cl::desc("The maximum number of SCEV checks allowed."));
  45. static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
  46. "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
  47. cl::desc("The maximum number of SCEV checks allowed with a "
  48. "vectorize(enable) pragma"));
  49. static cl::opt<LoopVectorizeHints::ScalableForceKind>
  50. ForceScalableVectorization(
  51. "scalable-vectorization", cl::init(LoopVectorizeHints::SK_Unspecified),
  52. cl::Hidden,
  53. cl::desc("Control whether the compiler can use scalable vectors to "
  54. "vectorize a loop"),
  55. cl::values(
  56. clEnumValN(LoopVectorizeHints::SK_FixedWidthOnly, "off",
  57. "Scalable vectorization is disabled."),
  58. clEnumValN(
  59. LoopVectorizeHints::SK_PreferScalable, "preferred",
  60. "Scalable vectorization is available and favored when the "
  61. "cost is inconclusive."),
  62. clEnumValN(
  63. LoopVectorizeHints::SK_PreferScalable, "on",
  64. "Scalable vectorization is available and favored when the "
  65. "cost is inconclusive.")));
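// Illustrative usage note (not part of the original source): running opt with
// '-scalable-vectorization=on' (or '-mllvm -scalable-vectorization=on' from a
// front end) selects SK_PreferScalable and, as handled in the
// LoopVectorizeHints constructor below, overrides any
// llvm.loop.vectorize.scalable.enable metadata already present on the loop.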
  66. /// Maximum vectorization interleave count.
  67. static const unsigned MaxInterleaveFactor = 16;
  68. namespace llvm {
  69. bool LoopVectorizeHints::Hint::validate(unsigned Val) {
  70. switch (Kind) {
  71. case HK_WIDTH:
  72. return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
  73. case HK_INTERLEAVE:
  74. return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
  75. case HK_FORCE:
  76. return (Val <= 1);
  77. case HK_ISVECTORIZED:
  78. case HK_PREDICATE:
  79. case HK_SCALABLE:
  80. return (Val == 0 || Val == 1);
  81. }
  82. return false;
  83. }
  84. LoopVectorizeHints::LoopVectorizeHints(const Loop *L,
  85. bool InterleaveOnlyWhenForced,
  86. OptimizationRemarkEmitter &ORE,
  87. const TargetTransformInfo *TTI)
  88. : Width("vectorize.width", VectorizerParams::VectorizationFactor, HK_WIDTH),
  89. Interleave("interleave.count", InterleaveOnlyWhenForced, HK_INTERLEAVE),
  90. Force("vectorize.enable", FK_Undefined, HK_FORCE),
  91. IsVectorized("isvectorized", 0, HK_ISVECTORIZED),
  92. Predicate("vectorize.predicate.enable", FK_Undefined, HK_PREDICATE),
  93. Scalable("vectorize.scalable.enable", SK_Unspecified, HK_SCALABLE),
  94. TheLoop(L), ORE(ORE) {
  95. // Populate values with existing loop metadata.
  96. getHintsFromMetadata();
  97. // force-vector-interleave overrides DisableInterleaving.
  98. if (VectorizerParams::isInterleaveForced())
  99. Interleave.Value = VectorizerParams::VectorizationInterleave;
  100. // If the metadata doesn't explicitly specify whether to enable scalable
  101. // vectorization, then decide based on the following criteria (increasing
  102. // level of priority):
  103. // - Target default
  104. // - Metadata width
  105. // - Force option (always overrides)
  106. if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified) {
  107. if (TTI)
  108. Scalable.Value = TTI->enableScalableVectorization() ? SK_PreferScalable
  109. : SK_FixedWidthOnly;
  110. if (Width.Value)
  111. // If the width is set, but the metadata says nothing about the scalable
  112. // property, then assume it concerns only a fixed-width UserVF.
  113. // If width is not set, the flag takes precedence.
  114. Scalable.Value = SK_FixedWidthOnly;
  115. }
  116. // If the flag is set to force any use of scalable vectors, override the loop
  117. // hints.
  118. if (ForceScalableVectorization.getValue() !=
  119. LoopVectorizeHints::SK_Unspecified)
  120. Scalable.Value = ForceScalableVectorization.getValue();
  121. // Scalable vectorization is disabled if no preference is specified.
  122. if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified)
  123. Scalable.Value = SK_FixedWidthOnly;
  124. if (IsVectorized.Value != 1)
  125. // If the vectorization width and interleaving count are both 1 then
  126. // consider the loop to have been already vectorized because there's
  127. // nothing more that we can do.
  128. IsVectorized.Value =
  129. getWidth() == ElementCount::getFixed(1) && getInterleave() == 1;
  130. LLVM_DEBUG(if (InterleaveOnlyWhenForced && getInterleave() == 1) dbgs()
  131. << "LV: Interleaving disabled by the pass manager\n");
  132. }
  133. void LoopVectorizeHints::setAlreadyVectorized() {
  134. LLVMContext &Context = TheLoop->getHeader()->getContext();
  135. MDNode *IsVectorizedMD = MDNode::get(
  136. Context,
  137. {MDString::get(Context, "llvm.loop.isvectorized"),
  138. ConstantAsMetadata::get(ConstantInt::get(Context, APInt(32, 1)))});
  139. MDNode *LoopID = TheLoop->getLoopID();
  140. MDNode *NewLoopID =
  141. makePostTransformationMetadata(Context, LoopID,
  142. {Twine(Prefix(), "vectorize.").str(),
  143. Twine(Prefix(), "interleave.").str()},
  144. {IsVectorizedMD});
  145. TheLoop->setLoopID(NewLoopID);
  146. // Update internal cache.
  147. IsVectorized.Value = 1;
  148. }
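// Illustrative example (not part of the original source): after this call the
// loop metadata takes roughly the following shape, with the previous
// llvm.loop.vectorize.* and llvm.loop.interleave.* hints dropped:
//   br i1 %exitcond, label %exit, label %header, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.isvectorized", i32 1}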
  149. bool LoopVectorizeHints::allowVectorization(
  150. Function *F, Loop *L, bool VectorizeOnlyWhenForced) const {
  151. if (getForce() == LoopVectorizeHints::FK_Disabled) {
  152. LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
  153. emitRemarkWithHints();
  154. return false;
  155. }
  156. if (VectorizeOnlyWhenForced && getForce() != LoopVectorizeHints::FK_Enabled) {
  157. LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
  158. emitRemarkWithHints();
  159. return false;
  160. }
  161. if (getIsVectorized() == 1) {
  162. LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
  163. // FIXME: Add interleave.disable metadata. This will allow
  164. // vectorize.disable to be used without disabling the pass, and allow errors
  165. // to differentiate between disabled vectorization and a width of 1.
  166. ORE.emit([&]() {
  167. return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
  168. "AllDisabled", L->getStartLoc(),
  169. L->getHeader())
  170. << "loop not vectorized: vectorization and interleaving are "
  171. "explicitly disabled, or the loop has already been "
  172. "vectorized";
  173. });
  174. return false;
  175. }
  176. return true;
  177. }
  178. void LoopVectorizeHints::emitRemarkWithHints() const {
  179. using namespace ore;
  180. ORE.emit([&]() {
  181. if (Force.Value == LoopVectorizeHints::FK_Disabled)
  182. return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
  183. TheLoop->getStartLoc(),
  184. TheLoop->getHeader())
  185. << "loop not vectorized: vectorization is explicitly disabled";
  186. else {
  187. OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
  188. TheLoop->getStartLoc(), TheLoop->getHeader());
  189. R << "loop not vectorized";
  190. if (Force.Value == LoopVectorizeHints::FK_Enabled) {
  191. R << " (Force=" << NV("Force", true);
  192. if (Width.Value != 0)
  193. R << ", Vector Width=" << NV("VectorWidth", getWidth());
  194. if (getInterleave() != 0)
  195. R << ", Interleave Count=" << NV("InterleaveCount", getInterleave());
  196. R << ")";
  197. }
  198. return R;
  199. }
  200. });
  201. }
  202. const char *LoopVectorizeHints::vectorizeAnalysisPassName() const {
  203. if (getWidth() == ElementCount::getFixed(1))
  204. return LV_NAME;
  205. if (getForce() == LoopVectorizeHints::FK_Disabled)
  206. return LV_NAME;
  207. if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth().isZero())
  208. return LV_NAME;
  209. return OptimizationRemarkAnalysis::AlwaysPrint;
  210. }
  211. bool LoopVectorizeHints::allowReordering() const {
  212. // Allow the vectorizer to change the order of operations if hints that
  213. // enable vectorization are provided.
  214. ElementCount EC = getWidth();
  215. return HintsAllowReordering &&
  216. (getForce() == LoopVectorizeHints::FK_Enabled ||
  217. EC.getKnownMinValue() > 1);
  218. }
  219. void LoopVectorizeHints::getHintsFromMetadata() {
  220. MDNode *LoopID = TheLoop->getLoopID();
  221. if (!LoopID)
  222. return;
  223. // First operand should refer to the loop id itself.
  224. assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  225. assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
  226. for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
  227. const MDString *S = nullptr;
  228. SmallVector<Metadata *, 4> Args;
  229. // The expected hint is either an MDString or an MDNode whose first
  230. // operand is an MDString.
  231. if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
  232. if (!MD || MD->getNumOperands() == 0)
  233. continue;
  234. S = dyn_cast<MDString>(MD->getOperand(0));
  235. for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
  236. Args.push_back(MD->getOperand(i));
  237. } else {
  238. S = dyn_cast<MDString>(LoopID->getOperand(i));
  239. assert(Args.size() == 0 && "too many arguments for MDString");
  240. }
  241. if (!S)
  242. continue;
  243. // Check if the hint starts with the loop metadata prefix.
  244. StringRef Name = S->getString();
  245. if (Args.size() == 1)
  246. setHint(Name, Args[0]);
  247. }
  248. }
  249. void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
  250. if (!Name.startswith(Prefix()))
  251. return;
  252. Name = Name.substr(Prefix().size(), StringRef::npos);
  253. const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
  254. if (!C)
  255. return;
  256. unsigned Val = C->getZExtValue();
  257. Hint *Hints[] = {&Width, &Interleave, &Force,
  258. &IsVectorized, &Predicate, &Scalable};
  259. for (auto H : Hints) {
  260. if (Name == H->Name) {
  261. if (H->validate(Val))
  262. H->Value = Val;
  263. else
  264. LLVM_DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
  265. break;
  266. }
  267. }
  268. }
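// Illustrative example (not part of the original source): a loop annotated
// with '#pragma clang loop vectorize_width(4)' carries metadata such as
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
// getHintsFromMetadata() hands the MDString and its argument to setHint(),
// which strips the "llvm.loop." prefix and matches the remaining name
// ("vectorize.width") against the hint table above.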
  269. // Return true if the inner loop \p Lp is uniform with regard to the outer loop
  270. // \p OuterLp (i.e., if the outer loop is vectorized, all the vector lanes
  271. // executing the inner loop will execute the same iterations). This check is
  272. // very constrained for now but it will be relaxed in the future. \p Lp is
  273. // considered uniform if it meets all the following conditions:
  274. // 1) it has a canonical IV (starting from 0 and with stride 1),
  275. // 2) its latch terminator is a conditional branch and,
  276. // 3) its latch condition is a compare instruction whose operands are the
  277. // canonical IV and an OuterLp invariant.
  278. // This check doesn't take into account the uniformity of other conditions not
  279. // related to the loop latch because they don't affect the loop uniformity.
  280. //
  281. // NOTE: We decided to keep all these checks and their associated documentation
  282. // together so that we can easily have a picture of the current supported loop
  283. // nests. However, some of the current checks don't depend on \p OuterLp and
  284. // would be redundantly executed for each \p Lp if we invoked this function for
  285. // different candidate outer loops. This is not the case for now because we
  286. // don't currently have the infrastructure to evaluate multiple candidate outer
  287. // loops and \p OuterLp will be a fixed parameter while we only support explicit
  288. // outer loop vectorization. It's also very likely that these checks go away
  289. // before introducing the aforementioned infrastructure. However, if this is not
  290. // the case, we should move the \p OuterLp independent checks to a separate
  291. // function that is only executed once for each \p Lp.
  292. static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
  293. assert(Lp->getLoopLatch() && "Expected loop with a single latch.");
  294. // If Lp is the outer loop, it's uniform by definition.
  295. if (Lp == OuterLp)
  296. return true;
  297. assert(OuterLp->contains(Lp) && "OuterLp must contain Lp.");
  298. // 1.
  299. PHINode *IV = Lp->getCanonicalInductionVariable();
  300. if (!IV) {
  301. LLVM_DEBUG(dbgs() << "LV: Canonical IV not found.\n");
  302. return false;
  303. }
  304. // 2.
  305. BasicBlock *Latch = Lp->getLoopLatch();
  306. auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  307. if (!LatchBr || LatchBr->isUnconditional()) {
  308. LLVM_DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n");
  309. return false;
  310. }
  311. // 3.
  312. auto *LatchCmp = dyn_cast<CmpInst>(LatchBr->getCondition());
  313. if (!LatchCmp) {
  314. LLVM_DEBUG(
  315. dbgs() << "LV: Loop latch condition is not a compare instruction.\n");
  316. return false;
  317. }
  318. Value *CondOp0 = LatchCmp->getOperand(0);
  319. Value *CondOp1 = LatchCmp->getOperand(1);
  320. Value *IVUpdate = IV->getIncomingValueForBlock(Latch);
  321. if (!(CondOp0 == IVUpdate && OuterLp->isLoopInvariant(CondOp1)) &&
  322. !(CondOp1 == IVUpdate && OuterLp->isLoopInvariant(CondOp0))) {
  323. LLVM_DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n");
  324. return false;
  325. }
  326. return true;
  327. }
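// Illustrative example (not part of the original source) of a latch that
// satisfies conditions 1-3 above; %iv is the canonical IV of Lp and %n is
// assumed invariant in OuterLp:
//   inner.latch:
//     %iv.next = add nuw nsw i64 %iv, 1
//     %cmp = icmp ult i64 %iv.next, %n
//     br i1 %cmp, label %inner.header, label %inner.exit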
  328. // Return true if \p Lp and all its nested loops are uniform with regard to \p
  329. // OuterLp.
  330. static bool isUniformLoopNest(Loop *Lp, Loop *OuterLp) {
  331. if (!isUniformLoop(Lp, OuterLp))
  332. return false;
  333. // Check if nested loops are uniform.
  334. for (Loop *SubLp : *Lp)
  335. if (!isUniformLoopNest(SubLp, OuterLp))
  336. return false;
  337. return true;
  338. }
  339. /// Check whether it is safe to if-convert this phi node.
  340. ///
  341. /// Phi nodes with constant expressions that can trap are not safe to if
  342. /// convert.
  343. static bool canIfConvertPHINodes(BasicBlock *BB) {
  344. for (PHINode &Phi : BB->phis()) {
  345. for (Value *V : Phi.incoming_values())
  346. if (auto *C = dyn_cast<Constant>(V))
  347. if (C->canTrap())
  348. return false;
  349. }
  350. return true;
  351. }
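// Illustrative example (not part of the original source) of an incoming value
// that blocks if-conversion (@g is a placeholder global): the sdiv constant
// expression can trap if its divisor turns out to be zero when speculated.
//   %p = phi i32 [ %x, %then ],
//                [ sdiv (i32 1, i32 ptrtoint (i32* @g to i32)), %else ]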
  352. static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  353. if (Ty->isPointerTy())
  354. return DL.getIntPtrType(Ty);
  355. // It is possible that chars or shorts overflow when we ask for the loop's
  356. // trip count; work around this by changing the type size.
  357. if (Ty->getScalarSizeInBits() < 32)
  358. return Type::getInt32Ty(Ty->getContext());
  359. return Ty;
  360. }
  361. static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  362. Ty0 = convertPointerToIntegerType(DL, Ty0);
  363. Ty1 = convertPointerToIntegerType(DL, Ty1);
  364. if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
  365. return Ty0;
  366. return Ty1;
  367. }
  368. /// Check that the instruction has outside loop users and is not an
  369. /// identified reduction variable.
  370. static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
  371. SmallPtrSetImpl<Value *> &AllowedExit) {
  372. // Reductions, Inductions and non-header phis are allowed to have exit users. All
  373. // other instructions must not have external users.
  374. if (!AllowedExit.count(Inst))
  375. // Check that all users of the instruction are inside the loop.
  376. for (User *U : Inst->users()) {
  377. Instruction *UI = cast<Instruction>(U);
  378. // This user may be a reduction exit value.
  379. if (!TheLoop->contains(UI)) {
  380. LLVM_DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
  381. return true;
  382. }
  383. }
  384. return false;
  385. }
  386. int LoopVectorizationLegality::isConsecutivePtr(Type *AccessTy,
  387. Value *Ptr) const {
  388. const ValueToValueMap &Strides =
  389. getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();
  390. Function *F = TheLoop->getHeader()->getParent();
  391. bool OptForSize = F->hasOptSize() ||
  392. llvm::shouldOptimizeForSize(TheLoop->getHeader(), PSI, BFI,
  393. PGSOQueryType::IRPass);
  394. bool CanAddPredicate = !OptForSize;
  395. int Stride = getPtrStride(PSE, AccessTy, Ptr, TheLoop, Strides,
  396. CanAddPredicate, false);
  397. if (Stride == 1 || Stride == -1)
  398. return Stride;
  399. return 0;
  400. }
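// Illustrative examples (not part of the original source): with a unit-step
// induction variable i, the pointer &A[i] has stride 1 and &A[N - i] has
// stride -1, while &A[2 * i] is not consecutive and yields 0 here.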
  401. bool LoopVectorizationLegality::isUniform(Value *V) {
  402. return LAI->isUniform(V);
  403. }
  404. bool LoopVectorizationLegality::canVectorizeOuterLoop() {
  405. assert(!TheLoop->isInnermost() && "We are not vectorizing an outer loop.");
  406. // Store the result and return it at the end instead of exiting early, in case
  407. // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  408. bool Result = true;
  409. bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  410. for (BasicBlock *BB : TheLoop->blocks()) {
  411. // Check whether the BB terminator is a BranchInst. Any other terminator is
  412. // not supported yet.
  413. auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
  414. if (!Br) {
  415. reportVectorizationFailure("Unsupported basic block terminator",
  416. "loop control flow is not understood by vectorizer",
  417. "CFGNotUnderstood", ORE, TheLoop);
  418. if (DoExtraAnalysis)
  419. Result = false;
  420. else
  421. return false;
  422. }
  423. // Check whether the BranchInst is a supported one. Only unconditional
  424. // branches, conditional branches with an outer loop invariant condition or
  425. // backedges are supported.
  426. // FIXME: We skip these checks when VPlan predication is enabled as we
  427. // want to allow divergent branches. This whole check will be removed
  428. // once VPlan predication is on by default.
  429. if (!EnableVPlanPredication && Br && Br->isConditional() &&
  430. !TheLoop->isLoopInvariant(Br->getCondition()) &&
  431. !LI->isLoopHeader(Br->getSuccessor(0)) &&
  432. !LI->isLoopHeader(Br->getSuccessor(1))) {
  433. reportVectorizationFailure("Unsupported conditional branch",
  434. "loop control flow is not understood by vectorizer",
  435. "CFGNotUnderstood", ORE, TheLoop);
  436. if (DoExtraAnalysis)
  437. Result = false;
  438. else
  439. return false;
  440. }
  441. }
  442. // Check whether inner loops are uniform. At this point, we only support
  443. // simple outer loops scenarios with uniform nested loops.
  444. if (!isUniformLoopNest(TheLoop /*loop nest*/,
  445. TheLoop /*context outer loop*/)) {
  446. reportVectorizationFailure("Outer loop contains divergent loops",
  447. "loop control flow is not understood by vectorizer",
  448. "CFGNotUnderstood", ORE, TheLoop);
  449. if (DoExtraAnalysis)
  450. Result = false;
  451. else
  452. return false;
  453. }
  454. // Check whether we are able to set up outer loop induction.
  455. if (!setupOuterLoopInductions()) {
  456. reportVectorizationFailure("Unsupported outer loop Phi(s)",
  457. "Unsupported outer loop Phi(s)",
  458. "UnsupportedPhi", ORE, TheLoop);
  459. if (DoExtraAnalysis)
  460. Result = false;
  461. else
  462. return false;
  463. }
  464. return Result;
  465. }
  466. void LoopVectorizationLegality::addInductionPhi(
  467. PHINode *Phi, const InductionDescriptor &ID,
  468. SmallPtrSetImpl<Value *> &AllowedExit) {
  469. Inductions[Phi] = ID;
  470. // In case this induction also comes with casts that we know we can ignore
  471. // in the vectorized loop body, record them here. All casts could be recorded
  472. // here for ignoring, but it suffices to record only the first (as it is the
  473. // only one that may be used outside the cast sequence).
  474. const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  475. if (!Casts.empty())
  476. InductionCastsToIgnore.insert(*Casts.begin());
  477. Type *PhiTy = Phi->getType();
  478. const DataLayout &DL = Phi->getModule()->getDataLayout();
  479. // Get the widest type.
  480. if (!PhiTy->isFloatingPointTy()) {
  481. if (!WidestIndTy)
  482. WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
  483. else
  484. WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  485. }
  486. // Int inductions are special because we only allow one IV.
  487. if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
  488. ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
  489. isa<Constant>(ID.getStartValue()) &&
  490. cast<Constant>(ID.getStartValue())->isNullValue()) {
  491. // Use the phi node with the widest type as induction. Use the last
  492. // one if there are multiple (no good reason for doing this other
  493. // than it is expedient). We've checked that it begins at zero and
  494. // steps by one, so this is a canonical induction variable.
  495. if (!PrimaryInduction || PhiTy == WidestIndTy)
  496. PrimaryInduction = Phi;
  497. }
  498. // Both the PHI node itself, and the "post-increment" value feeding
  499. // back into the PHI node may have external users.
  500. // We can allow those uses, except if the SCEVs we have for them rely
  501. // on predicates that only hold within the loop, since allowing the exit
  502. // currently means re-using this SCEV outside the loop (see PR33706 for more
  503. // details).
  504. if (PSE.getUnionPredicate().isAlwaysTrue()) {
  505. AllowedExit.insert(Phi);
  506. AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
  507. }
  508. LLVM_DEBUG(dbgs() << "LV: Found an induction variable.\n");
  509. }
  510. bool LoopVectorizationLegality::setupOuterLoopInductions() {
  511. BasicBlock *Header = TheLoop->getHeader();
  512. // Returns true if a given Phi is a supported induction.
  513. auto isSupportedPhi = [&](PHINode &Phi) -> bool {
  514. InductionDescriptor ID;
  515. if (InductionDescriptor::isInductionPHI(&Phi, TheLoop, PSE, ID) &&
  516. ID.getKind() == InductionDescriptor::IK_IntInduction) {
  517. addInductionPhi(&Phi, ID, AllowedExit);
  518. return true;
  519. } else {
  520. // Bail out for any Phi in the outer loop header that is not a supported
  521. // induction.
  522. LLVM_DEBUG(
  523. dbgs()
  524. << "LV: Found unsupported PHI for outer loop vectorization.\n");
  525. return false;
  526. }
  527. };
  528. if (llvm::all_of(Header->phis(), isSupportedPhi))
  529. return true;
  530. else
  531. return false;
  532. }
  533. /// Checks if a function is scalarizable according to the TLI, in
  534. /// the sense that it should be vectorized and then expanded in
  535. /// multiple scalar calls. This is represented in the
  536. /// TLI via mappings that do not specify a vector name, as in the
  537. /// following example:
  538. ///
  539. /// const VecDesc VecIntrinsics[] = {
  540. /// {"llvm.phx.abs.i32", "", 4}
  541. /// };
  542. static bool isTLIScalarize(const TargetLibraryInfo &TLI, const CallInst &CI) {
  543. const StringRef ScalarName = CI.getCalledFunction()->getName();
  544. bool Scalarize = TLI.isFunctionVectorizable(ScalarName);
  545. // Check that all known VFs are not associated with a vector
  546. // function, i.e. the vector name is empty.
  547. if (Scalarize) {
  548. ElementCount WidestFixedVF, WidestScalableVF;
  549. TLI.getWidestVF(ScalarName, WidestFixedVF, WidestScalableVF);
  550. for (ElementCount VF = ElementCount::getFixed(2);
  551. ElementCount::isKnownLE(VF, WidestFixedVF); VF *= 2)
  552. Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
  553. for (ElementCount VF = ElementCount::getScalable(1);
  554. ElementCount::isKnownLE(VF, WidestScalableVF); VF *= 2)
  555. Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
  556. assert((WidestScalableVF.isZero() || !Scalarize) &&
  557. "Caller may decide to scalarize a variant using a scalable VF");
  558. }
  559. return Scalarize;
  560. }
  561. bool LoopVectorizationLegality::canVectorizeInstrs() {
  562. BasicBlock *Header = TheLoop->getHeader();
  563. // For each block in the loop.
  564. for (BasicBlock *BB : TheLoop->blocks()) {
  565. // Scan the instructions in the block and look for hazards.
  566. for (Instruction &I : *BB) {
  567. if (auto *Phi = dyn_cast<PHINode>(&I)) {
  568. Type *PhiTy = Phi->getType();
  569. // Check that this PHI type is allowed.
  570. if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
  571. !PhiTy->isPointerTy()) {
  572. reportVectorizationFailure("Found a non-int non-pointer PHI",
  573. "loop control flow is not understood by vectorizer",
  574. "CFGNotUnderstood", ORE, TheLoop);
  575. return false;
  576. }
  577. // If this PHINode is not in the header block, then we know that we
  578. // can convert it to select during if-conversion. No need to check if
  579. // the PHIs in this block are induction or reduction variables.
  580. if (BB != Header) {
  581. // Non-header phi nodes that have outside uses can be vectorized. Add
  582. // them to the list of allowed exits.
  583. // Unsafe cyclic dependencies with header phis are identified during
  584. // legalization for reduction, induction and first order
  585. // recurrences.
  586. AllowedExit.insert(&I);
  587. continue;
  588. }
  589. // We only allow if-converted PHIs with exactly two incoming values.
  590. if (Phi->getNumIncomingValues() != 2) {
  591. reportVectorizationFailure("Found an invalid PHI",
  592. "loop control flow is not understood by vectorizer",
  593. "CFGNotUnderstood", ORE, TheLoop, Phi);
  594. return false;
  595. }
  596. RecurrenceDescriptor RedDes;
  597. if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC,
  598. DT)) {
  599. Requirements->addExactFPMathInst(RedDes.getExactFPMathInst());
  600. AllowedExit.insert(RedDes.getLoopExitInstr());
  601. Reductions[Phi] = RedDes;
  602. continue;
  603. }
  604. // TODO: Instead of recording the AllowedExit, it would be good to record the
  605. // complementary set: NotAllowedExit. These include (but may not be
  606. // limited to):
  607. // 1. Reduction phis as they represent the one-before-last value, which
  608. // is not available when vectorized
  609. // 2. Induction phis and increment when SCEV predicates cannot be used
  610. // outside the loop - see addInductionPhi
  611. // 3. Non-Phis with outside uses when SCEV predicates cannot be used
  612. // outside the loop - see call to hasOutsideLoopUser in the non-phi
  613. // handling below
  614. // 4. FirstOrderRecurrence phis that can possibly be handled by
  615. // extraction.
  616. // By recording these, we can then reason about ways to vectorize each
  617. // of these NotAllowedExit.
  618. InductionDescriptor ID;
  619. if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
  620. addInductionPhi(Phi, ID, AllowedExit);
  621. Requirements->addExactFPMathInst(ID.getExactFPMathInst());
  622. continue;
  623. }
  624. if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
  625. SinkAfter, DT)) {
  626. AllowedExit.insert(Phi);
  627. FirstOrderRecurrences.insert(Phi);
  628. continue;
  629. }
  630. // As a last resort, coerce the PHI to an AddRec expression
  631. // and re-try classifying it as an induction PHI.
  632. if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
  633. addInductionPhi(Phi, ID, AllowedExit);
  634. continue;
  635. }
  636. reportVectorizationFailure("Found an unidentified PHI",
  637. "value that could not be identified as "
  638. "reduction is used outside the loop",
  639. "NonReductionValueUsedOutsideLoop", ORE, TheLoop, Phi);
  640. return false;
  641. } // end of PHI handling
  642. // We handle calls that:
  643. // * Are debug info intrinsics.
  644. // * Have a mapping to an IR intrinsic.
  645. // * Have a vector version available.
  646. auto *CI = dyn_cast<CallInst>(&I);
  647. if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
  648. !isa<DbgInfoIntrinsic>(CI) &&
  649. !(CI->getCalledFunction() && TLI &&
  650. (!VFDatabase::getMappings(*CI).empty() ||
  651. isTLIScalarize(*TLI, *CI)))) {
  652. // If the call is a recognized math library call, it is likely that
  653. // we can vectorize it given loosened floating-point constraints.
  654. LibFunc Func;
  655. bool IsMathLibCall =
  656. TLI && CI->getCalledFunction() &&
  657. CI->getType()->isFloatingPointTy() &&
  658. TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) &&
  659. TLI->hasOptimizedCodeGen(Func);
  660. if (IsMathLibCall) {
  661. // TODO: Ideally, we should not use clang-specific language here,
  662. // but it's hard to provide meaningful yet generic advice.
  663. // Also, should this be guarded by allowExtraAnalysis() and/or be part
  664. // of the returned info from isFunctionVectorizable()?
  665. reportVectorizationFailure(
  666. "Found a non-intrinsic callsite",
  667. "library call cannot be vectorized. "
  668. "Try compiling with -fno-math-errno, -ffast-math, "
  669. "or similar flags",
  670. "CantVectorizeLibcall", ORE, TheLoop, CI);
  671. } else {
  672. reportVectorizationFailure("Found a non-intrinsic callsite",
  673. "call instruction cannot be vectorized",
  674. "CantVectorizeLibcall", ORE, TheLoop, CI);
  675. }
  676. return false;
  677. }
  678. // Some intrinsics have scalar arguments that must be loop invariant (i.e.
  679. // the same on every iteration) for the call to be vectorized.
  680. if (CI) {
  681. auto *SE = PSE.getSE();
  682. Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
  683. for (unsigned i = 0, e = CI->arg_size(); i != e; ++i)
  684. if (hasVectorInstrinsicScalarOpd(IntrinID, i)) {
  685. if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(i)), TheLoop)) {
  686. reportVectorizationFailure("Found unvectorizable intrinsic",
  687. "intrinsic instruction cannot be vectorized",
  688. "CantVectorizeIntrinsic", ORE, TheLoop, CI);
  689. return false;
  690. }
  691. }
  692. }
  693. // Check that the instruction return type is vectorizable.
  694. // Also, we can't vectorize extractelement instructions.
  695. if ((!VectorType::isValidElementType(I.getType()) &&
  696. !I.getType()->isVoidTy()) ||
  697. isa<ExtractElementInst>(I)) {
  698. reportVectorizationFailure("Found unvectorizable type",
  699. "instruction return type cannot be vectorized",
  700. "CantVectorizeInstructionReturnType", ORE, TheLoop, &I);
  701. return false;
  702. }
  703. // Check that the stored type is vectorizable.
  704. if (auto *ST = dyn_cast<StoreInst>(&I)) {
  705. Type *T = ST->getValueOperand()->getType();
  706. if (!VectorType::isValidElementType(T)) {
  707. reportVectorizationFailure("Store instruction cannot be vectorized",
  708. "store instruction cannot be vectorized",
  709. "CantVectorizeStore", ORE, TheLoop, ST);
  710. return false;
  711. }
  712. // For nontemporal stores, check that a nontemporal vector version is
  713. // supported on the target.
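// Illustrative example (not part of the original source) of such a store:
//   store i32 %v, i32* %p, align 4, !nontemporal !7
//   !7 = !{i32 1}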
  714. if (ST->getMetadata(LLVMContext::MD_nontemporal)) {
  715. // Arbitrarily try a vector of 2 elements.
  716. auto *VecTy = FixedVectorType::get(T, /*NumElts=*/2);
  717. assert(VecTy && "did not find vectorized version of stored type");
  718. if (!TTI->isLegalNTStore(VecTy, ST->getAlign())) {
  719. reportVectorizationFailure(
  720. "nontemporal store instruction cannot be vectorized",
  721. "nontemporal store instruction cannot be vectorized",
  722. "CantVectorizeNontemporalStore", ORE, TheLoop, ST);
  723. return false;
  724. }
  725. }
  726. } else if (auto *LD = dyn_cast<LoadInst>(&I)) {
  727. if (LD->getMetadata(LLVMContext::MD_nontemporal)) {
  728. // For nontemporal loads, check that a nontemporal vector version is
  729. // supported on the target (arbitrarily try a vector of 2 elements).
  730. auto *VecTy = FixedVectorType::get(I.getType(), /*NumElts=*/2);
  731. assert(VecTy && "did not find vectorized version of load type");
  732. if (!TTI->isLegalNTLoad(VecTy, LD->getAlign())) {
  733. reportVectorizationFailure(
  734. "nontemporal load instruction cannot be vectorized",
  735. "nontemporal load instruction cannot be vectorized",
  736. "CantVectorizeNontemporalLoad", ORE, TheLoop, LD);
  737. return false;
  738. }
  739. }
  740. // FP instructions can allow unsafe algebra, thus vectorizable by
  741. // non-IEEE-754 compliant SIMD units.
  742. // This applies to floating-point math operations and calls, not memory
  743. // operations, shuffles, or casts, as they don't change precision or
  744. // semantics.
  745. } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
  746. !I.isFast()) {
  747. LLVM_DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
  748. Hints->setPotentiallyUnsafe();
  749. }
  750. // Reduction instructions are allowed to have exit users.
  751. // All other instructions must not have external users.
  752. if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
  753. // We can safely vectorize loops where instructions within the loop are
  754. // used outside the loop only if the SCEV predicates within the loop are the
  755. // same as outside the loop. Allowing the exit means reusing the SCEV
  756. // outside the loop.
  757. if (PSE.getUnionPredicate().isAlwaysTrue()) {
  758. AllowedExit.insert(&I);
  759. continue;
  760. }
  761. reportVectorizationFailure("Value cannot be used outside the loop",
  762. "value cannot be used outside the loop",
  763. "ValueUsedOutsideLoop", ORE, TheLoop, &I);
  764. return false;
  765. }
  766. } // next instr.
  767. }
  768. if (!PrimaryInduction) {
  769. if (Inductions.empty()) {
  770. reportVectorizationFailure("Did not find one integer induction var",
  771. "loop induction variable could not be identified",
  772. "NoInductionVariable", ORE, TheLoop);
  773. return false;
  774. } else if (!WidestIndTy) {
  775. reportVectorizationFailure("Did not find one integer induction var",
  776. "integer loop induction variable could not be identified",
  777. "NoIntegerInductionVariable", ORE, TheLoop);
  778. return false;
  779. } else {
  780. LLVM_DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
  781. }
  782. }
  783. // For first order recurrences, we use the previous value (incoming value from
  784. // the latch) to check if it dominates all users of the recurrence. Bail out
  785. // if we have to sink such an instruction for another recurrence, as the
  786. // dominance requirement may not hold after sinking.
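// Illustrative example (not part of the original source) of a first-order
// recurrence (%gep is a placeholder address): %prev carries the value of %cur
// from the previous iteration via the header phi.
//   loop:
//     %prev = phi i32 [ %init, %preheader ], [ %cur, %loop ]
//     %cur  = load i32, i32* %gep
//     %sum  = add i32 %cur, %prev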
  787. BasicBlock *LoopLatch = TheLoop->getLoopLatch();
  788. if (any_of(FirstOrderRecurrences, [LoopLatch, this](const PHINode *Phi) {
  789. Instruction *V =
  790. cast<Instruction>(Phi->getIncomingValueForBlock(LoopLatch));
  791. return SinkAfter.find(V) != SinkAfter.end();
  792. }))
  793. return false;
  794. // Now we know the widest induction type, check if our found induction
  795. // is the same size. If it's not, unset it here and InnerLoopVectorizer
  796. // will create another.
  797. if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
  798. PrimaryInduction = nullptr;
  799. return true;
  800. }
  801. bool LoopVectorizationLegality::canVectorizeMemory() {
  802. LAI = &(*GetLAA)(*TheLoop);
  803. const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  804. if (LAR) {
  805. ORE->emit([&]() {
  806. return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
  807. "loop not vectorized: ", *LAR);
  808. });
  809. }
  810. if (!LAI->canVectorizeMemory())
  811. return false;
  812. if (LAI->hasDependenceInvolvingLoopInvariantAddress()) {
  813. reportVectorizationFailure("Stores to a uniform address",
  814. "write to a loop invariant address could not be vectorized",
  815. "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
  816. return false;
  817. }
  818. Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  819. PSE.addPredicate(LAI->getPSE().getUnionPredicate());
  820. return true;
  821. }
  822. bool LoopVectorizationLegality::canVectorizeFPMath(
  823. bool EnableStrictReductions) {
  824. // First check if there is any ExactFP math or if we allow reassociations
  825. if (!Requirements->getExactFPInst() || Hints->allowReordering())
  826. return true;
  827. // If the above is false, we have ExactFPMath & do not allow reordering.
  828. // If the EnableStrictReductions flag is set, first check if we have any
  829. // Exact FP induction vars, which we cannot vectorize.
  830. if (!EnableStrictReductions ||
  831. any_of(getInductionVars(), [&](auto &Induction) -> bool {
  832. InductionDescriptor IndDesc = Induction.second;
  833. return IndDesc.getExactFPMathInst();
  834. }))
  835. return false;
  836. // We can now only vectorize if all reductions with Exact FP math also
  837. // have the isOrdered flag set, which indicates that we can move the
  838. // reduction operations in-loop.
  839. return (all_of(getReductionVars(), [&](auto &Reduction) -> bool {
  840. const RecurrenceDescriptor &RdxDesc = Reduction.second;
  841. return !RdxDesc.hasExactFPMath() || RdxDesc.isOrdered();
  842. }));
  843. }
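// Illustrative note (not part of the original source): an "ordered" reduction
// is kept strictly in-loop, e.g. by accumulating with
//   %sum.next = call float @llvm.vector.reduce.fadd.v4f32(float %sum,
//                                                         <4 x float> %vec)
// on each iteration, which preserves the sequential FP association required
// by exact FP math.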
  844. bool LoopVectorizationLegality::isInductionPhi(const Value *V) const {
  845. Value *In0 = const_cast<Value *>(V);
  846. PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  847. if (!PN)
  848. return false;
  849. return Inductions.count(PN);
  850. }
  851. const InductionDescriptor *
  852. LoopVectorizationLegality::getIntOrFpInductionDescriptor(PHINode *Phi) const {
  853. if (!isInductionPhi(Phi))
  854. return nullptr;
  855. auto &ID = getInductionVars().find(Phi)->second;
  856. if (ID.getKind() == InductionDescriptor::IK_IntInduction ||
  857. ID.getKind() == InductionDescriptor::IK_FpInduction)
  858. return &ID;
  859. return nullptr;
  860. }
  861. bool LoopVectorizationLegality::isCastedInductionVariable(
  862. const Value *V) const {
  863. auto *Inst = dyn_cast<Instruction>(V);
  864. return (Inst && InductionCastsToIgnore.count(Inst));
  865. }
  866. bool LoopVectorizationLegality::isInductionVariable(const Value *V) const {
  867. return isInductionPhi(V) || isCastedInductionVariable(V);
  868. }
  869. bool LoopVectorizationLegality::isFirstOrderRecurrence(
  870. const PHINode *Phi) const {
  871. return FirstOrderRecurrences.count(Phi);
  872. }
  873. bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) const {
  874. return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  875. }
  876. bool LoopVectorizationLegality::blockCanBePredicated(
  877. BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
  878. SmallPtrSetImpl<const Instruction *> &MaskedOp,
  879. SmallPtrSetImpl<Instruction *> &ConditionalAssumes) const {
  880. for (Instruction &I : *BB) {
  881. // Check that we don't have a constant expression that can trap as an operand.
  882. for (Value *Operand : I.operands()) {
  883. if (auto *C = dyn_cast<Constant>(Operand))
  884. if (C->canTrap())
  885. return false;
  886. }
  887. // We can predicate blocks with calls to assume, as long as we drop them in
  888. // case we flatten the CFG via predication.
  889. if (match(&I, m_Intrinsic<Intrinsic::assume>())) {
  890. ConditionalAssumes.insert(&I);
  891. continue;
  892. }
  893. // Do not let llvm.experimental.noalias.scope.decl block the vectorization.
  894. // TODO: there might be cases that it should block the vectorization. Let's
  895. // ignore those for now.
  896. if (isa<NoAliasScopeDeclInst>(&I))
  897. continue;
  898. // We might be able to hoist the load.
  899. if (I.mayReadFromMemory()) {
  900. auto *LI = dyn_cast<LoadInst>(&I);
  901. if (!LI)
  902. return false;
  903. if (!SafePtrs.count(LI->getPointerOperand())) {
  904. MaskedOp.insert(LI);
  905. continue;
  906. }
  907. }
  908. if (I.mayWriteToMemory()) {
  909. auto *SI = dyn_cast<StoreInst>(&I);
  910. if (!SI)
  911. return false;
  912. // Predicated store requires some form of masking:
  913. // 1) masked store HW instruction,
  914. // 2) emulation via load-blend-store (only if safe and legal to do so;
  915. // beware of race conditions), or
  916. // 3) element-by-element predicate check and scalar store.
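// Illustrative example (not part of the original source) of option 1 in IR:
//   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %ptr,
//                                              i32 4, <4 x i1> %mask)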
  917. MaskedOp.insert(SI);
  918. continue;
  919. }
  920. if (I.mayThrow())
  921. return false;
  922. }
  923. return true;
  924. }
  925. bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  926. if (!EnableIfConversion) {
  927. reportVectorizationFailure("If-conversion is disabled",
  928. "if-conversion is disabled",
  929. "IfConversionDisabled",
  930. ORE, TheLoop);
  931. return false;
  932. }
  933. assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
  934. // A list of pointers which are known to be dereferenceable within the scope of
  935. // the loop body for each iteration of the loop which executes. That is,
  936. // the memory pointed to can be dereferenced (with the access size implied by
  937. // the value's type) unconditionally within the loop header without
  938. // introducing a new fault.
  939. SmallPtrSet<Value *, 8> SafePointers;
  940. // Collect safe addresses.
  941. for (BasicBlock *BB : TheLoop->blocks()) {
  942. if (!blockNeedsPredication(BB)) {
  943. for (Instruction &I : *BB)
  944. if (auto *Ptr = getLoadStorePointerOperand(&I))
  945. SafePointers.insert(Ptr);
  946. continue;
  947. }
  948. // For a block which requires predication, an address may be safe to access
  949. // in the loop w/o predication if we can prove dereferenceability facts
  950. // sufficient to ensure it'll never fault within the loop. For the moment,
  951. // we restrict this to loads; stores are more complicated due to
  952. // concurrency restrictions.
  953. ScalarEvolution &SE = *PSE.getSE();
  954. for (Instruction &I : *BB) {
  955. LoadInst *LI = dyn_cast<LoadInst>(&I);
  956. if (LI && !LI->getType()->isVectorTy() && !mustSuppressSpeculation(*LI) &&
  957. isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, *DT))
  958. SafePointers.insert(LI->getPointerOperand());
  959. }
  960. }
  961. // Collect the blocks that need predication.
  962. BasicBlock *Header = TheLoop->getHeader();
  963. for (BasicBlock *BB : TheLoop->blocks()) {
  964. // We don't support switch statements inside loops.
  965. if (!isa<BranchInst>(BB->getTerminator())) {
  966. reportVectorizationFailure("Loop contains a switch statement",
  967. "loop contains a switch statement",
  968. "LoopContainsSwitch", ORE, TheLoop,
  969. BB->getTerminator());
  970. return false;
  971. }
  972. // We must be able to predicate all blocks that need to be predicated.
  973. if (blockNeedsPredication(BB)) {
  974. if (!blockCanBePredicated(BB, SafePointers, MaskedOp,
  975. ConditionalAssumes)) {
  976. reportVectorizationFailure(
  977. "Control flow cannot be substituted for a select",
  978. "control flow cannot be substituted for a select",
  979. "NoCFGForSelect", ORE, TheLoop,
  980. BB->getTerminator());
  981. return false;
  982. }
  983. } else if (BB != Header && !canIfConvertPHINodes(BB)) {
  984. reportVectorizationFailure(
  985. "Control flow cannot be substituted for a select",
  986. "control flow cannot be substituted for a select",
  987. "NoCFGForSelect", ORE, TheLoop,
  988. BB->getTerminator());
  989. return false;
  990. }
  991. }
  992. // We can if-convert this loop.
  993. return true;
  994. }
  995. // Helper function to canVectorizeLoopNestCFG.
  996. bool LoopVectorizationLegality::canVectorizeLoopCFG(Loop *Lp,
  997. bool UseVPlanNativePath) {
  998. assert((UseVPlanNativePath || Lp->isInnermost()) &&
  999. "VPlan-native path is not enabled.");
  1000. // TODO: ORE should be improved to show more accurate information when an
  1001. // outer loop can't be vectorized because a nested loop is not understood or
  1002. // legal. Something like: "outer_loop_location: loop not vectorized:
  1003. // (inner_loop_location) loop control flow is not understood by vectorizer".
  1004. // Store the result and return it at the end instead of exiting early, in case
  1005. // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  1006. bool Result = true;
  1007. bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  1008. // We must have a loop in canonical form. Loops with indirectbr in them cannot
  1009. // be canonicalized.
  1010. if (!Lp->getLoopPreheader()) {
  1011. reportVectorizationFailure("Loop doesn't have a legal pre-header",
  1012. "loop control flow is not understood by vectorizer",
  1013. "CFGNotUnderstood", ORE, TheLoop);
  1014. if (DoExtraAnalysis)
  1015. Result = false;
  1016. else
  1017. return false;
  1018. }
  1019. // We must have a single backedge.
  1020. if (Lp->getNumBackEdges() != 1) {
  1021. reportVectorizationFailure("The loop must have a single backedge",
  1022. "loop control flow is not understood by vectorizer",
  1023. "CFGNotUnderstood", ORE, TheLoop);
  1024. if (DoExtraAnalysis)
  1025. Result = false;
  1026. else
  1027. return false;
  1028. }
  1029. return Result;
  1030. }
  1031. bool LoopVectorizationLegality::canVectorizeLoopNestCFG(
  1032. Loop *Lp, bool UseVPlanNativePath) {
  1033. // Store the result and return it at the end instead of exiting early, in case
  1034. // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  1035. bool Result = true;
  1036. bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  1037. if (!canVectorizeLoopCFG(Lp, UseVPlanNativePath)) {
  1038. if (DoExtraAnalysis)
  1039. Result = false;
  1040. else
  1041. return false;
  1042. }
  1043. // Recursively check whether the loop control flow of nested loops is
  1044. // understood.
  1045. for (Loop *SubLp : *Lp)
  1046. if (!canVectorizeLoopNestCFG(SubLp, UseVPlanNativePath)) {
  1047. if (DoExtraAnalysis)
  1048. Result = false;
  1049. else
  1050. return false;
  1051. }
  1052. return Result;
  1053. }
  1054. bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
  1055. // Store the result and return it at the end instead of exiting early, in case
  1056. // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
  1057. bool Result = true;
  1058. bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  1059. // Check whether the loop-related control flow in the loop nest is expected by
  1060. // vectorizer.
  1061. if (!canVectorizeLoopNestCFG(TheLoop, UseVPlanNativePath)) {
  1062. if (DoExtraAnalysis)
  1063. Result = false;
  1064. else
  1065. return false;
  1066. }
  1067. // We need to have a loop header.
  1068. LLVM_DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
  1069. << '\n');
  1070. // Specific checks for outer loops. We skip the remaining legal checks at this
  1071. // point because they don't support outer loops.
  1072. if (!TheLoop->isInnermost()) {
  1073. assert(UseVPlanNativePath && "VPlan-native path is not enabled.");
  1074. if (!canVectorizeOuterLoop()) {
  1075. reportVectorizationFailure("Unsupported outer loop",
  1076. "unsupported outer loop",
  1077. "UnsupportedOuterLoop",
  1078. ORE, TheLoop);
  1079. // TODO: Implement DoExtraAnalysis when subsequent legal checks support
  1080. // outer loops.
  1081. return false;
  1082. }
  1083. LLVM_DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n");
  1084. return Result;
  1085. }
  1086. assert(TheLoop->isInnermost() && "Inner loop expected.");
  1087. // Check if we can if-convert non-single-bb loops.
  1088. unsigned NumBlocks = TheLoop->getNumBlocks();
  1089. if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
  1090. LLVM_DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
  1091. if (DoExtraAnalysis)
  1092. Result = false;
  1093. else
  1094. return false;
  1095. }
  1096. // Check if we can vectorize the instructions and CFG in this loop.
  1097. if (!canVectorizeInstrs()) {
  1098. LLVM_DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
  1099. if (DoExtraAnalysis)
  1100. Result = false;
  1101. else
  1102. return false;
  1103. }
  1104. // Go over each instruction and look at memory deps.
  1105. if (!canVectorizeMemory()) {
  1106. LLVM_DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
  1107. if (DoExtraAnalysis)
  1108. Result = false;
  1109. else
  1110. return false;
  1111. }
  1112. LLVM_DEBUG(dbgs() << "LV: We can vectorize this loop"
  1113. << (LAI->getRuntimePointerChecking()->Need
  1114. ? " (with a runtime bound check)"
  1115. : "")
  1116. << "!\n");
  1117. unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  1118. if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
  1119. SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
  1120. if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
  1121. reportVectorizationFailure("Too many SCEV checks needed",
  1122. "Too many SCEV assumptions need to be made and checked at runtime",
  1123. "TooManySCEVRunTimeChecks", ORE, TheLoop);
  1124. if (DoExtraAnalysis)
  1125. Result = false;
  1126. else
  1127. return false;
  1128. }
  1129. // Okay! We've done all the tests. If any have failed, return false. Otherwise
  1130. // we can vectorize, and at this point we don't have any other mem analysis
  1131. // which may limit our maximum vectorization factor, so just return true with
  1132. // no restrictions.
  1133. return Result;
  1134. }
  1135. bool LoopVectorizationLegality::prepareToFoldTailByMasking() {
  1136. LLVM_DEBUG(dbgs() << "LV: checking if tail can be folded by masking.\n");
  1137. SmallPtrSet<const Value *, 8> ReductionLiveOuts;
  1138. for (auto &Reduction : getReductionVars())
  1139. ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr());
  1140. // TODO: handle non-reduction outside users when tail is folded by masking.
  1141. for (auto *AE : AllowedExit) {
  1142. // Check that all users of allowed exit values are inside the loop or
  1143. // are the live-out of a reduction.
  1144. if (ReductionLiveOuts.count(AE))
  1145. continue;
  1146. for (User *U : AE->users()) {
  1147. Instruction *UI = cast<Instruction>(U);
  1148. if (TheLoop->contains(UI))
  1149. continue;
  1150. LLVM_DEBUG(
  1151. dbgs()
  1152. << "LV: Cannot fold tail by masking, loop has an outside user for "
  1153. << *UI << "\n");
  1154. return false;
  1155. }
  1156. }
  1157. // The list of pointers that we can safely read and write to remains empty.
  1158. SmallPtrSet<Value *, 8> SafePointers;
  1159. SmallPtrSet<const Instruction *, 8> TmpMaskedOp;
  1160. SmallPtrSet<Instruction *, 8> TmpConditionalAssumes;
  1161. // Check and mark all blocks for predication, including those that ordinarily
  1162. // do not need predication such as the header block.
  1163. for (BasicBlock *BB : TheLoop->blocks()) {
  1164. if (!blockCanBePredicated(BB, SafePointers, TmpMaskedOp,
  1165. TmpConditionalAssumes)) {
  1166. LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking as requested.\n");
  1167. return false;
  1168. }
  1169. }
  1170. LLVM_DEBUG(dbgs() << "LV: can fold tail by masking.\n");
  1171. MaskedOp.insert(TmpMaskedOp.begin(), TmpMaskedOp.end());
  1172. ConditionalAssumes.insert(TmpConditionalAssumes.begin(),
  1173. TmpConditionalAssumes.end());
  1174. return true;
  1175. }
  1176. } // namespace llvm