InterleavedLoadCombinePass.cpp 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365
  1. //===- InterleavedLoadCombine.cpp - Combine Interleaved Loads ---*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // \file
  10. //
  11. // This file defines the interleaved-load-combine pass. The pass searches for
  12. // ShuffleVectorInst instructions that execute interleaving loads. If a matching
  13. // pattern is found, it adds a combined load and further instructions in a
  14. // pattern that is detectable by InterleavedAccessPass. The old instructions are
  15. // left dead to be removed later. The pass is specifically designed to be
  16. // executed just before InterleavedAccessPass to find any left-over instances
  17. // that are not detected within former passes.
  18. //
  19. //===----------------------------------------------------------------------===//
  20. #include "llvm/ADT/Statistic.h"
  21. #include "llvm/Analysis/MemoryLocation.h"
  22. #include "llvm/Analysis/MemorySSA.h"
  23. #include "llvm/Analysis/MemorySSAUpdater.h"
  24. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  25. #include "llvm/Analysis/TargetTransformInfo.h"
  26. #include "llvm/CodeGen/Passes.h"
  27. #include "llvm/CodeGen/TargetLowering.h"
  28. #include "llvm/CodeGen/TargetPassConfig.h"
  29. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  30. #include "llvm/IR/DataLayout.h"
  31. #include "llvm/IR/Dominators.h"
  32. #include "llvm/IR/Function.h"
  33. #include "llvm/IR/Instructions.h"
  34. #include "llvm/IR/IRBuilder.h"
  35. #include "llvm/IR/LegacyPassManager.h"
  36. #include "llvm/IR/Module.h"
  37. #include "llvm/InitializePasses.h"
  38. #include "llvm/Pass.h"
  39. #include "llvm/Support/Debug.h"
  40. #include "llvm/Support/ErrorHandling.h"
  41. #include "llvm/Support/raw_ostream.h"
  42. #include "llvm/Target/TargetMachine.h"
  43. #include <algorithm>
  44. #include <cassert>
  45. #include <list>
  46. using namespace llvm;
  47. #define DEBUG_TYPE "interleaved-load-combine"
  48. namespace {
  49. /// Statistic counter
  50. STATISTIC(NumInterleavedLoadCombine, "Number of combined loads");
  51. /// Option to disable the pass
  52. static cl::opt<bool> DisableInterleavedLoadCombine(
  53. "disable-" DEBUG_TYPE, cl::init(false), cl::Hidden,
  54. cl::desc("Disable combining of interleaved loads"));
  55. struct VectorInfo;
  56. struct InterleavedLoadCombineImpl {
  57. public:
  58. InterleavedLoadCombineImpl(Function &F, DominatorTree &DT, MemorySSA &MSSA,
  59. TargetMachine &TM)
  60. : F(F), DT(DT), MSSA(MSSA),
  61. TLI(*TM.getSubtargetImpl(F)->getTargetLowering()),
  62. TTI(TM.getTargetTransformInfo(F)) {}
  63. /// Scan the function for interleaved load candidates and execute the
  64. /// replacement if applicable.
  65. bool run();
  66. private:
  67. /// Function this pass is working on
  68. Function &F;
  69. /// Dominator Tree Analysis
  70. DominatorTree &DT;
  71. /// Memory Alias Analyses
  72. MemorySSA &MSSA;
  73. /// Target Lowering Information
  74. const TargetLowering &TLI;
  75. /// Target Transform Information
  76. const TargetTransformInfo TTI;
  77. /// Find the instruction in sets LIs that dominates all others, return nullptr
  78. /// if there is none.
  79. LoadInst *findFirstLoad(const std::set<LoadInst *> &LIs);
  80. /// Replace interleaved load candidates. It does additional
  81. /// analyses if this makes sense. Returns true on success and false
  82. /// of nothing has been changed.
  83. bool combine(std::list<VectorInfo> &InterleavedLoad,
  84. OptimizationRemarkEmitter &ORE);
  85. /// Given a set of VectorInfo containing candidates for a given interleave
  86. /// factor, find a set that represents a 'factor' interleaved load.
  87. bool findPattern(std::list<VectorInfo> &Candidates,
  88. std::list<VectorInfo> &InterleavedLoad, unsigned Factor,
  89. const DataLayout &DL);
  90. }; // InterleavedLoadCombine
  91. /// First Order Polynomial on an n-Bit Integer Value
  92. ///
  93. /// Polynomial(Value) = Value * B + A + E*2^(n-e)
  94. ///
  95. /// A and B are the coefficients. E*2^(n-e) is an error within 'e' most
  96. /// significant bits. It is introduced if an exact computation cannot be proven
  97. /// (e.g. division by 2).
  98. ///
  99. /// As part of this optimization multiple loads will be combined. It is necessary
  100. /// to prove that loads are within some relative offset to each other. This
  101. /// class is used to prove relative offsets of values loaded from memory.
  102. ///
  103. /// Representing an integer in this form is sound since addition in two's
  104. /// complement is associative (trivial) and multiplication distributes over the
  105. /// addition (see Proof(1) in Polynomial::mul). Further, both operations
  106. /// commute.
  107. //
  108. // Example:
  109. // declare @fn(i64 %IDX, <4 x float>* %PTR) {
  110. // %Pa1 = add i64 %IDX, 2
  111. // %Pa2 = lshr i64 %Pa1, 1
  112. // %Pa3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pa2
  113. // %Va = load <4 x float>, <4 x float>* %Pa3
  114. //
  115. // %Pb1 = add i64 %IDX, 4
  116. // %Pb2 = lshr i64 %Pb1, 1
  117. // %Pb3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pb2
  118. // %Vb = load <4 x float>, <4 x float>* %Pb3
  119. // ... }
  120. //
  121. // The goal is to prove that two loads load consecutive addresses.
  122. //
  123. // In this case the polynomials are constructed by the following
  124. // steps.
  125. //
  126. // The number tag #e specifies the error bits.
  127. //
  128. // Pa_0 = %IDX #0
  129. // Pa_1 = %IDX + 2 #0 | add 2
  130. // Pa_2 = %IDX/2 + 1 #1 | lshr 1
  131. // Pa_3 = %IDX/2 + 1 #1 | GEP, step signext to i64
  132. // Pa_4 = (%IDX/2)*16 + 16 #0 | GEP, multiply index by sizeof(4) for floats
  133. // Pa_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  134. //
  135. // Pb_0 = %IDX #0
  136. // Pb_1 = %IDX + 4 #0 | add 2
  137. // Pb_2 = %IDX/2 + 2 #1 | lshr 1
  138. // Pb_3 = %IDX/2 + 2 #1 | GEP, step signext to i64
  139. // Pb_4 = (%IDX/2)*16 + 32 #0 | GEP, multiply index by sizeof(4) for floats
  140. // Pb_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  141. //
  142. // Pb_5 - Pa_5 = 16 #0 | subtract to get the offset
  143. //
  144. // Remark: %PTR is not maintained within this class. So in this instance the
  145. // offset of 16 can only be assumed if the pointers are equal.
  146. //
  147. class Polynomial {
  148. /// Operations on B
  149. enum BOps {
  150. LShr,
  151. Mul,
  152. SExt,
  153. Trunc,
  154. };
  155. /// Number of Error Bits e
  156. unsigned ErrorMSBs;
  157. /// Value
  158. Value *V;
  159. /// Coefficient B
  160. SmallVector<std::pair<BOps, APInt>, 4> B;
  161. /// Coefficient A
  162. APInt A;
  163. public:
  164. Polynomial(Value *V) : ErrorMSBs((unsigned)-1), V(V) {
  165. IntegerType *Ty = dyn_cast<IntegerType>(V->getType());
  166. if (Ty) {
  167. ErrorMSBs = 0;
  168. this->V = V;
  169. A = APInt(Ty->getBitWidth(), 0);
  170. }
  171. }
  172. Polynomial(const APInt &A, unsigned ErrorMSBs = 0)
  173. : ErrorMSBs(ErrorMSBs), V(nullptr), A(A) {}
  174. Polynomial(unsigned BitWidth, uint64_t A, unsigned ErrorMSBs = 0)
  175. : ErrorMSBs(ErrorMSBs), V(nullptr), A(BitWidth, A) {}
  176. Polynomial() : ErrorMSBs((unsigned)-1), V(nullptr) {}
  177. /// Increment and clamp the number of undefined bits.
  178. void incErrorMSBs(unsigned amt) {
  179. if (ErrorMSBs == (unsigned)-1)
  180. return;
  181. ErrorMSBs += amt;
  182. if (ErrorMSBs > A.getBitWidth())
  183. ErrorMSBs = A.getBitWidth();
  184. }
  185. /// Decrement and clamp the number of undefined bits.
  186. void decErrorMSBs(unsigned amt) {
  187. if (ErrorMSBs == (unsigned)-1)
  188. return;
  189. if (ErrorMSBs > amt)
  190. ErrorMSBs -= amt;
  191. else
  192. ErrorMSBs = 0;
  193. }
  194. /// Apply an add on the polynomial
  195. Polynomial &add(const APInt &C) {
  196. // Note: Addition is associative in two's complement even when in case of
  197. // signed overflow.
  198. //
  199. // Error bits can only propagate into higher significant bits. As these are
  200. // already regarded as undefined, there is no change.
  201. //
  202. // Theorem: Adding a constant to a polynomial does not change the error
  203. // term.
  204. //
  205. // Proof:
  206. //
  207. // Since the addition is associative and commutes:
  208. //
  209. // (B + A + E*2^(n-e)) + C = B + (A + C) + E*2^(n-e)
  210. // [qed]
  211. if (C.getBitWidth() != A.getBitWidth()) {
  212. ErrorMSBs = (unsigned)-1;
  213. return *this;
  214. }
  215. A += C;
  216. return *this;
  217. }
  218. /// Apply a multiplication onto the polynomial.
  219. Polynomial &mul(const APInt &C) {
  220. // Note: Multiplication distributes over the addition
  221. //
  222. // Theorem: Multiplication distributes over the addition
  223. //
  224. // Proof(1):
  225. //
  226. // (B+A)*C =-
  227. // = (B + A) + (B + A) + .. {C Times}
  228. // addition is associative and commutes, hence
  229. // = B + B + .. {C Times} .. + A + A + .. {C times}
  230. // = B*C + A*C
  231. // (see (function add) for signed values and overflows)
  232. // [qed]
  233. //
  234. // Theorem: If C has c trailing zeros, errors bits in A or B are shifted out
  235. // to the left.
  236. //
  237. // Proof(2):
  238. //
  239. // Let B' and A' be the n-Bit inputs with some unknown errors EA,
  240. // EB at e leading bits. B' and A' can be written down as:
  241. //
  242. // B' = B + 2^(n-e)*EB
  243. // A' = A + 2^(n-e)*EA
  244. //
  245. // Let C' be an input with c trailing zero bits. C' can be written as
  246. //
  247. // C' = C*2^c
  248. //
  249. // Therefore we can compute the result by using distributivity and
  250. // commutativity.
  251. //
  252. // (B'*C' + A'*C') = [B + 2^(n-e)*EB] * C' + [A + 2^(n-e)*EA] * C' =
  253. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  254. // = (B'+A') * C' =
  255. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  256. // = [B + A + 2^(n-e)*EB + 2^(n-e)*EA] * C' =
  257. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C' =
  258. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C*2^c =
  259. // = (B + A) * C' + C*(EB + EA)*2^(n-e)*2^c =
  260. //
  261. // Let EC be the final error with EC = C*(EB + EA)
  262. //
  263. // = (B + A)*C' + EC*2^(n-e)*2^c =
  264. // = (B + A)*C' + EC*2^(n-(e-c))
  265. //
  266. // Since EC is multiplied by 2^(n-(e-c)) the resulting error contains c
  267. // less error bits than the input. c bits are shifted out to the left.
  268. // [qed]
  269. if (C.getBitWidth() != A.getBitWidth()) {
  270. ErrorMSBs = (unsigned)-1;
  271. return *this;
  272. }
  273. // Multiplying by one is a no-op.
  274. if (C.isOne()) {
  275. return *this;
  276. }
  277. // Multiplying by zero removes the coefficient B and defines all bits.
  278. if (C.isZero()) {
  279. ErrorMSBs = 0;
  280. deleteB();
  281. }
  282. // See Proof(2): Trailing zero bits indicate a left shift. This removes
  283. // leading bits from the result even if they are undefined.
  284. decErrorMSBs(C.countTrailingZeros());
  285. A *= C;
  286. pushBOperation(Mul, C);
  287. return *this;
  288. }
  289. /// Apply a logical shift right on the polynomial
  290. Polynomial &lshr(const APInt &C) {
  291. // Theorem(1): (B + A + E*2^(n-e)) >> 1 => (B >> 1) + (A >> 1) + E'*2^(n-e')
  292. // where
  293. // e' = e + 1,
  294. // E is a e-bit number,
  295. // E' is a e'-bit number,
  296. // holds under the following precondition:
  297. // pre(1): A % 2 = 0
  298. // pre(2): e < n, (see Theorem(2) for the trivial case with e=n)
  299. // where >> expresses a logical shift to the right, with adding zeros.
  300. //
  301. // We need to show that for every, E there is a E'
  302. //
  303. // B = b_h * 2^(n-1) + b_m * 2 + b_l
  304. // A = a_h * 2^(n-1) + a_m * 2 (pre(1))
  305. //
  306. // where a_h, b_h, b_l are single bits, and a_m, b_m are (n-2) bit numbers
  307. //
  308. // Let X = (B + A + E*2^(n-e)) >> 1
  309. // Let Y = (B >> 1) + (A >> 1) + E*2^(n-e) >> 1
  310. //
  311. // X = [B + A + E*2^(n-e)] >> 1 =
  312. // = [ b_h * 2^(n-1) + b_m * 2 + b_l +
  313. // + a_h * 2^(n-1) + a_m * 2 +
  314. // + E * 2^(n-e) ] >> 1 =
  315. //
  316. // The sum is built by putting the overflow of [a_m + b+n] into the term
  317. // 2^(n-1). As there are no more bits beyond 2^(n-1) the overflow within
  318. // this bit is discarded. This is expressed by % 2.
  319. //
  320. // The bit in position 0 cannot overflow into the term (b_m + a_m).
  321. //
  322. // = [ ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-1) +
  323. // + ((b_m + a_m) % 2^(n-2)) * 2 +
  324. // + b_l + E * 2^(n-e) ] >> 1 =
  325. //
  326. // The shift is computed by dividing the terms by 2 and by cutting off
  327. // b_l.
  328. //
  329. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  330. // + ((b_m + a_m) % 2^(n-2)) +
  331. // + E * 2^(n-(e+1)) =
  332. //
  333. // by the definition in the Theorem e+1 = e'
  334. //
  335. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  336. // + ((b_m + a_m) % 2^(n-2)) +
  337. // + E * 2^(n-e') =
  338. //
  339. // Compute Y by applying distributivity first
  340. //
  341. // Y = (B >> 1) + (A >> 1) + E*2^(n-e') =
  342. // = (b_h * 2^(n-1) + b_m * 2 + b_l) >> 1 +
  343. // + (a_h * 2^(n-1) + a_m * 2) >> 1 +
  344. // + E * 2^(n-e) >> 1 =
  345. //
  346. // Again, the shift is computed by dividing the terms by 2 and by cutting
  347. // off b_l.
  348. //
  349. // = b_h * 2^(n-2) + b_m +
  350. // + a_h * 2^(n-2) + a_m +
  351. // + E * 2^(n-(e+1)) =
  352. //
  353. // Again, the sum is built by putting the overflow of [a_m + b+n] into
  354. // the term 2^(n-1). But this time there is room for a second bit in the
  355. // term 2^(n-2) we add this bit to a new term and denote it o_h in a
  356. // second step.
  357. //
  358. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] >> 1) * 2^(n-1) +
  359. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  360. // + ((b_m + a_m) % 2^(n-2)) +
  361. // + E * 2^(n-(e+1)) =
  362. //
  363. // Let o_h = [b_h + a_h + (b_m + a_m) >> (n-2)] >> 1
  364. // Further replace e+1 by e'.
  365. //
  366. // = o_h * 2^(n-1) +
  367. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  368. // + ((b_m + a_m) % 2^(n-2)) +
  369. // + E * 2^(n-e') =
  370. //
  371. // Move o_h into the error term and construct E'. To ensure that there is
  372. // no 2^x with negative x, this step requires pre(2) (e < n).
  373. //
  374. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  375. // + ((b_m + a_m) % 2^(n-2)) +
  376. // + o_h * 2^(e'-1) * 2^(n-e') + | pre(2), move 2^(e'-1)
  377. // | out of the old exponent
  378. // + E * 2^(n-e') =
  379. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  380. // + ((b_m + a_m) % 2^(n-2)) +
  381. // + [o_h * 2^(e'-1) + E] * 2^(n-e') + | move 2^(e'-1) out of
  382. // | the old exponent
  383. //
  384. // Let E' = o_h * 2^(e'-1) + E
  385. //
  386. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  387. // + ((b_m + a_m) % 2^(n-2)) +
  388. // + E' * 2^(n-e')
  389. //
  390. // Because X and Y are distinct only in there error terms and E' can be
  391. // constructed as shown the theorem holds.
  392. // [qed]
  393. //
  394. // For completeness in case of the case e=n it is also required to show that
  395. // distributivity can be applied.
  396. //
  397. // In this case Theorem(1) transforms to (the pre-condition on A can also be
  398. // dropped)
  399. //
  400. // Theorem(2): (B + A + E) >> 1 => (B >> 1) + (A >> 1) + E'
  401. // where
  402. // A, B, E, E' are two's complement numbers with the same bit
  403. // width
  404. //
  405. // Let A + B + E = X
  406. // Let (B >> 1) + (A >> 1) = Y
  407. //
  408. // Therefore we need to show that for every X and Y there is an E' which
  409. // makes the equation
  410. //
  411. // X = Y + E'
  412. //
  413. // hold. This is trivially the case for E' = X - Y.
  414. //
  415. // [qed]
  416. //
  417. // Remark: Distributing lshr with and arbitrary number n can be expressed as
  418. // ((((B + A) lshr 1) lshr 1) ... ) {n times}.
  419. // This construction induces n additional error bits at the left.
  420. if (C.getBitWidth() != A.getBitWidth()) {
  421. ErrorMSBs = (unsigned)-1;
  422. return *this;
  423. }
  424. if (C.isZero())
  425. return *this;
  426. // Test if the result will be zero
  427. unsigned shiftAmt = C.getZExtValue();
  428. if (shiftAmt >= C.getBitWidth())
  429. return mul(APInt(C.getBitWidth(), 0));
  430. // The proof that shiftAmt LSBs are zero for at least one summand is only
  431. // possible for the constant number.
  432. //
  433. // If this can be proven add shiftAmt to the error counter
  434. // `ErrorMSBs`. Otherwise set all bits as undefined.
  435. if (A.countTrailingZeros() < shiftAmt)
  436. ErrorMSBs = A.getBitWidth();
  437. else
  438. incErrorMSBs(shiftAmt);
  439. // Apply the operation.
  440. pushBOperation(LShr, C);
  441. A = A.lshr(shiftAmt);
  442. return *this;
  443. }
  444. /// Apply a sign-extend or truncate operation on the polynomial.
  445. Polynomial &sextOrTrunc(unsigned n) {
  446. if (n < A.getBitWidth()) {
  447. // Truncate: Clearly undefined Bits on the MSB side are removed
  448. // if there are any.
  449. decErrorMSBs(A.getBitWidth() - n);
  450. A = A.trunc(n);
  451. pushBOperation(Trunc, APInt(sizeof(n) * 8, n));
  452. }
  453. if (n > A.getBitWidth()) {
  454. // Extend: Clearly extending first and adding later is different
  455. // to adding first and extending later in all extended bits.
  456. incErrorMSBs(n - A.getBitWidth());
  457. A = A.sext(n);
  458. pushBOperation(SExt, APInt(sizeof(n) * 8, n));
  459. }
  460. return *this;
  461. }
  462. /// Test if there is a coefficient B.
  463. bool isFirstOrder() const { return V != nullptr; }
  464. /// Test coefficient B of two Polynomials are equal.
  465. bool isCompatibleTo(const Polynomial &o) const {
  466. // The polynomial use different bit width.
  467. if (A.getBitWidth() != o.A.getBitWidth())
  468. return false;
  469. // If neither Polynomial has the Coefficient B.
  470. if (!isFirstOrder() && !o.isFirstOrder())
  471. return true;
  472. // The index variable is different.
  473. if (V != o.V)
  474. return false;
  475. // Check the operations.
  476. if (B.size() != o.B.size())
  477. return false;
  478. auto ob = o.B.begin();
  479. for (auto &b : B) {
  480. if (b != *ob)
  481. return false;
  482. ob++;
  483. }
  484. return true;
  485. }
  486. /// Subtract two polynomials, return an undefined polynomial if
  487. /// subtraction is not possible.
  488. Polynomial operator-(const Polynomial &o) const {
  489. // Return an undefined polynomial if incompatible.
  490. if (!isCompatibleTo(o))
  491. return Polynomial();
  492. // If the polynomials are compatible (meaning they have the same
  493. // coefficient on B), B is eliminated. Thus a polynomial solely
  494. // containing A is returned
  495. return Polynomial(A - o.A, std::max(ErrorMSBs, o.ErrorMSBs));
  496. }
  497. /// Subtract a constant from a polynomial,
  498. Polynomial operator-(uint64_t C) const {
  499. Polynomial Result(*this);
  500. Result.A -= C;
  501. return Result;
  502. }
  503. /// Add a constant to a polynomial,
  504. Polynomial operator+(uint64_t C) const {
  505. Polynomial Result(*this);
  506. Result.A += C;
  507. return Result;
  508. }
  509. /// Returns true if it can be proven that two Polynomials are equal.
  510. bool isProvenEqualTo(const Polynomial &o) {
  511. // Subtract both polynomials and test if it is fully defined and zero.
  512. Polynomial r = *this - o;
  513. return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isZero());
  514. }
  515. /// Print the polynomial into a stream.
  516. void print(raw_ostream &OS) const {
  517. OS << "[{#ErrBits:" << ErrorMSBs << "} ";
  518. if (V) {
  519. for (auto b : B)
  520. OS << "(";
  521. OS << "(" << *V << ") ";
  522. for (auto b : B) {
  523. switch (b.first) {
  524. case LShr:
  525. OS << "LShr ";
  526. break;
  527. case Mul:
  528. OS << "Mul ";
  529. break;
  530. case SExt:
  531. OS << "SExt ";
  532. break;
  533. case Trunc:
  534. OS << "Trunc ";
  535. break;
  536. }
  537. OS << b.second << ") ";
  538. }
  539. }
  540. OS << "+ " << A << "]";
  541. }
  542. private:
  543. void deleteB() {
  544. V = nullptr;
  545. B.clear();
  546. }
  547. void pushBOperation(const BOps Op, const APInt &C) {
  548. if (isFirstOrder()) {
  549. B.push_back(std::make_pair(Op, C));
  550. return;
  551. }
  552. }
  553. };
  554. #ifndef NDEBUG
  555. static raw_ostream &operator<<(raw_ostream &OS, const Polynomial &S) {
  556. S.print(OS);
  557. return OS;
  558. }
  559. #endif
  560. /// VectorInfo stores abstract, per-element information about a vector:
  561. ///
  562. ///
  563. /// 1) the memory address loaded into the element as a Polynomial,
  564. /// 2) a set of load instructions necessary to construct the vector,
  565. /// 3) a set of all other instructions that are necessary to create the vector and
  566. /// 4) a pointer value that can be used as relative base for all elements.
  567. struct VectorInfo {
  568. private:
  569. VectorInfo(const VectorInfo &c) : VTy(c.VTy) {
  570. llvm_unreachable(
  571. "Copying VectorInfo is neither implemented nor necessary,");
  572. }
  573. public:
  574. /// Information of a Vector Element
  575. struct ElementInfo {
  576. /// Offset Polynomial.
  577. Polynomial Ofs;
  578. /// The Load Instruction used to Load the entry. LI is null if the pointer
  579. /// of the load instruction does not point on to the entry
  580. LoadInst *LI;
  581. ElementInfo(Polynomial Offset = Polynomial(), LoadInst *LI = nullptr)
  582. : Ofs(Offset), LI(LI) {}
  583. };
  584. /// Basic-block the load instructions are within
  585. BasicBlock *BB = nullptr;
  586. /// Pointer value of all participation load instructions
  587. Value *PV = nullptr;
  588. /// Participating load instructions
  589. std::set<LoadInst *> LIs;
  590. /// Participating instructions
  591. std::set<Instruction *> Is;
  592. /// Final shuffle-vector instruction
  593. ShuffleVectorInst *SVI = nullptr;
  594. /// Information of the offset for each vector element
  595. ElementInfo *EI;
  596. /// Vector Type
  597. FixedVectorType *const VTy;
  598. VectorInfo(FixedVectorType *VTy) : VTy(VTy) {
  599. EI = new ElementInfo[VTy->getNumElements()];
  600. }
  601. virtual ~VectorInfo() { delete[] EI; }
  602. unsigned getDimension() const { return VTy->getNumElements(); }
  603. /// Test if the VectorInfo can be part of an interleaved load with the
  604. /// specified factor.
  605. ///
  606. /// \param Factor of the interleave
  607. /// \param DL Targets Datalayout
  608. ///
  609. /// \returns true if this is possible and false if not
  610. bool isInterleaved(unsigned Factor, const DataLayout &DL) const {
  611. unsigned Size = DL.getTypeAllocSize(VTy->getElementType());
  612. for (unsigned i = 1; i < getDimension(); i++) {
  613. if (!EI[i].Ofs.isProvenEqualTo(EI[0].Ofs + i * Factor * Size)) {
  614. return false;
  615. }
  616. }
  617. return true;
  618. }
  619. /// Recursively computes the vector information stored in V.
  620. ///
  621. /// This function delegates the work to specialized implementations
  622. ///
  623. /// \param V Value to operate on
  624. /// \param Result Result of the computation
  625. ///
  626. /// \returns false if no sensible information can be gathered.
  627. static bool compute(Value *V, VectorInfo &Result, const DataLayout &DL) {
  628. ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  629. if (SVI)
  630. return computeFromSVI(SVI, Result, DL);
  631. LoadInst *LI = dyn_cast<LoadInst>(V);
  632. if (LI)
  633. return computeFromLI(LI, Result, DL);
  634. BitCastInst *BCI = dyn_cast<BitCastInst>(V);
  635. if (BCI)
  636. return computeFromBCI(BCI, Result, DL);
  637. return false;
  638. }
  639. /// BitCastInst specialization to compute the vector information.
  640. ///
  641. /// \param BCI BitCastInst to operate on
  642. /// \param Result Result of the computation
  643. ///
  644. /// \returns false if no sensible information can be gathered.
  645. static bool computeFromBCI(BitCastInst *BCI, VectorInfo &Result,
  646. const DataLayout &DL) {
  647. Instruction *Op = dyn_cast<Instruction>(BCI->getOperand(0));
  648. if (!Op)
  649. return false;
  650. FixedVectorType *VTy = dyn_cast<FixedVectorType>(Op->getType());
  651. if (!VTy)
  652. return false;
  653. // We can only cast from large to smaller vectors
  654. if (Result.VTy->getNumElements() % VTy->getNumElements())
  655. return false;
  656. unsigned Factor = Result.VTy->getNumElements() / VTy->getNumElements();
  657. unsigned NewSize = DL.getTypeAllocSize(Result.VTy->getElementType());
  658. unsigned OldSize = DL.getTypeAllocSize(VTy->getElementType());
  659. if (NewSize * Factor != OldSize)
  660. return false;
  661. VectorInfo Old(VTy);
  662. if (!compute(Op, Old, DL))
  663. return false;
  664. for (unsigned i = 0; i < Result.VTy->getNumElements(); i += Factor) {
  665. for (unsigned j = 0; j < Factor; j++) {
  666. Result.EI[i + j] =
  667. ElementInfo(Old.EI[i / Factor].Ofs + j * NewSize,
  668. j == 0 ? Old.EI[i / Factor].LI : nullptr);
  669. }
  670. }
  671. Result.BB = Old.BB;
  672. Result.PV = Old.PV;
  673. Result.LIs.insert(Old.LIs.begin(), Old.LIs.end());
  674. Result.Is.insert(Old.Is.begin(), Old.Is.end());
  675. Result.Is.insert(BCI);
  676. Result.SVI = nullptr;
  677. return true;
  678. }
  679. /// ShuffleVectorInst specialization to compute vector information.
  680. ///
  681. /// \param SVI ShuffleVectorInst to operate on
  682. /// \param Result Result of the computation
  683. ///
  684. /// Compute the left and the right side vector information and merge them by
  685. /// applying the shuffle operation. This function also ensures that the left
  686. /// and right side have compatible loads. This means that all loads are with
  687. /// in the same basic block and are based on the same pointer.
  688. ///
  689. /// \returns false if no sensible information can be gathered.
static bool computeFromSVI(ShuffleVectorInst *SVI, VectorInfo &Result,
                           const DataLayout &DL) {
  // Both shuffle operands have the same type; use it for the per-side infos.
  FixedVectorType *ArgTy =
      cast<FixedVectorType>(SVI->getOperand(0)->getType());

  // Compute the left hand vector information. A failed computation is
  // recorded by clearing BB; the side may still be usable if the other
  // side succeeds (its lanes then become "unknown" ElementInfo below).
  VectorInfo LHS(ArgTy);
  if (!compute(SVI->getOperand(0), LHS, DL))
    LHS.BB = nullptr;

  // Compute the right hand vector information.
  VectorInfo RHS(ArgTy);
  if (!compute(SVI->getOperand(1), RHS, DL))
    RHS.BB = nullptr;

  // Neither operand produced sensible results?
  if (!LHS.BB && !RHS.BB)
    return false;
  // Only RHS produced sensible results?
  else if (!LHS.BB) {
    Result.BB = RHS.BB;
    Result.PV = RHS.PV;
  }
  // Only LHS produced sensible results?
  else if (!RHS.BB) {
    Result.BB = LHS.BB;
    Result.PV = LHS.PV;
  }
  // Both operands produced sensible results? They are only compatible when
  // all loads live in the same basic block and share one base pointer.
  else if ((LHS.BB == RHS.BB) && (LHS.PV == RHS.PV)) {
    Result.BB = LHS.BB;
    Result.PV = LHS.PV;
  }
  // Both operands produced sensible results but they are incompatible.
  else {
    return false;
  }

  // Merge and apply the operation on the offset information. Only sides
  // that produced valid information contribute their load/instruction sets.
  if (LHS.BB) {
    Result.LIs.insert(LHS.LIs.begin(), LHS.LIs.end());
    Result.Is.insert(LHS.Is.begin(), LHS.Is.end());
  }
  if (RHS.BB) {
    Result.LIs.insert(RHS.LIs.begin(), RHS.LIs.end());
    Result.Is.insert(RHS.Is.begin(), RHS.Is.end());
  }
  Result.Is.insert(SVI);
  Result.SVI = SVI;

  // Apply the shuffle mask: result lane j takes lane i of the concatenated
  // (LHS, RHS) input. Negative mask entries (undef) and lanes from a side
  // without valid information get an empty ElementInfo.
  int j = 0;
  for (int i : SVI->getShuffleMask()) {
    assert((i < 2 * (signed)ArgTy->getNumElements()) &&
           "Invalid ShuffleVectorInst (index out of bounds)");

    if (i < 0)
      Result.EI[j] = ElementInfo();
    else if (i < (signed)ArgTy->getNumElements()) {
      if (LHS.BB)
        Result.EI[j] = LHS.EI[i];
      else
        Result.EI[j] = ElementInfo();
    } else {
      if (RHS.BB)
        Result.EI[j] = RHS.EI[i - ArgTy->getNumElements()];
      else
        Result.EI[j] = ElementInfo();
    }

    j++;
  }

  return true;
}
  756. /// LoadInst specialization to compute vector information.
  757. ///
  758. /// This function also acts as abort condition to the recursion.
  759. ///
  760. /// \param LI LoadInst to operate on
  761. /// \param Result Result of the computation
  762. ///
  763. /// \returns false if no sensible information can be gathered.
  764. static bool computeFromLI(LoadInst *LI, VectorInfo &Result,
  765. const DataLayout &DL) {
  766. Value *BasePtr;
  767. Polynomial Offset;
  768. if (LI->isVolatile())
  769. return false;
  770. if (LI->isAtomic())
  771. return false;
  772. // Get the base polynomial
  773. computePolynomialFromPointer(*LI->getPointerOperand(), Offset, BasePtr, DL);
  774. Result.BB = LI->getParent();
  775. Result.PV = BasePtr;
  776. Result.LIs.insert(LI);
  777. Result.Is.insert(LI);
  778. for (unsigned i = 0; i < Result.getDimension(); i++) {
  779. Value *Idx[2] = {
  780. ConstantInt::get(Type::getInt32Ty(LI->getContext()), 0),
  781. ConstantInt::get(Type::getInt32Ty(LI->getContext()), i),
  782. };
  783. int64_t Ofs = DL.getIndexedOffsetInType(Result.VTy, makeArrayRef(Idx, 2));
  784. Result.EI[i] = ElementInfo(Offset + Ofs, i == 0 ? LI : nullptr);
  785. }
  786. return true;
  787. }
  788. /// Recursively compute polynomial of a value.
  789. ///
  790. /// \param BO Input binary operation
  791. /// \param Result Result polynomial
  792. static void computePolynomialBinOp(BinaryOperator &BO, Polynomial &Result) {
  793. Value *LHS = BO.getOperand(0);
  794. Value *RHS = BO.getOperand(1);
  795. // Find the RHS Constant if any
  796. ConstantInt *C = dyn_cast<ConstantInt>(RHS);
  797. if ((!C) && BO.isCommutative()) {
  798. C = dyn_cast<ConstantInt>(LHS);
  799. if (C)
  800. std::swap(LHS, RHS);
  801. }
  802. switch (BO.getOpcode()) {
  803. case Instruction::Add:
  804. if (!C)
  805. break;
  806. computePolynomial(*LHS, Result);
  807. Result.add(C->getValue());
  808. return;
  809. case Instruction::LShr:
  810. if (!C)
  811. break;
  812. computePolynomial(*LHS, Result);
  813. Result.lshr(C->getValue());
  814. return;
  815. default:
  816. break;
  817. }
  818. Result = Polynomial(&BO);
  819. }
  820. /// Recursively compute polynomial of a value
  821. ///
  822. /// \param V input value
  823. /// \param Result result polynomial
  824. static void computePolynomial(Value &V, Polynomial &Result) {
  825. if (auto *BO = dyn_cast<BinaryOperator>(&V))
  826. computePolynomialBinOp(*BO, Result);
  827. else
  828. Result = Polynomial(&V);
  829. }
  830. /// Compute the Polynomial representation of a Pointer type.
  831. ///
  832. /// \param Ptr input pointer value
  833. /// \param Result result polynomial
  834. /// \param BasePtr pointer the polynomial is based on
  835. /// \param DL Datalayout of the target machine
  836. static void computePolynomialFromPointer(Value &Ptr, Polynomial &Result,
  837. Value *&BasePtr,
  838. const DataLayout &DL) {
  839. // Not a pointer type? Return an undefined polynomial
  840. PointerType *PtrTy = dyn_cast<PointerType>(Ptr.getType());
  841. if (!PtrTy) {
  842. Result = Polynomial();
  843. BasePtr = nullptr;
  844. return;
  845. }
  846. unsigned PointerBits =
  847. DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace());
  848. /// Skip pointer casts. Return Zero polynomial otherwise
  849. if (isa<CastInst>(&Ptr)) {
  850. CastInst &CI = *cast<CastInst>(&Ptr);
  851. switch (CI.getOpcode()) {
  852. case Instruction::BitCast:
  853. computePolynomialFromPointer(*CI.getOperand(0), Result, BasePtr, DL);
  854. break;
  855. default:
  856. BasePtr = &Ptr;
  857. Polynomial(PointerBits, 0);
  858. break;
  859. }
  860. }
  861. /// Resolve GetElementPtrInst.
  862. else if (isa<GetElementPtrInst>(&Ptr)) {
  863. GetElementPtrInst &GEP = *cast<GetElementPtrInst>(&Ptr);
  864. APInt BaseOffset(PointerBits, 0);
  865. // Check if we can compute the Offset with accumulateConstantOffset
  866. if (GEP.accumulateConstantOffset(DL, BaseOffset)) {
  867. Result = Polynomial(BaseOffset);
  868. BasePtr = GEP.getPointerOperand();
  869. return;
  870. } else {
  871. // Otherwise we allow that the last index operand of the GEP is
  872. // non-constant.
  873. unsigned idxOperand, e;
  874. SmallVector<Value *, 4> Indices;
  875. for (idxOperand = 1, e = GEP.getNumOperands(); idxOperand < e;
  876. idxOperand++) {
  877. ConstantInt *IDX = dyn_cast<ConstantInt>(GEP.getOperand(idxOperand));
  878. if (!IDX)
  879. break;
  880. Indices.push_back(IDX);
  881. }
  882. // It must also be the last operand.
  883. if (idxOperand + 1 != e) {
  884. Result = Polynomial();
  885. BasePtr = nullptr;
  886. return;
  887. }
  888. // Compute the polynomial of the index operand.
  889. computePolynomial(*GEP.getOperand(idxOperand), Result);
  890. // Compute base offset from zero based index, excluding the last
  891. // variable operand.
  892. BaseOffset =
  893. DL.getIndexedOffsetInType(GEP.getSourceElementType(), Indices);
  894. // Apply the operations of GEP to the polynomial.
  895. unsigned ResultSize = DL.getTypeAllocSize(GEP.getResultElementType());
  896. Result.sextOrTrunc(PointerBits);
  897. Result.mul(APInt(PointerBits, ResultSize));
  898. Result.add(BaseOffset);
  899. BasePtr = GEP.getPointerOperand();
  900. }
  901. }
  902. // All other instructions are handled by using the value as base pointer and
  903. // a zero polynomial.
  904. else {
  905. BasePtr = &Ptr;
  906. Polynomial(DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace()), 0);
  907. }
  908. }
  909. #ifndef NDEBUG
  910. void print(raw_ostream &OS) const {
  911. if (PV)
  912. OS << *PV;
  913. else
  914. OS << "(none)";
  915. OS << " + ";
  916. for (unsigned i = 0; i < getDimension(); i++)
  917. OS << ((i == 0) ? "[" : ", ") << EI[i].Ofs;
  918. OS << "]";
  919. }
  920. #endif
  921. };
  922. } // anonymous namespace
bool InterleavedLoadCombineImpl::findPattern(
    std::list<VectorInfo> &Candidates, std::list<VectorInfo> &InterleavedLoad,
    unsigned Factor, const DataLayout &DL) {
  // Try every candidate as the "first line" (lane offset 0) of an
  // interleaved group of Factor vectors.
  for (auto C0 = Candidates.begin(), E0 = Candidates.end(); C0 != E0; ++C0) {
    unsigned i;
    // Try to find an interleaved load using the front of Worklist as first line
    unsigned Size = DL.getTypeAllocSize(C0->VTy->getElementType());

    // List containing iterators pointing to the VectorInfos of the candidates
    std::vector<std::list<VectorInfo>::iterator> Res(Factor, Candidates.end());

    for (auto C = Candidates.begin(), E = Candidates.end(); C != E; C++) {
      // A group member must have the same vector type, basic block and base
      // pointer as the first line.
      if (C->VTy != C0->VTy)
        continue;
      if (C->BB != C0->BB)
        continue;
      if (C->PV != C0->PV)
        continue;

      // Check the current value matches any of factor - 1 remaining lines
      // (its first-lane offset is exactly i element sizes past C0's).
      for (i = 1; i < Factor; i++) {
        if (C->EI[0].Ofs.isProvenEqualTo(C0->EI[0].Ofs + i * Size)) {
          Res[i] = C;
        }
      }

      // Stop scanning once every slot 1 .. Factor-1 has been filled.
      for (i = 1; i < Factor; i++) {
        if (Res[i] == Candidates.end())
          break;
      }

      if (i == Factor) {
        Res[0] = C0;
        break;
      }
    }

    if (Res[0] != Candidates.end()) {
      // Move the result into the output. Splicing slot by slot keeps the
      // lines in offset order (0 .. Factor-1), which combine() relies on.
      for (unsigned i = 0; i < Factor; i++) {
        InterleavedLoad.splice(InterleavedLoad.end(), Candidates, Res[i]);
      }
      return true;
    }
  }
  return false;
}
  964. LoadInst *
  965. InterleavedLoadCombineImpl::findFirstLoad(const std::set<LoadInst *> &LIs) {
  966. assert(!LIs.empty() && "No load instructions given.");
  967. // All LIs are within the same BB. Select the first for a reference.
  968. BasicBlock *BB = (*LIs.begin())->getParent();
  969. BasicBlock::iterator FLI = llvm::find_if(
  970. *BB, [&LIs](Instruction &I) -> bool { return is_contained(LIs, &I); });
  971. assert(FLI != BB->end());
  972. return cast<LoadInst>(FLI);
  973. }
bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
                                         OptimizationRemarkEmitter &ORE) {
  LLVM_DEBUG(dbgs() << "Checking interleaved load\n");

  // The insertion point is the LoadInst which loads the first values. The
  // following tests are used to proof that the combined load can be inserted
  // just before InsertionPoint.
  LoadInst *InsertionPoint = InterleavedLoad.front().EI[0].LI;

  // Test if the offset is computed
  if (!InsertionPoint)
    return false;

  // Loads to be folded away, instructions that must end up dead, and the
  // final shufflevectors whose uses will be rewritten.
  std::set<LoadInst *> LIs;
  std::set<Instruction *> Is;
  std::set<Instruction *> SVIs;

  // NOTE(review): the second local deliberately shadows the
  // llvm::InstructionCost type name; it accumulates the cost of the code
  // being replaced.
  InstructionCost InterleavedCost;
  InstructionCost InstructionCost = 0;
  const TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency;

  // Get the interleave factor
  unsigned Factor = InterleavedLoad.size();

  // Merge all input sets used in analysis
  for (auto &VI : InterleavedLoad) {
    // Generate a set of all load instructions to be combined
    LIs.insert(VI.LIs.begin(), VI.LIs.end());

    // Generate a set of all instructions taking part in load
    // interleaved. This list excludes the instructions necessary for the
    // polynomial construction.
    Is.insert(VI.Is.begin(), VI.Is.end());

    // Generate the set of the final ShuffleVectorInst.
    SVIs.insert(VI.SVI);
  }

  // There is nothing to combine.
  if (LIs.size() < 2)
    return false;

  // Test if all participating instruction will be dead after the
  // transformation. If intermediate results are used, no performance gain can
  // be expected. Also sum the cost of the Instructions beeing left dead.
  for (auto &I : Is) {
    // Compute the old cost
    InstructionCost += TTI.getInstructionCost(I, CostKind);

    // The final SVIs are allowed not to be dead, all uses will be replaced
    if (SVIs.find(I) != SVIs.end())
      continue;

    // If there are users outside the set to be eliminated, we abort the
    // transformation. No gain can be expected.
    for (auto *U : I->users()) {
      if (Is.find(dyn_cast<Instruction>(U)) == Is.end())
        return false;
    }
  }

  // We need to have a valid cost in order to proceed.
  if (!InstructionCost.isValid())
    return false;

  // We know that all LoadInst are within the same BB. This guarantees that
  // either everything or nothing is loaded.
  LoadInst *First = findFirstLoad(LIs);

  // To be safe that the loads can be combined, iterate over all loads and test
  // that the corresponding defining access dominates first LI. This guarantees
  // that there are no aliasing stores in between the loads.
  auto FMA = MSSA.getMemoryAccess(First);
  for (auto LI : LIs) {
    auto MADef = MSSA.getMemoryAccess(LI)->getDefiningAccess();
    if (!MSSA.dominates(MADef, FMA))
      return false;
  }
  assert(!LIs.empty() && "There are no LoadInst to combine");

  // It is necessary that insertion point dominates all final ShuffleVectorInst.
  for (auto &VI : InterleavedLoad) {
    if (!DT.dominates(InsertionPoint, VI.SVI))
      return false;
  }

  // All checks are done. Add instructions detectable by InterleavedAccessPass
  // The old instruction will are left dead.
  IRBuilder<> Builder(InsertionPoint);
  Type *ETy = InterleavedLoad.front().SVI->getType()->getElementType();
  unsigned ElementsPerSVI =
      cast<FixedVectorType>(InterleavedLoad.front().SVI->getType())
          ->getNumElements();
  FixedVectorType *ILTy = FixedVectorType::get(ETy, Factor * ElementsPerSVI);

  // Query the target cost of one wide interleaved load; only transform when
  // it is strictly cheaper than the code it replaces.
  SmallVector<unsigned, 4> Indices;
  for (unsigned i = 0; i < Factor; i++)
    Indices.push_back(i);
  InterleavedCost = TTI.getInterleavedMemoryOpCost(
      Instruction::Load, ILTy, Factor, Indices, InsertionPoint->getAlign(),
      InsertionPoint->getPointerAddressSpace(), CostKind);
  if (InterleavedCost >= InstructionCost) {
    return false;
  }

  // Create a pointer cast for the wide load.
  auto CI = Builder.CreatePointerCast(InsertionPoint->getOperand(0),
                                      ILTy->getPointerTo(),
                                      "interleaved.wide.ptrcast");

  // Create the wide load and update the MemorySSA.
  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlign(),
                                      "interleaved.wide.load");
  auto MSSAU = MemorySSAUpdater(&MSSA);
  MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
      LI, nullptr, MSSA.getMemoryAccess(InsertionPoint)));
  MSSAU.insertUse(MSSALoad);

  // Create the final SVIs and replace all uses.
  int i = 0;
  for (auto &VI : InterleavedLoad) {
    // Line i de-interleaves lanes i, i+Factor, i+2*Factor, ... of the wide
    // load.
    SmallVector<int, 4> Mask;
    for (unsigned j = 0; j < ElementsPerSVI; j++)
      Mask.push_back(i + j * Factor);

    Builder.SetInsertPoint(VI.SVI);
    auto SVI = Builder.CreateShuffleVector(LI, Mask, "interleaved.shuffle");
    VI.SVI->replaceAllUsesWith(SVI);
    i++;
  }

  NumInterleavedLoadCombine++;
  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Combined Interleaved Load", LI)
           << "Load interleaved combined with factor "
           << ore::NV("Factor", Factor);
  });

  return true;
}
  1090. bool InterleavedLoadCombineImpl::run() {
  1091. OptimizationRemarkEmitter ORE(&F);
  1092. bool changed = false;
  1093. unsigned MaxFactor = TLI.getMaxSupportedInterleaveFactor();
  1094. auto &DL = F.getParent()->getDataLayout();
  1095. // Start with the highest factor to avoid combining and recombining.
  1096. for (unsigned Factor = MaxFactor; Factor >= 2; Factor--) {
  1097. std::list<VectorInfo> Candidates;
  1098. for (BasicBlock &BB : F) {
  1099. for (Instruction &I : BB) {
  1100. if (auto SVI = dyn_cast<ShuffleVectorInst>(&I)) {
  1101. // We don't support scalable vectors in this pass.
  1102. if (isa<ScalableVectorType>(SVI->getType()))
  1103. continue;
  1104. Candidates.emplace_back(cast<FixedVectorType>(SVI->getType()));
  1105. if (!VectorInfo::computeFromSVI(SVI, Candidates.back(), DL)) {
  1106. Candidates.pop_back();
  1107. continue;
  1108. }
  1109. if (!Candidates.back().isInterleaved(Factor, DL)) {
  1110. Candidates.pop_back();
  1111. }
  1112. }
  1113. }
  1114. }
  1115. std::list<VectorInfo> InterleavedLoad;
  1116. while (findPattern(Candidates, InterleavedLoad, Factor, DL)) {
  1117. if (combine(InterleavedLoad, ORE)) {
  1118. changed = true;
  1119. } else {
  1120. // Remove the first element of the Interleaved Load but put the others
  1121. // back on the list and continue searching
  1122. Candidates.splice(Candidates.begin(), InterleavedLoad,
  1123. std::next(InterleavedLoad.begin()),
  1124. InterleavedLoad.end());
  1125. }
  1126. InterleavedLoad.clear();
  1127. }
  1128. }
  1129. return changed;
  1130. }
  1131. namespace {
  1132. /// This pass combines interleaved loads into a pattern detectable by
  1133. /// InterleavedAccessPass.
  1134. struct InterleavedLoadCombine : public FunctionPass {
  1135. static char ID;
  1136. InterleavedLoadCombine() : FunctionPass(ID) {
  1137. initializeInterleavedLoadCombinePass(*PassRegistry::getPassRegistry());
  1138. }
  1139. StringRef getPassName() const override {
  1140. return "Interleaved Load Combine Pass";
  1141. }
  1142. bool runOnFunction(Function &F) override {
  1143. if (DisableInterleavedLoadCombine)
  1144. return false;
  1145. auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  1146. if (!TPC)
  1147. return false;
  1148. LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName()
  1149. << "\n");
  1150. return InterleavedLoadCombineImpl(
  1151. F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
  1152. getAnalysis<MemorySSAWrapperPass>().getMSSA(),
  1153. TPC->getTM<TargetMachine>())
  1154. .run();
  1155. }
  1156. void getAnalysisUsage(AnalysisUsage &AU) const override {
  1157. AU.addRequired<MemorySSAWrapperPass>();
  1158. AU.addRequired<DominatorTreeWrapperPass>();
  1159. FunctionPass::getAnalysisUsage(AU);
  1160. }
  1161. private:
  1162. };
  1163. } // anonymous namespace
char InterleavedLoadCombine::ID = 0;

// Register the legacy pass and its analysis dependencies with the global
// PassRegistry.
INITIALIZE_PASS_BEGIN(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
  1175. FunctionPass *
  1176. llvm::createInterleavedLoadCombinePass() {
  1177. auto P = new InterleavedLoadCombine();
  1178. return P;
  1179. }