InterleavedLoadCombinePass.cpp 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361
  1. //===- InterleavedLoadCombine.cpp - Combine Interleaved Loads ---*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // \file
  10. //
  11. // This file defines the interleaved-load-combine pass. The pass searches for
  12. // ShuffleVectorInst instructions that execute interleaving loads. If a matching
  13. // pattern is found, it adds a combined load and further instructions in a
  14. // pattern that is detectable by InterleavedAccessPass. The old instructions are
  15. // left dead to be removed later. The pass is specifically designed to be
  16. // executed just before InterleavedAccessPass to find any left-over instances
  17. // that are not detected within former passes.
  18. //
  19. //===----------------------------------------------------------------------===//
  20. #include "llvm/ADT/Statistic.h"
  21. #include "llvm/Analysis/MemorySSA.h"
  22. #include "llvm/Analysis/MemorySSAUpdater.h"
  23. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  24. #include "llvm/Analysis/TargetTransformInfo.h"
  25. #include "llvm/CodeGen/Passes.h"
  26. #include "llvm/CodeGen/TargetLowering.h"
  27. #include "llvm/CodeGen/TargetPassConfig.h"
  28. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  29. #include "llvm/IR/DataLayout.h"
  30. #include "llvm/IR/Dominators.h"
  31. #include "llvm/IR/Function.h"
  32. #include "llvm/IR/IRBuilder.h"
  33. #include "llvm/IR/Instructions.h"
  34. #include "llvm/IR/Module.h"
  35. #include "llvm/InitializePasses.h"
  36. #include "llvm/Pass.h"
  37. #include "llvm/Support/Debug.h"
  38. #include "llvm/Support/ErrorHandling.h"
  39. #include "llvm/Support/raw_ostream.h"
  40. #include "llvm/Target/TargetMachine.h"
  41. #include <algorithm>
  42. #include <cassert>
  43. #include <list>
  44. using namespace llvm;
  45. #define DEBUG_TYPE "interleaved-load-combine"
  46. namespace {
/// Statistic counter for the number of interleaved loads that were combined.
STATISTIC(NumInterleavedLoadCombine, "Number of combined loads");

/// Command-line option to disable the pass entirely
/// (-disable-interleaved-load-combine).
static cl::opt<bool> DisableInterleavedLoadCombine(
    "disable-" DEBUG_TYPE, cl::init(false), cl::Hidden,
    cl::desc("Disable combining of interleaved loads"));
  53. struct VectorInfo;
struct InterleavedLoadCombineImpl {
public:
  /// \param F Function to transform.
  /// \param DT Dominator tree of \p F.
  /// \param MSSA MemorySSA analysis of \p F.
  /// \param TM Target machine providing lowering and cost information.
  InterleavedLoadCombineImpl(Function &F, DominatorTree &DT, MemorySSA &MSSA,
                             TargetMachine &TM)
      : F(F), DT(DT), MSSA(MSSA),
        TLI(*TM.getSubtargetImpl(F)->getTargetLowering()),
        TTI(TM.getTargetTransformInfo(F)) {}

  /// Scan the function for interleaved load candidates and execute the
  /// replacement if applicable.
  bool run();

private:
  /// Function this pass is working on
  Function &F;

  /// Dominator Tree Analysis
  DominatorTree &DT;

  /// Memory SSA analysis (used for memory dependence queries)
  MemorySSA &MSSA;

  /// Target Lowering Information
  const TargetLowering &TLI;

  /// Target Transform Information
  const TargetTransformInfo TTI;

  /// Find the instruction in set \p LIs that dominates all others, return
  /// nullptr if there is none.
  LoadInst *findFirstLoad(const std::set<LoadInst *> &LIs);

  /// Replace interleaved load candidates. It does additional
  /// analyses if this makes sense. Returns true on success and false
  /// if nothing has been changed.
  bool combine(std::list<VectorInfo> &InterleavedLoad,
               OptimizationRemarkEmitter &ORE);

  /// Given a set of VectorInfo containing candidates for a given interleave
  /// factor, find a set that represents a 'factor' interleaved load.
  bool findPattern(std::list<VectorInfo> &Candidates,
                   std::list<VectorInfo> &InterleavedLoad, unsigned Factor,
                   const DataLayout &DL);
}; // InterleavedLoadCombine
  89. /// First Order Polynomial on an n-Bit Integer Value
  90. ///
  91. /// Polynomial(Value) = Value * B + A + E*2^(n-e)
  92. ///
  93. /// A and B are the coefficients. E*2^(n-e) is an error within 'e' most
  94. /// significant bits. It is introduced if an exact computation cannot be proven
  95. /// (e.g. division by 2).
  96. ///
  97. /// As part of this optimization multiple loads will be combined. It is necessary
  98. /// to prove that loads are within some relative offset to each other. This
  99. /// class is used to prove relative offsets of values loaded from memory.
  100. ///
  101. /// Representing an integer in this form is sound since addition in two's
  102. /// complement is associative (trivial) and multiplication distributes over the
  103. /// addition (see Proof(1) in Polynomial::mul). Further, both operations
  104. /// commute.
  105. //
  106. // Example:
  107. // declare @fn(i64 %IDX, <4 x float>* %PTR) {
  108. // %Pa1 = add i64 %IDX, 2
  109. // %Pa2 = lshr i64 %Pa1, 1
  110. // %Pa3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pa2
  111. // %Va = load <4 x float>, <4 x float>* %Pa3
  112. //
  113. // %Pb1 = add i64 %IDX, 4
  114. // %Pb2 = lshr i64 %Pb1, 1
  115. // %Pb3 = getelementptr inbounds <4 x float>, <4 x float>* %PTR, i64 %Pb2
  116. // %Vb = load <4 x float>, <4 x float>* %Pb3
  117. // ... }
  118. //
  119. // The goal is to prove that two loads load consecutive addresses.
  120. //
  121. // In this case the polynomials are constructed by the following
  122. // steps.
  123. //
  124. // The number tag #e specifies the error bits.
  125. //
  126. // Pa_0 = %IDX #0
  127. // Pa_1 = %IDX + 2 #0 | add 2
  128. // Pa_2 = %IDX/2 + 1 #1 | lshr 1
  129. // Pa_3 = %IDX/2 + 1 #1 | GEP, step signext to i64
  130. // Pa_4 = (%IDX/2)*16 + 16 #0 | GEP, multiply index by sizeof(4) for floats
  131. // Pa_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
  132. //
  133. // Pb_0 = %IDX #0
  134. //  Pb_1 = %IDX + 4               #0 | add 4
  135. // Pb_2 = %IDX/2 + 2 #1 | lshr 1
  136. // Pb_3 = %IDX/2 + 2 #1 | GEP, step signext to i64
  137. // Pb_4 = (%IDX/2)*16 + 32 #0 | GEP, multiply index by sizeof(4) for floats
  138. //  Pb_5 = (%IDX/2)*16 + 32      #0 | GEP, add offset of leading components
  139. //
  140. // Pb_5 - Pa_5 = 16 #0 | subtract to get the offset
  141. //
  142. // Remark: %PTR is not maintained within this class. So in this instance the
  143. // offset of 16 can only be assumed if the pointers are equal.
  144. //
  145. class Polynomial {
  146. /// Operations on B
  147. enum BOps {
  148. LShr,
  149. Mul,
  150. SExt,
  151. Trunc,
  152. };
  153. /// Number of Error Bits e
  154. unsigned ErrorMSBs = (unsigned)-1;
  155. /// Value
  156. Value *V = nullptr;
  157. /// Coefficient B
  158. SmallVector<std::pair<BOps, APInt>, 4> B;
  159. /// Coefficient A
  160. APInt A;
  161. public:
  162. Polynomial(Value *V) : V(V) {
  163. IntegerType *Ty = dyn_cast<IntegerType>(V->getType());
  164. if (Ty) {
  165. ErrorMSBs = 0;
  166. this->V = V;
  167. A = APInt(Ty->getBitWidth(), 0);
  168. }
  169. }
  170. Polynomial(const APInt &A, unsigned ErrorMSBs = 0)
  171. : ErrorMSBs(ErrorMSBs), A(A) {}
  172. Polynomial(unsigned BitWidth, uint64_t A, unsigned ErrorMSBs = 0)
  173. : ErrorMSBs(ErrorMSBs), A(BitWidth, A) {}
  174. Polynomial() = default;
  175. /// Increment and clamp the number of undefined bits.
  176. void incErrorMSBs(unsigned amt) {
  177. if (ErrorMSBs == (unsigned)-1)
  178. return;
  179. ErrorMSBs += amt;
  180. if (ErrorMSBs > A.getBitWidth())
  181. ErrorMSBs = A.getBitWidth();
  182. }
  183. /// Decrement and clamp the number of undefined bits.
  184. void decErrorMSBs(unsigned amt) {
  185. if (ErrorMSBs == (unsigned)-1)
  186. return;
  187. if (ErrorMSBs > amt)
  188. ErrorMSBs -= amt;
  189. else
  190. ErrorMSBs = 0;
  191. }
  192. /// Apply an add on the polynomial
  193. Polynomial &add(const APInt &C) {
  194. // Note: Addition is associative in two's complement even when in case of
  195. // signed overflow.
  196. //
  197. // Error bits can only propagate into higher significant bits. As these are
  198. // already regarded as undefined, there is no change.
  199. //
  200. // Theorem: Adding a constant to a polynomial does not change the error
  201. // term.
  202. //
  203. // Proof:
  204. //
  205. // Since the addition is associative and commutes:
  206. //
  207. // (B + A + E*2^(n-e)) + C = B + (A + C) + E*2^(n-e)
  208. // [qed]
  209. if (C.getBitWidth() != A.getBitWidth()) {
  210. ErrorMSBs = (unsigned)-1;
  211. return *this;
  212. }
  213. A += C;
  214. return *this;
  215. }
  216. /// Apply a multiplication onto the polynomial.
  217. Polynomial &mul(const APInt &C) {
  218. // Note: Multiplication distributes over the addition
  219. //
  220. // Theorem: Multiplication distributes over the addition
  221. //
  222. // Proof(1):
  223. //
  224. // (B+A)*C =-
  225. // = (B + A) + (B + A) + .. {C Times}
  226. // addition is associative and commutes, hence
  227. // = B + B + .. {C Times} .. + A + A + .. {C times}
  228. // = B*C + A*C
  229. // (see (function add) for signed values and overflows)
  230. // [qed]
  231. //
  232. // Theorem: If C has c trailing zeros, errors bits in A or B are shifted out
  233. // to the left.
  234. //
  235. // Proof(2):
  236. //
  237. // Let B' and A' be the n-Bit inputs with some unknown errors EA,
  238. // EB at e leading bits. B' and A' can be written down as:
  239. //
  240. // B' = B + 2^(n-e)*EB
  241. // A' = A + 2^(n-e)*EA
  242. //
  243. // Let C' be an input with c trailing zero bits. C' can be written as
  244. //
  245. // C' = C*2^c
  246. //
  247. // Therefore we can compute the result by using distributivity and
  248. // commutativity.
  249. //
  250. // (B'*C' + A'*C') = [B + 2^(n-e)*EB] * C' + [A + 2^(n-e)*EA] * C' =
  251. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  252. // = (B'+A') * C' =
  253. // = [B + 2^(n-e)*EB + A + 2^(n-e)*EA] * C' =
  254. // = [B + A + 2^(n-e)*EB + 2^(n-e)*EA] * C' =
  255. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C' =
  256. // = (B + A) * C' + [2^(n-e)*EB + 2^(n-e)*EA)] * C*2^c =
  257. // = (B + A) * C' + C*(EB + EA)*2^(n-e)*2^c =
  258. //
  259. // Let EC be the final error with EC = C*(EB + EA)
  260. //
  261. // = (B + A)*C' + EC*2^(n-e)*2^c =
  262. // = (B + A)*C' + EC*2^(n-(e-c))
  263. //
  264. // Since EC is multiplied by 2^(n-(e-c)) the resulting error contains c
  265. // less error bits than the input. c bits are shifted out to the left.
  266. // [qed]
  267. if (C.getBitWidth() != A.getBitWidth()) {
  268. ErrorMSBs = (unsigned)-1;
  269. return *this;
  270. }
  271. // Multiplying by one is a no-op.
  272. if (C.isOne()) {
  273. return *this;
  274. }
  275. // Multiplying by zero removes the coefficient B and defines all bits.
  276. if (C.isZero()) {
  277. ErrorMSBs = 0;
  278. deleteB();
  279. }
  280. // See Proof(2): Trailing zero bits indicate a left shift. This removes
  281. // leading bits from the result even if they are undefined.
  282. decErrorMSBs(C.countTrailingZeros());
  283. A *= C;
  284. pushBOperation(Mul, C);
  285. return *this;
  286. }
  287. /// Apply a logical shift right on the polynomial
  288. Polynomial &lshr(const APInt &C) {
  289. // Theorem(1): (B + A + E*2^(n-e)) >> 1 => (B >> 1) + (A >> 1) + E'*2^(n-e')
  290. // where
  291. // e' = e + 1,
  292. // E is a e-bit number,
  293. // E' is a e'-bit number,
  294. // holds under the following precondition:
  295. // pre(1): A % 2 = 0
  296. // pre(2): e < n, (see Theorem(2) for the trivial case with e=n)
  297. // where >> expresses a logical shift to the right, with adding zeros.
  298. //
  299. // We need to show that for every, E there is a E'
  300. //
  301. // B = b_h * 2^(n-1) + b_m * 2 + b_l
  302. // A = a_h * 2^(n-1) + a_m * 2 (pre(1))
  303. //
  304. // where a_h, b_h, b_l are single bits, and a_m, b_m are (n-2) bit numbers
  305. //
  306. // Let X = (B + A + E*2^(n-e)) >> 1
  307. // Let Y = (B >> 1) + (A >> 1) + E*2^(n-e) >> 1
  308. //
  309. // X = [B + A + E*2^(n-e)] >> 1 =
  310. // = [ b_h * 2^(n-1) + b_m * 2 + b_l +
  311. // + a_h * 2^(n-1) + a_m * 2 +
  312. // + E * 2^(n-e) ] >> 1 =
  313. //
  314. // The sum is built by putting the overflow of [a_m + b+n] into the term
  315. // 2^(n-1). As there are no more bits beyond 2^(n-1) the overflow within
  316. // this bit is discarded. This is expressed by % 2.
  317. //
  318. // The bit in position 0 cannot overflow into the term (b_m + a_m).
  319. //
  320. // = [ ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-1) +
  321. // + ((b_m + a_m) % 2^(n-2)) * 2 +
  322. // + b_l + E * 2^(n-e) ] >> 1 =
  323. //
  324. // The shift is computed by dividing the terms by 2 and by cutting off
  325. // b_l.
  326. //
  327. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  328. // + ((b_m + a_m) % 2^(n-2)) +
  329. // + E * 2^(n-(e+1)) =
  330. //
  331. // by the definition in the Theorem e+1 = e'
  332. //
  333. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  334. // + ((b_m + a_m) % 2^(n-2)) +
  335. // + E * 2^(n-e') =
  336. //
  337. // Compute Y by applying distributivity first
  338. //
  339. // Y = (B >> 1) + (A >> 1) + E*2^(n-e') =
  340. // = (b_h * 2^(n-1) + b_m * 2 + b_l) >> 1 +
  341. // + (a_h * 2^(n-1) + a_m * 2) >> 1 +
  342. // + E * 2^(n-e) >> 1 =
  343. //
  344. // Again, the shift is computed by dividing the terms by 2 and by cutting
  345. // off b_l.
  346. //
  347. // = b_h * 2^(n-2) + b_m +
  348. // + a_h * 2^(n-2) + a_m +
  349. // + E * 2^(n-(e+1)) =
  350. //
  351. // Again, the sum is built by putting the overflow of [a_m + b+n] into
  352. // the term 2^(n-1). But this time there is room for a second bit in the
  353. // term 2^(n-2) we add this bit to a new term and denote it o_h in a
  354. // second step.
  355. //
  356. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] >> 1) * 2^(n-1) +
  357. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  358. // + ((b_m + a_m) % 2^(n-2)) +
  359. // + E * 2^(n-(e+1)) =
  360. //
  361. // Let o_h = [b_h + a_h + (b_m + a_m) >> (n-2)] >> 1
  362. // Further replace e+1 by e'.
  363. //
  364. // = o_h * 2^(n-1) +
  365. // + ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  366. // + ((b_m + a_m) % 2^(n-2)) +
  367. // + E * 2^(n-e') =
  368. //
  369. // Move o_h into the error term and construct E'. To ensure that there is
  370. // no 2^x with negative x, this step requires pre(2) (e < n).
  371. //
  372. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  373. // + ((b_m + a_m) % 2^(n-2)) +
  374. // + o_h * 2^(e'-1) * 2^(n-e') + | pre(2), move 2^(e'-1)
  375. // | out of the old exponent
  376. // + E * 2^(n-e') =
  377. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  378. // + ((b_m + a_m) % 2^(n-2)) +
  379. // + [o_h * 2^(e'-1) + E] * 2^(n-e') + | move 2^(e'-1) out of
  380. // | the old exponent
  381. //
  382. // Let E' = o_h * 2^(e'-1) + E
  383. //
  384. // = ([b_h + a_h + (b_m + a_m) >> (n-2)] % 2) * 2^(n-2) +
  385. // + ((b_m + a_m) % 2^(n-2)) +
  386. // + E' * 2^(n-e')
  387. //
  388. // Because X and Y are distinct only in there error terms and E' can be
  389. // constructed as shown the theorem holds.
  390. // [qed]
  391. //
  392. // For completeness in case of the case e=n it is also required to show that
  393. // distributivity can be applied.
  394. //
  395. // In this case Theorem(1) transforms to (the pre-condition on A can also be
  396. // dropped)
  397. //
  398. // Theorem(2): (B + A + E) >> 1 => (B >> 1) + (A >> 1) + E'
  399. // where
  400. // A, B, E, E' are two's complement numbers with the same bit
  401. // width
  402. //
  403. // Let A + B + E = X
  404. // Let (B >> 1) + (A >> 1) = Y
  405. //
  406. // Therefore we need to show that for every X and Y there is an E' which
  407. // makes the equation
  408. //
  409. // X = Y + E'
  410. //
  411. // hold. This is trivially the case for E' = X - Y.
  412. //
  413. // [qed]
  414. //
  415. // Remark: Distributing lshr with and arbitrary number n can be expressed as
  416. // ((((B + A) lshr 1) lshr 1) ... ) {n times}.
  417. // This construction induces n additional error bits at the left.
  418. if (C.getBitWidth() != A.getBitWidth()) {
  419. ErrorMSBs = (unsigned)-1;
  420. return *this;
  421. }
  422. if (C.isZero())
  423. return *this;
  424. // Test if the result will be zero
  425. unsigned shiftAmt = C.getZExtValue();
  426. if (shiftAmt >= C.getBitWidth())
  427. return mul(APInt(C.getBitWidth(), 0));
  428. // The proof that shiftAmt LSBs are zero for at least one summand is only
  429. // possible for the constant number.
  430. //
  431. // If this can be proven add shiftAmt to the error counter
  432. // `ErrorMSBs`. Otherwise set all bits as undefined.
  433. if (A.countTrailingZeros() < shiftAmt)
  434. ErrorMSBs = A.getBitWidth();
  435. else
  436. incErrorMSBs(shiftAmt);
  437. // Apply the operation.
  438. pushBOperation(LShr, C);
  439. A = A.lshr(shiftAmt);
  440. return *this;
  441. }
  442. /// Apply a sign-extend or truncate operation on the polynomial.
  443. Polynomial &sextOrTrunc(unsigned n) {
  444. if (n < A.getBitWidth()) {
  445. // Truncate: Clearly undefined Bits on the MSB side are removed
  446. // if there are any.
  447. decErrorMSBs(A.getBitWidth() - n);
  448. A = A.trunc(n);
  449. pushBOperation(Trunc, APInt(sizeof(n) * 8, n));
  450. }
  451. if (n > A.getBitWidth()) {
  452. // Extend: Clearly extending first and adding later is different
  453. // to adding first and extending later in all extended bits.
  454. incErrorMSBs(n - A.getBitWidth());
  455. A = A.sext(n);
  456. pushBOperation(SExt, APInt(sizeof(n) * 8, n));
  457. }
  458. return *this;
  459. }
  460. /// Test if there is a coefficient B.
  461. bool isFirstOrder() const { return V != nullptr; }
  462. /// Test coefficient B of two Polynomials are equal.
  463. bool isCompatibleTo(const Polynomial &o) const {
  464. // The polynomial use different bit width.
  465. if (A.getBitWidth() != o.A.getBitWidth())
  466. return false;
  467. // If neither Polynomial has the Coefficient B.
  468. if (!isFirstOrder() && !o.isFirstOrder())
  469. return true;
  470. // The index variable is different.
  471. if (V != o.V)
  472. return false;
  473. // Check the operations.
  474. if (B.size() != o.B.size())
  475. return false;
  476. auto *ob = o.B.begin();
  477. for (const auto &b : B) {
  478. if (b != *ob)
  479. return false;
  480. ob++;
  481. }
  482. return true;
  483. }
  484. /// Subtract two polynomials, return an undefined polynomial if
  485. /// subtraction is not possible.
  486. Polynomial operator-(const Polynomial &o) const {
  487. // Return an undefined polynomial if incompatible.
  488. if (!isCompatibleTo(o))
  489. return Polynomial();
  490. // If the polynomials are compatible (meaning they have the same
  491. // coefficient on B), B is eliminated. Thus a polynomial solely
  492. // containing A is returned
  493. return Polynomial(A - o.A, std::max(ErrorMSBs, o.ErrorMSBs));
  494. }
  495. /// Subtract a constant from a polynomial,
  496. Polynomial operator-(uint64_t C) const {
  497. Polynomial Result(*this);
  498. Result.A -= C;
  499. return Result;
  500. }
  501. /// Add a constant to a polynomial,
  502. Polynomial operator+(uint64_t C) const {
  503. Polynomial Result(*this);
  504. Result.A += C;
  505. return Result;
  506. }
  507. /// Returns true if it can be proven that two Polynomials are equal.
  508. bool isProvenEqualTo(const Polynomial &o) {
  509. // Subtract both polynomials and test if it is fully defined and zero.
  510. Polynomial r = *this - o;
  511. return (r.ErrorMSBs == 0) && (!r.isFirstOrder()) && (r.A.isZero());
  512. }
  513. /// Print the polynomial into a stream.
  514. void print(raw_ostream &OS) const {
  515. OS << "[{#ErrBits:" << ErrorMSBs << "} ";
  516. if (V) {
  517. for (auto b : B)
  518. OS << "(";
  519. OS << "(" << *V << ") ";
  520. for (auto b : B) {
  521. switch (b.first) {
  522. case LShr:
  523. OS << "LShr ";
  524. break;
  525. case Mul:
  526. OS << "Mul ";
  527. break;
  528. case SExt:
  529. OS << "SExt ";
  530. break;
  531. case Trunc:
  532. OS << "Trunc ";
  533. break;
  534. }
  535. OS << b.second << ") ";
  536. }
  537. }
  538. OS << "+ " << A << "]";
  539. }
  540. private:
  541. void deleteB() {
  542. V = nullptr;
  543. B.clear();
  544. }
  545. void pushBOperation(const BOps Op, const APInt &C) {
  546. if (isFirstOrder()) {
  547. B.push_back(std::make_pair(Op, C));
  548. return;
  549. }
  550. }
  551. };
#ifndef NDEBUG
/// Debug-stream operator; delegates to Polynomial::print.
static raw_ostream &operator<<(raw_ostream &OS, const Polynomial &S) {
  S.print(OS);
  return OS;
}
#endif
  558. /// VectorInfo stores the following abstract information for each vector
  559. /// element:
  560. ///
  561. /// 1) the memory address loaded into the element as a Polynomial,
  562. /// 2) a set of load instructions necessary to construct the vector,
  563. /// 3) a set of all other instructions that are necessary to create the vector and
  564. /// 4) a pointer value that can be used as relative base for all elements.
  565. struct VectorInfo {
  566. private:
  // Copying is intentionally unsupported: VectorInfo owns a raw EI array and
  // instances are only ever built and filled in place.
  VectorInfo(const VectorInfo &c) : VTy(c.VTy) {
    llvm_unreachable(
        "Copying VectorInfo is neither implemented nor necessary,");
  }
  571. public:
  572. /// Information of a Vector Element
  /// Information of a Vector Element
  struct ElementInfo {
    /// Offset Polynomial.
    Polynomial Ofs;

    /// The Load Instruction used to Load the entry. LI is null if the pointer
    /// of the load instruction does not point on to the entry
    LoadInst *LI;

    /// Default-constructed entries represent an unknown element (undefined
    /// offset, no load).
    ElementInfo(Polynomial Offset = Polynomial(), LoadInst *LI = nullptr)
        : Ofs(Offset), LI(LI) {}
  };
  /// Basic-block the load instructions are within
  BasicBlock *BB = nullptr;

  /// Pointer value all participating load instructions are based on
  Value *PV = nullptr;

  /// Participating load instructions
  std::set<LoadInst *> LIs;

  /// Participating instructions (includes the loads)
  std::set<Instruction *> Is;

  /// Final shuffle-vector instruction
  ShuffleVectorInst *SVI = nullptr;

  /// Information of the offset for each vector element. Owned array of
  /// getDimension() entries, released in the destructor.
  ElementInfo *EI;

  /// Vector Type
  FixedVectorType *const VTy;

  /// Allocate per-element offset information for a vector of type \p VTy.
  VectorInfo(FixedVectorType *VTy) : VTy(VTy) {
    EI = new ElementInfo[VTy->getNumElements()];
  }

  virtual ~VectorInfo() { delete[] EI; }

  /// Number of elements of the represented vector.
  unsigned getDimension() const { return VTy->getNumElements(); }
  601. /// Test if the VectorInfo can be part of an interleaved load with the
  602. /// specified factor.
  603. ///
  604. /// \param Factor of the interleave
  605. /// \param DL Targets Datalayout
  606. ///
  607. /// \returns true if this is possible and false if not
  608. bool isInterleaved(unsigned Factor, const DataLayout &DL) const {
  609. unsigned Size = DL.getTypeAllocSize(VTy->getElementType());
  610. for (unsigned i = 1; i < getDimension(); i++) {
  611. if (!EI[i].Ofs.isProvenEqualTo(EI[0].Ofs + i * Factor * Size)) {
  612. return false;
  613. }
  614. }
  615. return true;
  616. }
  617. /// Recursively computes the vector information stored in V.
  618. ///
  619. /// This function delegates the work to specialized implementations
  620. ///
  621. /// \param V Value to operate on
  622. /// \param Result Result of the computation
  623. ///
  624. /// \returns false if no sensible information can be gathered.
  625. static bool compute(Value *V, VectorInfo &Result, const DataLayout &DL) {
  626. ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  627. if (SVI)
  628. return computeFromSVI(SVI, Result, DL);
  629. LoadInst *LI = dyn_cast<LoadInst>(V);
  630. if (LI)
  631. return computeFromLI(LI, Result, DL);
  632. BitCastInst *BCI = dyn_cast<BitCastInst>(V);
  633. if (BCI)
  634. return computeFromBCI(BCI, Result, DL);
  635. return false;
  636. }
  637. /// BitCastInst specialization to compute the vector information.
  638. ///
  639. /// \param BCI BitCastInst to operate on
  640. /// \param Result Result of the computation
  641. ///
  642. /// \returns false if no sensible information can be gathered.
  643. static bool computeFromBCI(BitCastInst *BCI, VectorInfo &Result,
  644. const DataLayout &DL) {
  645. Instruction *Op = dyn_cast<Instruction>(BCI->getOperand(0));
  646. if (!Op)
  647. return false;
  648. FixedVectorType *VTy = dyn_cast<FixedVectorType>(Op->getType());
  649. if (!VTy)
  650. return false;
  651. // We can only cast from large to smaller vectors
  652. if (Result.VTy->getNumElements() % VTy->getNumElements())
  653. return false;
  654. unsigned Factor = Result.VTy->getNumElements() / VTy->getNumElements();
  655. unsigned NewSize = DL.getTypeAllocSize(Result.VTy->getElementType());
  656. unsigned OldSize = DL.getTypeAllocSize(VTy->getElementType());
  657. if (NewSize * Factor != OldSize)
  658. return false;
  659. VectorInfo Old(VTy);
  660. if (!compute(Op, Old, DL))
  661. return false;
  662. for (unsigned i = 0; i < Result.VTy->getNumElements(); i += Factor) {
  663. for (unsigned j = 0; j < Factor; j++) {
  664. Result.EI[i + j] =
  665. ElementInfo(Old.EI[i / Factor].Ofs + j * NewSize,
  666. j == 0 ? Old.EI[i / Factor].LI : nullptr);
  667. }
  668. }
  669. Result.BB = Old.BB;
  670. Result.PV = Old.PV;
  671. Result.LIs.insert(Old.LIs.begin(), Old.LIs.end());
  672. Result.Is.insert(Old.Is.begin(), Old.Is.end());
  673. Result.Is.insert(BCI);
  674. Result.SVI = nullptr;
  675. return true;
  676. }
  /// ShuffleVectorInst specialization to compute vector information.
  ///
  /// \param SVI ShuffleVectorInst to operate on
  /// \param Result Result of the computation
  /// \param DL DataLayout of the target machine
  ///
  /// Compute the left and the right side vector information and merge them by
  /// applying the shuffle operation. This function also ensures that the left
  /// and right side have compatible loads. This means that all loads are
  /// within the same basic block and are based on the same pointer.
  ///
  /// \returns false if no sensible information can be gathered.
  static bool computeFromSVI(ShuffleVectorInst *SVI, VectorInfo &Result,
                             const DataLayout &DL) {
    // Both shuffle operands have the same (fixed) vector type.
    FixedVectorType *ArgTy =
        cast<FixedVectorType>(SVI->getOperand(0)->getType());

    // Compute the left hand vector information. A failed computation is
    // recorded by clearing BB; the operand's elements are then treated as
    // unknown below.
    VectorInfo LHS(ArgTy);
    if (!compute(SVI->getOperand(0), LHS, DL))
      LHS.BB = nullptr;

    // Compute the right hand vector information (same failure convention).
    VectorInfo RHS(ArgTy);
    if (!compute(SVI->getOperand(1), RHS, DL))
      RHS.BB = nullptr;

    // Neither operand produced sensible results?
    if (!LHS.BB && !RHS.BB)
      return false;
    // Only RHS produced sensible results?
    else if (!LHS.BB) {
      Result.BB = RHS.BB;
      Result.PV = RHS.PV;
    }
    // Only LHS produced sensible results?
    else if (!RHS.BB) {
      Result.BB = LHS.BB;
      Result.PV = LHS.PV;
    }
    // Both operands produced sensible results? They are only compatible when
    // they agree on the basic block and the base pointer.
    else if ((LHS.BB == RHS.BB) && (LHS.PV == RHS.PV)) {
      Result.BB = LHS.BB;
      Result.PV = LHS.PV;
    }
    // Both operands produced sensible results but they are incompatible.
    else {
      return false;
    }

    // Merge and apply the operation on the offset information. Only sides
    // that computed successfully contribute their load/instruction sets.
    if (LHS.BB) {
      Result.LIs.insert(LHS.LIs.begin(), LHS.LIs.end());
      Result.Is.insert(LHS.Is.begin(), LHS.Is.end());
    }
    if (RHS.BB) {
      Result.LIs.insert(RHS.LIs.begin(), RHS.LIs.end());
      Result.Is.insert(RHS.Is.begin(), RHS.Is.end());
    }
    Result.Is.insert(SVI);
    Result.SVI = SVI;

    // Apply the shuffle mask: element j of the result takes its offset info
    // from the selected element of LHS (mask < NumElements) or RHS
    // (mask >= NumElements). A negative mask element denotes an undefined
    // lane and yields an empty ElementInfo, as does selecting from a side
    // that failed to compute.
    int j = 0;
    for (int i : SVI->getShuffleMask()) {
      assert((i < 2 * (signed)ArgTy->getNumElements()) &&
             "Invalid ShuffleVectorInst (index out of bounds)");

      if (i < 0)
        Result.EI[j] = ElementInfo();
      else if (i < (signed)ArgTy->getNumElements()) {
        if (LHS.BB)
          Result.EI[j] = LHS.EI[i];
        else
          Result.EI[j] = ElementInfo();
      } else {
        if (RHS.BB)
          Result.EI[j] = RHS.EI[i - ArgTy->getNumElements()];
        else
          Result.EI[j] = ElementInfo();
      }
      j++;
    }

    return true;
  }
  754. /// LoadInst specialization to compute vector information.
  755. ///
  756. /// This function also acts as abort condition to the recursion.
  757. ///
  758. /// \param LI LoadInst to operate on
  759. /// \param Result Result of the computation
  760. ///
  761. /// \returns false if no sensible information can be gathered.
  762. static bool computeFromLI(LoadInst *LI, VectorInfo &Result,
  763. const DataLayout &DL) {
  764. Value *BasePtr;
  765. Polynomial Offset;
  766. if (LI->isVolatile())
  767. return false;
  768. if (LI->isAtomic())
  769. return false;
  770. // Get the base polynomial
  771. computePolynomialFromPointer(*LI->getPointerOperand(), Offset, BasePtr, DL);
  772. Result.BB = LI->getParent();
  773. Result.PV = BasePtr;
  774. Result.LIs.insert(LI);
  775. Result.Is.insert(LI);
  776. for (unsigned i = 0; i < Result.getDimension(); i++) {
  777. Value *Idx[2] = {
  778. ConstantInt::get(Type::getInt32Ty(LI->getContext()), 0),
  779. ConstantInt::get(Type::getInt32Ty(LI->getContext()), i),
  780. };
  781. int64_t Ofs = DL.getIndexedOffsetInType(Result.VTy, ArrayRef(Idx, 2));
  782. Result.EI[i] = ElementInfo(Offset + Ofs, i == 0 ? LI : nullptr);
  783. }
  784. return true;
  785. }
  786. /// Recursively compute polynomial of a value.
  787. ///
  788. /// \param BO Input binary operation
  789. /// \param Result Result polynomial
  790. static void computePolynomialBinOp(BinaryOperator &BO, Polynomial &Result) {
  791. Value *LHS = BO.getOperand(0);
  792. Value *RHS = BO.getOperand(1);
  793. // Find the RHS Constant if any
  794. ConstantInt *C = dyn_cast<ConstantInt>(RHS);
  795. if ((!C) && BO.isCommutative()) {
  796. C = dyn_cast<ConstantInt>(LHS);
  797. if (C)
  798. std::swap(LHS, RHS);
  799. }
  800. switch (BO.getOpcode()) {
  801. case Instruction::Add:
  802. if (!C)
  803. break;
  804. computePolynomial(*LHS, Result);
  805. Result.add(C->getValue());
  806. return;
  807. case Instruction::LShr:
  808. if (!C)
  809. break;
  810. computePolynomial(*LHS, Result);
  811. Result.lshr(C->getValue());
  812. return;
  813. default:
  814. break;
  815. }
  816. Result = Polynomial(&BO);
  817. }
  818. /// Recursively compute polynomial of a value
  819. ///
  820. /// \param V input value
  821. /// \param Result result polynomial
  822. static void computePolynomial(Value &V, Polynomial &Result) {
  823. if (auto *BO = dyn_cast<BinaryOperator>(&V))
  824. computePolynomialBinOp(*BO, Result);
  825. else
  826. Result = Polynomial(&V);
  827. }
  828. /// Compute the Polynomial representation of a Pointer type.
  829. ///
  830. /// \param Ptr input pointer value
  831. /// \param Result result polynomial
  832. /// \param BasePtr pointer the polynomial is based on
  833. /// \param DL Datalayout of the target machine
  834. static void computePolynomialFromPointer(Value &Ptr, Polynomial &Result,
  835. Value *&BasePtr,
  836. const DataLayout &DL) {
  837. // Not a pointer type? Return an undefined polynomial
  838. PointerType *PtrTy = dyn_cast<PointerType>(Ptr.getType());
  839. if (!PtrTy) {
  840. Result = Polynomial();
  841. BasePtr = nullptr;
  842. return;
  843. }
  844. unsigned PointerBits =
  845. DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace());
  846. /// Skip pointer casts. Return Zero polynomial otherwise
  847. if (isa<CastInst>(&Ptr)) {
  848. CastInst &CI = *cast<CastInst>(&Ptr);
  849. switch (CI.getOpcode()) {
  850. case Instruction::BitCast:
  851. computePolynomialFromPointer(*CI.getOperand(0), Result, BasePtr, DL);
  852. break;
  853. default:
  854. BasePtr = &Ptr;
  855. Polynomial(PointerBits, 0);
  856. break;
  857. }
  858. }
  859. /// Resolve GetElementPtrInst.
  860. else if (isa<GetElementPtrInst>(&Ptr)) {
  861. GetElementPtrInst &GEP = *cast<GetElementPtrInst>(&Ptr);
  862. APInt BaseOffset(PointerBits, 0);
  863. // Check if we can compute the Offset with accumulateConstantOffset
  864. if (GEP.accumulateConstantOffset(DL, BaseOffset)) {
  865. Result = Polynomial(BaseOffset);
  866. BasePtr = GEP.getPointerOperand();
  867. return;
  868. } else {
  869. // Otherwise we allow that the last index operand of the GEP is
  870. // non-constant.
  871. unsigned idxOperand, e;
  872. SmallVector<Value *, 4> Indices;
  873. for (idxOperand = 1, e = GEP.getNumOperands(); idxOperand < e;
  874. idxOperand++) {
  875. ConstantInt *IDX = dyn_cast<ConstantInt>(GEP.getOperand(idxOperand));
  876. if (!IDX)
  877. break;
  878. Indices.push_back(IDX);
  879. }
  880. // It must also be the last operand.
  881. if (idxOperand + 1 != e) {
  882. Result = Polynomial();
  883. BasePtr = nullptr;
  884. return;
  885. }
  886. // Compute the polynomial of the index operand.
  887. computePolynomial(*GEP.getOperand(idxOperand), Result);
  888. // Compute base offset from zero based index, excluding the last
  889. // variable operand.
  890. BaseOffset =
  891. DL.getIndexedOffsetInType(GEP.getSourceElementType(), Indices);
  892. // Apply the operations of GEP to the polynomial.
  893. unsigned ResultSize = DL.getTypeAllocSize(GEP.getResultElementType());
  894. Result.sextOrTrunc(PointerBits);
  895. Result.mul(APInt(PointerBits, ResultSize));
  896. Result.add(BaseOffset);
  897. BasePtr = GEP.getPointerOperand();
  898. }
  899. }
  900. // All other instructions are handled by using the value as base pointer and
  901. // a zero polynomial.
  902. else {
  903. BasePtr = &Ptr;
  904. Polynomial(DL.getIndexSizeInBits(PtrTy->getPointerAddressSpace()), 0);
  905. }
  906. }
  907. #ifndef NDEBUG
  908. void print(raw_ostream &OS) const {
  909. if (PV)
  910. OS << *PV;
  911. else
  912. OS << "(none)";
  913. OS << " + ";
  914. for (unsigned i = 0; i < getDimension(); i++)
  915. OS << ((i == 0) ? "[" : ", ") << EI[i].Ofs;
  916. OS << "]";
  917. }
  918. #endif
  919. };
  920. } // anonymous namespace
// Search Candidates for a group of Factor vectors that together form one
// interleaved load: same vector type, same basic block, same base pointer,
// and first-element offsets spaced exactly one element apart. On success the
// matching VectorInfos are spliced (in interleave order) into
// InterleavedLoad and removed from Candidates.
bool InterleavedLoadCombineImpl::findPattern(
    std::list<VectorInfo> &Candidates, std::list<VectorInfo> &InterleavedLoad,
    unsigned Factor, const DataLayout &DL) {
  for (auto C0 = Candidates.begin(), E0 = Candidates.end(); C0 != E0; ++C0) {
    unsigned i;
    // Try to find an interleaved load using the front of Worklist as first line
    unsigned Size = DL.getTypeAllocSize(C0->VTy->getElementType());

    // List containing iterators pointing to the VectorInfos of the candidates
    // (slot k holds the line at offset k*Size from C0; end() = not found yet).
    std::vector<std::list<VectorInfo>::iterator> Res(Factor, Candidates.end());

    for (auto C = Candidates.begin(), E = Candidates.end(); C != E; C++) {
      if (C->VTy != C0->VTy)
        continue;
      if (C->BB != C0->BB)
        continue;
      if (C->PV != C0->PV)
        continue;

      // Check the current value matches any of factor - 1 remaining lines.
      // Note Res accumulates across candidates: different C's may fill
      // different slots before the group is complete.
      for (i = 1; i < Factor; i++) {
        if (C->EI[0].Ofs.isProvenEqualTo(C0->EI[0].Ofs + i * Size)) {
          Res[i] = C;
        }
      }

      // All Factor-1 remaining slots filled?
      for (i = 1; i < Factor; i++) {
        if (Res[i] == Candidates.end())
          break;
      }

      if (i == Factor) {
        Res[0] = C0;
        break;
      }
    }

    if (Res[0] != Candidates.end()) {
      // Move the result into the output. splice keeps the stored iterators
      // valid while transferring the nodes between the lists.
      for (unsigned i = 0; i < Factor; i++) {
        InterleavedLoad.splice(InterleavedLoad.end(), Candidates, Res[i]);
      }

      return true;
    }
  }
  return false;
}
  962. LoadInst *
  963. InterleavedLoadCombineImpl::findFirstLoad(const std::set<LoadInst *> &LIs) {
  964. assert(!LIs.empty() && "No load instructions given.");
  965. // All LIs are within the same BB. Select the first for a reference.
  966. BasicBlock *BB = (*LIs.begin())->getParent();
  967. BasicBlock::iterator FLI = llvm::find_if(
  968. *BB, [&LIs](Instruction &I) -> bool { return is_contained(LIs, &I); });
  969. assert(FLI != BB->end());
  970. return cast<LoadInst>(FLI);
  971. }
// Replace one interleaved-load group (Factor shufflevector "lines") with a
// single wide load plus strided shuffles, if TTI says the wide form is
// cheaper than the instructions it makes dead. Returns true on success.
bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
                                         OptimizationRemarkEmitter &ORE) {
  LLVM_DEBUG(dbgs() << "Checking interleaved load\n");

  // The insertion point is the LoadInst which loads the first values. The
  // following tests are used to prove that the combined load can be inserted
  // just before InsertionPoint.
  LoadInst *InsertionPoint = InterleavedLoad.front().EI[0].LI;

  // Test if the offset is computed
  if (!InsertionPoint)
    return false;

  std::set<LoadInst *> LIs;
  std::set<Instruction *> Is;
  std::set<Instruction *> SVIs;

  InstructionCost InterleavedCost;
  // NOTE(review): this local deliberately(?) shadows the type name
  // InstructionCost; it accumulates the cost of the instructions to be
  // replaced.
  InstructionCost InstructionCost = 0;
  const TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency;

  // Get the interleave factor
  unsigned Factor = InterleavedLoad.size();

  // Merge all input sets used in analysis
  for (auto &VI : InterleavedLoad) {
    // Generate a set of all load instructions to be combined
    LIs.insert(VI.LIs.begin(), VI.LIs.end());

    // Generate a set of all instructions taking part in load
    // interleaved. This list excludes the instructions necessary for the
    // polynomial construction.
    Is.insert(VI.Is.begin(), VI.Is.end());

    // Generate the set of the final ShuffleVectorInst.
    SVIs.insert(VI.SVI);
  }

  // There is nothing to combine.
  if (LIs.size() < 2)
    return false;

  // Test if all participating instruction will be dead after the
  // transformation. If intermediate results are used, no performance gain can
  // be expected. Also sum the cost of the Instructions being left dead.
  for (const auto &I : Is) {
    // Compute the old cost
    InstructionCost += TTI.getInstructionCost(I, CostKind);

    // The final SVIs are allowed not to be dead, all uses will be replaced
    if (SVIs.find(I) != SVIs.end())
      continue;

    // If there are users outside the set to be eliminated, we abort the
    // transformation. No gain can be expected. (A non-Instruction user maps
    // to dyn_cast -> nullptr, which is never in Is, so it also aborts.)
    for (auto *U : I->users()) {
      if (Is.find(dyn_cast<Instruction>(U)) == Is.end())
        return false;
    }
  }

  // We need to have a valid cost in order to proceed.
  if (!InstructionCost.isValid())
    return false;

  // We know that all LoadInst are within the same BB. This guarantees that
  // either everything or nothing is loaded.
  LoadInst *First = findFirstLoad(LIs);

  // To be safe that the loads can be combined, iterate over all loads and test
  // that the corresponding defining access dominates first LI. This guarantees
  // that there are no aliasing stores in between the loads.
  auto FMA = MSSA.getMemoryAccess(First);
  for (auto *LI : LIs) {
    auto MADef = MSSA.getMemoryAccess(LI)->getDefiningAccess();
    if (!MSSA.dominates(MADef, FMA))
      return false;
  }
  assert(!LIs.empty() && "There are no LoadInst to combine");

  // It is necessary that insertion point dominates all final ShuffleVectorInst.
  for (auto &VI : InterleavedLoad) {
    if (!DT.dominates(InsertionPoint, VI.SVI))
      return false;
  }

  // All checks are done. Add instructions detectable by InterleavedAccessPass.
  // The old instructions are left dead for later DCE.
  IRBuilder<> Builder(InsertionPoint);
  Type *ETy = InterleavedLoad.front().SVI->getType()->getElementType();
  unsigned ElementsPerSVI =
      cast<FixedVectorType>(InterleavedLoad.front().SVI->getType())
          ->getNumElements();
  FixedVectorType *ILTy = FixedVectorType::get(ETy, Factor * ElementsPerSVI);

  // Query the target cost of the equivalent interleaved load; bail out unless
  // it is strictly cheaper than the code being replaced.
  auto Indices = llvm::to_vector<4>(llvm::seq<unsigned>(0, Factor));
  InterleavedCost = TTI.getInterleavedMemoryOpCost(
      Instruction::Load, ILTy, Factor, Indices, InsertionPoint->getAlign(),
      InsertionPoint->getPointerAddressSpace(), CostKind);
  if (InterleavedCost >= InstructionCost) {
    return false;
  }

  // Create a pointer cast for the wide load.
  auto CI = Builder.CreatePointerCast(InsertionPoint->getOperand(0),
                                      ILTy->getPointerTo(),
                                      "interleaved.wide.ptrcast");

  // Create the wide load and update the MemorySSA.
  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlign(),
                                      "interleaved.wide.load");
  auto MSSAU = MemorySSAUpdater(&MSSA);
  MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
      LI, nullptr, MSSA.getMemoryAccess(InsertionPoint)));
  MSSAU.insertUse(MSSALoad, /*RenameUses=*/ true);

  // Create the final SVIs and replace all uses. Line i extracts lanes
  // i, i+Factor, i+2*Factor, ... from the wide load.
  int i = 0;
  for (auto &VI : InterleavedLoad) {
    SmallVector<int, 4> Mask;
    for (unsigned j = 0; j < ElementsPerSVI; j++)
      Mask.push_back(i + j * Factor);

    Builder.SetInsertPoint(VI.SVI);
    auto SVI = Builder.CreateShuffleVector(LI, Mask, "interleaved.shuffle");
    VI.SVI->replaceAllUsesWith(SVI);
    i++;
  }

  NumInterleavedLoadCombine++;
  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Combined Interleaved Load", LI)
           << "Load interleaved combined with factor "
           << ore::NV("Factor", Factor);
  });
  return true;
}
  1086. bool InterleavedLoadCombineImpl::run() {
  1087. OptimizationRemarkEmitter ORE(&F);
  1088. bool changed = false;
  1089. unsigned MaxFactor = TLI.getMaxSupportedInterleaveFactor();
  1090. auto &DL = F.getParent()->getDataLayout();
  1091. // Start with the highest factor to avoid combining and recombining.
  1092. for (unsigned Factor = MaxFactor; Factor >= 2; Factor--) {
  1093. std::list<VectorInfo> Candidates;
  1094. for (BasicBlock &BB : F) {
  1095. for (Instruction &I : BB) {
  1096. if (auto SVI = dyn_cast<ShuffleVectorInst>(&I)) {
  1097. // We don't support scalable vectors in this pass.
  1098. if (isa<ScalableVectorType>(SVI->getType()))
  1099. continue;
  1100. Candidates.emplace_back(cast<FixedVectorType>(SVI->getType()));
  1101. if (!VectorInfo::computeFromSVI(SVI, Candidates.back(), DL)) {
  1102. Candidates.pop_back();
  1103. continue;
  1104. }
  1105. if (!Candidates.back().isInterleaved(Factor, DL)) {
  1106. Candidates.pop_back();
  1107. }
  1108. }
  1109. }
  1110. }
  1111. std::list<VectorInfo> InterleavedLoad;
  1112. while (findPattern(Candidates, InterleavedLoad, Factor, DL)) {
  1113. if (combine(InterleavedLoad, ORE)) {
  1114. changed = true;
  1115. } else {
  1116. // Remove the first element of the Interleaved Load but put the others
  1117. // back on the list and continue searching
  1118. Candidates.splice(Candidates.begin(), InterleavedLoad,
  1119. std::next(InterleavedLoad.begin()),
  1120. InterleavedLoad.end());
  1121. }
  1122. InterleavedLoad.clear();
  1123. }
  1124. }
  1125. return changed;
  1126. }
  1127. namespace {
  1128. /// This pass combines interleaved loads into a pattern detectable by
  1129. /// InterleavedAccessPass.
  1130. struct InterleavedLoadCombine : public FunctionPass {
  1131. static char ID;
  1132. InterleavedLoadCombine() : FunctionPass(ID) {
  1133. initializeInterleavedLoadCombinePass(*PassRegistry::getPassRegistry());
  1134. }
  1135. StringRef getPassName() const override {
  1136. return "Interleaved Load Combine Pass";
  1137. }
  1138. bool runOnFunction(Function &F) override {
  1139. if (DisableInterleavedLoadCombine)
  1140. return false;
  1141. auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  1142. if (!TPC)
  1143. return false;
  1144. LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName()
  1145. << "\n");
  1146. return InterleavedLoadCombineImpl(
  1147. F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
  1148. getAnalysis<MemorySSAWrapperPass>().getMSSA(),
  1149. TPC->getTM<TargetMachine>())
  1150. .run();
  1151. }
  1152. void getAnalysisUsage(AnalysisUsage &AU) const override {
  1153. AU.addRequired<MemorySSAWrapperPass>();
  1154. AU.addRequired<DominatorTreeWrapperPass>();
  1155. FunctionPass::getAnalysisUsage(AU);
  1156. }
  1157. private:
  1158. };
  1159. } // anonymous namespace
char InterleavedLoadCombine::ID = 0;

// Register the legacy pass and declare its analysis dependencies so the pass
// manager schedules DominatorTree and MemorySSA before it.
INITIALIZE_PASS_BEGIN(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(
    InterleavedLoadCombine, DEBUG_TYPE,
    "Combine interleaved loads into wide loads and shufflevector instructions",
    false, false)
  1171. FunctionPass *
  1172. llvm::createInterleavedLoadCombinePass() {
  1173. auto P = new InterleavedLoadCombine();
  1174. return P;
  1175. }