//===- X86InterleavedAccess.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the X86 implementation of the interleaved accesses
/// optimization generating X86-specific instructions/intrinsics for
/// interleaved access groups.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

using namespace llvm;

namespace {

/// This class holds necessary information to represent an interleaved
/// access group and supports utilities to lower the group into
/// X86-specific instructions/intrinsics.
/// E.g. A group of interleaving access loads (Factor = 2; accessing every
/// other element)
///   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
///   %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <0, 2, 4, 6>
///   %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <1, 3, 5, 7>
class X86InterleavedAccessGroup {
  /// Reference to the wide-load instruction of an interleaved access
  /// group.
  Instruction *const Inst;

  /// Reference to the shuffle(s), consumer(s) of the (load) 'Inst'.
  ArrayRef<ShuffleVectorInst *> Shuffles;

  /// Reference to the starting index of each user-shuffle.
  ArrayRef<unsigned> Indices;

  /// Reference to the interleaving stride in terms of elements.
  const unsigned Factor;

  /// Reference to the underlying target.
  const X86Subtarget &Subtarget;

  const DataLayout &DL;

  IRBuilder<> &Builder;

  /// Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors
  /// sub vectors of type \p T. Returns the sub-vectors in \p DecomposedVectors.
  void decompose(Instruction *Inst, unsigned NumSubVectors, FixedVectorType *T,
                 SmallVectorImpl<Instruction *> &DecomposedVectors);

  /// Performs matrix transposition on a 4x4 matrix \p InputVectors and
  /// returns the transposed-vectors in \p TransposedVectors.
  /// E.g.
  /// InputVectors:
  ///   In-V0 = p1, p2, p3, p4
  ///   In-V1 = q1, q2, q3, q4
  ///   In-V2 = r1, r2, r3, r4
  ///   In-V3 = s1, s2, s3, s4
  /// OutputVectors:
  ///   Out-V0 = p1, q1, r1, s1
  ///   Out-V1 = p2, q2, r2, s2
  ///   Out-V2 = p3, q3, r3, s3
  ///   Out-V3 = p4, q4, r4, s4
  void transpose_4x4(ArrayRef<Instruction *> InputVectors,
                     SmallVectorImpl<Value *> &TransposedMatrix);
  void interleave8bitStride4(ArrayRef<Instruction *> InputVectors,
                             SmallVectorImpl<Value *> &TransposedMatrix,
                             unsigned NumSubVecElems);
  void interleave8bitStride4VF8(ArrayRef<Instruction *> InputVectors,
                                SmallVectorImpl<Value *> &TransposedMatrix);
  void interleave8bitStride3(ArrayRef<Instruction *> InputVectors,
                             SmallVectorImpl<Value *> &TransposedMatrix,
                             unsigned NumSubVecElems);
  void deinterleave8bitStride3(ArrayRef<Instruction *> InputVectors,
                               SmallVectorImpl<Value *> &TransposedMatrix,
                               unsigned NumSubVecElems);

public:
  /// In order to form an interleaved access group X86InterleavedAccessGroup
  /// requires a wide-load instruction \p 'I', a group of interleaved-vectors
  /// \p Shuffs, reference to the first indices of each interleaved-vector
  /// \p 'Ind' and the interleaving stride factor \p F. In order to generate
  /// X86-specific instructions/intrinsics it also requires the underlying
  /// target information \p STarget.
  explicit X86InterleavedAccessGroup(Instruction *I,
                                     ArrayRef<ShuffleVectorInst *> Shuffs,
                                     ArrayRef<unsigned> Ind, const unsigned F,
                                     const X86Subtarget &STarget,
                                     IRBuilder<> &B)
      : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
        DL(Inst->getModule()->getDataLayout()), Builder(B) {}

  /// Returns true if this interleaved access group can be lowered into
  /// x86-specific instructions/intrinsics, false otherwise.
  bool isSupported() const;

  /// Lowers this interleaved access group into X86-specific
  /// instructions/intrinsics.
  bool lowerIntoOptimizedSequence();
};

} // end anonymous namespace

bool X86InterleavedAccessGroup::isSupported() const {
  VectorType *ShuffleVecTy = Shuffles[0]->getType();
  Type *ShuffleEltTy = ShuffleVecTy->getElementType();
  unsigned ShuffleElemSize = DL.getTypeSizeInBits(ShuffleEltTy);
  unsigned WideInstSize;

  // Currently, lowering is supported for the following vectors:
  // Stride 4:
  //    1. Store and load of 4-element vectors of 64 bits on AVX.
  //    2. Store of 16/32-element vectors of 8 bits on AVX.
  // Stride 3:
  //    1. Load of 16/32-element vectors of 8 bits on AVX.
  if (!Subtarget.hasAVX() || (Factor != 4 && Factor != 3))
    return false;

  if (isa<LoadInst>(Inst)) {
    WideInstSize = DL.getTypeSizeInBits(Inst->getType());
    if (cast<LoadInst>(Inst)->getPointerAddressSpace())
      return false;
  } else
    WideInstSize = DL.getTypeSizeInBits(Shuffles[0]->getType());

  // We support shuffles that represent a stride-4 access for byte-sized
  // elements with a total width of WideInstSize.
  if (ShuffleElemSize == 64 && WideInstSize == 1024 && Factor == 4)
    return true;

  if (ShuffleElemSize == 8 && isa<StoreInst>(Inst) && Factor == 4 &&
      (WideInstSize == 256 || WideInstSize == 512 || WideInstSize == 1024 ||
       WideInstSize == 2048))
    return true;

  if (ShuffleElemSize == 8 && Factor == 3 &&
      (WideInstSize == 384 || WideInstSize == 768 || WideInstSize == 1536))
    return true;

  return false;
}
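
// For instance, on AVX the checks above accept a Factor = 4 load group over a
// <16 x i64> wide load (ShuffleElemSize == 64, WideInstSize == 1024), a
// Factor = 4 store group over <32/64/128/256 x i8> wide stores, and a
// Factor = 3 load group over <48/96/192 x i8> wide loads.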

void X86InterleavedAccessGroup::decompose(
    Instruction *VecInst, unsigned NumSubVectors, FixedVectorType *SubVecTy,
    SmallVectorImpl<Instruction *> &DecomposedVectors) {
  assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&
         "Expected Load or Shuffle");

  Type *VecWidth = VecInst->getType();
  (void)VecWidth;
  assert(VecWidth->isVectorTy() &&
         DL.getTypeSizeInBits(VecWidth) >=
             DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
         "Invalid Inst-size!!!");

  if (auto *SVI = dyn_cast<ShuffleVectorInst>(VecInst)) {
    Value *Op0 = SVI->getOperand(0);
    Value *Op1 = SVI->getOperand(1);

    // Generate N(= NumSubVectors) shuffles of T(= SubVecTy) type.
    for (unsigned i = 0; i < NumSubVectors; ++i)
      DecomposedVectors.push_back(
          cast<ShuffleVectorInst>(Builder.CreateShuffleVector(
              Op0, Op1,
              createSequentialMask(Indices[i], SubVecTy->getNumElements(),
                                   0))));
    return;
  }

  // Decompose the load instruction.
  LoadInst *LI = cast<LoadInst>(VecInst);
  Type *VecBaseTy, *VecBasePtrTy;
  Value *VecBasePtr;
  unsigned int NumLoads = NumSubVectors;
  // In the case of stride 3 with a vector of 32 elements, load the data in
  // the following way:
  // [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
  unsigned VecLength = DL.getTypeSizeInBits(VecWidth);
  if (VecLength == 768 || VecLength == 1536) {
    VecBaseTy = FixedVectorType::get(Type::getInt8Ty(LI->getContext()), 16);
    VecBasePtrTy = VecBaseTy->getPointerTo(LI->getPointerAddressSpace());
    VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
    NumLoads = NumSubVectors * (VecLength / 384);
  } else {
    VecBaseTy = SubVecTy;
    VecBasePtrTy = VecBaseTy->getPointerTo(LI->getPointerAddressSpace());
    VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
  }
  // Generate N loads of T type.
  assert(VecBaseTy->getPrimitiveSizeInBits().isKnownMultipleOf(8) &&
         "VecBaseTy's size must be a multiple of 8");
  const Align FirstAlignment = LI->getAlign();
  const Align SubsequentAlignment = commonAlignment(
      FirstAlignment, VecBaseTy->getPrimitiveSizeInBits().getFixedSize() / 8);
  Align Alignment = FirstAlignment;
  for (unsigned i = 0; i < NumLoads; i++) {
    // TODO: Support inbounds GEP.
    Value *NewBasePtr =
        Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
    Instruction *NewLoad =
        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, Alignment);
    DecomposedVectors.push_back(NewLoad);
    Alignment = SubsequentAlignment;
  }
}
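
// Worked example: for a stride-3 group over a <96 x i8> wide load
// (VecLength == 768) with NumSubVectors == 3, the code above emits
// NumLoads == 6 consecutive <16 x i8> loads off the bitcast base pointer;
// for the other supported sizes it simply emits NumSubVectors loads of
// SubVecTy.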

// Changing the scale of the vector type by reducing the number of elements and
// doubling the scalar size.
static MVT scaleVectorType(MVT VT) {
  unsigned ScalarSize = VT.getVectorElementType().getScalarSizeInBits() * 2;
  return MVT::getVectorVT(MVT::getIntegerVT(ScalarSize),
                          VT.getVectorNumElements() / 2);
}
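
// For example, scaleVectorType(MVT::v32i8) == MVT::v16i16 and
// scaleVectorType(MVT::v16i16) == MVT::v8i32.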

static constexpr int Concat[] = {
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};

// genShuffleBland - Creates a shuffle mask that blends two vectors. This
// function only works on instructions whose lanes fit inside 256-bit
// registers. From the input mask 'Mask' it creates a new mask 'Out' by adding
// an offset to each element. The offset amount depends on the two integers
// 'LowOffset' and 'HighOffset', where 'LowOffset' applies to elements taken
// from the first vector and 'HighOffset' to elements taken from the second
// vector.
// |a0....a5,b0....b4,c0....c4|a16..a21,b16..b20,c16..c20|
// |c5...c10,a5....a9,b5....b9|c21..c26,a22..a26,b21..b25|
// |b10..b15,c11..c15,a10..a15|b26..b31,c27..c31,a27..a31|
// For the sequence to work as a mirror to the load, we must consider the
// element order as above. In this function we are combining two types of
// shuffles: the first is a vpshufb-style shuffle and the second is a
// "blend"-style shuffle. By computing the shuffle on a sequence of 16
// elements (one lane) and adding the correct offset, we create a
// vpshufb + blend sequence between two shuffles.
static void genShuffleBland(MVT VT, ArrayRef<int> Mask,
                            SmallVectorImpl<int> &Out, int LowOffset,
                            int HighOffset) {
  assert(VT.getSizeInBits() >= 256 &&
         "This function doesn't accept width smaller than 256");
  unsigned NumOfElm = VT.getVectorNumElements();
  for (unsigned i = 0; i < Mask.size(); i++)
    Out.push_back(Mask[i] + LowOffset);
  for (unsigned i = 0; i < Mask.size(); i++)
    Out.push_back(Mask[i] + HighOffset + NumOfElm);
}
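
// Worked example with purely illustrative values: for VT = MVT::v32i8
// (NumOfElm == 32), Mask = {0, 1, 2, 3}, LowOffset = 0 and HighOffset = 16,
// Out becomes {0, 1, 2, 3, 48, 49, 50, 51}; the first copy of Mask is shifted
// by LowOffset and the second by HighOffset + NumOfElm.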

// reorderSubVector returns the data to its original order and is effectively
// the inverse of concatSubVector.
// For VecElems = 16
// Invec[0] - |0|              TransposedMatrix[0] - |0|
// Invec[1] - |1|         =>   TransposedMatrix[1] - |1|
// Invec[2] - |2|              TransposedMatrix[2] - |2|
// For VecElems = 32
// Invec[0] - |0|3|            TransposedMatrix[0] - |0|1|
// Invec[1] - |1|4|       =>   TransposedMatrix[1] - |2|3|
// Invec[2] - |2|5|            TransposedMatrix[2] - |4|5|
// For VecElems = 64
// Invec[0] - |0|3|6|9 |       TransposedMatrix[0] - |0|1|2 |3 |
// Invec[1] - |1|4|7|10|  =>   TransposedMatrix[1] - |4|5|6 |7 |
// Invec[2] - |2|5|8|11|       TransposedMatrix[2] - |8|9|10|11|
static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
                             ArrayRef<Value *> Vec, ArrayRef<int> VPShuf,
                             unsigned VecElems, unsigned Stride,
                             IRBuilder<> &Builder) {
  if (VecElems == 16) {
    for (unsigned i = 0; i < Stride; i++)
      TransposedMatrix[i] = Builder.CreateShuffleVector(Vec[i], VPShuf);
    return;
  }

  SmallVector<int, 32> OptimizeShuf;
  Value *Temp[8];

  for (unsigned i = 0; i < (VecElems / 16) * Stride; i += 2) {
    genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,
                    (i + 1) / Stride * 16);
    Temp[i / 2] = Builder.CreateShuffleVector(
        Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
    OptimizeShuf.clear();
  }

  if (VecElems == 32) {
    std::copy(Temp, Temp + Stride, TransposedMatrix.begin());
    return;
  } else
    for (unsigned i = 0; i < Stride; i++)
      TransposedMatrix[i] =
          Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
}

void X86InterleavedAccessGroup::interleave8bitStride4VF8(
    ArrayRef<Instruction *> Matrix,
    SmallVectorImpl<Value *> &TransposedMatrix) {
  // Assuming we start from the following vectors:
  // Matrix[0]= c0 c1 c2 c3 c4 ... c7
  // Matrix[1]= m0 m1 m2 m3 m4 ... m7
  // Matrix[2]= y0 y1 y2 y3 y4 ... y7
  // Matrix[3]= k0 k1 k2 k3 k4 ... k7

  MVT VT = MVT::v8i16;
  TransposedMatrix.resize(2);
  SmallVector<int, 16> MaskLow;
  SmallVector<int, 32> MaskLowTemp1, MaskLowWord;
  SmallVector<int, 32> MaskHighTemp1, MaskHighWord;

  for (unsigned i = 0; i < 8; ++i) {
    MaskLow.push_back(i);
    MaskLow.push_back(i + 8);
  }

  createUnpackShuffleMask(VT, MaskLowTemp1, true, false);
  createUnpackShuffleMask(VT, MaskHighTemp1, false, false);
  narrowShuffleMaskElts(2, MaskHighTemp1, MaskHighWord);
  narrowShuffleMaskElts(2, MaskLowTemp1, MaskLowWord);

  // IntrVec1Low = c0 m0 c1 m1 c2 m2 c3 m3 c4 m4 c5 m5 c6 m6 c7 m7
  // IntrVec2Low = y0 k0 y1 k1 y2 k2 y3 k3 y4 k4 y5 k5 y6 k6 y7 k7
  Value *IntrVec1Low =
      Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
  Value *IntrVec2Low =
      Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);

  // TransposedMatrix[0] = c0 m0 y0 k0 c1 m1 y1 k1 c2 m2 y2 k2 c3 m3 y3 k3
  // TransposedMatrix[1] = c4 m4 y4 k4 c5 m5 y5 k5 c6 m6 y6 k6 c7 m7 y7 k7
  TransposedMatrix[0] =
      Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low, MaskLowWord);
  TransposedMatrix[1] =
      Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low, MaskHighWord);
}

void X86InterleavedAccessGroup::interleave8bitStride4(
    ArrayRef<Instruction *> Matrix, SmallVectorImpl<Value *> &TransposedMatrix,
    unsigned NumOfElm) {
  // Example: Assuming we start from the following vectors:
  // Matrix[0]= c0 c1 c2 c3 c4 ... c31
  // Matrix[1]= m0 m1 m2 m3 m4 ... m31
  // Matrix[2]= y0 y1 y2 y3 y4 ... y31
  // Matrix[3]= k0 k1 k2 k3 k4 ... k31

  MVT VT = MVT::getVectorVT(MVT::i8, NumOfElm);
  MVT HalfVT = scaleVectorType(VT);

  TransposedMatrix.resize(4);
  SmallVector<int, 32> MaskHigh;
  SmallVector<int, 32> MaskLow;
  SmallVector<int, 32> LowHighMask[2];
  SmallVector<int, 32> MaskHighTemp;
  SmallVector<int, 32> MaskLowTemp;

  // MaskLow and MaskHigh are built to match the vpunpcklbw and vpunpckhbw X86
  // shuffle patterns.
  createUnpackShuffleMask(VT, MaskLow, true, false);
  createUnpackShuffleMask(VT, MaskHigh, false, false);

  // MaskLowTemp and MaskHighTemp are built to match the vpunpckldw and
  // vpunpckhdw X86 shuffle patterns; they are then narrowed into LowHighMask.
  createUnpackShuffleMask(HalfVT, MaskLowTemp, true, false);
  createUnpackShuffleMask(HalfVT, MaskHighTemp, false, false);
  narrowShuffleMaskElts(2, MaskLowTemp, LowHighMask[0]);
  narrowShuffleMaskElts(2, MaskHighTemp, LowHighMask[1]);

  // IntrVec1Low  = c0 m0 c1 m1 ... c7  m7  | c16 m16 c17 m17 ... c23 m23
  // IntrVec1High = c8 m8 c9 m9 ... c15 m15 | c24 m24 c25 m25 ... c31 m31
  // IntrVec2Low  = y0 k0 y1 k1 ... y7  k7  | y16 k16 y17 k17 ... y23 k23
  // IntrVec2High = y8 k8 y9 k9 ... y15 k15 | y24 k24 y25 k25 ... y31 k31
  Value *IntrVec[4];

  IntrVec[0] = Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
  IntrVec[1] = Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskHigh);
  IntrVec[2] = Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);
  IntrVec[3] = Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskHigh);

  // cmyk4  cmyk5  cmyk6  cmyk7  | cmyk20 cmyk21 cmyk22 cmyk23
  // cmyk12 cmyk13 cmyk14 cmyk15 | cmyk28 cmyk29 cmyk30 cmyk31
  // cmyk0  cmyk1  cmyk2  cmyk3  | cmyk16 cmyk17 cmyk18 cmyk19
  // cmyk8  cmyk9  cmyk10 cmyk11 | cmyk24 cmyk25 cmyk26 cmyk27
  Value *VecOut[4];
  for (int i = 0; i < 4; i++)
    VecOut[i] = Builder.CreateShuffleVector(IntrVec[i / 2], IntrVec[i / 2 + 2],
                                            LowHighMask[i % 2]);

  // cmyk0  cmyk1  cmyk2  cmyk3  | cmyk4  cmyk5  cmyk6  cmyk7
  // cmyk8  cmyk9  cmyk10 cmyk11 | cmyk12 cmyk13 cmyk14 cmyk15
  // cmyk16 cmyk17 cmyk18 cmyk19 | cmyk20 cmyk21 cmyk22 cmyk23
  // cmyk24 cmyk25 cmyk26 cmyk27 | cmyk28 cmyk29 cmyk30 cmyk31
  if (VT == MVT::v16i8) {
    std::copy(VecOut, VecOut + 4, TransposedMatrix.begin());
    return;
  }

  reorderSubVector(VT, TransposedMatrix, VecOut, makeArrayRef(Concat, 16),
                   NumOfElm, 4, Builder);
}

// createShuffleStride returns a shuffle mask of size N.
// The shuffle pattern is as follows:
// {0, Stride%(VF/Lane), (2*Stride%(VF/Lane))...(VF*Stride/Lane)%(VF/Lane),
//  (VF/Lane), (VF/Lane)+Stride%(VF/Lane),...,
//  (VF/Lane)+(VF*Stride/Lane)%(VF/Lane)}
// Where Lane is the # of lanes in a register:
// VectorSize = 128 => Lane = 1
// VectorSize = 256 => Lane = 2
// For example, the shuffle pattern for VF 16 with register size 256 (lanes = 2)
// is {<[0|3|6|1|4|7|2|5]-[8|11|14|9|12|15|10|13]>}
static void createShuffleStride(MVT VT, int Stride,
                                SmallVectorImpl<int> &Mask) {
  int VectorSize = VT.getSizeInBits();
  int VF = VT.getVectorNumElements();
  int LaneCount = std::max(VectorSize / 128, 1);
  for (int Lane = 0; Lane < LaneCount; Lane++)
    for (int i = 0, LaneSize = VF / LaneCount; i != LaneSize; ++i)
      Mask.push_back((i * Stride) % LaneSize + LaneSize * Lane);
}

// setGroupSize sets 'SizeInfo' to the size (number of elements) of each group
// inside a shuffle mask. A mask contains exactly 3 groups, where each group is
// a monotonically increasing sequence with stride 3.
// For example shuffleMask {0,3,6,1,4,7,2,5} => {3,3,2}
static void setGroupSize(MVT VT, SmallVectorImpl<int> &SizeInfo) {
  int VectorSize = VT.getSizeInBits();
  int VF = VT.getVectorNumElements() / std::max(VectorSize / 128, 1);
  for (int i = 0, FirstGroupElement = 0; i < 3; i++) {
    int GroupSize = std::ceil((VF - FirstGroupElement) / 3.0);
    SizeInfo.push_back(GroupSize);
    FirstGroupElement = ((GroupSize)*3 + FirstGroupElement) % VF;
  }
}
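
// Worked example: for MVT::v32i8 the per-lane VF is 32 / 2 == 16, so the loop
// above produces GroupSize == {6, 5, 5} (ceil(16/3), ceil(14/3), ceil(15/3)).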

// DecodePALIGNRMask returns the shuffle mask of the vpalignr instruction.
// vpalignr works on a per-lane basis, where Lane is the # of lanes in a
// register:
// VectorWide = 128 => Lane = 1
// VectorWide = 256 => Lane = 2
// For Lane = 1 the shuffle pattern is: {DiffToJump,...,DiffToJump+VF-1}.
// For Lane = 2 the shuffle pattern is:
// {DiffToJump,...,VF/2-1,VF,...,DiffToJump+VF-1}.
// The Imm variable sets the offset amount. The result of the function is
// stored in the ShuffleMask vector and is built as described above.
// AlignDirection is a boolean that indicates the direction of the alignment
// (false - align to the "right" side while true - align to the "left" side).
static void DecodePALIGNRMask(MVT VT, unsigned Imm,
                              SmallVectorImpl<int> &ShuffleMask,
                              bool AlignDirection = true, bool Unary = false) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = std::max((int)VT.getSizeInBits() / 128, 1);
  unsigned NumLaneElts = NumElts / NumLanes;

  Imm = AlignDirection ? Imm : (NumLaneElts - Imm);
  unsigned Offset = Imm * (VT.getScalarSizeInBits() / 8);

  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      unsigned Base = i + Offset;
      // if i+offset is out of this lane then we actually need the other source
      // If Unary the other source is the first source.
      if (Base >= NumLaneElts)
        Base = Unary ? Base % NumLaneElts : Base + NumElts - NumLaneElts;
      ShuffleMask.push_back(Base + l);
    }
  }
}
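
// Worked example: for VT = MVT::v16i8, Imm = 5, AlignDirection = true and
// Unary = true there is a single 16-element lane and Offset == 5, so the mask
// is {5, 6, ..., 15, 0, 1, 2, 3, 4}; with Unary = false the wrapped elements
// come from the second source instead, giving {5, ..., 15, 16, 17, 18, 19, 20}.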

// concatSubVector - The function rebuilds the data into the expected order.
// An assumption about the shape of the matrix was made so that the
// deinterleave works with per-lane instructions like 'vpalignr' or 'vpshufb'.
// This function ensures that the data is built in the correct way for those
// lane instructions. Each lane inside the vector is 128 bits long.
//
// The 'InVec' argument contains the data in increasing order: InVec[0] holds
// the first 128 bits of data. The number of different lanes inside a vector
// depends on 'VecElems'; in general, the formula is VecElems * type / 128.
// The size of the array 'InVec' is equal to 'VecElems'.
// For VecElems = 16
// Invec[0] - |0|              Vec[0] - |0|
// Invec[1] - |1|         =>   Vec[1] - |1|
// Invec[2] - |2|              Vec[2] - |2|
// For VecElems = 32
// Invec[0] - |0|1|            Vec[0] - |0|3|
// Invec[1] - |2|3|       =>   Vec[1] - |1|4|
// Invec[2] - |4|5|            Vec[2] - |2|5|
// For VecElems = 64
// Invec[0] - |0|1|2 |3 |      Vec[0] - |0|3|6|9 |
// Invec[1] - |4|5|6 |7 | =>   Vec[1] - |1|4|7|10|
// Invec[2] - |8|9|10|11|      Vec[2] - |2|5|8|11|
static void concatSubVector(Value **Vec, ArrayRef<Instruction *> InVec,
                            unsigned VecElems, IRBuilder<> &Builder) {
  if (VecElems == 16) {
    for (int i = 0; i < 3; i++)
      Vec[i] = InVec[i];
    return;
  }

  for (unsigned j = 0; j < VecElems / 32; j++)
    for (int i = 0; i < 3; i++)
      Vec[i + j * 3] = Builder.CreateShuffleVector(
          InVec[j * 6 + i], InVec[j * 6 + i + 3], makeArrayRef(Concat, 32));

  if (VecElems == 32)
    return;

  for (int i = 0; i < 3; i++)
    Vec[i] = Builder.CreateShuffleVector(Vec[i], Vec[i + 3], Concat);
}

void X86InterleavedAccessGroup::deinterleave8bitStride3(
    ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
    unsigned VecElems) {
  // Example: Assuming we start from the following vectors:
  // Matrix[0]= a0 b0 c0 a1 b1 c1 a2 b2
  // Matrix[1]= c2 a3 b3 c3 a4 b4 c4 a5
  // Matrix[2]= b5 c5 a6 b6 c6 a7 b7 c7

  TransposedMatrix.resize(3);
  SmallVector<int, 32> VPShuf;
  SmallVector<int, 32> VPAlign[2];
  SmallVector<int, 32> VPAlign2;
  SmallVector<int, 32> VPAlign3;
  SmallVector<int, 3> GroupSize;
  Value *Vec[6], *TempVector[3];

  MVT VT = MVT::getVT(Shuffles[0]->getType());

  createShuffleStride(VT, 3, VPShuf);
  setGroupSize(VT, GroupSize);

  for (int i = 0; i < 2; i++)
    DecodePALIGNRMask(VT, GroupSize[2 - i], VPAlign[i], false);

  DecodePALIGNRMask(VT, GroupSize[2] + GroupSize[1], VPAlign2, true, true);
  DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, true, true);

  concatSubVector(Vec, InVec, VecElems, Builder);
  // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
  // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
  // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
  for (int i = 0; i < 3; i++)
    Vec[i] = Builder.CreateShuffleVector(Vec[i], VPShuf);

  // TempVector[0]= a6 a7 a0 a1 a2 b0 b1 b2
  // TempVector[1]= c0 c1 c2 c3 c4 a3 a4 a5
  // TempVector[2]= b3 b4 b5 b6 b7 c5 c6 c7
  for (int i = 0; i < 3; i++)
    TempVector[i] =
        Builder.CreateShuffleVector(Vec[(i + 2) % 3], Vec[i], VPAlign[0]);

  // Vec[0]= a3 a4 a5 a6 a7 a0 a1 a2
  // Vec[1]= c5 c6 c7 c0 c1 c2 c3 c4
  // Vec[2]= b0 b1 b2 b3 b4 b5 b6 b7
  for (int i = 0; i < 3; i++)
    Vec[i] = Builder.CreateShuffleVector(TempVector[(i + 1) % 3], TempVector[i],
                                         VPAlign[1]);

  // TransposedMatrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
  // TransposedMatrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
  // TransposedMatrix[2]= c0 c1 c2 c3 c4 c5 c6 c7
  Value *TempVec = Builder.CreateShuffleVector(Vec[1], VPAlign3);
  TransposedMatrix[0] = Builder.CreateShuffleVector(Vec[0], VPAlign2);
  TransposedMatrix[1] = VecElems == 8 ? Vec[2] : TempVec;
  TransposedMatrix[2] = VecElems == 8 ? TempVec : Vec[2];
}

// group2Shuffle reorders the shuffle stride back into contiguous order.
// For example, for VF16 with Mask1 = {0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13}
// => MaskResult = {0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5}.
static void group2Shuffle(MVT VT, SmallVectorImpl<int> &Mask,
                          SmallVectorImpl<int> &Output) {
  int IndexGroup[3] = {0, 0, 0};
  int Index = 0;
  int VectorWidth = VT.getSizeInBits();
  int VF = VT.getVectorNumElements();
  // Find the index of the different groups.
  int Lane = (VectorWidth / 128 > 0) ? VectorWidth / 128 : 1;
  for (int i = 0; i < 3; i++) {
    IndexGroup[(Index * 3) % (VF / Lane)] = Index;
    Index += Mask[i];
  }
  // According to the index compute the convert mask.
  for (int i = 0; i < VF / Lane; i++) {
    Output.push_back(IndexGroup[i % 3]);
    IndexGroup[i % 3]++;
  }
}

void X86InterleavedAccessGroup::interleave8bitStride3(
    ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
    unsigned VecElems) {
  // Example: Assuming we start from the following vectors:
  // Matrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
  // Matrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
  // Matrix[2]= c0 c1 c2 c3 c4 c5 c6 c7

  TransposedMatrix.resize(3);
  SmallVector<int, 3> GroupSize;
  SmallVector<int, 32> VPShuf;
  SmallVector<int, 32> VPAlign[3];
  SmallVector<int, 32> VPAlign2;
  SmallVector<int, 32> VPAlign3;

  Value *Vec[3], *TempVector[3];
  MVT VT = MVT::getVectorVT(MVT::i8, VecElems);

  setGroupSize(VT, GroupSize);

  for (int i = 0; i < 3; i++)
    DecodePALIGNRMask(VT, GroupSize[i], VPAlign[i]);

  DecodePALIGNRMask(VT, GroupSize[1] + GroupSize[2], VPAlign2, false, true);
  DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, false, true);

  // Vec[0]= a3 a4 a5 a6 a7 a0 a1 a2
  // Vec[1]= c5 c6 c7 c0 c1 c2 c3 c4
  // Vec[2]= b0 b1 b2 b3 b4 b5 b6 b7
  Vec[0] = Builder.CreateShuffleVector(InVec[0], VPAlign2);
  Vec[1] = Builder.CreateShuffleVector(InVec[1], VPAlign3);
  Vec[2] = InVec[2];

  // Vec[0]= a6 a7 a0 a1 a2 b0 b1 b2
  // Vec[1]= c0 c1 c2 c3 c4 a3 a4 a5
  // Vec[2]= b3 b4 b5 b6 b7 c5 c6 c7
  for (int i = 0; i < 3; i++)
    TempVector[i] =
        Builder.CreateShuffleVector(Vec[i], Vec[(i + 2) % 3], VPAlign[1]);

  // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
  // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
  // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
  for (int i = 0; i < 3; i++)
    Vec[i] = Builder.CreateShuffleVector(TempVector[i], TempVector[(i + 1) % 3],
                                         VPAlign[2]);

  // TransposedMatrix[0] = a0 b0 c0 a1 b1 c1 a2 b2
  // TransposedMatrix[1] = c2 a3 b3 c3 a4 b4 c4 a5
  // TransposedMatrix[2] = b5 c5 a6 b6 c6 a7 b7 c7
  unsigned NumOfElm = VT.getVectorNumElements();
  group2Shuffle(VT, GroupSize, VPShuf);
  reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm, 3, Builder);
}

void X86InterleavedAccessGroup::transpose_4x4(
    ArrayRef<Instruction *> Matrix,
    SmallVectorImpl<Value *> &TransposedMatrix) {
  assert(Matrix.size() == 4 && "Invalid matrix size");
  TransposedMatrix.resize(4);

  // dst = src1[0,1],src2[0,1]
  static constexpr int IntMask1[] = {0, 1, 4, 5};
  ArrayRef<int> Mask = makeArrayRef(IntMask1, 4);
  Value *IntrVec1 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
  Value *IntrVec2 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);

  // dst = src1[2,3],src2[2,3]
  static constexpr int IntMask2[] = {2, 3, 6, 7};
  Mask = makeArrayRef(IntMask2, 4);
  Value *IntrVec3 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
  Value *IntrVec4 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);

  // dst = src1[0],src2[0],src1[2],src2[2]
  static constexpr int IntMask3[] = {0, 4, 2, 6};
  Mask = makeArrayRef(IntMask3, 4);
  TransposedMatrix[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
  TransposedMatrix[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);

  // dst = src1[1],src2[1],src1[3],src2[3]
  static constexpr int IntMask4[] = {1, 5, 3, 7};
  Mask = makeArrayRef(IntMask4, 4);
  TransposedMatrix[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
  TransposedMatrix[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
}

// Lowers this interleaved access group into X86-specific
// instructions/intrinsics.
bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
  SmallVector<Instruction *, 4> DecomposedVectors;
  SmallVector<Value *, 4> TransposedVectors;
  auto *ShuffleTy = cast<FixedVectorType>(Shuffles[0]->getType());

  if (isa<LoadInst>(Inst)) {
    auto *ShuffleEltTy = cast<FixedVectorType>(Inst->getType());
    unsigned NumSubVecElems = ShuffleEltTy->getNumElements() / Factor;
    switch (NumSubVecElems) {
    default:
      return false;
    case 4:
    case 8:
    case 16:
    case 32:
    case 64:
      if (ShuffleTy->getNumElements() != NumSubVecElems)
        return false;
      break;
    }

    // Try to generate target-sized register(/instruction).
    decompose(Inst, Factor, ShuffleTy, DecomposedVectors);

    // Perform matrix-transposition in order to compute interleaved
    // results by generating some sort of (optimized) target-specific
    // instructions.
    if (NumSubVecElems == 4)
      transpose_4x4(DecomposedVectors, TransposedVectors);
    else
      deinterleave8bitStride3(DecomposedVectors, TransposedVectors,
                              NumSubVecElems);

    // Now replace the unoptimized-interleaved-vectors with the
    // transposed-interleaved vectors.
    for (unsigned i = 0, e = Shuffles.size(); i < e; ++i)
      Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);

    return true;
  }

  Type *ShuffleEltTy = ShuffleTy->getElementType();
  unsigned NumSubVecElems = ShuffleTy->getNumElements() / Factor;

  // Lower the interleaved stores:
  //   1. Decompose the interleaved wide shuffle into individual shuffle
  //      vectors.
  decompose(Shuffles[0], Factor,
            FixedVectorType::get(ShuffleEltTy, NumSubVecElems),
            DecomposedVectors);

  //   2. Transpose the interleaved-vectors into vectors of contiguous
  //      elements.
  switch (NumSubVecElems) {
  case 4:
    transpose_4x4(DecomposedVectors, TransposedVectors);
    break;
  case 8:
    interleave8bitStride4VF8(DecomposedVectors, TransposedVectors);
    break;
  case 16:
  case 32:
  case 64:
    if (Factor == 4)
      interleave8bitStride4(DecomposedVectors, TransposedVectors,
                            NumSubVecElems);
    if (Factor == 3)
      interleave8bitStride3(DecomposedVectors, TransposedVectors,
                            NumSubVecElems);
    break;
  default:
    return false;
  }

  //   3. Concatenate the contiguous-vectors back into a wide vector.
  Value *WideVec = concatenateVectors(Builder, TransposedVectors);

  //   4. Generate a store instruction for wide-vec.
  StoreInst *SI = cast<StoreInst>(Inst);
  Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(), SI->getAlign());

  return true;
}

// Lower interleaved load(s) into target specific instructions/
// intrinsics. Lowering sequence varies depending on the vector-types, factor,
// number of shuffles and ISA.
// Currently, lowering is supported for 4x64 bits with Factor = 4 on AVX.
bool X86TargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  // Create an interleaved access group.
  IRBuilder<> Builder(LI);
  X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
                                Builder);

  return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
}
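
// Illustrative sketch of the kind of IR this hook is handed for the supported
// Factor = 4, 4 x i64 case (the exact shape here is an assumption, mirroring
// the Factor = 2 example in the class comment above):
//   %wide.vec = load <16 x i64>, <16 x i64>* %ptr
//   %s0 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison,
//           <4 x i32> <i32 0, i32 4, i32 8, i32 12>
//   %s1 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison,
//           <4 x i32> <i32 1, i32 5, i32 9, i32 13>
//   ... and likewise %s2 and %s3 starting at indices 2 and 3.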

bool X86TargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  assert(cast<FixedVectorType>(SVI->getType())->getNumElements() % Factor ==
             0 &&
         "Invalid interleaved store");

  // Holds the indices of SVI that correspond to the starting index of each
  // interleaved shuffle.
  SmallVector<unsigned, 4> Indices;
  auto Mask = SVI->getShuffleMask();
  for (unsigned i = 0; i < Factor; i++)
    Indices.push_back(Mask[i]);

  ArrayRef<ShuffleVectorInst *> Shuffles = makeArrayRef(SVI);

  // Create an interleaved access group.
  IRBuilder<> Builder(SI);
  X86InterleavedAccessGroup Grp(SI, Shuffles, Indices, Factor, Subtarget,
                                Builder);

  return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
}
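
// Illustrative sketch of a store-side pattern that reaches this hook, assuming
// the generic interleaved-access pass has matched a Factor = 4 group with
// eight 8-bit elements per sub-vector (the operand shapes are an assumption
// for exposition):
//   %interleaved = shufflevector <16 x i8> %v01, <16 x i8> %v23,
//       <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, ...>
//   store <32 x i8> %interleaved, <32 x i8>* %ptr, align 1
// Here Indices collects the first Factor mask elements: {0, 8, 16, 24}.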