//===-- X86LowerAMXIntrinsics.cpp -X86 Scalarize AMX Intrinsics------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Pass to transform AMX intrinsics to scalar operations.
/// The pass is always enabled, but it bails out unless the function is
/// compiled at -O0 or carries the optnone attribute. In those cases the defs
/// of the tile shapes sit right next to the AMX intrinsics that use them, and
/// we cannot find a program point that post-dominates all the shape defs
/// while dominating all the AMX intrinsics. To break this dependency on the
/// shapes, we lower the AMX intrinsics to scalar operations so that
/// compilation does not fail. In the long term, fast register allocation
/// should be improved to allocate AMX registers directly.
//===----------------------------------------------------------------------===//
//
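//
// Illustrative example of the transformation: a tile load such as
//
//   %t = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
//                                                    i8* %ptr, i64 %stride)
//
// becomes a row/column loop nest that loads one i32 element per iteration
// and accumulates the elements into an ordinary <256 x i32> vector, so no
// AMX tile register is needed at all.
//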
#include "X86.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "lower-amx-intrinsics"
#ifndef NDEBUG
static bool isV256I32Ty(Type *Ty) {
  if (auto *FVT = dyn_cast<FixedVectorType>(Ty))
    return FVT->getNumElements() == 256 &&
           FVT->getElementType()->isIntegerTy(32);
  return false;
}
#endif
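
// Command-line gate for the pass: hidden and off by default. A typical
// invocation would be something like `llc -enable-x86-scalar-amx ...`
// (illustrative).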
static cl::opt<bool>
    X86ScalarizeAMX("enable-x86-scalar-amx", cl::init(false), cl::Hidden,
                    cl::desc("X86: enable AMX scalarization."));
namespace {
class X86LowerAMXIntrinsics {
  Function &Func;

public:
  X86LowerAMXIntrinsics(Function &F, DomTreeUpdater &DomTU, LoopInfo *LoopI)
      : Func(F), DTU(DomTU), LI(LoopI) {}
  bool visit();

private:
  DomTreeUpdater &DTU;
  LoopInfo *LI;
  BasicBlock *createLoop(BasicBlock *Preheader, BasicBlock *Exit, Value *Bound,
                         Value *Step, StringRef Name, IRBuilderBase &B,
                         Loop *L);
  template <bool IsTileLoad>
  Value *createTileLoadStoreLoops(BasicBlock *Start, BasicBlock *End,
                                  IRBuilderBase &B, Value *Row, Value *Col,
                                  Value *Ptr, Value *Stride, Value *Tile);
  template <Intrinsic::ID IntrID>
  typename std::enable_if<IntrID == Intrinsic::x86_tdpbssd_internal ||
                              IntrID == Intrinsic::x86_tdpbsud_internal ||
                              IntrID == Intrinsic::x86_tdpbusd_internal ||
                              IntrID == Intrinsic::x86_tdpbuud_internal ||
                              IntrID == Intrinsic::x86_tdpbf16ps_internal,
                          Value *>::type
  createTileDPLoops(BasicBlock *Start, BasicBlock *End, IRBuilderBase &B,
                    Value *Row, Value *Col, Value *K, Value *Acc, Value *LHS,
                    Value *RHS);
  template <bool IsTileLoad>
  bool lowerTileLoadStore(Instruction *TileLoadStore);
  template <Intrinsic::ID IntrID>
  typename std::enable_if<IntrID == Intrinsic::x86_tdpbssd_internal ||
                              IntrID == Intrinsic::x86_tdpbsud_internal ||
                              IntrID == Intrinsic::x86_tdpbusd_internal ||
                              IntrID == Intrinsic::x86_tdpbuud_internal ||
                              IntrID == Intrinsic::x86_tdpbf16ps_internal,
                          bool>::type
  lowerTileDP(Instruction *TileDP);
  bool lowerTileZero(Instruction *TileZero);
};
} // anonymous namespace
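
// Build an empty loop (header/body/latch) on the existing Preheader->Exit
// edge and return the body block:
//
//   Preheader -> Header -> Body -> Latch -+-> Header (backedge)
//                                         +-> Exit
//
// The induction variable is an i16 counting from 0 by Step until it reaches
// Bound; the caller fills in Body afterwards.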
BasicBlock *X86LowerAMXIntrinsics::createLoop(BasicBlock *Preheader,
                                              BasicBlock *Exit, Value *Bound,
                                              Value *Step, StringRef Name,
                                              IRBuilderBase &B, Loop *L) {
  LLVMContext &Ctx = Preheader->getContext();
  BasicBlock *Header =
      BasicBlock::Create(Ctx, Name + ".header", Preheader->getParent(), Exit);
  BasicBlock *Body =
      BasicBlock::Create(Ctx, Name + ".body", Header->getParent(), Exit);
  BasicBlock *Latch =
      BasicBlock::Create(Ctx, Name + ".latch", Header->getParent(), Exit);

  Type *I16Ty = Type::getInt16Ty(Ctx);
  BranchInst::Create(Body, Header);
  BranchInst::Create(Latch, Body);
  PHINode *IV =
      PHINode::Create(I16Ty, 2, Name + ".iv", Header->getTerminator());
  IV->addIncoming(ConstantInt::get(I16Ty, 0), Preheader);

  B.SetInsertPoint(Latch);
  Value *Inc = B.CreateAdd(IV, Step, Name + ".step");
  Value *Cond = B.CreateICmpNE(Inc, Bound, Name + ".cond");
  BranchInst::Create(Header, Exit, Cond, Latch);
  IV->addIncoming(Inc, Latch);

  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());
  BasicBlock *Tmp = PreheaderBr->getSuccessor(0);
  PreheaderBr->setSuccessor(0, Header);
  DTU.applyUpdatesPermissive({
      {DominatorTree::Delete, Preheader, Tmp},
      {DominatorTree::Insert, Header, Body},
      {DominatorTree::Insert, Body, Latch},
      {DominatorTree::Insert, Latch, Header},
      {DominatorTree::Insert, Latch, Exit},
      {DominatorTree::Insert, Preheader, Header},
  });

  if (LI) {
    L->addBasicBlockToLoop(Header, *LI);
    L->addBasicBlockToLoop(Body, *LI);
    L->addBasicBlockToLoop(Latch, *LI);
  }
  return Body;
}
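
// Scalarize tileload/tilestore into a two-level loop nest over (row, col).
// The caller passes Col and Stride already scaled to dword units, so the
// element at (row, col) is addressed as an i32 at Ptr + row * Stride + col,
// and lives at index row * 16 + col in the flat <256 x i32> tile vector.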
template <bool IsTileLoad>
Value *X86LowerAMXIntrinsics::createTileLoadStoreLoops(
    BasicBlock *Start, BasicBlock *End, IRBuilderBase &B, Value *Row,
    Value *Col, Value *Ptr, Value *Stride, Value *Tile) {
  std::string IntrinName = IsTileLoad ? "tileload" : "tilestore";
  Loop *RowLoop = nullptr;
  Loop *ColLoop = nullptr;
  if (LI) {
    RowLoop = LI->AllocateLoop();
    ColLoop = LI->AllocateLoop();
    RowLoop->addChildLoop(ColLoop);
    if (Loop *ParentL = LI->getLoopFor(Start))
      ParentL->addChildLoop(RowLoop);
    else
      LI->addTopLevelLoop(RowLoop);
  }

  BasicBlock *RowBody = createLoop(Start, End, Row, B.getInt16(1),
                                   IntrinName + ".scalarize.rows", B, RowLoop);
  BasicBlock *RowLatch = RowBody->getSingleSuccessor();

  BasicBlock *ColBody = createLoop(RowBody, RowLatch, Col, B.getInt16(1),
                                   IntrinName + ".scalarize.cols", B, ColLoop);

  BasicBlock *ColLoopLatch = ColBody->getSingleSuccessor();
  BasicBlock *ColLoopHeader = ColBody->getSinglePredecessor();
  BasicBlock *RowLoopHeader = RowBody->getSinglePredecessor();
  Value *CurrentRow = &*RowLoopHeader->begin();
  Value *CurrentCol = &*ColLoopHeader->begin();
  Type *EltTy = B.getInt32Ty();
  FixedVectorType *V256I32Ty = FixedVectorType::get(EltTy, 256);

  // Common part for tileload and tilestore
  // *.scalarize.cols.body:
  // Calculate %idxmem and %idxvec
  B.SetInsertPoint(ColBody->getTerminator());
  Value *CurrentRowZExt = B.CreateZExt(CurrentRow, Stride->getType());
  Value *CurrentColZExt = B.CreateZExt(CurrentCol, Stride->getType());
  Value *Offset =
      B.CreateAdd(B.CreateMul(CurrentRowZExt, Stride), CurrentColZExt);
  unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
  Value *EltBasePtr = B.CreatePointerCast(Ptr, PointerType::get(EltTy, AS));
  Value *EltPtr = B.CreateGEP(EltTy, EltBasePtr, Offset);
  Value *Idx =
      B.CreateAdd(B.CreateMul(CurrentRow, B.getInt16(16)), CurrentCol);
  if (IsTileLoad) {
    // tileload.scalarize.rows.header:
    // %vec.phi.row = phi <256 x i32> [ zeroinitializer, %entry ], [ %ResVec,
    // %tileload.scalarize.rows.latch ]
    B.SetInsertPoint(RowLoopHeader->getTerminator());
    Value *VecZero = Constant::getNullValue(V256I32Ty);
    PHINode *VecCPhiRowLoop = B.CreatePHI(V256I32Ty, 2, "vec.phi.row");
    VecCPhiRowLoop->addIncoming(VecZero, Start);

    // tileload.scalarize.cols.header:
    // %vec.phi = phi <256 x i32> [ %vec.phi.row, %tileload.scalarize.rows.body
    // ], [ %ResVec, %tileload.scalarize.cols.latch ]
    B.SetInsertPoint(ColLoopHeader->getTerminator());
    PHINode *VecPhi = B.CreatePHI(V256I32Ty, 2, "vec.phi");
    VecPhi->addIncoming(VecCPhiRowLoop, RowBody);

    // tileload.scalarize.cols.body:
    // Calculate %idxmem and %idxvec
    // %eltptr = getelementptr i32, i32* %base, i64 %idxmem
    // %elt = load i32, i32* %eltptr
    // %ResVec = insertelement <256 x i32> %vec.phi, i32 %elt, i16 %idxvec
    B.SetInsertPoint(ColBody->getTerminator());
    Value *Elt = B.CreateLoad(EltTy, EltPtr);
    Value *ResVec = B.CreateInsertElement(VecPhi, Elt, Idx);
    VecPhi->addIncoming(ResVec, ColLoopLatch);
    VecCPhiRowLoop->addIncoming(ResVec, RowLatch);

    return ResVec;
  } else {
    auto *BitCast = cast<BitCastInst>(Tile);
    Value *Vec = BitCast->getOperand(0);
    assert(isV256I32Ty(Vec->getType()) && "bitcast from non-v256i32 to x86amx");
    // tilestore.scalarize.cols.body:
    // %mul = mul i16 %row.iv, i16 16
    // %idx = add i16 %mul, i16 %col.iv
    // %elt = extractelement <256 x i32> %vec, i16 %idx
    // store i32 %elt, i32* %eltptr
    B.SetInsertPoint(ColBody->getTerminator());
    Value *Elt = B.CreateExtractElement(Vec, Idx);
    B.CreateStore(Elt, EltPtr);
    return nullptr;
  }
}
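
// Scalarize a tile dot-product into a three-level loop nest over
// (row, col, k): for each accumulator dword C[row][col], reduce over k the
// products of the packed sub-elements of A[row][k] and B[k][col] (4 x i8 for
// the integer variants, 2 x bf16 for tdpbf16ps) and add the result into C.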
template <Intrinsic::ID IntrID>
typename std::enable_if<IntrID == Intrinsic::x86_tdpbssd_internal ||
                            IntrID == Intrinsic::x86_tdpbsud_internal ||
                            IntrID == Intrinsic::x86_tdpbusd_internal ||
                            IntrID == Intrinsic::x86_tdpbuud_internal ||
                            IntrID == Intrinsic::x86_tdpbf16ps_internal,
                        Value *>::type
X86LowerAMXIntrinsics::createTileDPLoops(BasicBlock *Start, BasicBlock *End,
                                         IRBuilderBase &B, Value *Row,
                                         Value *Col, Value *K, Value *Acc,
                                         Value *LHS, Value *RHS) {
  std::string IntrinName;
  switch (IntrID) {
  case Intrinsic::x86_tdpbssd_internal:
    IntrinName = "tiledpbssd";
    break;
  case Intrinsic::x86_tdpbsud_internal:
    IntrinName = "tiledpbsud";
    break;
  case Intrinsic::x86_tdpbusd_internal:
    IntrinName = "tiledpbusd";
    break;
  case Intrinsic::x86_tdpbuud_internal:
    IntrinName = "tiledpbuud";
    break;
  case Intrinsic::x86_tdpbf16ps_internal:
    IntrinName = "tiledpbf16ps";
    break;
  }
  Loop *RowLoop = nullptr;
  Loop *ColLoop = nullptr;
  Loop *InnerLoop = nullptr;
  if (LI) {
    RowLoop = LI->AllocateLoop();
    ColLoop = LI->AllocateLoop();
    InnerLoop = LI->AllocateLoop();
    ColLoop->addChildLoop(InnerLoop);
    RowLoop->addChildLoop(ColLoop);
    if (Loop *ParentL = LI->getLoopFor(Start))
      ParentL->addChildLoop(RowLoop);
    else
      LI->addTopLevelLoop(RowLoop);
  }

  BasicBlock *RowBody = createLoop(Start, End, Row, B.getInt16(1),
                                   IntrinName + ".scalarize.rows", B, RowLoop);
  BasicBlock *RowLatch = RowBody->getSingleSuccessor();

  BasicBlock *ColBody = createLoop(RowBody, RowLatch, Col, B.getInt16(1),
                                   IntrinName + ".scalarize.cols", B, ColLoop);
  BasicBlock *ColLoopLatch = ColBody->getSingleSuccessor();

  B.SetInsertPoint(ColBody->getTerminator());
  BasicBlock *InnerBody =
      createLoop(ColBody, ColLoopLatch, K, B.getInt16(1),
                 IntrinName + ".scalarize.inner", B, InnerLoop);

  BasicBlock *ColLoopHeader = ColBody->getSinglePredecessor();
  BasicBlock *RowLoopHeader = RowBody->getSinglePredecessor();
  BasicBlock *InnerLoopHeader = InnerBody->getSinglePredecessor();
  BasicBlock *InnerLoopLatch = InnerBody->getSingleSuccessor();
  Value *CurrentRow = &*RowLoopHeader->begin();
  Value *CurrentCol = &*ColLoopHeader->begin();
  Value *CurrentInner = &*InnerLoopHeader->begin();

  FixedVectorType *V256I32Ty = FixedVectorType::get(B.getInt32Ty(), 256);
  auto *BitCastAcc = cast<BitCastInst>(Acc);
  Value *VecC = BitCastAcc->getOperand(0);
  assert(isV256I32Ty(VecC->getType()) && "bitcast from non-v256i32 to x86amx");
  // TODO else create BitCast from x86amx to v256i32.
  // Store x86amx to memory, and reload from memory
  // to vector. However with -O0, it doesn't happen.
  auto *BitCastLHS = cast<BitCastInst>(LHS);
  Value *VecA = BitCastLHS->getOperand(0);
  assert(isV256I32Ty(VecA->getType()) && "bitcast from non-v256i32 to x86amx");
  auto *BitCastRHS = cast<BitCastInst>(RHS);
  Value *VecB = BitCastRHS->getOperand(0);
  assert(isV256I32Ty(VecB->getType()) && "bitcast from non-v256i32 to x86amx");

  // tiledpbssd.scalarize.rows.header:
  // %vec.c.phi.row = phi <256 x i32> [ %VecC, %continue ], [ %NewVecC,
  // %tiledpbssd.scalarize.rows.latch ]
  // %vec.d.phi.row = phi <256 x i32> [ zeroinitializer, %continue ], [
  // %NewVecD, %tiledpbssd.scalarize.rows.latch ]
  B.SetInsertPoint(RowLoopHeader->getTerminator());
  PHINode *VecCPhiRowLoop = B.CreatePHI(V256I32Ty, 2, "vec.c.phi.row");
  VecCPhiRowLoop->addIncoming(VecC, Start);
  Value *VecZero = Constant::getNullValue(V256I32Ty);
  PHINode *VecDPhiRowLoop = B.CreatePHI(V256I32Ty, 2, "vec.d.phi.row");
  VecDPhiRowLoop->addIncoming(VecZero, Start);

  // tiledpbssd.scalarize.cols.header:
  // %vec.c.phi.col = phi <256 x i32> [ %vec.c.phi.row,
  // %tiledpbssd.scalarize.rows.body ], [ %NewVecC,
  // %tiledpbssd.scalarize.cols.latch ]
  // %vec.d.phi.col = phi <256 x i32> [
  // %vec.d.phi.row, %tiledpbssd.scalarize.rows.body ], [ %NewVecD,
  // %tiledpbssd.scalarize.cols.latch ]
  // calculate idxc.
  B.SetInsertPoint(ColLoopHeader->getTerminator());
  PHINode *VecCPhiColLoop = B.CreatePHI(V256I32Ty, 2, "vec.c.phi.col");
  VecCPhiColLoop->addIncoming(VecCPhiRowLoop, RowBody);
  PHINode *VecDPhiColLoop = B.CreatePHI(V256I32Ty, 2, "vec.d.phi.col");
  VecDPhiColLoop->addIncoming(VecDPhiRowLoop, RowBody);
  Value *IdxC =
      B.CreateAdd(B.CreateMul(CurrentRow, B.getInt16(16)), CurrentCol);

  // tiledpbssd.scalarize.inner.header:
  // %vec.c.inner.phi = phi <256 x i32> [ %vec.c.phi.col,
  // %tiledpbssd.scalarize.cols.body ], [ %NewVecC,
  // %tiledpbssd.scalarize.inner.latch ]
  B.SetInsertPoint(InnerLoopHeader->getTerminator());
  PHINode *VecCPhi = B.CreatePHI(V256I32Ty, 2, "vec.c.inner.phi");
  VecCPhi->addIncoming(VecCPhiColLoop, ColBody);

  B.SetInsertPoint(InnerBody->getTerminator());
  Value *IdxA =
      B.CreateAdd(B.CreateMul(CurrentRow, B.getInt16(16)), CurrentInner);
  Value *IdxB =
      B.CreateAdd(B.CreateMul(CurrentInner, B.getInt16(16)), CurrentCol);
  Value *NewVecC = nullptr;

  if (IntrID != Intrinsic::x86_tdpbf16ps_internal) {
    // tiledpbssd.scalarize.inner.body:
    // calculate idxa, idxb
    // %eltc = extractelement <256 x i32> %vec.c.inner.phi, i16 %idxc
    // %elta = extractelement <256 x i32> %veca, i16 %idxa
    // %eltav4i8 = bitcast i32 %elta to <4 x i8>
    // %eltb = extractelement <256 x i32> %vecb, i16 %idxb
    // %eltbv4i8 = bitcast i32 %eltb to <4 x i8>
    // %eltav4i32 = sext <4 x i8> %eltav4i8 to <4 x i32>
    // %eltbv4i32 = sext <4 x i8> %eltbv4i8 to <4 x i32>
    // %mulab = mul <4 x i32> %eltbv4i32, %eltav4i32
    // %acc = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mulab)
    // %neweltc = add i32 %eltc, %acc
    // %NewVecC = insertelement <256 x i32> %vec.c.inner.phi, i32 %neweltc,
    // i16 %idxc
    FixedVectorType *V4I8Ty = FixedVectorType::get(B.getInt8Ty(), 4);
    FixedVectorType *V4I32Ty = FixedVectorType::get(B.getInt32Ty(), 4);
    Value *EltC = B.CreateExtractElement(VecCPhi, IdxC);
    Value *EltA = B.CreateExtractElement(VecA, IdxA);
    Value *SubVecA = B.CreateBitCast(EltA, V4I8Ty);
    Value *EltB = B.CreateExtractElement(VecB, IdxB);
    Value *SubVecB = B.CreateBitCast(EltB, V4I8Ty);
    Value *SEXTSubVecB = nullptr;
    Value *SEXTSubVecA = nullptr;
    switch (IntrID) {
    case Intrinsic::x86_tdpbssd_internal:
      SEXTSubVecB = B.CreateSExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateSExt(SubVecA, V4I32Ty);
      break;
    case Intrinsic::x86_tdpbsud_internal:
      SEXTSubVecB = B.CreateZExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateSExt(SubVecA, V4I32Ty);
      break;
    case Intrinsic::x86_tdpbusd_internal:
      SEXTSubVecB = B.CreateSExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateZExt(SubVecA, V4I32Ty);
      break;
    case Intrinsic::x86_tdpbuud_internal:
      SEXTSubVecB = B.CreateZExt(SubVecB, V4I32Ty);
      SEXTSubVecA = B.CreateZExt(SubVecA, V4I32Ty);
      break;
    default:
      llvm_unreachable("Invalid intrinsic ID!");
    }
    Value *SubVecR = B.CreateAddReduce(B.CreateMul(SEXTSubVecA, SEXTSubVecB));
    Value *ResElt = B.CreateAdd(EltC, SubVecR);
    NewVecC = B.CreateInsertElement(VecCPhi, ResElt, IdxC);
  } else {
    // tiledpbf16ps.scalarize.inner.body:
    // calculate idxa, idxb, idxc
    // %eltc = extractelement <256 x i32> %vec.c.inner.phi, i16 %idxc
    // %eltcf32 = bitcast i32 %eltc to float
    // %elta = extractelement <256 x i32> %veca, i16 %idxa
    // %eltav2i16 = bitcast i32 %elta to <2 x i16>
    // %eltb = extractelement <256 x i32> %vecb, i16 %idxb
    // %eltbv2i16 = bitcast i32 %eltb to <2 x i16>
    // %shufflea = shufflevector <2 x i16> %eltav2i16, <2 x i16>
    // zeroinitializer, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
    // %eltav2f32 = bitcast <4 x i16> %shufflea to <2 x float>
    // %shuffleb = shufflevector <2 x i16> %eltbv2i16, <2 x i16>
    // zeroinitializer, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
    // %eltbv2f32 = bitcast <4 x i16> %shuffleb to <2 x float>
    // %mulab = fmul <2 x float> %eltav2f32, %eltbv2f32
    // %acc = call float
    // @llvm.vector.reduce.fadd.v2f32(float %eltcf32, <2 x float> %mulab)
    // %neweltc = bitcast float %acc to i32
    // %NewVecC = insertelement <256 x i32> %vec.c.inner.phi, i32 %neweltc,
    // i16 %idxc
    FixedVectorType *V2I16Ty = FixedVectorType::get(B.getInt16Ty(), 2);
    FixedVectorType *V2F32Ty = FixedVectorType::get(B.getFloatTy(), 2);
    Value *EltC = B.CreateExtractElement(VecCPhi, IdxC);
    Value *EltCF32 = B.CreateBitCast(EltC, B.getFloatTy());
    Value *EltA = B.CreateExtractElement(VecA, IdxA);
    Value *SubVecA = B.CreateBitCast(EltA, V2I16Ty);
    Value *EltB = B.CreateExtractElement(VecB, IdxB);
    Value *SubVecB = B.CreateBitCast(EltB, V2I16Ty);
    Value *ZeroV2I16 = Constant::getNullValue(V2I16Ty);
    int ShuffleMask[4] = {2, 0, 3, 1};
    auto ShuffleArray = makeArrayRef(ShuffleMask);
    Value *AV2F32 = B.CreateBitCast(
        B.CreateShuffleVector(SubVecA, ZeroV2I16, ShuffleArray), V2F32Ty);
    Value *BV2F32 = B.CreateBitCast(
        B.CreateShuffleVector(SubVecB, ZeroV2I16, ShuffleArray), V2F32Ty);
    Value *SubVecR = B.CreateFAddReduce(EltCF32, B.CreateFMul(AV2F32, BV2F32));
    Value *ResElt = B.CreateBitCast(SubVecR, B.getInt32Ty());
    NewVecC = B.CreateInsertElement(VecCPhi, ResElt, IdxC);
  }

  // tiledpbssd.scalarize.cols.latch:
  // %NewEltC = extractelement <256 x i32> %NewVecC, i16 %idxc
  // %NewVecD = insertelement <256 x i32> %vec.d.phi.col, i32 %NewEltC,
  // i16 %idxc
  B.SetInsertPoint(ColLoopLatch->getTerminator());
  Value *NewEltC = B.CreateExtractElement(NewVecC, IdxC);
  Value *NewVecD = B.CreateInsertElement(VecDPhiColLoop, NewEltC, IdxC);

  VecCPhi->addIncoming(NewVecC, InnerLoopLatch);
  VecCPhiRowLoop->addIncoming(NewVecC, RowLatch);
  VecCPhiColLoop->addIncoming(NewVecC, ColLoopLatch);
  VecDPhiRowLoop->addIncoming(NewVecD, RowLatch);
  VecDPhiColLoop->addIncoming(NewVecD, ColLoopLatch);

  return NewVecD;
}
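
// Lower one tile DP intrinsic: split its block, emit the scalarized loop nest
// between the two halves, then rewrite the users. Users that are bitcasts
// back to <256 x i32> receive the result vector directly; the remaining
// x86_amx uses go through a freshly inserted bitcast.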
template <Intrinsic::ID IntrID>
typename std::enable_if<IntrID == Intrinsic::x86_tdpbssd_internal ||
                            IntrID == Intrinsic::x86_tdpbsud_internal ||
                            IntrID == Intrinsic::x86_tdpbusd_internal ||
                            IntrID == Intrinsic::x86_tdpbuud_internal ||
                            IntrID == Intrinsic::x86_tdpbf16ps_internal,
                        bool>::type
X86LowerAMXIntrinsics::lowerTileDP(Instruction *TileDP) {
  Value *M, *N, *K, *C, *A, *B;
  match(TileDP, m_Intrinsic<IntrID>(m_Value(M), m_Value(N), m_Value(K),
                                    m_Value(C), m_Value(A), m_Value(B)));
  Instruction *InsertI = TileDP;
  IRBuilder<> PreBuilder(TileDP);
  PreBuilder.SetInsertPoint(TileDP);
  // We visit the loop with (m, n/4, k/4):
  // %n_dword = lshr i16 %n, 2
  // %k_dword = lshr i16 %k, 2
  Value *NDWord = PreBuilder.CreateLShr(N, PreBuilder.getInt16(2));
  Value *KDWord = PreBuilder.CreateLShr(K, PreBuilder.getInt16(2));
  BasicBlock *Start = InsertI->getParent();
  BasicBlock *End =
      SplitBlock(InsertI->getParent(), InsertI, &DTU, LI, nullptr, "continue");
  IRBuilder<> Builder(TileDP);
  Value *ResVec = createTileDPLoops<IntrID>(Start, End, Builder, M, NDWord,
                                            KDWord, C, A, B);
  // We cannot assume there is always a bitcast after the tile DP intrinsic,
  // so insert one as required.
  Builder.SetInsertPoint(End->getFirstNonPHI());
  Value *ResAMX =
      Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
  // Delete TileDP intrinsic and do some clean-up.
  for (Use &U : llvm::make_early_inc_range(TileDP->uses())) {
    Instruction *I = cast<Instruction>(U.getUser());
    Value *Vec;
    if (match(I, m_BitCast(m_Value(Vec)))) {
      I->replaceAllUsesWith(ResVec);
      I->eraseFromParent();
    }
  }
  TileDP->replaceAllUsesWith(ResAMX);
  TileDP->eraseFromParent();
  return true;
}
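
// Lower tileloadd64/tilestored64. The column count and stride arrive in
// bytes and are shifted right by 2 to dword units first, since the loops
// operate on i32 elements.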
template <bool IsTileLoad>
bool X86LowerAMXIntrinsics::lowerTileLoadStore(Instruction *TileLoadStore) {
  Value *M, *N, *Ptr, *Stride, *Tile;
  if (IsTileLoad)
    match(TileLoadStore,
          m_Intrinsic<Intrinsic::x86_tileloadd64_internal>(
              m_Value(M), m_Value(N), m_Value(Ptr), m_Value(Stride)));
  else
    match(TileLoadStore, m_Intrinsic<Intrinsic::x86_tilestored64_internal>(
                             m_Value(M), m_Value(N), m_Value(Ptr),
                             m_Value(Stride), m_Value(Tile)));

  Instruction *InsertI = TileLoadStore;
  IRBuilder<> PreBuilder(TileLoadStore);
  PreBuilder.SetInsertPoint(TileLoadStore);
  Value *NDWord = PreBuilder.CreateLShr(N, PreBuilder.getInt16(2));
  Value *StrideDWord = PreBuilder.CreateLShr(Stride, PreBuilder.getInt64(2));
  BasicBlock *Start = InsertI->getParent();
  BasicBlock *End =
      SplitBlock(InsertI->getParent(), InsertI, &DTU, LI, nullptr, "continue");
  IRBuilder<> Builder(TileLoadStore);
  Value *ResVec = createTileLoadStoreLoops<IsTileLoad>(
      Start, End, Builder, M, NDWord, Ptr, StrideDWord,
      IsTileLoad ? nullptr : Tile);
  if (IsTileLoad) {
    // We cannot assume there is always a bitcast after tileload, so insert
    // one as required.
    Builder.SetInsertPoint(End->getFirstNonPHI());
    Value *ResAMX = Builder.CreateBitCast(
        ResVec, Type::getX86_AMXTy(Builder.getContext()));
    // Delete the tileloadd64 intrinsic and do some clean-up.
    for (Use &U : llvm::make_early_inc_range(TileLoadStore->uses())) {
      Instruction *I = cast<Instruction>(U.getUser());
      Value *Vec;
      if (match(I, m_BitCast(m_Value(Vec)))) {
        I->replaceAllUsesWith(ResVec);
        I->eraseFromParent();
      }
    }
    TileLoadStore->replaceAllUsesWith(ResAMX);
  }
  TileLoadStore->eraseFromParent();
  return true;
}
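
// tilezero simply becomes a <256 x i32> zeroinitializer: forward it to any
// bitcast users and drop the intrinsic.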
bool X86LowerAMXIntrinsics::lowerTileZero(Instruction *TileZero) {
  IRBuilder<> Builder(TileZero);
  FixedVectorType *V256I32Ty = FixedVectorType::get(Builder.getInt32Ty(), 256);
  Value *VecZero = Constant::getNullValue(V256I32Ty);
  for (Use &U : llvm::make_early_inc_range(TileZero->uses())) {
    Instruction *I = cast<Instruction>(U.getUser());
    Value *Vec;
    if (match(I, m_BitCast(m_Value(Vec)))) {
      I->replaceAllUsesWith(VecZero);
      I->eraseFromParent();
    }
  }
  TileZero->eraseFromParent();
  return true;
}
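
// Collect all AMX intrinsics up front and lower them afterwards: lowering
// splits blocks and inserts loops, which would invalidate the block
// iteration if done in place.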
bool X86LowerAMXIntrinsics::visit() {
  bool C = false;
  SmallVector<IntrinsicInst *, 8> WorkList;
  for (BasicBlock *BB : depth_first(&Func)) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      if (auto *Inst = dyn_cast<IntrinsicInst>(&*II++)) {
        switch (Inst->getIntrinsicID()) {
        case Intrinsic::x86_tdpbssd_internal:
        case Intrinsic::x86_tdpbsud_internal:
        case Intrinsic::x86_tdpbusd_internal:
        case Intrinsic::x86_tdpbuud_internal:
        case Intrinsic::x86_tileloadd64_internal:
        case Intrinsic::x86_tilestored64_internal:
        case Intrinsic::x86_tilezero_internal:
        case Intrinsic::x86_tdpbf16ps_internal:
          WorkList.push_back(Inst);
          break;
        default:
          break;
        }
      }
    }
  }

  for (auto *Inst : WorkList) {
    switch (Inst->getIntrinsicID()) {
    case Intrinsic::x86_tdpbssd_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbssd_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbsud_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbsud_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbusd_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbusd_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbuud_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbuud_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tdpbf16ps_internal:
      C = lowerTileDP<Intrinsic::x86_tdpbf16ps_internal>(Inst) || C;
      break;
    case Intrinsic::x86_tileloadd64_internal:
      C = lowerTileLoadStore<true>(Inst) || C;
      break;
    case Intrinsic::x86_tilestored64_internal:
      C = lowerTileLoadStore<false>(Inst) || C;
      break;
    case Intrinsic::x86_tilezero_internal:
      C = lowerTileZero(Inst) || C;
      break;
    default:
      llvm_unreachable("invalid amx intrinsics!");
    }
  }

  return C;
}
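
// Legacy pass-manager wrapper. It runs only when the hidden flag is set and
// the function is compiled at -O0 or carries the optnone attribute, matching
// the policy described in the file header.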
namespace {
class X86LowerAMXIntrinsicsLegacyPass : public FunctionPass {
public:
  static char ID;

  X86LowerAMXIntrinsicsLegacyPass() : FunctionPass(ID) {
    initializeX86LowerAMXIntrinsicsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (!X86ScalarizeAMX)
      return false;
    TargetMachine *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    if (!F.hasFnAttribute(Attribute::OptimizeNone) &&
        TM->getOptLevel() != CodeGenOpt::None)
      return false;

    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
    auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

    X86LowerAMXIntrinsics LAT(F, DTU, LI);
    return LAT.visit();
  }
  StringRef getPassName() const override { return "Lower AMX intrinsics"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }
};
} // namespace

static const char PassName[] = "Lower AMX intrinsics";
char X86LowerAMXIntrinsicsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(X86LowerAMXIntrinsicsLegacyPass, DEBUG_TYPE, PassName,
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(X86LowerAMXIntrinsicsLegacyPass, DEBUG_TYPE, PassName,
                    false, false)

FunctionPass *llvm::createX86LowerAMXIntrinsicsPass() {
  return new X86LowerAMXIntrinsicsLegacyPass();
}