- //===- ScalarizeMaskedMemIntrin.cpp - Scalarize unsupported masked mem ----===//
- // intrinsics
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This pass replaces masked memory intrinsics that are unsupported by the
- // target with a chain of basic blocks that process the elements one by one
- // when the corresponding mask bit is set.
- //
- //===----------------------------------------------------------------------===//
- #include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
- #include "llvm/ADT/Twine.h"
- #include "llvm/Analysis/DomTreeUpdater.h"
- #include "llvm/Analysis/TargetTransformInfo.h"
- #include "llvm/IR/BasicBlock.h"
- #include "llvm/IR/Constant.h"
- #include "llvm/IR/Constants.h"
- #include "llvm/IR/DerivedTypes.h"
- #include "llvm/IR/Dominators.h"
- #include "llvm/IR/Function.h"
- #include "llvm/IR/IRBuilder.h"
- #include "llvm/IR/InstrTypes.h"
- #include "llvm/IR/Instruction.h"
- #include "llvm/IR/Instructions.h"
- #include "llvm/IR/IntrinsicInst.h"
- #include "llvm/IR/Intrinsics.h"
- #include "llvm/IR/Type.h"
- #include "llvm/IR/Value.h"
- #include "llvm/InitializePasses.h"
- #include "llvm/Pass.h"
- #include "llvm/Support/Casting.h"
- #include "llvm/Transforms/Scalar.h"
- #include "llvm/Transforms/Utils/BasicBlockUtils.h"
- #include <algorithm>
- #include <cassert>
- using namespace llvm;
- #define DEBUG_TYPE "scalarize-masked-mem-intrin"
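- // The pass can be exercised on its own from opt, e.g. with the new pass
- // manager (assuming the registered pass name matches DEBUG_TYPE):
- //   opt -passes=scalarize-masked-mem-intrin -S input.ll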
- namespace {
- class ScalarizeMaskedMemIntrinLegacyPass : public FunctionPass {
- public:
- static char ID; // Pass identification, replacement for typeid
- explicit ScalarizeMaskedMemIntrinLegacyPass() : FunctionPass(ID) {
- initializeScalarizeMaskedMemIntrinLegacyPassPass(
- *PassRegistry::getPassRegistry());
- }
- bool runOnFunction(Function &F) override;
- StringRef getPassName() const override {
- return "Scalarize Masked Memory Intrinsics";
- }
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<TargetTransformInfoWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- }
- };
- } // end anonymous namespace
- static bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT,
- const TargetTransformInfo &TTI, const DataLayout &DL,
- DomTreeUpdater *DTU);
- static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
- const TargetTransformInfo &TTI,
- const DataLayout &DL, DomTreeUpdater *DTU);
- char ScalarizeMaskedMemIntrinLegacyPass::ID = 0;
- INITIALIZE_PASS_BEGIN(ScalarizeMaskedMemIntrinLegacyPass, DEBUG_TYPE,
- "Scalarize unsupported masked memory intrinsics", false,
- false)
- INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
- INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
- INITIALIZE_PASS_END(ScalarizeMaskedMemIntrinLegacyPass, DEBUG_TYPE,
- "Scalarize unsupported masked memory intrinsics", false,
- false)
- FunctionPass *llvm::createScalarizeMaskedMemIntrinLegacyPass() {
- return new ScalarizeMaskedMemIntrinLegacyPass();
- }
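- // Return true if \p Mask is a fixed-width vector whose lanes are all
- // ConstantInts, i.e. every mask bit is known at compile time.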
- static bool isConstantIntVector(Value *Mask) {
- Constant *C = dyn_cast<Constant>(Mask);
- if (!C)
- return false;
- unsigned NumElts = cast<FixedVectorType>(Mask->getType())->getNumElements();
- for (unsigned i = 0; i != NumElts; ++i) {
- Constant *CElt = C->getAggregateElement(i);
- if (!CElt || !isa<ConstantInt>(CElt))
- return false;
- }
- return true;
- }
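- // Map a vector lane index to the matching bit of the mask after it has been
- // bitcast to an integer: on big-endian targets lane 0 is the most significant
- // bit, so e.g. lane 1 of a 16-wide mask is tested as bit 14.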
- static unsigned adjustForEndian(const DataLayout &DL, unsigned VectorWidth,
- unsigned Idx) {
- return DL.isBigEndian() ? VectorWidth - 1 - Idx : Idx;
- }
- // Translate a masked load intrinsic like
- //   <16 x i32> @llvm.masked.load(<16 x i32>* %addr, i32 align,
- //                                <16 x i1> %mask, <16 x i32> %passthru)
- // to a chain of basic blocks, loading the elements one by one if the
- // appropriate mask bit is set.
- //
- // %1 = bitcast i8* %addr to i32*
- // %2 = extractelement <16 x i1> %mask, i32 0
- // br i1 %2, label %cond.load, label %else
- //
- // cond.load: ; preds = %0
- // %3 = getelementptr i32* %1, i32 0
- // %4 = load i32* %3
- // %5 = insertelement <16 x i32> %passthru, i32 %4, i32 0
- // br label %else
- //
- // else: ; preds = %0, %cond.load
- // %res.phi.else = phi <16 x i32> [ %5, %cond.load ], [ undef, %0 ]
- // %6 = extractelement <16 x i1> %mask, i32 1
- // br i1 %6, label %cond.load1, label %else2
- //
- // cond.load1: ; preds = %else
- // %7 = getelementptr i32* %1, i32 1
- // %8 = load i32* %7
- // %9 = insertelement <16 x i32> %res.phi.else, i32 %8, i32 1
- // br label %else2
- //
- // else2: ; preds = %else, %cond.load1
- // %res.phi.else3 = phi <16 x i32> [ %9, %cond.load1 ], [ %res.phi.else, %else ]
- // %10 = extractelement <16 x i1> %mask, i32 2
- // br i1 %10, label %cond.load4, label %else5
- //
- static void scalarizeMaskedLoad(const DataLayout &DL, CallInst *CI,
- DomTreeUpdater *DTU, bool &ModifiedDT) {
- Value *Ptr = CI->getArgOperand(0);
- Value *Alignment = CI->getArgOperand(1);
- Value *Mask = CI->getArgOperand(2);
- Value *Src0 = CI->getArgOperand(3);
- const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
- VectorType *VecType = cast<FixedVectorType>(CI->getType());
- Type *EltTy = VecType->getElementType();
- IRBuilder<> Builder(CI->getContext());
- Instruction *InsertPt = CI;
- BasicBlock *IfBlock = CI->getParent();
- Builder.SetInsertPoint(InsertPt);
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
- // Short-cut if the mask is all-true.
- if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
- Value *NewI = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal);
- CI->replaceAllUsesWith(NewI);
- CI->eraseFromParent();
- return;
- }
- // Adjust alignment for the scalar instruction.
- const Align AdjustedAlignVal =
- commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
- // Bitcast %addr from i8* to EltTy*
- Type *NewPtrType =
- EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
- Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
- unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();
- // The result vector
- Value *VResult = Src0;
- if (isConstantIntVector(Mask)) {
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
- continue;
- Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
- LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
- VResult = Builder.CreateInsertElement(VResult, Load, Idx);
- }
- CI->replaceAllUsesWith(VResult);
- CI->eraseFromParent();
- return;
- }
- // If the mask is not v1i1, use scalar bit test operations. This generates
- // better results on X86 at least.
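- // For example, a <16 x i1> %mask becomes
- //   %scalar_mask = bitcast <16 x i1> %mask to i16
- // and each lane is then tested by and-ing %scalar_mask with a one-bit
- // constant.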
- Value *SclrMask;
- if (VectorWidth != 1) {
- Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
- SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
- }
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- // Fill the "else" block, created in the previous iteration
- //
- // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
- // %mask_1 = and i16 %scalar_mask, (1 << Idx)
- // %cond = icmp ne i16 %mask_1, 0
- // br i1 %cond, label %cond.load, label %else
- //
- Value *Predicate;
- if (VectorWidth != 1) {
- Value *Mask = Builder.getInt(APInt::getOneBitSet(
- VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
- Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
- Builder.getIntN(VectorWidth, 0));
- } else {
- Predicate = Builder.CreateExtractElement(Mask, Idx);
- }
- // Create "cond" block
- //
- // %EltAddr = getelementptr i32* %1, i32 Idx
- // %Elt = load i32* %EltAddr
- // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
- //
- Instruction *ThenTerm =
- SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DTU);
- BasicBlock *CondBlock = ThenTerm->getParent();
- CondBlock->setName("cond.load");
- Builder.SetInsertPoint(CondBlock->getTerminator());
- Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
- LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
- Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
- // Create "else" block, fill it in the next iteration
- BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
- NewIfBlock->setName("else");
- BasicBlock *PrevIfBlock = IfBlock;
- IfBlock = NewIfBlock;
- // Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
- PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
- Phi->addIncoming(NewVResult, CondBlock);
- Phi->addIncoming(VResult, PrevIfBlock);
- VResult = Phi;
- }
- CI->replaceAllUsesWith(VResult);
- CI->eraseFromParent();
- ModifiedDT = true;
- }
- // Translate a masked store intrinsic, like
- //   void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
- //                           <16 x i1> %mask)
- // to a chain of basic blocks, storing the elements one by one if the
- // appropriate mask bit is set.
- //
- // %1 = bitcast i8* %addr to i32*
- // %2 = extractelement <16 x i1> %mask, i32 0
- // br i1 %2, label %cond.store, label %else
- //
- // cond.store: ; preds = %0
- // %3 = extractelement <16 x i32> %val, i32 0
- // %4 = getelementptr i32* %1, i32 0
- // store i32 %3, i32* %4
- // br label %else
- //
- // else: ; preds = %0, %cond.store
- // %5 = extractelement <16 x i1> %mask, i32 1
- // br i1 %5, label %cond.store1, label %else2
- //
- // cond.store1: ; preds = %else
- // %6 = extractelement <16 x i32> %val, i32 1
- // %7 = getelementptr i32* %1, i32 1
- // store i32 %6, i32* %7
- // br label %else2
- // . . .
- static void scalarizeMaskedStore(const DataLayout &DL, CallInst *CI,
- DomTreeUpdater *DTU, bool &ModifiedDT) {
- Value *Src = CI->getArgOperand(0);
- Value *Ptr = CI->getArgOperand(1);
- Value *Alignment = CI->getArgOperand(2);
- Value *Mask = CI->getArgOperand(3);
- const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
- auto *VecType = cast<VectorType>(Src->getType());
- Type *EltTy = VecType->getElementType();
- IRBuilder<> Builder(CI->getContext());
- Instruction *InsertPt = CI;
- Builder.SetInsertPoint(InsertPt);
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
- // Short-cut if the mask is all-true.
- if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
- Builder.CreateAlignedStore(Src, Ptr, AlignVal);
- CI->eraseFromParent();
- return;
- }
- // Adjust alignment for the scalar instruction.
- const Align AdjustedAlignVal =
- commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
- // Bitcast %addr from i8* to EltTy*
- Type *NewPtrType =
- EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
- Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
- unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();
- if (isConstantIntVector(Mask)) {
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
- continue;
- Value *OneElt = Builder.CreateExtractElement(Src, Idx);
- Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
- Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
- }
- CI->eraseFromParent();
- return;
- }
- // If the mask is not v1i1, use scalar bit test operations. This generates
- // better results on X86 at least.
- Value *SclrMask;
- if (VectorWidth != 1) {
- Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
- SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
- }
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- // Fill the "else" block, created in the previous iteration
- //
- // %mask_1 = and i16 %scalar_mask, (1 << Idx)
- // %cond = icmp ne i16 %mask_1, 0
- // br i1 %cond, label %cond.store, label %else
- //
- Value *Predicate;
- if (VectorWidth != 1) {
- Value *Mask = Builder.getInt(APInt::getOneBitSet(
- VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
- Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
- Builder.getIntN(VectorWidth, 0));
- } else {
- Predicate = Builder.CreateExtractElement(Mask, Idx);
- }
- // Create "cond" block
- //
- // %OneElt = extractelement <16 x i32> %Src, i32 Idx
- // %EltAddr = getelementptr i32* %1, i32 Idx
- // store i32 %OneElt, i32* %EltAddr
- //
- Instruction *ThenTerm =
- SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DTU);
- BasicBlock *CondBlock = ThenTerm->getParent();
- CondBlock->setName("cond.store");
- Builder.SetInsertPoint(CondBlock->getTerminator());
- Value *OneElt = Builder.CreateExtractElement(Src, Idx);
- Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
- Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
- // Create "else" block, fill it in the next iteration
- BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
- NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
- }
- CI->eraseFromParent();
- ModifiedDT = true;
- }
- // Translate a masked gather intrinsic like
- //   <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %Ptrs, i32 4,
- //                                         <16 x i1> %Mask, <16 x i32> %Src)
- // to a chain of basic blocks, loading the elements one by one if the
- // appropriate mask bit is set.
- //
- // %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
- // %Mask0 = extractelement <16 x i1> %Mask, i32 0
- // br i1 %Mask0, label %cond.load, label %else
- //
- // cond.load:
- // %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
- // %Load0 = load i32, i32* %Ptr0, align 4
- // %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
- // br label %else
- //
- // else:
- // %res.phi.else = phi <16 x i32>[%Res0, %cond.load], [undef, %0]
- // %Mask1 = extractelement <16 x i1> %Mask, i32 1
- // br i1 %Mask1, label %cond.load1, label %else2
- //
- // cond.load1:
- // %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
- // %Load1 = load i32, i32* %Ptr1, align 4
- // %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
- // br label %else2
- // . . .
- // The pass-through operand %Src needs no final select: the result chain is
- // seeded with %Src, so disabled lanes keep their pass-through elements.
- static void scalarizeMaskedGather(const DataLayout &DL, CallInst *CI,
- DomTreeUpdater *DTU, bool &ModifiedDT) {
- Value *Ptrs = CI->getArgOperand(0);
- Value *Alignment = CI->getArgOperand(1);
- Value *Mask = CI->getArgOperand(2);
- Value *Src0 = CI->getArgOperand(3);
- auto *VecType = cast<FixedVectorType>(CI->getType());
- Type *EltTy = VecType->getElementType();
- IRBuilder<> Builder(CI->getContext());
- Instruction *InsertPt = CI;
- BasicBlock *IfBlock = CI->getParent();
- Builder.SetInsertPoint(InsertPt);
- MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
- // The result vector
- Value *VResult = Src0;
- unsigned VectorWidth = VecType->getNumElements();
- // Shorten the way if the mask is a vector of constants.
- if (isConstantIntVector(Mask)) {
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
- continue;
- Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
- LoadInst *Load =
- Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
- VResult =
- Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
- }
- CI->replaceAllUsesWith(VResult);
- CI->eraseFromParent();
- return;
- }
- // If the mask is not v1i1, use scalar bit test operations. This generates
- // better results on X86 at least.
- Value *SclrMask;
- if (VectorWidth != 1) {
- Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
- SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
- }
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- // Fill the "else" block, created in the previous iteration
- //
- // %Mask1 = and i16 %scalar_mask, (1 << Idx)
- // %cond = icmp ne i16 %Mask1, 0
- // br i1 %cond, label %cond.load, label %else
- //
- Value *Predicate;
- if (VectorWidth != 1) {
- Value *Mask = Builder.getInt(APInt::getOneBitSet(
- VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
- Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
- Builder.getIntN(VectorWidth, 0));
- } else {
- Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
- }
- // Create "cond" block
- //
- // %Ptr_Idx = extractelement <16 x i32*> %Ptrs, i32 Idx
- // %Elt = load i32, i32* %Ptr_Idx
- // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
- //
- Instruction *ThenTerm =
- SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DTU);
- BasicBlock *CondBlock = ThenTerm->getParent();
- CondBlock->setName("cond.load");
- Builder.SetInsertPoint(CondBlock->getTerminator());
- Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
- LoadInst *Load =
- Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
- Value *NewVResult =
- Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
- // Create "else" block, fill it in the next iteration
- BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
- NewIfBlock->setName("else");
- BasicBlock *PrevIfBlock = IfBlock;
- IfBlock = NewIfBlock;
- // Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
- PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
- Phi->addIncoming(NewVResult, CondBlock);
- Phi->addIncoming(VResult, PrevIfBlock);
- VResult = Phi;
- }
- CI->replaceAllUsesWith(VResult);
- CI->eraseFromParent();
- ModifiedDT = true;
- }
- // Translate a masked scatter intrinsic, like
- //   void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*> %Ptrs, i32 4,
- //                                    <16 x i1> %Mask)
- // to a chain of basic blocks, storing the elements one by one if the
- // appropriate mask bit is set.
- //
- // %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
- // %Mask0 = extractelement <16 x i1> %Mask, i32 0
- // br i1 %Mask0, label %cond.store, label %else
- //
- // cond.store:
- // %Elt0 = extractelement <16 x i32> %Src, i32 0
- // %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
- // store i32 %Elt0, i32* %Ptr0, align 4
- // br label %else
- //
- // else:
- // %Mask1 = extractelement <16 x i1> %Mask, i32 1
- // br i1 %Mask1, label %cond.store1, label %else2
- //
- // cond.store1:
- // %Elt1 = extractelement <16 x i32> %Src, i32 1
- // %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
- // store i32 %Elt1, i32* %Ptr1, align 4
- // br label %else2
- // . . .
- static void scalarizeMaskedScatter(const DataLayout &DL, CallInst *CI,
- DomTreeUpdater *DTU, bool &ModifiedDT) {
- Value *Src = CI->getArgOperand(0);
- Value *Ptrs = CI->getArgOperand(1);
- Value *Alignment = CI->getArgOperand(2);
- Value *Mask = CI->getArgOperand(3);
- auto *SrcFVTy = cast<FixedVectorType>(Src->getType());
- assert(
- isa<VectorType>(Ptrs->getType()) &&
- isa<PointerType>(cast<VectorType>(Ptrs->getType())->getElementType()) &&
- "Vector of pointers is expected in masked scatter intrinsic");
- IRBuilder<> Builder(CI->getContext());
- Instruction *InsertPt = CI;
- Builder.SetInsertPoint(InsertPt);
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
- MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
- unsigned VectorWidth = SrcFVTy->getNumElements();
- // Shorten the way if the mask is a vector of constants.
- if (isConstantIntVector(Mask)) {
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
- continue;
- Value *OneElt =
- Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
- Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
- Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
- }
- CI->eraseFromParent();
- return;
- }
- // If the mask is not v1i1, use scalar bit test operations. This generates
- // better results on X86 at least.
- Value *SclrMask;
- if (VectorWidth != 1) {
- Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
- SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
- }
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- // Fill the "else" block, created in the previous iteration
- //
- // %Mask1 = and i16 %scalar_mask, (1 << Idx)
- // %cond = icmp ne i16 %Mask1, 0
- // br i1 %cond, label %cond.store, label %else
- //
- Value *Predicate;
- if (VectorWidth != 1) {
- Value *Mask = Builder.getInt(APInt::getOneBitSet(
- VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
- Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
- Builder.getIntN(VectorWidth, 0));
- } else {
- Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
- }
- // Create "cond" block
- //
- // %Elt1 = extractelement <16 x i32> %Src, i32 Idx
- // %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 Idx
- // store i32 %Elt1, i32* %Ptr1
- //
- Instruction *ThenTerm =
- SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DTU);
- BasicBlock *CondBlock = ThenTerm->getParent();
- CondBlock->setName("cond.store");
- Builder.SetInsertPoint(CondBlock->getTerminator());
- Value *OneElt = Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
- Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
- Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
- // Create "else" block, fill it in the next iteration
- BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
- NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
- }
- CI->eraseFromParent();
- ModifiedDT = true;
- }
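- // Translate a masked expandload intrinsic, like
- //   <4 x i32> @llvm.masked.expandload.v4i32(i32* %ptr, <4 x i1> %mask,
- //                                           <4 x i32> %passthru)
- // to a chain of basic blocks that read consecutive memory locations,
- // advancing the pointer only past lanes whose mask bit is set. The value
- // names below are illustrative; the emitted IR is roughly:
- //
- //   %Mask0 = extractelement <4 x i1> %mask, i32 0
- //   br i1 %Mask0, label %cond.load, label %else
- //
- // cond.load:
- //   %Load0 = load i32, i32* %ptr, align 1
- //   %Res0 = insertelement <4 x i32> %passthru, i32 %Load0, i32 0
- //   %NextPtr = getelementptr i32, i32* %ptr, i32 1
- //   br label %else
- //
- // else:
- //   %res.phi.else = phi <4 x i32> [ %Res0, %cond.load ], [ %passthru, %0 ]
- //   %ptr.phi.else = phi i32* [ %NextPtr, %cond.load ], [ %ptr, %0 ]
- //   %Mask1 = extractelement <4 x i1> %mask, i32 1
- //   br i1 %Mask1, label %cond.load1, label %else2
- // . . .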
- static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
- DomTreeUpdater *DTU, bool &ModifiedDT) {
- Value *Ptr = CI->getArgOperand(0);
- Value *Mask = CI->getArgOperand(1);
- Value *PassThru = CI->getArgOperand(2);
- auto *VecType = cast<FixedVectorType>(CI->getType());
- Type *EltTy = VecType->getElementType();
- IRBuilder<> Builder(CI->getContext());
- Instruction *InsertPt = CI;
- BasicBlock *IfBlock = CI->getParent();
- Builder.SetInsertPoint(InsertPt);
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
- unsigned VectorWidth = VecType->getNumElements();
- // The result vector
- Value *VResult = PassThru;
- // Shorten the way if the mask is a vector of constants.
- // Create a build_vector pattern, with loads/undefs as necessary and then
- // shuffle blend with the pass through value.
- if (isConstantIntVector(Mask)) {
- unsigned MemIndex = 0;
- VResult = UndefValue::get(VecType);
- SmallVector<int, 16> ShuffleMask(VectorWidth, UndefMaskElem);
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- Value *InsertElt;
- if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue()) {
- InsertElt = UndefValue::get(EltTy);
- ShuffleMask[Idx] = Idx + VectorWidth;
- } else {
- Value *NewPtr =
- Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
- InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
- "Load" + Twine(Idx));
- ShuffleMask[Idx] = Idx;
- ++MemIndex;
- }
- VResult = Builder.CreateInsertElement(VResult, InsertElt, Idx,
- "Res" + Twine(Idx));
- }
- VResult = Builder.CreateShuffleVector(VResult, PassThru, ShuffleMask);
- CI->replaceAllUsesWith(VResult);
- CI->eraseFromParent();
- return;
- }
- // If the mask is not v1i1, use scalar bit test operations. This generates
- // better results on X86 at least.
- Value *SclrMask;
- if (VectorWidth != 1) {
- Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
- SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
- }
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- // Fill the "else" block, created in the previous iteration
- //
- // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
- // %mask_1 = and i16 %scalar_mask, (1 << Idx)
- // %cond = icmp ne i16 %mask_1, 0
- // br i1 %cond, label %cond.load, label %else
- //
- Value *Predicate;
- if (VectorWidth != 1) {
- Value *Mask = Builder.getInt(APInt::getOneBitSet(
- VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
- Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
- Builder.getIntN(VectorWidth, 0));
- } else {
- Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
- }
- // Create "cond" block
- //
- // %Elt = load i32, i32* %Ptr, align 1
- // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
- // %NewPtr = getelementptr i32, i32* %Ptr, i32 1
- //
- Instruction *ThenTerm =
- SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DTU);
- BasicBlock *CondBlock = ThenTerm->getParent();
- CondBlock->setName("cond.load");
- Builder.SetInsertPoint(CondBlock->getTerminator());
- LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
- Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
- // Move the pointer if there are more blocks to come.
- Value *NewPtr;
- if ((Idx + 1) != VectorWidth)
- NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, 1);
- // Create "else" block, fill it in the next iteration
- BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
- NewIfBlock->setName("else");
- BasicBlock *PrevIfBlock = IfBlock;
- IfBlock = NewIfBlock;
- // Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
- PHINode *ResultPhi = Builder.CreatePHI(VecType, 2, "res.phi.else");
- ResultPhi->addIncoming(NewVResult, CondBlock);
- ResultPhi->addIncoming(VResult, PrevIfBlock);
- VResult = ResultPhi;
- // Add a PHI for the pointer if this isn't the last iteration.
- if ((Idx + 1) != VectorWidth) {
- PHINode *PtrPhi = Builder.CreatePHI(Ptr->getType(), 2, "ptr.phi.else");
- PtrPhi->addIncoming(NewPtr, CondBlock);
- PtrPhi->addIncoming(Ptr, PrevIfBlock);
- Ptr = PtrPhi;
- }
- }
- CI->replaceAllUsesWith(VResult);
- CI->eraseFromParent();
- ModifiedDT = true;
- }
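- // Translate a masked compressstore intrinsic, like
- //   void @llvm.masked.compressstore.v4i32(<4 x i32> %value, i32* %ptr,
- //                                         <4 x i1> %mask)
- // to a chain of basic blocks that store the enabled lanes to consecutive
- // memory locations, advancing the pointer after each store. The value names
- // below are illustrative; the emitted IR is roughly:
- //
- //   %Mask0 = extractelement <4 x i1> %mask, i32 0
- //   br i1 %Mask0, label %cond.store, label %else
- //
- // cond.store:
- //   %Elt0 = extractelement <4 x i32> %value, i32 0
- //   store i32 %Elt0, i32* %ptr, align 1
- //   %NextPtr = getelementptr i32, i32* %ptr, i32 1
- //   br label %else
- //
- // else:
- //   %ptr.phi.else = phi i32* [ %NextPtr, %cond.store ], [ %ptr, %0 ]
- //   %Mask1 = extractelement <4 x i1> %mask, i32 1
- //   br i1 %Mask1, label %cond.store1, label %else2
- // . . .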
- static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
- DomTreeUpdater *DTU,
- bool &ModifiedDT) {
- Value *Src = CI->getArgOperand(0);
- Value *Ptr = CI->getArgOperand(1);
- Value *Mask = CI->getArgOperand(2);
- auto *VecType = cast<FixedVectorType>(Src->getType());
- IRBuilder<> Builder(CI->getContext());
- Instruction *InsertPt = CI;
- BasicBlock *IfBlock = CI->getParent();
- Builder.SetInsertPoint(InsertPt);
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
- Type *EltTy = VecType->getElementType();
- unsigned VectorWidth = VecType->getNumElements();
- // Shorten the way if the mask is a vector of constants.
- if (isConstantIntVector(Mask)) {
- unsigned MemIndex = 0;
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
- continue;
- Value *OneElt =
- Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
- Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
- Builder.CreateAlignedStore(OneElt, NewPtr, Align(1));
- ++MemIndex;
- }
- CI->eraseFromParent();
- return;
- }
- // If the mask is not v1i1, use scalar bit test operations. This generates
- // better results on X86 at least.
- Value *SclrMask;
- if (VectorWidth != 1) {
- Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
- SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
- }
- for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
- // Fill the "else" block, created in the previous iteration
- //
- // %mask_1 = and i16 %scalar_mask, (1 << Idx)
- // %cond = icmp ne i16 %mask_1, 0
- // br i1 %cond, label %cond.store, label %else
- //
- Value *Predicate;
- if (VectorWidth != 1) {
- Value *Mask = Builder.getInt(APInt::getOneBitSet(
- VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
- Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
- Builder.getIntN(VectorWidth, 0));
- } else {
- Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
- }
- // Create "cond" block
- //
- // %OneElt = extractelement <16 x i32> %Src, i32 Idx
- // store i32 %OneElt, i32* %Ptr, align 1
- // %NewPtr = getelementptr i32, i32* %Ptr, i32 1
- //
- Instruction *ThenTerm =
- SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
- /*BranchWeights=*/nullptr, DTU);
- BasicBlock *CondBlock = ThenTerm->getParent();
- CondBlock->setName("cond.store");
- Builder.SetInsertPoint(CondBlock->getTerminator());
- Value *OneElt = Builder.CreateExtractElement(Src, Idx);
- Builder.CreateAlignedStore(OneElt, Ptr, Align(1));
- // Move the pointer if there are more blocks to come.
- Value *NewPtr;
- if ((Idx + 1) != VectorWidth)
- NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, 1);
- // Create "else" block, fill it in the next iteration
- BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
- NewIfBlock->setName("else");
- BasicBlock *PrevIfBlock = IfBlock;
- IfBlock = NewIfBlock;
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
- // Add a PHI for the pointer if this isn't the last iteration.
- if ((Idx + 1) != VectorWidth) {
- PHINode *PtrPhi = Builder.CreatePHI(Ptr->getType(), 2, "ptr.phi.else");
- PtrPhi->addIncoming(NewPtr, CondBlock);
- PtrPhi->addIncoming(Ptr, PrevIfBlock);
- Ptr = PtrPhi;
- }
- }
- CI->eraseFromParent();
- ModifiedDT = true;
- }
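- // Scalarize every unsupported masked intrinsic in \p F, iterating to a fixed
- // point: expanding one intrinsic splits its basic block, so the scan over
- // blocks is restarted whenever the dominator tree is modified.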
- static bool runImpl(Function &F, const TargetTransformInfo &TTI,
- DominatorTree *DT) {
- Optional<DomTreeUpdater> DTU;
- if (DT)
- DTU.emplace(DT, DomTreeUpdater::UpdateStrategy::Lazy);
- bool EverMadeChange = false;
- bool MadeChange = true;
- auto &DL = F.getParent()->getDataLayout();
- while (MadeChange) {
- MadeChange = false;
- for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
- bool ModifiedDTOnIteration = false;
- MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration, TTI, DL,
- DTU.hasValue() ? DTU.getPointer() : nullptr);
- // Restart the scan over blocks if the dominator tree of the function was changed.
- if (ModifiedDTOnIteration)
- break;
- }
- EverMadeChange |= MadeChange;
- }
- return EverMadeChange;
- }
- bool ScalarizeMaskedMemIntrinLegacyPass::runOnFunction(Function &F) {
- auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- DominatorTree *DT = nullptr;
- if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
- DT = &DTWP->getDomTree();
- return runImpl(F, TTI, DT);
- }
- PreservedAnalyses
- ScalarizeMaskedMemIntrinPass::run(Function &F, FunctionAnalysisManager &AM) {
- auto &TTI = AM.getResult<TargetIRAnalysis>(F);
- auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
- if (!runImpl(F, TTI, DT))
- return PreservedAnalyses::all();
- PreservedAnalyses PA;
- PA.preserve<TargetIRAnalysis>();
- PA.preserve<DominatorTreeAnalysis>();
- return PA;
- }
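- // Scan \p BB for masked memory intrinsic calls, scalarizing the unsupported
- // ones. Returns early once a scalarization has split the block, signalling
- // the caller to restart its iteration.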
- static bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT,
- const TargetTransformInfo &TTI, const DataLayout &DL,
- DomTreeUpdater *DTU) {
- bool MadeChange = false;
- BasicBlock::iterator CurInstIterator = BB.begin();
- while (CurInstIterator != BB.end()) {
- if (CallInst *CI = dyn_cast<CallInst>(&*CurInstIterator++))
- MadeChange |= optimizeCallInst(CI, ModifiedDT, TTI, DL, DTU);
- if (ModifiedDT)
- return true;
- }
- return MadeChange;
- }
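- // If \p CI is a masked memory intrinsic that the target does not support
- // natively (or asks to have scalarized), expand it into branchy scalar IR
- // and return true.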
- static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
- const TargetTransformInfo &TTI,
- const DataLayout &DL, DomTreeUpdater *DTU) {
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
- if (II) {
- // The scalarization code below does not work for scalable vectors.
- if (isa<ScalableVectorType>(II->getType()) ||
- any_of(II->args(),
- [](Value *V) { return isa<ScalableVectorType>(V->getType()); }))
- return false;
- switch (II->getIntrinsicID()) {
- default:
- break;
- case Intrinsic::masked_load:
- // Scalarize unsupported vector masked load
- if (TTI.isLegalMaskedLoad(
- CI->getType(),
- cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue()))
- return false;
- scalarizeMaskedLoad(DL, CI, DTU, ModifiedDT);
- return true;
- case Intrinsic::masked_store:
- if (TTI.isLegalMaskedStore(
- CI->getArgOperand(0)->getType(),
- cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue()))
- return false;
- scalarizeMaskedStore(DL, CI, DTU, ModifiedDT);
- return true;
- case Intrinsic::masked_gather: {
- MaybeAlign MA =
- cast<ConstantInt>(CI->getArgOperand(1))->getMaybeAlignValue();
- Type *LoadTy = CI->getType();
- Align Alignment = DL.getValueOrABITypeAlignment(MA,
- LoadTy->getScalarType());
- if (TTI.isLegalMaskedGather(LoadTy, Alignment) &&
- !TTI.forceScalarizeMaskedGather(cast<VectorType>(LoadTy), Alignment))
- return false;
- scalarizeMaskedGather(DL, CI, DTU, ModifiedDT);
- return true;
- }
- case Intrinsic::masked_scatter: {
- MaybeAlign MA =
- cast<ConstantInt>(CI->getArgOperand(2))->getMaybeAlignValue();
- Type *StoreTy = CI->getArgOperand(0)->getType();
- Align Alignment = DL.getValueOrABITypeAlignment(MA,
- StoreTy->getScalarType());
- if (TTI.isLegalMaskedScatter(StoreTy, Alignment) &&
- !TTI.forceScalarizeMaskedScatter(cast<VectorType>(StoreTy),
- Alignment))
- return false;
- scalarizeMaskedScatter(DL, CI, DTU, ModifiedDT);
- return true;
- }
- case Intrinsic::masked_expandload:
- if (TTI.isLegalMaskedExpandLoad(CI->getType()))
- return false;
- scalarizeMaskedExpandLoad(DL, CI, DTU, ModifiedDT);
- return true;
- case Intrinsic::masked_compressstore:
- if (TTI.isLegalMaskedCompressStore(CI->getArgOperand(0)->getType()))
- return false;
- scalarizeMaskedCompressStore(DL, CI, DTU, ModifiedDT);
- return true;
- }
- }
- return false;
- }