//===- MemProfiler.cpp - memory allocation and access profiler -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler. Memory accesses are instrumented
// to increment the access count held in a shadow memory location, or
// alternatively to call into the runtime. Memory intrinsic calls (memmove,
// memcpy, memset) are changed to call the memory profiling runtime version
// instead.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "memprof"

constexpr int LLVM_MEM_PROFILER_VERSION = 1;

// Size of memory mapped to a single shadow location.
constexpr uint64_t DefaultShadowGranularity = 64;

// Scale from granularity down to shadow size.
constexpr uint64_t DefaultShadowScale = 3;

constexpr char MemProfModuleCtorName[] = "memprof.module_ctor";
constexpr uint64_t MemProfCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
constexpr uint64_t MemProfEmscriptenCtorAndDtorPriority = 50;
constexpr char MemProfInitName[] = "__memprof_init";
constexpr char MemProfVersionCheckNamePrefix[] =
    "__memprof_version_mismatch_check_v";

constexpr char MemProfShadowMemoryDynamicAddress[] =
    "__memprof_shadow_memory_dynamic_address";

constexpr char MemProfFilenameVar[] = "__memprof_profile_filename";

// Command-line flags.

static cl::opt<bool> ClInsertVersionCheck(
    "memprof-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]memprof-reads.
static cl::opt<bool> ClInstrumentReads("memprof-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("memprof-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "memprof-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClUseCalls(
    "memprof-use-callbacks",
    cl::desc("Use callbacks instead of inline instrumentation sequences."),
    cl::Hidden, cl::init(false));

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("memprof-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__memprof_"));

// These flags allow changing the shadow mapping. The shadow mapping looks
// like:
//    Shadow = ((Mem & mask) >> scale) + offset
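// For example, with the default 64-byte granularity (mask == ~63) and a scale
// of 3, an access to address 0x1237 bumps the counter at shadow slot
// ((0x1237 & ~63) >> 3) + offset == 0x240 + offset, so every access within
// the same 64-byte chunk of memory shares a single 8-byte shadow counter.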

static cl::opt<int> ClMappingScale("memprof-mapping-scale",
                                   cl::desc("scale of memprof shadow mapping"),
                                   cl::Hidden, cl::init(DefaultShadowScale));

static cl::opt<int>
    ClMappingGranularity("memprof-mapping-granularity",
                         cl::desc("granularity of memprof shadow mapping"),
                         cl::Hidden, cl::init(DefaultShadowGranularity));

static cl::opt<bool> ClStack("memprof-instrument-stack",
                             cl::desc("Instrument scalar stack variables"),
                             cl::Hidden, cl::init(false));

// Debug flags.

static cl::opt<int> ClDebug("memprof-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<std::string> ClDebugFunc("memprof-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("memprof-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("memprof-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumSkippedStackReads, "Number of non-instrumented stack reads");
STATISTIC(NumSkippedStackWrites, "Number of non-instrumented stack writes");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = ((mem & mask) >> Scale) + DynamicShadowOffset.
struct ShadowMapping {
  ShadowMapping() {
    Scale = ClMappingScale;
    Granularity = ClMappingGranularity;
    Mask = ~(Granularity - 1);
  }

  int Scale;
  int Granularity;
  uint64_t Mask; // Computed as ~(Granularity-1)
};

static uint64_t getCtorAndDtorPriority(Triple &TargetTriple) {
  return TargetTriple.isOSEmscripten() ? MemProfEmscriptenCtorAndDtorPriority
                                       : MemProfCtorAndDtorPriority;
}

struct InterestingMemoryAccess {
  Value *Addr = nullptr;
  bool IsWrite;
  unsigned Alignment;
  Type *AccessTy;
  uint64_t TypeSize;
  Value *MaybeMask = nullptr;
};

/// Instrument the code in module to profile memory accesses.
class MemProfiler {
public:
  MemProfiler(Module &M) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
  }

  /// If it is an interesting memory access, populate information
  /// about the access and return an InterestingMemoryAccess struct.
  /// Otherwise return None.
  Optional<InterestingMemoryAccess>
  isInterestingMemoryAccess(Instruction *I) const;

  void instrumentMop(Instruction *I, const DataLayout &DL,
                     InterestingMemoryAccess &Access);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite);
  void instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                   Instruction *I, Value *Addr,
                                   unsigned Alignment, Type *AccessTy,
                                   bool IsWrite);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool instrumentFunction(Function &F);
  bool maybeInsertMemProfInitAtFunctionEntry(Function &F);
  bool insertDynamicShadowAtFunctionEntry(Function &F);

private:
  void initializeCallbacks(Module &M);

  LLVMContext *C;
  int LongSize;
  Type *IntptrTy;
  ShadowMapping Mapping;

  // These arrays are indexed by AccessIsWrite.
  FunctionCallee MemProfMemoryAccessCallback[2];
  FunctionCallee MemProfMemoryAccessCallbackSized[2];

  FunctionCallee MemProfMemmove, MemProfMemcpy, MemProfMemset;
  Value *DynamicShadowOffset = nullptr;
};

class MemProfilerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit MemProfilerLegacyPass() : FunctionPass(ID) {
    initializeMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "MemProfilerFunctionPass"; }

  bool runOnFunction(Function &F) override {
    MemProfiler Profiler(*F.getParent());
    return Profiler.instrumentFunction(F);
  }
};

class ModuleMemProfiler {
public:
  ModuleMemProfiler(Module &M) { TargetTriple = Triple(M.getTargetTriple()); }

  bool instrumentModule(Module &);

private:
  Triple TargetTriple;
  ShadowMapping Mapping;
  Function *MemProfCtorFunction = nullptr;
};

class ModuleMemProfilerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleMemProfilerLegacyPass() : ModulePass(ID) {
    initializeModuleMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleMemProfiler"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {}

  bool runOnModule(Module &M) override {
    ModuleMemProfiler MemProfiler(M);
    return MemProfiler.instrumentModule(M);
  }
};

} // end anonymous namespace

MemProfilerPass::MemProfilerPass() {}

PreservedAnalyses MemProfilerPass::run(Function &F,
                                       AnalysisManager<Function> &AM) {
  Module &M = *F.getParent();
  MemProfiler Profiler(M);
  if (Profiler.instrumentFunction(F))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

ModuleMemProfilerPass::ModuleMemProfilerPass() {}

PreservedAnalyses ModuleMemProfilerPass::run(Module &M,
                                             AnalysisManager<Module> &AM) {
  ModuleMemProfiler Profiler(M);
  if (Profiler.instrumentModule(M))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

char MemProfilerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemProfilerLegacyPass, "memprof",
                      "MemProfiler: profile memory allocations and accesses.",
                      false, false)
INITIALIZE_PASS_END(MemProfilerLegacyPass, "memprof",
                    "MemProfiler: profile memory allocations and accesses.",
                    false, false)

FunctionPass *llvm::createMemProfilerFunctionPass() {
  return new MemProfilerLegacyPass();
}

char ModuleMemProfilerLegacyPass::ID = 0;
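
// Note that the function pass performs the per-access instrumentation, while
// the module pass only emits the module constructor (with its optional
// version-check argument) and the __memprof_profile_filename variable; both
// passes are expected to be scheduled when memory profiling is enabled.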
"ModulePass", false, false) ModulePass *llvm::createModuleMemProfilerLegacyPassPass() { return new ModuleMemProfilerLegacyPass(); } Value *MemProfiler::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // (Shadow & mask) >> scale Shadow = IRB.CreateAnd(Shadow, Mapping.Mask); Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); // (Shadow >> scale) | offset assert(DynamicShadowOffset); return IRB.CreateAdd(Shadow, DynamicShadowOffset); } // Instrument memset/memmove/memcpy void MemProfiler::instrumentMemIntrinsic(MemIntrinsic *MI) { IRBuilder<> IRB(MI); if (isa(MI)) { IRB.CreateCall( isa(MI) ? MemProfMemmove : MemProfMemcpy, {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); } else if (isa(MI)) { IRB.CreateCall( MemProfMemset, {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); } MI->eraseFromParent(); } Optional MemProfiler::isInterestingMemoryAccess(Instruction *I) const { // Do not instrument the load fetching the dynamic shadow address. if (DynamicShadowOffset == I) return None; InterestingMemoryAccess Access; if (LoadInst *LI = dyn_cast(I)) { if (!ClInstrumentReads) return None; Access.IsWrite = false; Access.AccessTy = LI->getType(); Access.Alignment = LI->getAlignment(); Access.Addr = LI->getPointerOperand(); } else if (StoreInst *SI = dyn_cast(I)) { if (!ClInstrumentWrites) return None; Access.IsWrite = true; Access.AccessTy = SI->getValueOperand()->getType(); Access.Alignment = SI->getAlignment(); Access.Addr = SI->getPointerOperand(); } else if (AtomicRMWInst *RMW = dyn_cast(I)) { if (!ClInstrumentAtomics) return None; Access.IsWrite = true; Access.AccessTy = RMW->getValOperand()->getType(); Access.Alignment = 0; Access.Addr = RMW->getPointerOperand(); } else if (AtomicCmpXchgInst *XCHG = dyn_cast(I)) { if (!ClInstrumentAtomics) return None; Access.IsWrite = true; Access.AccessTy = XCHG->getCompareOperand()->getType(); Access.Alignment = 0; Access.Addr = XCHG->getPointerOperand(); } else if (auto *CI = dyn_cast(I)) { auto *F = CI->getCalledFunction(); if (F && (F->getIntrinsicID() == Intrinsic::masked_load || F->getIntrinsicID() == Intrinsic::masked_store)) { unsigned OpOffset = 0; if (F->getIntrinsicID() == Intrinsic::masked_store) { if (!ClInstrumentWrites) return None; // Masked store has an initial operand for the value. OpOffset = 1; Access.AccessTy = CI->getArgOperand(0)->getType(); Access.IsWrite = true; } else { if (!ClInstrumentReads) return None; Access.AccessTy = CI->getType(); Access.IsWrite = false; } auto *BasePtr = CI->getOperand(0 + OpOffset); if (auto *AlignmentConstant = dyn_cast(CI->getOperand(1 + OpOffset))) Access.Alignment = (unsigned)AlignmentConstant->getZExtValue(); else Access.Alignment = 1; // No alignment guarantees. We probably got Undef Access.MaybeMask = CI->getOperand(2 + OpOffset); Access.Addr = BasePtr; } } if (!Access.Addr) return None; // Do not instrument acesses from different address spaces; we cannot deal // with them. Type *PtrTy = cast(Access.Addr->getType()->getScalarType()); if (PtrTy->getPointerAddressSpace() != 0) return None; // Ignore swifterror addresses. // swifterror memory addresses are mem2reg promoted by instruction // selection. As such they cannot have regular uses like an instrumentation // function and it makes no sense to track them as memory. 
  if (Access.Addr->isSwiftError())
    return None;

  const DataLayout &DL = I->getModule()->getDataLayout();
  Access.TypeSize = DL.getTypeStoreSizeInBits(Access.AccessTy);
  return Access;
}

void MemProfiler::instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                              Instruction *I, Value *Addr,
                                              unsigned Alignment,
                                              Type *AccessTy, bool IsWrite) {
  auto *VTy = cast<FixedVectorType>(AccessTy);
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getNumElements();
  auto *Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue.
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to instrumentAddress
        // with InsertBefore == I.
      }
    } else {
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    instrumentAddress(I, InsertBefore, InstrumentedAddress, ElemTypeSize,
                      IsWrite);
  }
}

void MemProfiler::instrumentMop(Instruction *I, const DataLayout &DL,
                                InterestingMemoryAccess &Access) {
  // Skip instrumentation of stack accesses unless requested.
  if (!ClStack && isa<AllocaInst>(getUnderlyingObject(Access.Addr))) {
    if (Access.IsWrite)
      ++NumSkippedStackWrites;
    else
      ++NumSkippedStackReads;
    return;
  }

  if (Access.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  if (Access.MaybeMask) {
    instrumentMaskedLoadOrStore(DL, Access.MaybeMask, I, Access.Addr,
                                Access.Alignment, Access.AccessTy,
                                Access.IsWrite);
  } else {
    // Since the access counts will be accumulated across the entire
    // allocation, we only update the shadow access count for the first
    // location and thus don't need to worry about alignment and type size.
    instrumentAddress(I, I, Access.Addr, Access.TypeSize, Access.IsWrite);
  }
}

void MemProfiler::instrumentAddress(Instruction *OrigIns,
                                    Instruction *InsertBefore, Value *Addr,
                                    uint32_t TypeSize, bool IsWrite) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);

  if (ClUseCalls) {
    IRB.CreateCall(MemProfMemoryAccessCallback[IsWrite], AddrLong);
    return;
  }

  // Create an inline sequence to compute shadow location, and increment the
  // value by one.
  Type *ShadowTy = Type::getInt64Ty(*C);
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *ShadowAddr = IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy);
  Value *ShadowValue = IRB.CreateLoad(ShadowTy, ShadowAddr);
  Value *Inc = ConstantInt::get(Type::getInt64Ty(*C), 1);
  ShadowValue = IRB.CreateAdd(ShadowValue, Inc);
  IRB.CreateStore(ShadowValue, ShadowAddr);
}
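
// For reference, on a 64-bit target with the default mapping the inline
// sequence above expands to IR roughly of the form (value names are
// illustrative only):
//   %addr.int  = ptrtoint i8* %addr to i64
//   %masked    = and i64 %addr.int, -64        ; Mapping.Mask for 64-byte granularity
//   %scaled    = lshr i64 %masked, 3           ; Mapping.Scale
//   %shadow    = add i64 %scaled, %dyn.offset  ; __memprof_shadow_memory_dynamic_address
//   %slot      = inttoptr i64 %shadow to i64*
//   %count     = load i64, i64* %slot
//   %count.inc = add i64 %count, 1
//   store i64 %count.inc, i64* %slot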

// Create the variable for the profile file name.
void createProfileFileNameVar(Module &M) {
  const MDString *MemProfFilename =
      dyn_cast_or_null<MDString>(M.getModuleFlag("MemProfProfileFilename"));
  if (!MemProfFilename)
    return;
  assert(!MemProfFilename->getString().empty() &&
         "Unexpected MemProfProfileFilename metadata with empty string");
  Constant *ProfileNameConst = ConstantDataArray::getString(
      M.getContext(), MemProfFilename->getString(), true);
  GlobalVariable *ProfileNameVar = new GlobalVariable(
      M, ProfileNameConst->getType(), /*isConstant=*/true,
      GlobalValue::WeakAnyLinkage, ProfileNameConst, MemProfFilenameVar);
  Triple TT(M.getTargetTriple());
  if (TT.supportsCOMDAT()) {
    ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
    ProfileNameVar->setComdat(M.getOrInsertComdat(MemProfFilenameVar));
  }
}

bool ModuleMemProfiler::instrumentModule(Module &M) {
  // Create a module constructor.
  std::string MemProfVersion = std::to_string(LLVM_MEM_PROFILER_VERSION);
  std::string VersionCheckName =
      ClInsertVersionCheck ? (MemProfVersionCheckNamePrefix + MemProfVersion)
                           : "";
  std::tie(MemProfCtorFunction, std::ignore) =
      createSanitizerCtorAndInitFunctions(M, MemProfModuleCtorName,
                                          MemProfInitName, /*InitArgTypes=*/{},
                                          /*InitArgs=*/{}, VersionCheckName);

  const uint64_t Priority = getCtorAndDtorPriority(TargetTriple);
  appendToGlobalCtors(M, MemProfCtorFunction, Priority);

  createProfileFileNameVar(M);

  return true;
}

void MemProfiler::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);

  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";

    SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
    SmallVector<Type *, 2> Args1{1, IntptrTy};
    MemProfMemoryAccessCallbackSized[AccessIsWrite] =
        M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr + "N",
                              FunctionType::get(IRB.getVoidTy(), Args2, false));

    MemProfMemoryAccessCallback[AccessIsWrite] =
        M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr,
                              FunctionType::get(IRB.getVoidTy(), Args1, false));
  }
  MemProfMemmove = M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemProfMemcpy = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memcpy",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  MemProfMemset = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt32Ty(), IntptrTy);
}

bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __memprof_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
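  // ObjC class methods are emitted with names like "+[MyClass load]", so
  // matching the " load]" substring below is sufficient to recognize them.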
if (F.getName().find(" load]") != std::string::npos) { FunctionCallee MemProfInitFunction = declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {}); IRBuilder<> IRB(&F.front(), F.front().begin()); IRB.CreateCall(MemProfInitFunction, {}); return true; } return false; } bool MemProfiler::insertDynamicShadowAtFunctionEntry(Function &F) { IRBuilder<> IRB(&F.front().front()); Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( MemProfShadowMemoryDynamicAddress, IntptrTy); if (F.getParent()->getPICLevel() == PICLevel::NotPIC) cast(GlobalDynamicAddress)->setDSOLocal(true); DynamicShadowOffset = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); return true; } bool MemProfiler::instrumentFunction(Function &F) { if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; if (ClDebugFunc == F.getName()) return false; if (F.getName().startswith("__memprof_")) return false; bool FunctionModified = false; // If needed, insert __memprof_init. // This function needs to be called even if the function body is not // instrumented. if (maybeInsertMemProfInitAtFunctionEntry(F)) FunctionModified = true; LLVM_DEBUG(dbgs() << "MEMPROF instrumenting:\n" << F << "\n"); initializeCallbacks(*F.getParent()); FunctionModified |= insertDynamicShadowAtFunctionEntry(F); SmallVector ToInstrument; // Fill the set of memory operations to instrument. for (auto &BB : F) { for (auto &Inst : BB) { if (isInterestingMemoryAccess(&Inst) || isa(Inst)) ToInstrument.push_back(&Inst); } } int NumInstrumented = 0; for (auto *Inst : ToInstrument) { if (ClDebugMin < 0 || ClDebugMax < 0 || (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) { Optional Access = isInterestingMemoryAccess(Inst); if (Access) instrumentMop(Inst, F.getParent()->getDataLayout(), *Access); else instrumentMemIntrinsic(cast(Inst)); } NumInstrumented++; } if (NumInstrumented > 0) FunctionModified = true; LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified << " " << F << "\n"); return FunctionModified; }