//===- MemProfiler.cpp - memory allocation and access profiler ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler. Memory accesses are instrumented
// to increment the access count held in a shadow memory location, or
// alternatively to call into the runtime. Memory intrinsic calls (memmove,
// memcpy, memset) are changed to call the memory profiling runtime version
// instead.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "memprof"

constexpr int LLVM_MEM_PROFILER_VERSION = 1;
// Size of memory mapped to a single shadow location.
constexpr uint64_t DefaultShadowGranularity = 64;

// Scale from granularity down to shadow size.
constexpr uint64_t DefaultShadowScale = 3;

constexpr char MemProfModuleCtorName[] = "memprof.module_ctor";
constexpr uint64_t MemProfCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
constexpr uint64_t MemProfEmscriptenCtorAndDtorPriority = 50;

constexpr char MemProfInitName[] = "__memprof_init";
constexpr char MemProfVersionCheckNamePrefix[] =
    "__memprof_version_mismatch_check_v";

constexpr char MemProfShadowMemoryDynamicAddress[] =
    "__memprof_shadow_memory_dynamic_address";

constexpr char MemProfFilenameVar[] = "__memprof_profile_filename";
// Command-line flags.

static cl::opt<bool> ClInsertVersionCheck(
    "memprof-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]memprof-reads.
static cl::opt<bool> ClInstrumentReads("memprof-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentWrites("memprof-instrument-writes",
                       cl::desc("instrument write instructions"), cl::Hidden,
                       cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "memprof-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClUseCalls(
    "memprof-use-callbacks",
    cl::desc("Use callbacks instead of inline instrumentation sequences."),
    cl::Hidden, cl::init(false));

static cl::opt<std::string>
    ClMemoryAccessCallbackPrefix("memprof-memory-access-callback-prefix",
                                 cl::desc("Prefix for memory access callbacks"),
                                 cl::Hidden, cl::init("__memprof_"));
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = ((Mem & mask) >> scale) + offset
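//
// For illustration only (not normative), with the default granularity of 64
// bytes and the default scale of 3, each 64-byte block of memory maps to one
// 8-byte shadow counter. An access at address 0x1040, say, yields
//    Shadow = ((0x1040 & ~0x3F) >> 3) + offset = 0x208 + offset.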
static cl::opt<int> ClMappingScale("memprof-mapping-scale",
                                   cl::desc("scale of memprof shadow mapping"),
                                   cl::Hidden, cl::init(DefaultShadowScale));

static cl::opt<int>
    ClMappingGranularity("memprof-mapping-granularity",
                         cl::desc("granularity of memprof shadow mapping"),
                         cl::Hidden, cl::init(DefaultShadowGranularity));

static cl::opt<bool> ClStack("memprof-instrument-stack",
                             cl::desc("Instrument scalar stack variables"),
                             cl::Hidden, cl::init(false));

// Debug flags.

static cl::opt<int> ClDebug("memprof-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<std::string> ClDebugFunc("memprof-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("memprof-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("memprof-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumSkippedStackReads, "Number of non-instrumented stack reads");
STATISTIC(NumSkippedStackWrites, "Number of non-instrumented stack writes");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = ((mem & mask) >> Scale) ADD DynamicShadowOffset.
struct ShadowMapping {
  ShadowMapping() {
    Scale = ClMappingScale;
    Granularity = ClMappingGranularity;
    Mask = ~(Granularity - 1);
  }

  int Scale;
  int Granularity;
  uint64_t Mask; // Computed as ~(Granularity-1)
};

static uint64_t getCtorAndDtorPriority(Triple &TargetTriple) {
  return TargetTriple.isOSEmscripten() ? MemProfEmscriptenCtorAndDtorPriority
                                       : MemProfCtorAndDtorPriority;
}

struct InterestingMemoryAccess {
  Value *Addr = nullptr;
  bool IsWrite;
  unsigned Alignment;
  Type *AccessTy;
  uint64_t TypeSize;
  Value *MaybeMask = nullptr;
};

/// Instrument the code in module to profile memory accesses.
class MemProfiler {
public:
  MemProfiler(Module &M) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
  }

  /// If it is an interesting memory access, populate information
  /// about the access and return an InterestingMemoryAccess struct.
  /// Otherwise return None.
  Optional<InterestingMemoryAccess>
  isInterestingMemoryAccess(Instruction *I) const;

  void instrumentMop(Instruction *I, const DataLayout &DL,
                     InterestingMemoryAccess &Access);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite);
  void instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                   Instruction *I, Value *Addr,
                                   unsigned Alignment, Type *AccessTy,
                                   bool IsWrite);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool instrumentFunction(Function &F);
  bool maybeInsertMemProfInitAtFunctionEntry(Function &F);
  bool insertDynamicShadowAtFunctionEntry(Function &F);

private:
  void initializeCallbacks(Module &M);

  LLVMContext *C;
  int LongSize;
  Type *IntptrTy;
  ShadowMapping Mapping;

  // These arrays are indexed by AccessIsWrite.
  FunctionCallee MemProfMemoryAccessCallback[2];
  FunctionCallee MemProfMemoryAccessCallbackSized[2];

  FunctionCallee MemProfMemmove, MemProfMemcpy, MemProfMemset;

  Value *DynamicShadowOffset = nullptr;
};

class MemProfilerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit MemProfilerLegacyPass() : FunctionPass(ID) {
    initializeMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "MemProfilerFunctionPass"; }

  bool runOnFunction(Function &F) override {
    MemProfiler Profiler(*F.getParent());
    return Profiler.instrumentFunction(F);
  }
};

class ModuleMemProfiler {
public:
  ModuleMemProfiler(Module &M) { TargetTriple = Triple(M.getTargetTriple()); }

  bool instrumentModule(Module &);

private:
  Triple TargetTriple;
  ShadowMapping Mapping;
  Function *MemProfCtorFunction = nullptr;
};

class ModuleMemProfilerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleMemProfilerLegacyPass() : ModulePass(ID) {
    initializeModuleMemProfilerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleMemProfiler"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {}

  bool runOnModule(Module &M) override {
    ModuleMemProfiler MemProfiler(M);
    return MemProfiler.instrumentModule(M);
  }
};

} // end anonymous namespace

MemProfilerPass::MemProfilerPass() {}

PreservedAnalyses MemProfilerPass::run(Function &F,
                                       AnalysisManager<Function> &AM) {
  Module &M = *F.getParent();
  MemProfiler Profiler(M);
  if (Profiler.instrumentFunction(F))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

ModuleMemProfilerPass::ModuleMemProfilerPass() {}

PreservedAnalyses ModuleMemProfilerPass::run(Module &M,
                                             AnalysisManager<Module> &AM) {
  ModuleMemProfiler Profiler(M);
  if (Profiler.instrumentModule(M))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

char MemProfilerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemProfilerLegacyPass, "memprof",
                      "MemProfiler: profile memory allocations and accesses.",
                      false, false)
INITIALIZE_PASS_END(MemProfilerLegacyPass, "memprof",
                    "MemProfiler: profile memory allocations and accesses.",
                    false, false)

FunctionPass *llvm::createMemProfilerFunctionPass() {
  return new MemProfilerLegacyPass();
}

char ModuleMemProfilerLegacyPass::ID = 0;

INITIALIZE_PASS(ModuleMemProfilerLegacyPass, "memprof-module",
                "MemProfiler: profile memory allocations and accesses."
                "ModulePass",
                false, false)

ModulePass *llvm::createModuleMemProfilerLegacyPassPass() {
  return new ModuleMemProfilerLegacyPass();
}

Value *MemProfiler::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // (Shadow & mask) >> scale
  Shadow = IRB.CreateAnd(Shadow, Mapping.Mask);
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  // (Shadow >> scale) + offset
  assert(DynamicShadowOffset);
  return IRB.CreateAdd(Shadow, DynamicShadowOffset);
}
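
// With the default mapping (Mask = ~63, Scale = 3), the sequence above lowers
// to IR along these lines (illustrative sketch; the value names are invented):
//   %masked = and i64 %addr, -64
//   %scaled = lshr i64 %masked, 3
//   %shadow = add i64 %scaled, %dynamic_shadow_offset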

// Instrument memset/memmove/memcpy
void MemProfiler::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? MemProfMemmove : MemProfMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        MemProfMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
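
// In effect (sketch only, assuming the default "__memprof_" callback prefix),
// an intrinsic call such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
// is rewritten into a call to the profiling runtime:
//   call i8* @__memprof_memcpy(i8* %dst, i8* %src, i64 %n)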

Optional<InterestingMemoryAccess>
MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
  // Do not instrument the load fetching the dynamic shadow address.
  if (DynamicShadowOffset == I)
    return None;

  InterestingMemoryAccess Access;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads)
      return None;
    Access.IsWrite = false;
    Access.AccessTy = LI->getType();
    Access.Alignment = LI->getAlignment();
    Access.Addr = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = SI->getValueOperand()->getType();
    Access.Alignment = SI->getAlignment();
    Access.Addr = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = RMW->getValOperand()->getType();
    Access.Alignment = 0;
    Access.Addr = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics)
      return None;
    Access.IsWrite = true;
    Access.AccessTy = XCHG->getCompareOperand()->getType();
    Access.Alignment = 0;
    Access.Addr = XCHG->getPointerOperand();
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    auto *F = CI->getCalledFunction();
    if (F && (F->getIntrinsicID() == Intrinsic::masked_load ||
              F->getIntrinsicID() == Intrinsic::masked_store)) {
      unsigned OpOffset = 0;
      if (F->getIntrinsicID() == Intrinsic::masked_store) {
        if (!ClInstrumentWrites)
          return None;
        // Masked store has an initial operand for the value.
        OpOffset = 1;
        Access.AccessTy = CI->getArgOperand(0)->getType();
        Access.IsWrite = true;
      } else {
        if (!ClInstrumentReads)
          return None;
        Access.AccessTy = CI->getType();
        Access.IsWrite = false;
      }

      auto *BasePtr = CI->getOperand(0 + OpOffset);
      if (auto *AlignmentConstant =
              dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Access.Alignment = (unsigned)AlignmentConstant->getZExtValue();
      else
        Access.Alignment = 1; // No alignment guarantees. We probably got Undef
      Access.MaybeMask = CI->getOperand(2 + OpOffset);
      Access.Addr = BasePtr;
    }
  }

  if (!Access.Addr)
    return None;

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Access.Addr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return None;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Access.Addr->isSwiftError())
    return None;

  const DataLayout &DL = I->getModule()->getDataLayout();
  Access.TypeSize = DL.getTypeStoreSizeInBits(Access.AccessTy);
  return Access;
}

void MemProfiler::instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
                                              Instruction *I, Value *Addr,
                                              unsigned Alignment,
                                              Type *AccessTy, bool IsWrite) {
  auto *VTy = cast<FixedVectorType>(AccessTy);
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getNumElements();
  auto *Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to instrumentAddress
        // with InsertBefore == I.
      }
    } else {
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    instrumentAddress(I, InsertBefore, InstrumentedAddress, ElemTypeSize,
                      IsWrite);
  }
}

void MemProfiler::instrumentMop(Instruction *I, const DataLayout &DL,
                                InterestingMemoryAccess &Access) {
  // Skip instrumentation of stack accesses unless requested.
  if (!ClStack && isa<AllocaInst>(getUnderlyingObject(Access.Addr))) {
    if (Access.IsWrite)
      ++NumSkippedStackWrites;
    else
      ++NumSkippedStackReads;
    return;
  }

  if (Access.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  if (Access.MaybeMask) {
    instrumentMaskedLoadOrStore(DL, Access.MaybeMask, I, Access.Addr,
                                Access.Alignment, Access.AccessTy,
                                Access.IsWrite);
  } else {
    // Since the access counts will be accumulated across the entire allocation,
    // we only update the shadow access count for the first location and thus
    // don't need to worry about alignment and type size.
    instrumentAddress(I, I, Access.Addr, Access.TypeSize, Access.IsWrite);
  }
}

void MemProfiler::instrumentAddress(Instruction *OrigIns,
                                    Instruction *InsertBefore, Value *Addr,
                                    uint32_t TypeSize, bool IsWrite) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);

  if (ClUseCalls) {
    IRB.CreateCall(MemProfMemoryAccessCallback[IsWrite], AddrLong);
    return;
  }

  // Create an inline sequence to compute shadow location, and increment the
  // value by one.
  Type *ShadowTy = Type::getInt64Ty(*C);
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *ShadowAddr = IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy);
  Value *ShadowValue = IRB.CreateLoad(ShadowTy, ShadowAddr);
  Value *Inc = ConstantInt::get(Type::getInt64Ty(*C), 1);
  ShadowValue = IRB.CreateAdd(ShadowValue, Inc);
  IRB.CreateStore(ShadowValue, ShadowAddr);
}
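
// The inline sequence above amounts to the following IR (illustrative sketch;
// value names are invented, and %shadow is the result of memToShadow):
//   %shadow.ptr = inttoptr i64 %shadow to i64*
//   %count      = load i64, i64* %shadow.ptr
//   %count.inc  = add i64 %count, 1
//   store i64 %count.inc, i64* %shadow.ptr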

// Create the variable for the profile file name.
void createProfileFileNameVar(Module &M) {
  const MDString *MemProfFilename =
      dyn_cast_or_null<MDString>(M.getModuleFlag("MemProfProfileFilename"));
  if (!MemProfFilename)
    return;

  assert(!MemProfFilename->getString().empty() &&
         "Unexpected MemProfProfileFilename metadata with empty string");
  Constant *ProfileNameConst = ConstantDataArray::getString(
      M.getContext(), MemProfFilename->getString(), true);
  GlobalVariable *ProfileNameVar = new GlobalVariable(
      M, ProfileNameConst->getType(), /*isConstant=*/true,
      GlobalValue::WeakAnyLinkage, ProfileNameConst, MemProfFilenameVar);
  Triple TT(M.getTargetTriple());
  if (TT.supportsCOMDAT()) {
    ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
    ProfileNameVar->setComdat(M.getOrInsertComdat(MemProfFilenameVar));
  }
}

bool ModuleMemProfiler::instrumentModule(Module &M) {
  // Create a module constructor.
  std::string MemProfVersion = std::to_string(LLVM_MEM_PROFILER_VERSION);
  std::string VersionCheckName =
      ClInsertVersionCheck ? (MemProfVersionCheckNamePrefix + MemProfVersion)
                           : "";
  std::tie(MemProfCtorFunction, std::ignore) =
      createSanitizerCtorAndInitFunctions(M, MemProfModuleCtorName,
                                          MemProfInitName, /*InitArgTypes=*/{},
                                          /*InitArgs=*/{}, VersionCheckName);

  const uint64_t Priority = getCtorAndDtorPriority(TargetTriple);
  appendToGlobalCtors(M, MemProfCtorFunction, Priority);

  createProfileFileNameVar(M);

  return true;
}

void MemProfiler::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);

  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";

    SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
    SmallVector<Type *, 2> Args1{1, IntptrTy};
    MemProfMemoryAccessCallbackSized[AccessIsWrite] =
        M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr + "N",
                              FunctionType::get(IRB.getVoidTy(), Args2, false));

    MemProfMemoryAccessCallback[AccessIsWrite] =
        M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr,
                              FunctionType::get(IRB.getVoidTy(), Args1, false));
  }
  MemProfMemmove = M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
  MemProfMemcpy = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memcpy",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  MemProfMemset = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt32Ty(), IntptrTy);
}
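
// With the default "__memprof_" prefix and 64-bit pointers, the declarations
// created above correspond to runtime entry points with roughly these C
// signatures (an illustrative sketch, not the runtime's authoritative header):
//   void  __memprof_load(uintptr_t Addr);
//   void  __memprof_store(uintptr_t Addr);
//   void  __memprof_loadN(uintptr_t Addr, uintptr_t Size);
//   void  __memprof_storeN(uintptr_t Addr, uintptr_t Size);
//   void *__memprof_memmove(void *Dst, const void *Src, uintptr_t N);
//   void *__memprof_memcpy(void *Dst, const void *Src, uintptr_t N);
//   void *__memprof_memset(void *Dst, int C, uintptr_t N);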

bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __memprof_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    FunctionCallee MemProfInitFunction =
        declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
    IRBuilder<> IRB(&F.front(), F.front().begin());
    IRB.CreateCall(MemProfInitFunction, {});
    return true;
  }
  return false;
}

bool MemProfiler::insertDynamicShadowAtFunctionEntry(Function &F) {
  IRBuilder<> IRB(&F.front().front());
  Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
      MemProfShadowMemoryDynamicAddress, IntptrTy);
  if (F.getParent()->getPICLevel() == PICLevel::NotPIC)
    cast<GlobalVariable>(GlobalDynamicAddress)->setDSOLocal(true);
  DynamicShadowOffset = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  return true;
}
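
// At function entry this emits, in effect (sketch, assuming a 64-bit target):
//   %dyn.shadow = load i64, i64* @__memprof_shadow_memory_dynamic_address
// Every shadow address computed later in the function is offset by this value.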

bool MemProfiler::instrumentFunction(Function &F) {
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
    return false;
  if (ClDebugFunc == F.getName())
    return false;
  if (F.getName().startswith("__memprof_"))
    return false;

  bool FunctionModified = false;

  // If needed, insert __memprof_init.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertMemProfInitAtFunctionEntry(F))
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "MEMPROF instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());

  FunctionModified |= insertDynamicShadowAtFunctionEntry(F);

  SmallVector<Instruction *, 16> ToInstrument;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isInterestingMemoryAccess(&Inst) || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  int NumInstrumented = 0;
  for (auto *Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      Optional<InterestingMemoryAccess> Access =
          isInterestingMemoryAccess(Inst);
      if (Access)
        instrumentMop(Inst, F.getParent()->getDataLayout(), *Access);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  if (NumInstrumented > 0)
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}