//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  // Build the Segments map, grouping blocks by (memory protection,
  // deallocation policy) and splitting content blocks from zero-fill blocks.
  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (Sec.blocks().empty())
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Order the blocks within each segment, then compute each segment's content
  // size, zero-fill size, and alignment.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size.
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}
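
// Compute the page-aligned size of each segment and accumulate the totals for
// standard-lifetime and finalize-lifetime segments. Fails if any segment
// requires an alignment greater than the page size.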
Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;
  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemDeallocPolicy() == orc::MemDeallocPolicy::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}
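
// Assign final addresses to all blocks and copy block content into each
// segment's working memory. The caller must set Seg.Addr and Seg.WorkingMem
// for every segment before calling this.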
Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at
      // working memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}
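
// Expose the graph's alloc-action list so that memory managers can run the
// attached finalize/deallocate actions.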
orc::shared::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}
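
// Build a LinkGraph containing one mutable-content block per requested
// segment and hand it to the memory manager. The resulting SimpleSegmentAlloc
// (or an error) is passed to OnCreated.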
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(orc::AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");

  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  orc::AllocGroupSmallMap<Block *> ContentBlocks;

  orc::ExecutorAddr NextAddr(0x100000);
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    if (Seg.ContentSize != 0) {
      NextAddr =
          orc::ExecutorAddr(alignTo(NextAddr.getValue(), Seg.ContentAlign));
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since the order of argument evaluation is
  // unspecified and G is moved into the lambda below.
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}
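
// Blocking convenience overload: calls the asynchronous Create above and
// waits on a promise/future pair for the result.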
Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() = default;

SimpleSegmentAlloc::SegmentInfo
SimpleSegmentAlloc::getSegInfo(orc::AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G,
    orc::AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}
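
// In-flight allocation for InProcessMemoryManager. Holds the standard- and
// finalize-lifetime memory slabs until the allocation is either finalized or
// abandoned.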
class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    auto DeallocActions = runFinalizeActions(G.allocActions());
    if (!DeallocActions) {
      OnFinalized(DeallocActions.takeError());
      return;
    }

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(*DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    OnAbandoned(std::move(Err));
  }

private:
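  // Apply the requested protections to each segment's pages and invalidate
  // the instruction cache for any segment mapped executable.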
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph &G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};
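
// Create an InProcessMemoryManager configured with the host's page size.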
Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}
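
// Allocate a single read/write slab large enough for all segments, hand each
// segment a page-aligned slice of it, and apply the BasicLayout so that block
// content is copied to its final in-process addresses.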
void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  // Scan the request and calculate the group and total sizes. This also
  // checks that no segment requires an alignment greater than the page size.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  // Check that the total size requested (including zero fill) is not larger
  // than a size_t.
  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
    OnAllocated(make_error<JITLinkError>(
        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
        " for graph " + G.getName() + " exceeds address space"));
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

    // Zero-fill the whole slab up-front.
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(),
                       static_cast<size_t>(SegsSizes->StandardSegs)};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
  }

  auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(StandardSegsMem.base());
  auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
             << " to standard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Assign addresses and working memory to each segment, carving the standard
  // and finalize slabs into page-aligned slices.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr =
        (AG.getMemDeallocPolicy() == orc::MemDeallocPolicy::Standard)
            ? NextStandardSegAddr
            : NextFinalizeSegAddr;

    Seg.WorkingMem = SegAddr.toPtr<char *>();
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}
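
// Deallocate a set of finalized allocations: run each allocation's dealloc
// actions, then release its standard-lifetime segment slab.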
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    // Run any deallocate calls.
    while (!DeallocActions.empty()) {
      if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    // Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}
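
// Record a finalized allocation so that deallocate() can later release its
// standard segments and run its dealloc actions.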
JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(orc::ExecutorAddr::fromPtr(FA));
}

} // end namespace jitlink
} // end namespace llvm