//===- MappedBlockStream.cpp - Reads stream data from an MSF file ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::msf;

namespace {

template <typename Base> class MappedBlockStreamImpl : public Base {
public:
  template <typename... Args>
  MappedBlockStreamImpl(Args &&... Params)
      : Base(std::forward<Args>(Params)...) {}
};

} // end anonymous namespace
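
// Note: MappedBlockStreamImpl above exists so that the std::make_unique calls
// in the factory functions below can reach constructors of MappedBlockStream
// and WritableMappedBlockStream that are not publicly accessible.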

using Interval = std::pair<uint64_t, uint64_t>;

static Interval intersect(const Interval &I1, const Interval &I2) {
  return std::make_pair(std::max(I1.first, I2.first),
                        std::min(I1.second, I2.second));
}
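
// Intervals are half-open [start, end) pairs of stream offsets. If the two
// inputs do not overlap, the result of intersect() has first > second; the
// callers below account for that (see the containment check in readBytes and
// the assert in fixCacheAfterWrite).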

MappedBlockStream::MappedBlockStream(uint32_t BlockSize,
                                     const MSFStreamLayout &Layout,
                                     BinaryStreamRef MsfData,
                                     BumpPtrAllocator &Allocator)
    : BlockSize(BlockSize), StreamLayout(Layout), MsfData(MsfData),
      Allocator(Allocator) {}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout, BinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  return std::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createIndexedStream(
    const MSFLayout &Layout, BinaryStreamRef MsfData, uint32_t StreamIndex,
    BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return std::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      Layout.SB->BlockSize, SL, MsfData, Allocator);
}
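
// Illustrative use of the factory above (a sketch; `Layout`, `MsfData`, and
// `Alloc` are hypothetical names for a parsed MSFLayout, the raw MSF file
// data, and a caller-owned BumpPtrAllocator):
//
//   auto Stream = MappedBlockStream::createIndexedStream(
//       Layout, MsfData, /*StreamIndex=*/1, Alloc);
//   ArrayRef<uint8_t> Bytes;
//   if (auto EC = Stream->readBytes(0, Stream->getLength(), Bytes))
//     return EC; // propagate the failure to the caller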

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createDirectoryStream(const MSFLayout &Layout,
                                         BinaryStreamRef MsfData,
                                         BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                   BinaryStreamRef MsfData,
                                   BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL(getFpmStreamLayout(Layout));
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

Error MappedBlockStream::readBytes(uint64_t Offset, uint64_t Size,
                                   ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, Size))
    return EC;

  if (tryReadContiguously(Offset, Size, Buffer))
    return Error::success();

  auto CacheIter = CacheMap.find(Offset);
  if (CacheIter != CacheMap.end()) {
    // Try to find an alloc that was large enough for this request.
    for (auto &Entry : CacheIter->second) {
      if (Entry.size() >= Size) {
        Buffer = Entry.slice(0, Size);
        return Error::success();
      }
    }
  }

  // We couldn't find a buffer that started at the correct offset (the most
  // common scenario). Try to see if there is a buffer that starts at some
  // other offset but overlaps the desired range.
  for (auto &CacheItem : CacheMap) {
    Interval RequestExtent = std::make_pair(Offset, Offset + Size);

    // We already checked this one on the fast path above.
    if (CacheItem.first == Offset)
      continue;
    // If the initial extent of the cached item is beyond the ending extent
    // of the request, there is no overlap.
    if (CacheItem.first >= Offset + Size)
      continue;

    // We really only have to check the last item in the list, since we append
    // in order of increasing length.
    if (CacheItem.second.empty())
      continue;

    auto CachedAlloc = CacheItem.second.back();
    // If the initial extent of the request is beyond the ending extent of
    // the cached item, there is no overlap.
    Interval CachedExtent =
        std::make_pair(CacheItem.first, CacheItem.first + CachedAlloc.size());
    if (RequestExtent.first >= CachedExtent.first + CachedExtent.second)
      continue;

    Interval Intersection = intersect(CachedExtent, RequestExtent);
    // Only use this if the entire request extent is contained in the cached
    // extent.
    if (Intersection != RequestExtent)
      continue;

    uint64_t CacheRangeOffset =
        AbsoluteDifference(CachedExtent.first, Intersection.first);
    Buffer = CachedAlloc.slice(CacheRangeOffset, Size);
    return Error::success();
  }

  // Otherwise allocate a large enough buffer in the pool, memcpy the data
  // into it, and return an ArrayRef to that. Do not touch existing pool
  // allocations, as existing clients may be holding a pointer which must
  // not be invalidated.
  uint8_t *WriteBuffer = static_cast<uint8_t *>(Allocator.Allocate(Size, 8));
  if (auto EC = readBytes(Offset, MutableArrayRef<uint8_t>(WriteBuffer, Size)))
    return EC;

  if (CacheIter != CacheMap.end()) {
    CacheIter->second.emplace_back(WriteBuffer, Size);
  } else {
    std::vector<CacheEntry> List;
    List.emplace_back(WriteBuffer, Size);
    CacheMap.insert(std::make_pair(Offset, List));
  }
  Buffer = ArrayRef<uint8_t>(WriteBuffer, Size);
  return Error::success();
}
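
// A worked example of the caching behavior in the readBytes overload above,
// assuming a 100-byte read at stream offset 16 that cannot be satisfied
// contiguously: the data is copied into a pool allocation and recorded under
// CacheMap[16]. A later request for 40 bytes at offset 40 is then served from
// that same allocation, because its extent [40, 80) lies entirely inside the
// cached extent [16, 116); the returned slice starts 24 bytes into the alloc.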

Error MappedBlockStream::readLongestContiguousChunk(uint64_t Offset,
                                                    ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, 1))
    return EC;

  uint64_t First = Offset / BlockSize;
  uint64_t Last = First;

  while (Last < getNumBlocks() - 1) {
    if (StreamLayout.Blocks[Last] != StreamLayout.Blocks[Last + 1] - 1)
      break;
    ++Last;
  }

  uint64_t OffsetInFirstBlock = Offset % BlockSize;
  uint64_t BytesFromFirstBlock = BlockSize - OffsetInFirstBlock;
  uint64_t BlockSpan = Last - First + 1;
  uint64_t ByteSpan = BytesFromFirstBlock + (BlockSpan - 1) * BlockSize;

  ArrayRef<uint8_t> BlockData;
  uint64_t MsfOffset = blockToOffset(StreamLayout.Blocks[First], BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData))
    return EC;

  BlockData = BlockData.drop_front(OffsetInFirstBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), ByteSpan);
  return Error::success();
}
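
// The buffer produced above extends from Offset to the end of the run of
// physically adjacent MSF blocks containing it; a caller that needs more data
// can issue another call starting at Offset + Buffer.size().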

uint64_t MappedBlockStream::getLength() { return StreamLayout.Length; }

bool MappedBlockStream::tryReadContiguously(uint64_t Offset, uint64_t Size,
                                            ArrayRef<uint8_t> &Buffer) {
  if (Size == 0) {
    Buffer = ArrayRef<uint8_t>();
    return true;
  }
  // Attempt to fulfill the request with a reference directly into the stream.
  // This can work even if the request crosses a block boundary, provided that
  // all subsequent blocks are contiguous. For example, a 10k read with a 4k
  // block size can be filled with a reference if, from the starting offset,
  // 3 blocks in a row are contiguous.
  uint64_t BlockNum = Offset / BlockSize;
  uint64_t OffsetInBlock = Offset % BlockSize;
  uint64_t BytesFromFirstBlock = std::min(Size, BlockSize - OffsetInBlock);
  uint64_t NumAdditionalBlocks =
      alignTo(Size - BytesFromFirstBlock, BlockSize) / BlockSize;

  uint64_t RequiredContiguousBlocks = NumAdditionalBlocks + 1;
  uint64_t E = StreamLayout.Blocks[BlockNum];
  for (uint64_t I = 0; I < RequiredContiguousBlocks; ++I, ++E) {
    if (StreamLayout.Blocks[I + BlockNum] != E)
      return false;
  }

  // Read out the entire block where the requested offset starts. Then drop
  // bytes from the beginning so that the actual starting byte lines up with
  // the requested starting byte. Then, since we know this is a contiguous
  // cross-block span, explicitly resize the ArrayRef to cover the entire
  // request length.
  ArrayRef<uint8_t> BlockData;
  uint64_t FirstBlockAddr = StreamLayout.Blocks[BlockNum];
  uint64_t MsfOffset = blockToOffset(FirstBlockAddr, BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData)) {
    consumeError(std::move(EC));
    return false;
  }
  BlockData = BlockData.drop_front(OffsetInBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), Size);
  return true;
}
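
// Worked example for tryReadContiguously above: with BlockSize = 4096, a
// 1000-byte request at Offset = 4000 starts 4000 bytes into stream block 0, so
// only 96 bytes come from the first block, NumAdditionalBlocks is 1, and the
// request can be served by reference only if StreamLayout.Blocks[0] and
// StreamLayout.Blocks[1] are physically adjacent in the MSF file.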

Error MappedBlockStream::readBytes(uint64_t Offset,
                                   MutableArrayRef<uint8_t> Buffer) {
  uint64_t BlockNum = Offset / BlockSize;
  uint64_t OffsetInBlock = Offset % BlockSize;

  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, Buffer.size()))
    return EC;

  uint64_t BytesLeft = Buffer.size();
  uint64_t BytesWritten = 0;
  uint8_t *WriteBuffer = Buffer.data();
  while (BytesLeft > 0) {
    uint64_t StreamBlockAddr = StreamLayout.Blocks[BlockNum];

    ArrayRef<uint8_t> BlockData;
    uint64_t Offset = blockToOffset(StreamBlockAddr, BlockSize);
    if (auto EC = MsfData.readBytes(Offset, BlockSize, BlockData))
      return EC;

    const uint8_t *ChunkStart = BlockData.data() + OffsetInBlock;
    uint64_t BytesInChunk = std::min(BytesLeft, BlockSize - OffsetInBlock);
    ::memcpy(WriteBuffer + BytesWritten, ChunkStart, BytesInChunk);

    BytesWritten += BytesInChunk;
    BytesLeft -= BytesInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  return Error::success();
}

void MappedBlockStream::invalidateCache() { CacheMap.shrink_and_clear(); }

void MappedBlockStream::fixCacheAfterWrite(uint64_t Offset,
                                           ArrayRef<uint8_t> Data) const {
  // If this write overlapped a read which previously came from the pool,
  // someone may still be holding a pointer to that alloc which is now invalid.
  // Compute the overlapping range and update the cache entry, so any
  // outstanding buffers are automatically updated.
  for (const auto &MapEntry : CacheMap) {
    // If the end of the written extent precedes the beginning of the cached
    // extent, ignore this map entry.
    if (Offset + Data.size() < MapEntry.first)
      continue;
    for (const auto &Alloc : MapEntry.second) {
      // If the end of the cached extent precedes the beginning of the written
      // extent, ignore this alloc.
      if (MapEntry.first + Alloc.size() < Offset)
        continue;

      // If we get here, they are guaranteed to overlap.
      Interval WriteInterval = std::make_pair(Offset, Offset + Data.size());
      Interval CachedInterval =
          std::make_pair(MapEntry.first, MapEntry.first + Alloc.size());
      // If they overlap, we need to write the new data into the overlapping
      // range.
      auto Intersection = intersect(WriteInterval, CachedInterval);
      assert(Intersection.first <= Intersection.second);

      uint64_t Length = Intersection.second - Intersection.first;
      uint64_t SrcOffset =
          AbsoluteDifference(WriteInterval.first, Intersection.first);
      uint64_t DestOffset =
          AbsoluteDifference(CachedInterval.first, Intersection.first);
      ::memcpy(Alloc.data() + DestOffset, Data.data() + SrcOffset, Length);
    }
  }
}
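
// Example of the offset arithmetic in fixCacheAfterWrite above: a write
// covering [100, 200) against a cached allocation covering [150, 400)
// intersects in [150, 200), so Length = 50, SrcOffset = 50 (skip the part of
// the written data that precedes the cached extent), and DestOffset = 0 (the
// intersection begins at the very start of the cached allocation).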

WritableMappedBlockStream::WritableMappedBlockStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout,
    WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator)
    : ReadInterface(BlockSize, Layout, MsfData, Allocator),
      WriteInterface(MsfData) {}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createStream(uint32_t BlockSize,
                                        const MSFStreamLayout &Layout,
                                        WritableBinaryStreamRef MsfData,
                                        BumpPtrAllocator &Allocator) {
  return std::make_unique<MappedBlockStreamImpl<WritableMappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createIndexedStream(const MSFLayout &Layout,
                                               WritableBinaryStreamRef MsfData,
                                               uint32_t StreamIndex,
                                               BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createDirectoryStream(
    const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                           WritableBinaryStreamRef MsfData,
                                           BumpPtrAllocator &Allocator,
                                           bool AltFpm) {
  // We only want to give the user a stream containing the bytes of the FPM
  // that are actually valid, but we want to initialize all of the bytes, even
  // those that come from reserved FPM blocks where the entire block is unused.
  // To do this, we first create the full layout, which gives us a stream with
  // all bytes and all blocks, and initialize everything to 0xFF (all blocks in
  // the file are unused). Then we create the minimal layout (which contains
  // only a subset of the bytes previously initialized), and return that to the
  // user.
  MSFStreamLayout MinLayout(getFpmStreamLayout(Layout, false, AltFpm));

  MSFStreamLayout FullLayout(getFpmStreamLayout(Layout, true, AltFpm));
  auto Result =
      createStream(Layout.SB->BlockSize, FullLayout, MsfData, Allocator);
  if (!Result)
    return Result;
  std::vector<uint8_t> InitData(Layout.SB->BlockSize, 0xFF);
  BinaryStreamWriter Initializer(*Result);
  while (Initializer.bytesRemaining() > 0)
    cantFail(Initializer.writeBytes(InitData));
  return createStream(Layout.SB->BlockSize, MinLayout, MsfData, Allocator);
}

Error WritableMappedBlockStream::readBytes(uint64_t Offset, uint64_t Size,
                                           ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readBytes(Offset, Size, Buffer);
}

Error WritableMappedBlockStream::readLongestContiguousChunk(
    uint64_t Offset, ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readLongestContiguousChunk(Offset, Buffer);
}

uint64_t WritableMappedBlockStream::getLength() {
  return ReadInterface.getLength();
}

Error WritableMappedBlockStream::writeBytes(uint64_t Offset,
                                            ArrayRef<uint8_t> Buffer) {
  // Make sure we aren't trying to write beyond the end of the stream.
  if (auto EC = checkOffsetForWrite(Offset, Buffer.size()))
    return EC;

  uint64_t BlockNum = Offset / getBlockSize();
  uint64_t OffsetInBlock = Offset % getBlockSize();

  uint64_t BytesLeft = Buffer.size();
  uint64_t BytesWritten = 0;
  while (BytesLeft > 0) {
    uint64_t StreamBlockAddr = getStreamLayout().Blocks[BlockNum];
    uint64_t BytesToWriteInChunk =
        std::min(BytesLeft, getBlockSize() - OffsetInBlock);

    const uint8_t *Chunk = Buffer.data() + BytesWritten;
    ArrayRef<uint8_t> ChunkData(Chunk, BytesToWriteInChunk);
    uint64_t MsfOffset = blockToOffset(StreamBlockAddr, getBlockSize());
    MsfOffset += OffsetInBlock;
    if (auto EC = WriteInterface.writeBytes(MsfOffset, ChunkData))
      return EC;

    BytesLeft -= BytesToWriteInChunk;
    BytesWritten += BytesToWriteInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  ReadInterface.fixCacheAfterWrite(Offset, Buffer);

  return Error::success();
}

Error WritableMappedBlockStream::commit() { return WriteInterface.commit(); }
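
// Illustrative end-to-end use of the writable interface above (a sketch; the
// names `Layout`, `MsfData`, `Alloc`, and `Patch` are hypothetical and would
// be supplied by the caller):
//
//   auto DirStream = WritableMappedBlockStream::createDirectoryStream(
//       Layout, MsfData, Alloc);
//   ArrayRef<uint8_t> Patch = ...; // bytes to splice into the directory
//   if (auto EC = DirStream->writeBytes(0, Patch))
//     return EC;
//   if (auto EC = DirStream->commit()) // flush through the write interface
//     return EC;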