// reader_at.go
package filer2

import (
	"context"
	"fmt"
	"io"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)
  12. type ChunkReadAt struct {
  13. masterClient *wdclient.MasterClient
  14. chunkViews []*ChunkView
  15. buffer []byte
  16. bufferOffset int64
  17. lookupFileId func(fileId string) (targetUrl string, err error)
  18. readerLock sync.Mutex
  19. chunkCache *chunk_cache.ChunkCache
  20. }
  21. // var _ = io.ReaderAt(&ChunkReadAt{})
// LookupFileIdFunctionType resolves a file id to the URL of a volume server that serves it.
type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error)
  23. func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
  24. return func(fileId string) (targetUrl string, err error) {
  25. err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  26. vid := VolumeId(fileId)
  27. resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
  28. VolumeIds: []string{vid},
  29. })
  30. if err != nil {
  31. return err
  32. }
  33. locations := resp.LocationsMap[vid]
  34. if locations == nil || len(locations.Locations) == 0 {
  35. glog.V(0).Infof("failed to locate %s", fileId)
  36. return fmt.Errorf("failed to locate %s", fileId)
  37. }
  38. volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
  39. targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
  40. return nil
  41. })
  42. return
  43. }
  44. }
  45. func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
  46. return &ChunkReadAt{
  47. chunkViews: chunkViews,
  48. lookupFileId: LookupFn(filerClient),
  49. bufferOffset: -1,
  50. chunkCache: chunkCache,
  51. }
  52. }
  53. func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
  54. c.readerLock.Lock()
  55. defer c.readerLock.Unlock()
  56. for n < len(p) && err == nil {
  57. readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
  58. n += readCount
  59. err = readErr
  60. if readCount == 0 {
  61. return n, io.EOF
  62. }
  63. }
  64. return
  65. }
  66. func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
  67. var found bool
  68. for _, chunk := range c.chunkViews {
  69. if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
  70. found = true
  71. if c.bufferOffset != chunk.LogicOffset {
  72. c.buffer, err = c.fetchChunkData(chunk)
  73. if err != nil {
  74. glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
  75. }
  76. c.bufferOffset = chunk.LogicOffset
  77. }
  78. break
  79. }
  80. }
  81. if !found {
  82. return 0, io.EOF
  83. }
  84. if err == nil {
  85. n = copy(p, c.buffer[offset-c.bufferOffset:])
  86. }
  87. // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
  88. return
  89. }
  90. func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
  91. glog.V(4).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
  92. hasDataInCache := false
  93. chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
  94. if chunkData != nil {
  95. glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
  96. hasDataInCache = true
  97. } else {
  98. chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
  99. if err != nil {
  100. return nil, err
  101. }
  102. }
  103. if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
  104. glog.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
  105. return nil, fmt.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
  106. }
  107. data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
  108. if !hasDataInCache {
  109. c.chunkCache.SetChunk(chunkView.FileId, chunkData)
  110. }
  111. return data, nil
  112. }
// doFetchFullChunkData retrieves the complete chunk identified by fileId,
// delegating to the package-level fetchChunk helper with this reader's
// lookup function. NOTE(review): fetchChunk is defined elsewhere in the
// package — presumably it downloads via HTTP and applies cipherKey /
// gunzip as flagged; confirm against its definition.
func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
	return fetchChunk(c.lookupFileId, fileId, cipherKey, isGzipped)
}