chunk_cache.go

package chunk_cache

import (
	"sync"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
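
// Size thresholds for the cache tiers: chunks smaller than memCacheSizeLimit
// (1 MiB) are eligible for the in-memory cache, while the two on-disk limits
// below decide which disk layer a chunk is routed to.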
const (
	memCacheSizeLimit     = 1024 * 1024
	onDiskCacheSizeLimit0 = memCacheSizeLimit
	onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
)
// ChunkCache is a global cache for recently accessed file chunks,
// backed by an in-memory cache and tiered on-disk cache layers.
type ChunkCache struct {
	memCache   *ChunkCacheInMemory
	diskCaches []*OnDiskCacheLayer
	sync.RWMutex
}
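
// NewChunkCache builds a cache with an in-memory layer capped at maxEntries
// entries and three on-disk layers under dir: two quarter-sized layers for
// small and medium chunks, and a half-sized layer for everything larger. The
// final argument (4) is passed through to NewOnDiskCacheLayer; it appears to
// control how many cache volumes back each layer.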
func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64) *ChunkCache {
	c := &ChunkCache{
		memCache: NewChunkCacheInMemory(maxEntries),
	}
	c.diskCaches = make([]*OnDiskCacheLayer, 3)
	c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
	c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
	c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
	return c
}
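
// GetChunk looks up a cached chunk by its file id. It is safe to call on a
// nil receiver and returns nil when the chunk is not cached.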
func (c *ChunkCache) GetChunk(fileId string, chunkSize uint64) (data []byte) {
	if c == nil {
		return
	}
	c.RLock()
	defer c.RUnlock()

	return c.doGetChunk(fileId, chunkSize)
}
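
// doGetChunk probes the tiers from fastest to slowest: the in-memory cache
// for chunks under memCacheSizeLimit, then each on-disk layer whose size
// limit admits chunkSize. A lookup only counts as a hit if the cached data
// is at least chunkSize bytes long.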
func (c *ChunkCache) doGetChunk(fileId string, chunkSize uint64) (data []byte) {

	if chunkSize < memCacheSizeLimit {
		data = c.memCache.GetChunk(fileId)
		if len(data) >= int(chunkSize) {
			return data
		}
	}

	fid, err := needle.ParseFileIdFromString(fileId)
	if err != nil {
		glog.Errorf("failed to parse file id %s: %v", fileId, err)
		return nil
	}

	if chunkSize < onDiskCacheSizeLimit0 {
		data = c.diskCaches[0].getChunk(fid.Key)
		if len(data) >= int(chunkSize) {
			return data
		}
	}
	if chunkSize < onDiskCacheSizeLimit1 {
		data = c.diskCaches[1].getChunk(fid.Key)
		if len(data) >= int(chunkSize) {
			return data
		}
	}
	{
		data = c.diskCaches[2].getChunk(fid.Key)
		if len(data) >= int(chunkSize) {
			return data
		}
	}

	return nil
}
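
// SetChunk stores a chunk in the cache. It is safe to call on a nil receiver.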
func (c *ChunkCache) SetChunk(fileId string, data []byte) {
	if c == nil {
		return
	}
	c.Lock()
	defer c.Unlock()

	glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data))

	c.doSetChunk(fileId, data)
}
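
// doSetChunk writes small chunks to the in-memory cache and, based on size,
// to exactly one of the on-disk layers.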
func (c *ChunkCache) doSetChunk(fileId string, data []byte) {

	if len(data) < memCacheSizeLimit {
		c.memCache.SetChunk(fileId, data)
	}

	fid, err := needle.ParseFileIdFromString(fileId)
	if err != nil {
		glog.Errorf("failed to parse file id %s: %v", fileId, err)
		return
	}

	if len(data) < onDiskCacheSizeLimit0 {
		c.diskCaches[0].setChunk(fid.Key, data)
	} else if len(data) < onDiskCacheSizeLimit1 {
		c.diskCaches[1].setChunk(fid.Key, data)
	} else {
		c.diskCaches[2].setChunk(fid.Key, data)
	}
}
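
// Shutdown shuts down every on-disk cache layer. It is safe to call on a nil
// receiver.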
func (c *ChunkCache) Shutdown() {
	if c == nil {
		return
	}
	c.Lock()
	defer c.Unlock()
	for _, diskCache := range c.diskCaches {
		diskCache.shutdown()
	}
}
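
// Example usage (a minimal sketch, not part of this file; the directory path,
// file id, and chunkData below are placeholder values, and ChunkCacheInMemory
// and OnDiskCacheLayer are assumed to be provided elsewhere in this package):
//
//	cache := NewChunkCache(1000, "/tmp/seaweedfs_chunk_cache", 1024)
//	defer cache.Shutdown()
//	cache.SetChunk("3,01637037d6", chunkData)
//	if data := cache.GetChunk("3,01637037d6", uint64(len(chunkData))); data != nil {
//		// cache hit
//	}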