chunk_cache_on_disk.go

package chunk_cache

import (
	"fmt"
	"os"
	"time"

	"github.com/syndtr/goleveldb/leveldb/opt"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// ChunkCacheVolume implements an on-disk chunk cache.
// Entries form a FIFO with a size limit.
type ChunkCacheVolume struct {
	DataBackend backend.BackendStorageFile
	nm          storage.NeedleMapper
	fileName    string
	smallBuffer []byte // zero-filled, reused to pad writes to NeedlePaddingSize
	sizeLimit   int64
	lastModTime time.Time
	fileSize    int64
}

// LoadOrCreateChunkCacheVolume opens an existing cache volume (.dat data
// file, .idx index file, .ldb needle map) or creates a new one, preallocating
// the data file to the given size.
func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {

	v := &ChunkCacheVolume{
		smallBuffer: make([]byte, types.NeedlePaddingSize),
		fileName:    fileName,
		sizeLimit:   preallocate,
	}

	var err error

	if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
		if !canRead {
			return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
		}
		if !canWrite {
			return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
		}
		if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
			return nil, fmt.Errorf("cannot open cache file %s.dat: %v", v.fileName, err)
		} else {
			v.DataBackend = backend.NewDiskFile(dataFile)
			v.lastModTime = modTime
			v.fileSize = fileSize
		}
	} else {
		if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
			return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
		}
		v.lastModTime = time.Now()
	}

	var indexFile *os.File
	if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
		return nil, fmt.Errorf("cannot open cache index %s.idx: %v", v.fileName, err)
	}

	glog.V(1).Infoln("loading leveldb", v.fileName+".ldb")
	opts := &opt.Options{
		BlockCacheCapacity:            2 * 1024 * 1024, // default value is 8MiB
		WriteBuffer:                   1 * 1024 * 1024, // default value is 4MiB
		CompactionTableSizeMultiplier: 10,              // default value is 1
	}
	if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts, 0); err != nil {
		return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
	}

	return v, nil
}

// Shutdown closes the data file and the needle map.
func (v *ChunkCacheVolume) Shutdown() {
	if v.DataBackend != nil {
		v.DataBackend.Close()
		v.DataBackend = nil
	}
	if v.nm != nil {
		v.nm.Close()
		v.nm = nil
	}
}

// doReset shuts the volume down and removes all of its on-disk state.
func (v *ChunkCacheVolume) doReset() {
	v.Shutdown()
	os.Truncate(v.fileName+".dat", 0)
	os.Truncate(v.fileName+".idx", 0)
	glog.V(4).Infof("cache removeAll %s ...", v.fileName+".ldb")
	os.RemoveAll(v.fileName + ".ldb")
	glog.V(4).Infof("cache removed %s", v.fileName+".ldb")
}

// Reset empties the cache volume and reopens it.
func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
	v.doReset()
	return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
}

// GetNeedle returns the full cached payload for the given needle id, or
// storage.ErrorNotFound if the id is not in the needle map.
func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
	nv, ok := v.nm.Get(key)
	if !ok {
		return nil, storage.ErrorNotFound
	}
	data := make([]byte, nv.Size)
	if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil {
		return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
			v.fileName, nv.Offset.ToActualOffset(), nv.Offset.ToActualOffset()+int64(nv.Size), readErr)
	} else if readSize != int(nv.Size) {
		return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
	}
	return data, nil
}

// getNeedleSlice reads up to length bytes of a cached needle, starting at
// offset bytes into the needle. The read is clamped to the needle size.
func (v *ChunkCacheVolume) getNeedleSlice(key types.NeedleId, offset, length uint64) ([]byte, error) {
	nv, ok := v.nm.Get(key)
	if !ok {
		return nil, storage.ErrorNotFound
	}
	wanted := min(int(length), int(nv.Size)-int(offset))
	if wanted < 0 {
		// should never happen, but better than panicking
		return nil, ErrorOutOfBounds
	}
	data := make([]byte, wanted)
	if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); readErr != nil {
		return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
			v.fileName, nv.Offset.ToActualOffset()+int64(offset), nv.Offset.ToActualOffset()+int64(offset)+int64(wanted), readErr)
	} else if readSize != wanted {
		return nil, fmt.Errorf("read %d, expected %d", readSize, wanted)
	}
	return data, nil
}

// readNeedleSliceAt fills data from a cached needle, starting at offset bytes
// into the needle, and returns the number of bytes read.
func (v *ChunkCacheVolume) readNeedleSliceAt(data []byte, key types.NeedleId, offset uint64) (n int, err error) {
	nv, ok := v.nm.Get(key)
	if !ok {
		return 0, storage.ErrorNotFound
	}
	wanted := min(len(data), int(nv.Size)-int(offset))
	if wanted < 0 {
		// should never happen, but better than panicking
		return 0, ErrorOutOfBounds
	}
	if n, err = v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); err != nil {
		return n, fmt.Errorf("read %s.dat [%d,%d): %v",
			v.fileName, nv.Offset.ToActualOffset()+int64(offset), nv.Offset.ToActualOffset()+int64(offset)+int64(wanted), err)
	} else if n != wanted {
		return n, fmt.Errorf("read %d, expected %d", n, wanted)
	}
	return n, nil
}

// WriteNeedle appends data at the current end of the .dat file, zero-pads the
// write to a NeedlePaddingSize boundary, and records the needle's offset and
// size in the needle map.
func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {

	offset := v.fileSize

	written, err := v.DataBackend.WriteAt(data, offset)
	if err != nil {
		return err
	} else if written != len(data) {
		return fmt.Errorf("partial written %d, expected %d", written, len(data))
	}

	v.fileSize += int64(written)
	extraSize := written % types.NeedlePaddingSize
	if extraSize != 0 {
		// pad so the next needle starts on an aligned offset
		if _, err := v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written)); err != nil {
			return err
		}
		v.fileSize += int64(types.NeedlePaddingSize - extraSize)
	}

	return v.nm.Put(key, types.ToOffset(offset), types.Size(len(data)))
}
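
// Usage sketch (illustrative only, not part of the original file): how a
// caller might exercise this cache end to end. The path, size limit, needle
// id, and payload below are made-up values.
//
//	v, err := LoadOrCreateChunkCacheVolume("/tmp/cache/c0", 1024*1024*1024)
//	if err != nil {
//		// handle error
//	}
//	defer v.Shutdown()
//
//	key := types.NeedleId(12345)
//	if err := v.WriteNeedle(key, []byte("chunk bytes")); err != nil {
//		// handle error
//	}
//	data, err := v.GetNeedle(key) // full payload; storage.ErrorNotFound on miss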