- package chunk_cache
- import (
- "fmt"
- "os"
- "time"
- "github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
- "github.com/chrislusf/seaweedfs/weed/storage/backend"
- "github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/chrislusf/seaweedfs/weed/util"
- )
// This implements an on disk cache
// The entries are an FIFO with a size limit
type ChunkCacheVolume struct {
	DataBackend backend.BackendStorageFile // backing .dat file holding the needle data
	nm          storage.NeedleMapper       // needle id -> (offset, size) index, leveldb-backed
	fileName    string                     // base path; actual files are fileName + ".dat"/".idx"/".ldb"
	smallBuffer []byte                     // zero-filled padding scratch, NeedlePaddingSize bytes
	sizeLimit   int64                      // preallocation size for the .dat file
	lastModTime time.Time                  // mod time of the .dat file when loaded/created
	fileSize    int64                      // current append offset into the .dat file
}
- func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {
- v := &ChunkCacheVolume{
- smallBuffer: make([]byte, types.NeedlePaddingSize),
- fileName: fileName,
- sizeLimit: preallocate,
- }
- var err error
- if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
- if !canRead {
- return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
- }
- if !canWrite {
- return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
- }
- if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
- return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
- } else {
- v.DataBackend = backend.NewDiskFile(dataFile)
- v.lastModTime = modTime
- v.fileSize = fileSize
- }
- } else {
- if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
- return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
- }
- v.lastModTime = time.Now()
- }
- var indexFile *os.File
- if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
- return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
- }
- glog.V(1).Infoln("loading leveldb", v.fileName+".ldb")
- opts := &opt.Options{
- BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
- WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
- CompactionTableSizeMultiplier: 10, // default value is 1
- }
- if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil {
- return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
- }
- return v, nil
- }
- func (v *ChunkCacheVolume) Shutdown() {
- if v.DataBackend != nil {
- v.DataBackend.Close()
- v.DataBackend = nil
- }
- if v.nm != nil {
- v.nm.Close()
- v.nm = nil
- }
- }
- func (v *ChunkCacheVolume) doReset() {
- v.Shutdown()
- os.Truncate(v.fileName+".dat", 0)
- os.Truncate(v.fileName+".idx", 0)
- glog.V(4).Infof("cache removeAll %s ...", v.fileName+".ldb")
- os.RemoveAll(v.fileName + ".ldb")
- glog.V(4).Infof("cache removed %s", v.fileName+".ldb")
- }
- func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
- v.doReset()
- return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
- }
- func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
- nv, ok := v.nm.Get(key)
- if !ok {
- return nil, storage.ErrorNotFound
- }
- data := make([]byte, nv.Size)
- if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil {
- return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
- v.fileName, nv.Offset.ToActualOffset(), nv.Offset.ToActualOffset()+int64(nv.Size), readErr)
- } else {
- if readSize != int(nv.Size) {
- return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
- }
- }
- return data, nil
- }
- func (v *ChunkCacheVolume) getNeedleSlice(key types.NeedleId, offset, length uint64) ([]byte, error) {
- nv, ok := v.nm.Get(key)
- if !ok {
- return nil, storage.ErrorNotFound
- }
- wanted := min(int(length), int(nv.Size)-int(offset))
- if wanted < 0 {
- // should never happen, but better than panicing
- return nil, ErrorOutOfBounds
- }
- data := make([]byte, wanted)
- if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); readErr != nil {
- return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
- v.fileName, nv.Offset.ToActualOffset()+int64(offset), int(nv.Offset.ToActualOffset())+int(offset)+wanted, readErr)
- } else {
- if readSize != wanted {
- return nil, fmt.Errorf("read %d, expected %d", readSize, wanted)
- }
- }
- return data, nil
- }
- func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {
- offset := v.fileSize
- written, err := v.DataBackend.WriteAt(data, offset)
- if err != nil {
- return err
- } else if written != len(data) {
- return fmt.Errorf("partial written %d, expected %d", written, len(data))
- }
- v.fileSize += int64(written)
- extraSize := written % types.NeedlePaddingSize
- if extraSize != 0 {
- v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written))
- v.fileSize += int64(types.NeedlePaddingSize - extraSize)
- }
- if err := v.nm.Put(key, types.ToOffset(offset), types.Size(len(data))); err != nil {
- return err
- }
- return nil
- }