volume.go

package storage

import (
	"fmt"
	"path"
	"strconv"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

type Volume struct {
	Id needle.VolumeId
	dir string
	dirIdx string
	Collection string
	DataBackend backend.BackendStorageFile
	nm NeedleMapper
	tmpNm TempNeedleMapper
	needleMapKind NeedleMapKind
	noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteLock sync.RWMutex
	hasRemoteFile bool // if the volume has a remote file
	MemoryMapMaxSizeMb uint32
	super_block.SuperBlock
	dataFileAccessLock sync.RWMutex
	superBlockAccessLock sync.Mutex
	asyncRequestsChan chan *needle.AsyncRequest
	lastModifiedTsSeconds uint64 // unix time in seconds
	lastAppendAtNs uint64 // unix time in nanoseconds
	lastCompactIndexOffset uint64
	lastCompactRevision uint16
	isCompacting bool
	isCommitCompacting bool
	volumeInfo *volume_server_pb.VolumeInfo
	location *DiskLocation
	lastIoError error
}
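
// NewVolume opens the volume identified by collection and id, with its data
// file under dirname and its index file under dirIdx. Passing a nil
// replicaPlacement makes the superblock load from disk, i.e. it opens an
// existing volume. A minimal caller sketch; the NeedleMapInMemory constant
// and the paths are assumptions for illustration, not taken from this file:
//
//	v, err := NewVolume("/data", "/data", "pics", needle.VolumeId(1),
//		NeedleMapInMemory, nil, nil, 0, 0)
//	if err != nil {
//		return err
//	}
//	defer v.Close()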
func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) {
	// if replicaPlacement is nil, the superblock will be loaded from disk
	v = &Volume{dir: dirname, dirIdx: dirIdx, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
		asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
	v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
	v.needleMapKind = needleMapKind
	e = v.load(true, true, needleMapKind, preallocate)
	v.startWorker()
	return
}

func (v *Volume) String() string {
	v.noWriteLock.RLock()
	defer v.noWriteLock.RUnlock()
	return fmt.Sprintf("Id:%v dir:%s dirIdx:%s Collection:%s dataFile:%v nm:%v noWrite:%v canDelete:%v", v.Id, v.dir, v.dirIdx, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete)
}

func VolumeFileName(dir string, collection string, id int) (fileName string) {
	idString := strconv.Itoa(id)
	if collection == "" {
		fileName = path.Join(dir, idString)
	} else {
		fileName = path.Join(dir, collection+"_"+idString)
	}
	return
}

func (v *Volume) DataFileName() (fileName string) {
	return VolumeFileName(v.dir, v.Collection, int(v.Id))
}

func (v *Volume) IndexFileName() (fileName string) {
	return VolumeFileName(v.dirIdx, v.Collection, int(v.Id))
}

func (v *Volume) FileName(ext string) (fileName string) {
	switch ext {
	case ".idx", ".cpx", ".ldb":
		return VolumeFileName(v.dirIdx, v.Collection, int(v.Id)) + ext
	}
	// .dat, .cpd, .vif
	return VolumeFileName(v.dir, v.Collection, int(v.Id)) + ext
}
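
// Index-related extensions resolve under dirIdx while data-related ones
// resolve under dir. For example, with dir="/data", dirIdx="/idx",
// Collection="pics", and Id=3 (values chosen purely for illustration):
//
//	v.FileName(".idx") // "/idx/pics_3.idx"
//	v.FileName(".dat") // "/data/pics_3.dat"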

func (v *Volume) Version() needle.Version {
	v.superBlockAccessLock.Lock()
	defer v.superBlockAccessLock.Unlock()
	if v.volumeInfo.Version != 0 {
		v.SuperBlock.Version = needle.Version(v.volumeInfo.Version)
	}
	return v.SuperBlock.Version
}

func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.DataBackend == nil {
		return
	}
	datFileSize, modTime, e := v.DataBackend.GetStat()
	if e == nil {
		return uint64(datFileSize), v.nm.IndexFileSize(), modTime
	}
	glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
	return // -1 causes integer overflow and the volume to become unwritable.
}

func (v *Volume) ContentSize() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.ContentSize()
}

func (v *Volume) DeletedSize() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.DeletedSize()
}

func (v *Volume) FileCount() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return uint64(v.nm.FileCount())
}

func (v *Volume) DeletedCount() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return uint64(v.nm.DeletedCount())
}

func (v *Volume) MaxFileKey() types.NeedleId {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.MaxFileKey()
}

func (v *Volume) IndexFileSize() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.IndexFileSize()
}

func (v *Volume) DiskType() types.DiskType {
	return v.location.DiskType
}

func (v *Volume) SetStopping() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()
	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume SetStopping fail to sync volume idx %d", v.Id)
		}
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Sync(); err != nil {
			glog.Warningf("Volume SetStopping fail to sync volume %d", v.Id)
		}
	}
}

func (v *Volume) SyncToDisk() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()
	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
		}
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
		}
	}
}

// Close cleanly shuts down this volume
func (v *Volume) Close() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()
	for v.isCommitCompacting {
		time.Sleep(521 * time.Millisecond)
		glog.Warningf("Volume Close wait for compaction %d", v.Id)
	}
	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
		}
		v.nm.Close()
		v.nm = nil
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
		}
		_ = v.DataBackend.Close()
		v.DataBackend = nil
		stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
	}
}

func (v *Volume) NeedToReplicate() bool {
	return v.ReplicaPlacement.GetCopyCount() > 1
}

// volume is expired if modified time + volume ttl < now
// except when volume is empty
// or when the volume does not have a ttl
// or when volumeSizeLimit is 0 when server just starts
func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
	if volumeSizeLimit == 0 {
		// skip if we don't know size limit
		return false
	}
	if contentSize <= super_block.SuperBlockSize {
		return false
	}
	if v.Ttl == nil || v.Ttl.Minutes() == 0 {
		return false
	}
	glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds)
	livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
	glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes)
	if int64(v.Ttl.Minutes()) < livedMinutes {
		return true
	}
	return false
}
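
// Worked example (numbers are illustrative): with a TTL of 180 minutes, a
// volume whose content exceeds the superblock size and that was last modified
// 200 minutes ago has livedMinutes = 200, so 180 < 200 and expired returns
// true; if it had been modified 60 minutes ago, it would not yet be expired.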

// wait either maxDelayMinutes or 10% of ttl minutes
func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
	if v.Ttl == nil || v.Ttl.Minutes() == 0 {
		return false
	}
	removalDelay := v.Ttl.Minutes() / 10
	if removalDelay > maxDelayMinutes {
		removalDelay = maxDelayMinutes
	}
	if uint64(v.Ttl.Minutes()+removalDelay)*60+v.lastModifiedTsSeconds < uint64(time.Now().Unix()) {
		return true
	}
	return false
}
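
// Worked example (numbers are illustrative): with a TTL of 600 minutes and
// maxDelayMinutes of 30, removalDelay starts at 600/10 = 60 and is capped at
// 30, so the volume qualifies for removal only once (600+30)*60 seconds have
// passed since lastModifiedTsSeconds.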

func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	glog.V(4).Infof("collectStatus volume %d", v.Id)
	if v.nm == nil || v.DataBackend == nil {
		return
	}
	ok = true
	maxFileKey = v.nm.MaxFileKey()
	datFileSize, modTime, _ = v.DataBackend.GetStat()
	fileCount = uint64(v.nm.FileCount())
	deletedCount = uint64(v.nm.DeletedCount())
	deletedSize = v.nm.DeletedSize()
	return
}

func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.VolumeInformationMessage) {
	maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize, ok := v.collectStatus()
	if !ok {
		return 0, nil
	}
	volumeInfo := &master_pb.VolumeInformationMessage{
		Id: uint32(v.Id),
		Size: uint64(volumeSize),
		Collection: v.Collection,
		FileCount: fileCount,
		DeleteCount: deletedCount,
		DeletedByteCount: deletedSize,
		ReadOnly: v.IsReadOnly(),
		ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
		Version: uint32(v.Version()),
		Ttl: v.Ttl.ToUint32(),
		CompactRevision: uint32(v.SuperBlock.CompactionRevision),
		ModifiedAtSecond: modTime.Unix(),
		DiskType: string(v.location.DiskType),
	}
	volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()
	return maxFileKey, volumeInfo
}

func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) {
	if v.volumeInfo == nil {
		return
	}
	if len(v.volumeInfo.GetFiles()) == 0 {
		return
	}
	return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey()
}

func (v *Volume) IsReadOnly() bool {
	v.noWriteLock.RLock()
	defer v.noWriteLock.RUnlock()
	return v.noWriteOrDelete || v.noWriteCanDelete || v.location.isDiskSpaceLow
}