// volume_layout.go
  1. package topology
  2. import (
  3. "errors"
  4. "fmt"
  5. "github.com/chrislusf/seaweedfs/weed/storage/types"
  6. "math/rand"
  7. "sync"
  8. "time"
  9. "github.com/chrislusf/seaweedfs/weed/glog"
  10. "github.com/chrislusf/seaweedfs/weed/storage"
  11. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  12. "github.com/chrislusf/seaweedfs/weed/storage/super_block"
  13. )
  14. type copyState int
  15. const (
  16. noCopies copyState = 0 + iota
  17. insufficientCopies
  18. enoughCopies
  19. )
  20. type volumeState string
  21. const (
  22. readOnlyState volumeState = "ReadOnly"
  23. oversizedState = "Oversized"
  24. crowdedState = "Crowded"
  25. )
  26. type stateIndicator func(copyState) bool
  27. func ExistCopies() stateIndicator {
  28. return func(state copyState) bool { return state != noCopies }
  29. }
  30. func NoCopies() stateIndicator {
  31. return func(state copyState) bool { return state == noCopies }
  32. }
// volumesBinaryState tracks, for one named per-volume condition, which
// volumes currently satisfy it and on which data nodes, so the condition can
// be evaluated against the replica placement's copy count.
type volumesBinaryState struct {
	rp        *super_block.ReplicaPlacement
	name      volumeState    // the name for volume state (eg. "Readonly", "Oversized")
	indicator stateIndicator // indicate whether the volumes should be marked as `name`
	copyMap   map[needle.VolumeId]*VolumeLocationList
}
  39. func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState {
  40. return &volumesBinaryState{
  41. rp: rp,
  42. name: name,
  43. indicator: indicator,
  44. copyMap: make(map[needle.VolumeId]*VolumeLocationList),
  45. }
  46. }
  47. func (v *volumesBinaryState) Dump() (res []uint32) {
  48. for vid, list := range v.copyMap {
  49. if v.indicator(v.copyState(list)) {
  50. res = append(res, uint32(vid))
  51. }
  52. }
  53. return
  54. }
  55. func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool {
  56. list, _ := v.copyMap[vid]
  57. return v.indicator(v.copyState(list))
  58. }
  59. func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) {
  60. list, _ := v.copyMap[vid]
  61. if list != nil {
  62. list.Set(dn)
  63. return
  64. }
  65. list = NewVolumeLocationList()
  66. list.Set(dn)
  67. v.copyMap[vid] = list
  68. }
  69. func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) {
  70. list, _ := v.copyMap[vid]
  71. if list != nil {
  72. list.Remove(dn)
  73. if list.Length() == 0 {
  74. delete(v.copyMap, vid)
  75. }
  76. }
  77. }
  78. func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
  79. if list == nil {
  80. return noCopies
  81. }
  82. if list.Length() < v.rp.GetCopyCount() {
  83. return insufficientCopies
  84. }
  85. return enoughCopies
  86. }
// mapping from volume to its locations, inverted from server to volume
//
// A VolumeLayout tracks one (replication, ttl, disk type) slice of the
// topology: which data nodes host each volume, which volumes are writable,
// and which are crowded / read-only / oversized.
type VolumeLayout struct {
	rp               *super_block.ReplicaPlacement
	ttl              *needle.TTL
	diskType         types.DiskType
	vid2location     map[needle.VolumeId]*VolumeLocationList
	writables        []needle.VolumeId // transient array of writable volume id
	crowded          map[needle.VolumeId]struct{}
	readonlyVolumes  *volumesBinaryState // readonly volumes
	oversizedVolumes *volumesBinaryState // oversized volumes
	volumeSizeLimit  uint64
	replicationAsMin bool
	accessLock       sync.RWMutex // guards the maps and writables slice
	// grow-request bookkeeping; NOTE(review): read/written without accessLock
	// by the *GrowRequest methods — confirm callers serialize access.
	growRequestCount int
	growRequestTime  time.Time
}
// VolumeLayoutStats aggregates capacity and usage across all volumes in a
// layout, as computed by (*VolumeLayout).Stats.
type VolumeLayoutStats struct {
	TotalSize uint64 // estimated total capacity in bytes
	UsedSize  uint64 // bytes currently used
	FileCount uint64 // number of stored files
}
  108. func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
  109. return &VolumeLayout{
  110. rp: rp,
  111. ttl: ttl,
  112. diskType: diskType,
  113. vid2location: make(map[needle.VolumeId]*VolumeLocationList),
  114. writables: *new([]needle.VolumeId),
  115. crowded: make(map[needle.VolumeId]struct{}),
  116. readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
  117. oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
  118. volumeSizeLimit: volumeSizeLimit,
  119. replicationAsMin: replicationAsMin,
  120. }
  121. }
  122. func (vl *VolumeLayout) String() string {
  123. vl.accessLock.RLock()
  124. defer vl.accessLock.RUnlock()
  125. return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
  126. }
// RegisterVolume records dn as a location of v, then re-evaluates the
// volume's read-only standing across all of its known locations. The
// oversized tracker is refreshed on every exit path via the defer (which runs
// while the lock is still held, since deferred calls run LIFO).
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()
	defer vl.rememberOversizedVolume(v, dn)
	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
	// NOTE: the loop variable dn below shadows the parameter; each iteration
	// inspects one already-known location of this volume.
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				// any read-only replica demotes the whole volume from writable
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes.Add(v.Id, dn)
				return
			} else {
				vl.readonlyVolumes.Remove(v.Id, dn)
			}
		} else {
			// lookup failed for this location: treat the volume as unsafe to write
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			vl.readonlyVolumes.Remove(v.Id, dn)
			return
		}
	}
}
  154. func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) {
  155. if vl.isOversized(v) {
  156. vl.oversizedVolumes.Add(v.Id, dn)
  157. } else {
  158. vl.oversizedVolumes.Remove(v.Id, dn)
  159. }
  160. }
  161. func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
  162. vl.accessLock.Lock()
  163. defer vl.accessLock.Unlock()
  164. // remove from vid2location map
  165. location, ok := vl.vid2location[v.Id]
  166. if !ok {
  167. return
  168. }
  169. if location.Remove(dn) {
  170. vl.readonlyVolumes.Remove(v.Id, dn)
  171. vl.oversizedVolumes.Remove(v.Id, dn)
  172. vl.ensureCorrectWritables(v.Id)
  173. if location.Length() == 0 {
  174. delete(vl.vid2location, v.Id)
  175. }
  176. }
  177. }
  178. func (vl *VolumeLayout) EnsureCorrectWritables(v *storage.VolumeInfo) {
  179. vl.accessLock.Lock()
  180. defer vl.accessLock.Unlock()
  181. vl.ensureCorrectWritables(v.Id)
  182. }
  183. func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) {
  184. if vl.enoughCopies(vid) && vl.isAllWritable(vid) {
  185. if !vl.oversizedVolumes.IsTrue(vid) {
  186. vl.setVolumeWritable(vid)
  187. }
  188. } else {
  189. vl.removeFromWritable(vid)
  190. }
  191. }
  192. func (vl *VolumeLayout) isAllWritable(vid needle.VolumeId) bool {
  193. for _, dn := range vl.vid2location[vid].list {
  194. if v, getError := dn.GetVolumesById(vid); getError == nil {
  195. if v.ReadOnly {
  196. return false
  197. }
  198. }
  199. }
  200. return true
  201. }
  202. func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
  203. return uint64(v.Size) >= vl.volumeSizeLimit
  204. }
  205. func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
  206. return !vl.isOversized(v) &&
  207. v.Version == needle.CurrentVersion &&
  208. !v.ReadOnly
  209. }
  210. func (vl *VolumeLayout) isEmpty() bool {
  211. vl.accessLock.RLock()
  212. defer vl.accessLock.RUnlock()
  213. return len(vl.vid2location) == 0
  214. }
  215. func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
  216. vl.accessLock.RLock()
  217. defer vl.accessLock.RUnlock()
  218. if location := vl.vid2location[vid]; location != nil {
  219. return location.list
  220. }
  221. return nil
  222. }
  223. func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
  224. vl.accessLock.RLock()
  225. defer vl.accessLock.RUnlock()
  226. for _, location := range vl.vid2location {
  227. nodes = append(nodes, location.list...)
  228. }
  229. return
  230. }
  231. func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
  232. vl.accessLock.RLock()
  233. defer vl.accessLock.RUnlock()
  234. lenWriters := len(vl.writables)
  235. if lenWriters <= 0 {
  236. //glog.V(0).Infoln("No more writable volumes!")
  237. return nil, 0, nil, errors.New("No more writable volumes!")
  238. }
  239. if option.DataCenter == "" && option.Rack == "" && option.DataNode == "" {
  240. vid := vl.writables[rand.Intn(lenWriters)]
  241. locationList := vl.vid2location[vid]
  242. if locationList != nil {
  243. return &vid, count, locationList, nil
  244. }
  245. return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
  246. }
  247. var vid needle.VolumeId
  248. var locationList *VolumeLocationList
  249. counter := 0
  250. for _, v := range vl.writables {
  251. volumeLocationList := vl.vid2location[v]
  252. for _, dn := range volumeLocationList.list {
  253. if option.DataCenter != "" && dn.GetDataCenter().Id() != NodeId(option.DataCenter) {
  254. continue
  255. }
  256. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  257. continue
  258. }
  259. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  260. continue
  261. }
  262. counter++
  263. if rand.Intn(counter) < 1 {
  264. vid, locationList = v, volumeLocationList.Copy()
  265. }
  266. }
  267. }
  268. return &vid, count, locationList, nil
  269. }
  270. func (vl *VolumeLayout) HasGrowRequest() bool {
  271. if vl.growRequestCount > 0 && vl.growRequestTime.Add(time.Minute).After(time.Now()) {
  272. return true
  273. }
  274. return false
  275. }
  276. func (vl *VolumeLayout) AddGrowRequest() {
  277. vl.growRequestTime = time.Now()
  278. vl.growRequestCount++
  279. }
  280. func (vl *VolumeLayout) DoneGrowRequest() {
  281. vl.growRequestTime = time.Unix(0, 0)
  282. vl.growRequestCount = 0
  283. }
  284. func (vl *VolumeLayout) ShouldGrowVolumes(option *VolumeGrowOption) bool {
  285. active, crowded := vl.GetActiveVolumeCount(option)
  286. //glog.V(0).Infof("active volume: %d, high usage volume: %d\n", active, high)
  287. return active <= crowded
  288. }
  289. func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (active, crowded int) {
  290. vl.accessLock.RLock()
  291. defer vl.accessLock.RUnlock()
  292. if option.DataCenter == "" {
  293. return len(vl.writables), len(vl.crowded)
  294. }
  295. for _, v := range vl.writables {
  296. for _, dn := range vl.vid2location[v].list {
  297. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  298. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  299. continue
  300. }
  301. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  302. continue
  303. }
  304. active++
  305. info, _ := dn.GetVolumesById(v)
  306. if float64(info.Size) > float64(vl.volumeSizeLimit)*option.Threshold() {
  307. crowded++
  308. }
  309. }
  310. }
  311. }
  312. return
  313. }
  314. func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
  315. toDeleteIndex := -1
  316. for k, id := range vl.writables {
  317. if id == vid {
  318. toDeleteIndex = k
  319. break
  320. }
  321. }
  322. if toDeleteIndex >= 0 {
  323. glog.V(0).Infoln("Volume", vid, "becomes unwritable")
  324. vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
  325. vl.removeFromCrowded(vid)
  326. return true
  327. }
  328. return false
  329. }
  330. func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
  331. for _, v := range vl.writables {
  332. if v == vid {
  333. return false
  334. }
  335. }
  336. glog.V(0).Infoln("Volume", vid, "becomes writable")
  337. vl.writables = append(vl.writables, vid)
  338. return true
  339. }
// SetVolumeUnavailable drops dn as a location of vid and, when the remaining
// replica count falls below the placement's required copy count, demotes the
// volume from the writable list. Returns true only when the volume was
// actually removed from writables.
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()
	if location, ok := vl.vid2location[vid]; ok {
		if location.Remove(dn) {
			vl.readonlyVolumes.Remove(vid, dn)
			vl.oversizedVolumes.Remove(vid, dn)
			// Only demote once fewer replicas remain than required; losing one
			// of several surplus replicas keeps the volume writable.
			if location.Length() < vl.rp.GetCopyCount() {
				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
				return vl.removeFromWritable(vid)
			}
		}
	}
	return false
}
// SetVolumeAvailable records dn as a live location of vid and promotes the
// volume to writable when it is not read-only and has enough replicas.
// Returns true only when the volume newly became writable.
// NOTE(review): assumes vid is already registered — vl.vid2location[vid] is
// dereferenced without a presence check; confirm callers guarantee this.
func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()
	vInfo, err := dn.GetVolumesById(vid)
	if err != nil {
		// node cannot report the volume; do not promote
		return false
	}
	vl.vid2location[vid].Set(dn)
	if vInfo.ReadOnly || isReadOnly {
		return false
	}
	if vl.enoughCopies(vid) {
		return vl.setVolumeWritable(vid)
	}
	return false
}
  371. func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
  372. locations := vl.vid2location[vid].Length()
  373. desired := vl.rp.GetCopyCount()
  374. return locations == desired || (vl.replicationAsMin && locations > desired)
  375. }
  376. func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
  377. vl.accessLock.Lock()
  378. defer vl.accessLock.Unlock()
  379. // glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
  380. return vl.removeFromWritable(vid)
  381. }
// removeFromCrowded clears vid's crowded mark; deleting an absent key is a
// no-op. Caller must hold accessLock (write side).
func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) {
	delete(vl.crowded, vid)
}
  385. func (vl *VolumeLayout) setVolumeCrowded(vid needle.VolumeId) {
  386. if _, ok := vl.crowded[vid]; !ok {
  387. vl.crowded[vid] = struct{}{}
  388. glog.V(0).Infoln("Volume", vid, "becomes crowded")
  389. }
  390. }
  391. func (vl *VolumeLayout) SetVolumeCrowded(vid needle.VolumeId) {
  392. // since delete is guarded by accessLock.Lock(),
  393. // and is always called in sequential order,
  394. // RLock() should be safe enough
  395. vl.accessLock.RLock()
  396. defer vl.accessLock.RUnlock()
  397. for _, v := range vl.writables {
  398. if v == vid {
  399. vl.setVolumeCrowded(vid)
  400. break
  401. }
  402. }
  403. }
  404. func (vl *VolumeLayout) ToMap() map[string]interface{} {
  405. m := make(map[string]interface{})
  406. m["replication"] = vl.rp.String()
  407. m["ttl"] = vl.ttl.String()
  408. m["writables"] = vl.writables
  409. //m["locations"] = vl.vid2location
  410. return m
  411. }
  412. func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
  413. vl.accessLock.RLock()
  414. defer vl.accessLock.RUnlock()
  415. ret := &VolumeLayoutStats{}
  416. freshThreshold := time.Now().Unix() - 60
  417. for vid, vll := range vl.vid2location {
  418. size, fileCount := vll.Stats(vid, freshThreshold)
  419. ret.FileCount += uint64(fileCount)
  420. ret.UsedSize += size
  421. if vl.readonlyVolumes.IsTrue(vid) {
  422. ret.TotalSize += size
  423. } else {
  424. ret.TotalSize += vl.volumeSizeLimit * uint64(vll.Length())
  425. }
  426. }
  427. return ret
  428. }