volume_layout.go 8.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326
  1. package topology
  2. import (
  3. "errors"
  4. "fmt"
  5. "math/rand"
  6. "sync"
  7. "time"
  8. "github.com/chrislusf/seaweedfs/weed/glog"
  9. "github.com/chrislusf/seaweedfs/weed/storage"
  10. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  11. "github.com/chrislusf/seaweedfs/weed/storage/super_block"
  12. )
// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
	rp               *super_block.ReplicaPlacement           // replication strategy shared by every volume in this layout
	ttl              *needle.TTL                             // time-to-live shared by every volume in this layout
	vid2location     map[needle.VolumeId]*VolumeLocationList // which data nodes currently hold each volume
	writables        []needle.VolumeId                       // transient array of writable volume id
	readonlyVolumes  map[needle.VolumeId]bool                // transient set of readonly volumes
	oversizedVolumes map[needle.VolumeId]bool                // set of oversized volumes
	volumeSizeLimit  uint64                                  // a volume at or above this size is considered oversized (see isOversized)
	replicationAsMin bool                                    // if true, having MORE replicas than rp requires still counts as enough copies
	accessLock       sync.RWMutex                            // guards all of the fields above
}
// VolumeLayoutStats aggregates sizes and file counts across all volumes in a layout.
// Populated by VolumeLayout.Stats.
type VolumeLayoutStats struct {
	TotalSize uint64 // estimated capacity: actual size for read-only volumes, volumeSizeLimit otherwise
	UsedSize  uint64 // bytes currently stored
	FileCount uint64 // number of files currently stored
}
  30. func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
  31. return &VolumeLayout{
  32. rp: rp,
  33. ttl: ttl,
  34. vid2location: make(map[needle.VolumeId]*VolumeLocationList),
  35. writables: *new([]needle.VolumeId),
  36. readonlyVolumes: make(map[needle.VolumeId]bool),
  37. oversizedVolumes: make(map[needle.VolumeId]bool),
  38. volumeSizeLimit: volumeSizeLimit,
  39. replicationAsMin: replicationAsMin,
  40. }
  41. }
  42. func (vl *VolumeLayout) String() string {
  43. return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
  44. }
// RegisterVolume records that data node dn holds volume v, then re-evaluates
// the volume's writability. Note the defers run in LIFO order:
// rememberOversizedVolume executes before ensureCorrectWritables, so the
// writability check sees an up-to-date oversized flag.
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()
	defer vl.ensureCorrectWritables(v)
	defer vl.rememberOversizedVolume(v)
	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
	// Walk every known replica: if any replica reports read-only, or its info
	// cannot be fetched, the whole volume is pulled from the writable list.
	// The loop variable deliberately shadows the dn parameter.
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes[v.Id] = true
				return
			} else {
				delete(vl.readonlyVolumes, v.Id)
			}
		} else {
			// replica info unavailable: treat conservatively as unwritable,
			// but do not mark it read-only since its state is unknown
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			delete(vl.readonlyVolumes, v.Id)
			return
		}
	}
}
  73. func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo) {
  74. if vl.isOversized(v) {
  75. vl.oversizedVolumes[v.Id] = true
  76. }
  77. }
  78. func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
  79. vl.accessLock.Lock()
  80. defer vl.accessLock.Unlock()
  81. // remove from vid2location map
  82. location, ok := vl.vid2location[v.Id]
  83. if !ok {
  84. return
  85. }
  86. if location.Remove(dn) {
  87. vl.ensureCorrectWritables(v)
  88. if location.Length() == 0 {
  89. delete(vl.vid2location, v.Id)
  90. }
  91. }
  92. }
  93. func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) {
  94. if vl.enoughCopies(v.Id) && vl.isWritable(v) {
  95. if _, ok := vl.oversizedVolumes[v.Id]; !ok {
  96. vl.setVolumeWritable(v.Id)
  97. }
  98. } else {
  99. vl.removeFromWritable(v.Id)
  100. }
  101. }
  102. func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
  103. return uint64(v.Size) >= vl.volumeSizeLimit
  104. }
  105. func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
  106. return !vl.isOversized(v) &&
  107. v.Version == needle.CurrentVersion &&
  108. !v.ReadOnly
  109. }
  110. func (vl *VolumeLayout) isEmpty() bool {
  111. vl.accessLock.RLock()
  112. defer vl.accessLock.RUnlock()
  113. return len(vl.vid2location) == 0
  114. }
  115. func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
  116. vl.accessLock.RLock()
  117. defer vl.accessLock.RUnlock()
  118. if location := vl.vid2location[vid]; location != nil {
  119. return location.list
  120. }
  121. return nil
  122. }
  123. func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
  124. vl.accessLock.RLock()
  125. defer vl.accessLock.RUnlock()
  126. for _, location := range vl.vid2location {
  127. nodes = append(nodes, location.list...)
  128. }
  129. return
  130. }
  131. func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
  132. vl.accessLock.RLock()
  133. defer vl.accessLock.RUnlock()
  134. lenWriters := len(vl.writables)
  135. if lenWriters <= 0 {
  136. glog.V(0).Infoln("No more writable volumes!")
  137. return nil, 0, nil, errors.New("No more writable volumes!")
  138. }
  139. if option.DataCenter == "" {
  140. vid := vl.writables[rand.Intn(lenWriters)]
  141. locationList := vl.vid2location[vid]
  142. if locationList != nil {
  143. return &vid, count, locationList, nil
  144. }
  145. return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
  146. }
  147. var vid needle.VolumeId
  148. var locationList *VolumeLocationList
  149. counter := 0
  150. for _, v := range vl.writables {
  151. volumeLocationList := vl.vid2location[v]
  152. for _, dn := range volumeLocationList.list {
  153. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  154. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  155. continue
  156. }
  157. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  158. continue
  159. }
  160. counter++
  161. if rand.Intn(counter) < 1 {
  162. vid, locationList = v, volumeLocationList
  163. }
  164. }
  165. }
  166. }
  167. return &vid, count, locationList, nil
  168. }
  169. func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
  170. vl.accessLock.RLock()
  171. defer vl.accessLock.RUnlock()
  172. if option.DataCenter == "" {
  173. return len(vl.writables)
  174. }
  175. counter := 0
  176. for _, v := range vl.writables {
  177. for _, dn := range vl.vid2location[v].list {
  178. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  179. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  180. continue
  181. }
  182. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  183. continue
  184. }
  185. counter++
  186. }
  187. }
  188. }
  189. return counter
  190. }
  191. func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
  192. toDeleteIndex := -1
  193. for k, id := range vl.writables {
  194. if id == vid {
  195. toDeleteIndex = k
  196. break
  197. }
  198. }
  199. if toDeleteIndex >= 0 {
  200. glog.V(0).Infoln("Volume", vid, "becomes unwritable")
  201. vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
  202. return true
  203. }
  204. return false
  205. }
  206. func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
  207. for _, v := range vl.writables {
  208. if v == vid {
  209. return false
  210. }
  211. }
  212. glog.V(0).Infoln("Volume", vid, "becomes writable")
  213. vl.writables = append(vl.writables, vid)
  214. return true
  215. }
  216. func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
  217. vl.accessLock.Lock()
  218. defer vl.accessLock.Unlock()
  219. if location, ok := vl.vid2location[vid]; ok {
  220. if location.Remove(dn) {
  221. if location.Length() < vl.rp.GetCopyCount() {
  222. glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
  223. return vl.removeFromWritable(vid)
  224. }
  225. }
  226. }
  227. return false
  228. }
  229. func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
  230. vl.accessLock.Lock()
  231. defer vl.accessLock.Unlock()
  232. vInfo, err := dn.GetVolumesById(vid)
  233. if err != nil {
  234. return false
  235. }
  236. vl.vid2location[vid].Set(dn)
  237. if vInfo.ReadOnly || isReadOnly {
  238. return false
  239. }
  240. if vl.enoughCopies(vid) {
  241. return vl.setVolumeWritable(vid)
  242. }
  243. return false
  244. }
  245. func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
  246. locations := vl.vid2location[vid].Length()
  247. desired := vl.rp.GetCopyCount()
  248. return locations == desired || (vl.replicationAsMin && locations > desired)
  249. }
  250. func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
  251. vl.accessLock.Lock()
  252. defer vl.accessLock.Unlock()
  253. // glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
  254. return vl.removeFromWritable(vid)
  255. }
// ToMap exposes the layout's replication, ttl, and writable volume ids as a
// generic map, e.g. for status/JSON output.
// NOTE(review): unlike the other readers in this file, this method does not
// take accessLock, so it may race with concurrent mutation of vl.writables —
// confirm whether callers already hold the lock before adding one here.
func (vl *VolumeLayout) ToMap() map[string]interface{} {
	m := make(map[string]interface{})
	m["replication"] = vl.rp.String()
	m["ttl"] = vl.ttl.String()
	m["writables"] = vl.writables
	//m["locations"] = vl.vid2location
	return m
}
  264. func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
  265. vl.accessLock.RLock()
  266. defer vl.accessLock.RUnlock()
  267. ret := &VolumeLayoutStats{}
  268. freshThreshold := time.Now().Unix() - 60
  269. for vid, vll := range vl.vid2location {
  270. size, fileCount := vll.Stats(vid, freshThreshold)
  271. ret.FileCount += uint64(fileCount)
  272. ret.UsedSize += size
  273. if vl.readonlyVolumes[vid] {
  274. ret.TotalSize += size
  275. } else {
  276. ret.TotalSize += vl.volumeSizeLimit
  277. }
  278. }
  279. return ret
  280. }