topology.go

package topology

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	backoff "github.com/cenkalti/backoff/v4"
	hashicorpRaft "github.com/hashicorp/raft"
	"github.com/seaweedfs/raft"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/sequence"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
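
// Topology is the root of the master's in-memory view of the cluster. It
// embeds NodeImpl (data centers -> racks -> data nodes), tracks collections
// and EC shard locations, and holds whichever raft implementation (goraft
// or hashicorp/raft) this master is running under.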
type Topology struct {
	vacuumLockCounter int64
	NodeImpl

	collectionMap  *util.ConcurrentReadMap
	ecShardMap     map[needle.VolumeId]*EcShardLocations
	ecShardMapLock sync.RWMutex

	pulse int64

	volumeSizeLimit  uint64
	replicationAsMin bool
	isDisableVacuum  bool

	Sequence sequence.Sequencer

	chanFullVolumes    chan storage.VolumeInfo
	chanCrowdedVolumes chan storage.VolumeInfo

	Configuration *Configuration

	RaftServer           raft.Server
	RaftServerAccessLock sync.RWMutex
	HashicorpRaft        *hashicorpRaft.Raft

	UuidAccessLock sync.RWMutex
	UuidMap        map[string][]string
}
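
// NewTopology constructs an empty topology root node. pulse is the
// heartbeat interval in seconds; volumeSizeLimit caps volume size in bytes.
// A typical construction looks like the sketch below (the values are
// illustrative, not defaults):
//
//	t := NewTopology("topo", sequence.NewMemorySequencer(), 30*1024*1024*1024, 5, false)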
func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology {
	t := &Topology{}
	t.id = NodeId(id)
	t.nodeType = "Topology"
	t.NodeImpl.value = t
	t.diskUsages = newDiskUsages()
	t.children = make(map[NodeId]Node)
	t.collectionMap = util.NewConcurrentReadMap()
	t.ecShardMap = make(map[needle.VolumeId]*EcShardLocations)
	t.pulse = int64(pulse)
	t.volumeSizeLimit = volumeSizeLimit
	t.replicationAsMin = replicationAsMin

	t.Sequence = seq

	t.chanFullVolumes = make(chan storage.VolumeInfo)
	t.chanCrowdedVolumes = make(chan storage.VolumeInfo)

	t.Configuration = &Configuration{}

	return t
}
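
// IsChildLocked reports whether the topology itself or any data center,
// rack, or data node beneath it currently holds a lock, returning an error
// naming the first locked node found.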
func (t *Topology) IsChildLocked() (bool, error) {
	if t.IsLocked() {
		return true, errors.New("topology is locked")
	}
	for _, dcNode := range t.Children() {
		if dcNode.IsLocked() {
			return true, fmt.Errorf("topology child %s is locked", dcNode.String())
		}
		for _, rackNode := range dcNode.Children() {
			if rackNode.IsLocked() {
				return true, fmt.Errorf("dc %s child %s is locked", dcNode.String(), rackNode.String())
			}
			for _, dataNode := range rackNode.Children() {
				if dataNode.IsLocked() {
					return true, fmt.Errorf("rack %s child %s is locked", rackNode.String(), dataNode.Id())
				}
			}
		}
	}
	return false, nil
}
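
// IsLeader reports whether this master currently holds raft leadership,
// consulting whichever raft implementation is active.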
func (t *Topology) IsLeader() bool {
	t.RaftServerAccessLock.RLock()
	defer t.RaftServerAccessLock.RUnlock()

	if t.RaftServer != nil {
		if t.RaftServer.State() == raft.Leader {
			return true
		}
		if leader, err := t.Leader(); err == nil {
			if pb.ServerAddress(t.RaftServer.Name()) == leader {
				return true
			}
		}
	} else if t.HashicorpRaft != nil {
		if t.HashicorpRaft.State() == hashicorpRaft.Leader {
			return true
		}
	}
	return false
}
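
// Leader waits for a raft leader to be elected, polling MaybeLeader with
// exponential backoff (100ms initial interval, at most 20s elapsed). If no
// leader appears within that window, it returns an empty address and the
// "leader not selected yet" error.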
func (t *Topology) Leader() (l pb.ServerAddress, err error) {
	exponentialBackoff := backoff.NewExponentialBackOff()
	exponentialBackoff.InitialInterval = 100 * time.Millisecond
	exponentialBackoff.MaxElapsedTime = 20 * time.Second
	leaderNotSelected := errors.New("leader not selected yet")
	l, err = backoff.RetryWithData(
		func() (l pb.ServerAddress, err error) {
			l, err = t.MaybeLeader()
			if err == nil && l == "" {
				err = leaderNotSelected
			}
			return l, err
		},
		exponentialBackoff)
	if err == leaderNotSelected {
		l = ""
	}
	return l, err
}
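
// MaybeLeader returns the leader address as currently known to the local
// raft instance without waiting; the address can be empty while an election
// is still in progress.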
func (t *Topology) MaybeLeader() (l pb.ServerAddress, err error) {
	t.RaftServerAccessLock.RLock()
	defer t.RaftServerAccessLock.RUnlock()

	if t.RaftServer != nil {
		l = pb.ServerAddress(t.RaftServer.Leader())
	} else if t.HashicorpRaft != nil {
		l = pb.ServerAddress(t.HashicorpRaft.Leader())
	} else {
		err = errors.New("raft server not ready yet")
	}
	return
}
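
// Lookup returns the data nodes holding volume vid. An empty collection
// name scans every collection; erasure-coded volumes are resolved through
// the EC shard map as a fallback.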
func (t *Topology) Lookup(collection string, vid needle.VolumeId) (dataNodes []*DataNode) {
	// an empty collection means a linear scan over all collections,
	// which may be slow when there are many of them
	if collection == "" {
		for _, c := range t.collectionMap.Items() {
			if list := c.(*Collection).Lookup(vid); list != nil {
				return list
			}
		}
	} else {
		if c, ok := t.collectionMap.Find(collection); ok {
			return c.(*Collection).Lookup(vid)
		}
	}

	if locations, found := t.LookupEcShards(vid); found {
		for _, loc := range locations.Locations {
			dataNodes = append(dataNodes, loc...)
		}
		return dataNodes
	}

	return nil
}
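
// NextVolumeId reserves the next volume id by committing a
// MaxVolumeIdCommand through raft, so all masters agree on the high-water
// mark before the id is handed out.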
func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
	vid := t.GetMaxVolumeId()
	next := vid.Next()

	t.RaftServerAccessLock.RLock()
	defer t.RaftServerAccessLock.RUnlock()

	if t.RaftServer != nil {
		if _, err := t.RaftServer.Do(NewMaxVolumeIdCommand(next)); err != nil {
			return 0, err
		}
	} else if t.HashicorpRaft != nil {
		b, err := json.Marshal(NewMaxVolumeIdCommand(next))
		if err != nil {
			return 0, fmt.Errorf("failed to marshal NewMaxVolumeIdCommand: %+v", err)
		}
		if future := t.HashicorpRaft.Apply(b, time.Second); future.Error() != nil {
			return 0, future.Error()
		}
	}
	return next, nil
}
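
// PickForWrite selects a writable volume from volumeLayout and mints a file
// id from the volume id, the sequencer's next value, and a random cookie.
// shouldGrow tells the caller that the layout wants more writable volumes.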
func (t *Topology) PickForWrite(requestedCount uint64, option *VolumeGrowOption, volumeLayout *VolumeLayout) (fileId string, count uint64, volumeLocationList *VolumeLocationList, shouldGrow bool, err error) {
	var vid needle.VolumeId
	vid, count, volumeLocationList, shouldGrow, err = volumeLayout.PickForWrite(requestedCount, option)
	if err != nil {
		return "", 0, nil, shouldGrow, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
	}
	if volumeLocationList.Length() == 0 {
		return "", 0, nil, shouldGrow, fmt.Errorf("no writable volumes available for collection:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
	}
	nextFileId := t.Sequence.NextFileId(requestedCount)
	fileId = needle.NewFileId(vid, nextFileId, rand.Uint32()).String()
	return fileId, count, volumeLocationList, shouldGrow, nil
}
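
// GetVolumeLayout returns the layout for the given collection, replica
// placement, TTL, and disk type, creating the collection entry on first use.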
func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout {
	return t.collectionMap.Get(collectionName, func() interface{} {
		return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
	}).(*Collection).GetOrCreateVolumeLayout(rp, ttl, diskType)
}
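
// ListCollections returns the distinct collection names known to the
// topology. Note that collections backed by normal volumes are always
// included (includeNormalVolumes is not consulted here); EC-only
// collections are added when includeEcVolumes is set.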
func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) (ret []string) {
	mapOfCollections := make(map[string]bool)
	for _, c := range t.collectionMap.Items() {
		mapOfCollections[c.(*Collection).Name] = true
	}

	if includeEcVolumes {
		t.ecShardMapLock.RLock()
		for _, ecVolumeLocation := range t.ecShardMap {
			mapOfCollections[ecVolumeLocation.Collection] = true
		}
		t.ecShardMapLock.RUnlock()
	}

	for k := range mapOfCollections {
		ret = append(ret, k)
	}
	return ret
}
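
// FindCollection looks up a collection by name, reporting whether it exists.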
func (t *Topology) FindCollection(collectionName string) (*Collection, bool) {
	c, hasCollection := t.collectionMap.Find(collectionName)
	if !hasCollection {
		return nil, false
	}
	return c.(*Collection), hasCollection
}

func (t *Topology) DeleteCollection(collectionName string) {
	t.collectionMap.Delete(collectionName)
}
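
// DeleteLayout removes one volume layout from a collection and drops the
// collection itself once its last layout is gone.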
func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) {
	collection, found := t.FindCollection(collectionName)
	if !found {
		return
	}
	collection.DeleteVolumeLayout(rp, ttl, diskType)
	if len(collection.storageType2VolumeLayout.Items()) == 0 {
		t.DeleteCollection(collectionName)
	}
}
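
// RegisterVolumeLayout adds a volume to its layout and re-evaluates whether
// the volume should be writable.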
func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
	diskType := types.ToDiskType(v.DiskType)
	vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
	vl.RegisterVolume(&v, dn)
	vl.EnsureCorrectWritables(&v)
}
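
// UnRegisterVolumeLayout removes a volume from its layout, resets the
// replica placement mismatch gauge for replicated volumes, and deletes the
// layout once it is empty.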
func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
	glog.Infof("removing volume info: %+v from %v", v, dn.id)
	if v.ReplicaPlacement.GetCopyCount() > 1 {
		stats.MasterReplicaPlacementMismatch.WithLabelValues(v.Collection, v.Id.String()).Set(0)
	}
	diskType := types.ToDiskType(v.DiskType)
	volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
	volumeLayout.UnRegisterVolume(&v, dn)
	if volumeLayout.isEmpty() {
		t.DeleteLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
	}
}
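
// GetOrCreateDataCenter returns the existing data center with the given
// name, or links a new one under the topology root.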
func (t *Topology) GetOrCreateDataCenter(dcName string) *DataCenter {
	t.Lock()
	defer t.Unlock()
	for _, c := range t.children {
		dc := c.(*DataCenter)
		if string(dc.Id()) == dcName {
			return dc
		}
	}
	dc := NewDataCenter(dcName)
	t.doLinkChildNode(dc)
	return dc
}
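
// SyncDataNodeRegistration reconciles a data node's full volume report with
// the topology: new volumes are registered, missing ones unregistered, and
// changed ones have their writability re-evaluated.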
func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformationMessage, dn *DataNode) (newVolumes, deletedVolumes []storage.VolumeInfo) {
	// convert into the in-memory struct storage.VolumeInfo
	var volumeInfos []storage.VolumeInfo
	for _, v := range volumes {
		if vi, err := storage.NewVolumeInfo(v); err == nil {
			volumeInfos = append(volumeInfos, vi)
		} else {
			glog.V(0).Infof("Failed to convert joined volume information: %v", err)
		}
	}
	// find out the delta volumes
	var changedVolumes []storage.VolumeInfo
	newVolumes, deletedVolumes, changedVolumes = dn.UpdateVolumes(volumeInfos)
	for _, v := range newVolumes {
		t.RegisterVolumeLayout(v, dn)
	}
	for _, v := range deletedVolumes {
		t.UnRegisterVolumeLayout(v, dn)
	}
	for _, v := range changedVolumes {
		diskType := types.ToDiskType(v.DiskType)
		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
		vl.EnsureCorrectWritables(&v)
	}
	return
}
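
// IncrementalSyncDataNodeRegistration applies a heartbeat delta, registering
// newly reported volumes and unregistering deleted ones without rescanning
// the node's full volume list.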
func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolumes []*master_pb.VolumeShortInformationMessage, dn *DataNode) {
	var newVis, oldVis []storage.VolumeInfo
	for _, v := range newVolumes {
		vi, err := storage.NewVolumeInfoFromShort(v)
		if err != nil {
			glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
			continue
		}
		newVis = append(newVis, vi)
	}
	for _, v := range deletedVolumes {
		vi, err := storage.NewVolumeInfoFromShort(v)
		if err != nil {
			glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err)
			continue
		}
		oldVis = append(oldVis, vi)
	}
	dn.DeltaUpdateVolumes(newVis, oldVis)

	for _, vi := range newVis {
		t.RegisterVolumeLayout(vi, dn)
	}
	for _, vi := range oldVis {
		t.UnRegisterVolumeLayout(vi, dn)
	}
}
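
// DataNodeRegistration links a data node under its data center and rack,
// creating either as needed; a node that already has a parent is left alone.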
func (t *Topology) DataNodeRegistration(dcName, rackName string, dn *DataNode) {
	if dn.Parent() != nil {
		return
	}
	// registration to topo
	dc := t.GetOrCreateDataCenter(dcName)
	rack := dc.GetOrCreateRack(rackName)
	rack.LinkChildNode(dn)
	glog.Infof("[%s] reLink To topo", dn.Id())
}
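
// DisableVacuum pauses background volume vacuuming; EnableVacuum resumes it.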
func (t *Topology) DisableVacuum() {
	glog.V(0).Infof("DisableVacuum")
	t.isDisableVacuum = true
}

func (t *Topology) EnableVacuum() {
	glog.V(0).Infof("EnableVacuum")
	t.isDisableVacuum = false
}