data_node.go

package topology

import (
    "fmt"
    "sync/atomic"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
    "github.com/seaweedfs/seaweedfs/weed/storage"
    "github.com/seaweedfs/seaweedfs/weed/storage/needle"
    "github.com/seaweedfs/seaweedfs/weed/storage/types"
    "github.com/seaweedfs/seaweedfs/weed/util"
)

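// DataNode represents one volume server in the topology tree; its children are
// Disk nodes keyed by disk type.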
type DataNode struct {
    NodeImpl
    Ip            string
    Port          int
    GrpcPort      int
    PublicUrl     string
    LastSeen      int64 // unix time in seconds
    Counter       int   // handles the race condition where the previous dataNode was not yet dead
    IsTerminating bool
}

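// NewDataNode creates a DataNode with the given id and initializes its disk
// usages and child map.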
func NewDataNode(id string) *DataNode {
    dn := &DataNode{}
    dn.id = NodeId(id)
    dn.nodeType = "DataNode"
    dn.diskUsages = newDiskUsages()
    dn.children = make(map[NodeId]Node)
    dn.NodeImpl.value = dn
    return dn
}

func (dn *DataNode) String() string {
    dn.RLock()
    defer dn.RUnlock()
    return fmt.Sprintf("Node:%s, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.Ip, dn.Port, dn.PublicUrl)
}

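// AddOrUpdateVolume registers v on the disk matching its disk type, under the
// node's write lock, and reports whether the volume is new and whether its
// read-only state changed.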
func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
    dn.Lock()
    defer dn.Unlock()
    return dn.doAddOrUpdateVolume(v)
}

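// getOrCreateDisk returns the child Disk node for diskType, creating and
// linking a new one if it does not exist yet.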
func (dn *DataNode) getOrCreateDisk(diskType string) *Disk {
    c, found := dn.children[NodeId(diskType)]
    if !found {
        c = NewDisk(diskType)
        dn.doLinkChildNode(c)
    }
    disk := c.(*Disk)
    return disk
}

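// doAddOrUpdateVolume adds or updates v on its disk; callers are expected to
// hold the node's lock.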
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChanged bool) {
    disk := dn.getOrCreateDisk(v.DiskType)
    return disk.AddOrUpdateVolume(v)
}

// UpdateVolumes detects new, deleted, and changed volumes on a volume server.
// It is used by the master to notify master clients of these changes.
func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changedVolumes []storage.VolumeInfo) {
    actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo)
    for _, v := range actualVolumes {
        actualVolumeMap[v.Id] = v
    }

    dn.Lock()
    defer dn.Unlock()

    existingVolumes := dn.getVolumes()

    for _, v := range existingVolumes {
        vid := v.Id
        if _, ok := actualVolumeMap[vid]; !ok {
            glog.V(0).Infoln("Deleting volume id:", vid)
            disk := dn.getOrCreateDisk(v.DiskType)
            delete(disk.volumes, vid)
            deletedVolumes = append(deletedVolumes, v)

            deltaDiskUsages := newDiskUsages()
            deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType))
            deltaDiskUsage.volumeCount = -1
            if v.IsRemote() {
                deltaDiskUsage.remoteVolumeCount = -1
            }
            if !v.ReadOnly {
                deltaDiskUsage.activeVolumeCount = -1
            }
            disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
        }
    }

    for _, v := range actualVolumes {
        isNew, isChanged := dn.doAddOrUpdateVolume(v)
        if isNew {
            newVolumes = append(newVolumes, v)
        }
        if isChanged {
            changedVolumes = append(changedVolumes, v)
        }
    }
    return
}

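// Illustrative usage only (hypothetical caller names, not part of this file):
// a master-side caller would typically feed the volume list from a heartbeat
// into UpdateVolumes and forward the returned deltas to its clients, e.g.
//
//     newVols, deletedVols, changedVols := dn.UpdateVolumes(heartbeatVolumes)
//     broadcastToMasterClients(newVols, deletedVols, changedVols)

// DeltaUpdateVolumes applies an incremental change set: it removes the
// deletedVolumes from their disks (adjusting disk usage counters) and adds or
// updates the newVolumes.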
func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.VolumeInfo) {
    dn.Lock()
    defer dn.Unlock()

    for _, v := range deletedVolumes {
        disk := dn.getOrCreateDisk(v.DiskType)
        if _, found := disk.volumes[v.Id]; !found {
            continue
        }
        delete(disk.volumes, v.Id)

        deltaDiskUsages := newDiskUsages()
        deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType))
        deltaDiskUsage.volumeCount = -1
        if v.IsRemote() {
            deltaDiskUsage.remoteVolumeCount = -1
        }
        if !v.ReadOnly {
            deltaDiskUsage.activeVolumeCount = -1
        }
        disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
    }
    for _, v := range newVolumes {
        dn.doAddOrUpdateVolume(v)
    }
}

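// AdjustMaxVolumeCounts applies the per-disk-type max volume counts reported by
// the volume server, propagating only the difference from the current value and
// skipping disk types reported as zero.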
func (dn *DataNode) AdjustMaxVolumeCounts(maxVolumeCounts map[string]uint32) {
    deltaDiskUsages := newDiskUsages()
    for diskType, maxVolumeCount := range maxVolumeCounts {
        if maxVolumeCount == 0 {
            // the volume server may have set the max to zero
            continue
        }
        dt := types.ToDiskType(diskType)
        currentDiskUsage := dn.diskUsages.getOrCreateDisk(dt)
        currentDiskUsageMaxVolumeCount := atomic.LoadInt64(&currentDiskUsage.maxVolumeCount)
        if currentDiskUsageMaxVolumeCount == int64(maxVolumeCount) {
            continue
        }
        disk := dn.getOrCreateDisk(dt.String())
        deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(dt)
        deltaDiskUsage.maxVolumeCount = int64(maxVolumeCount) - currentDiskUsageMaxVolumeCount
        disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
    }
}

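// GetVolumes returns the volume info from all of the node's disks, taken under
// a read lock.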
func (dn *DataNode) GetVolumes() (ret []storage.VolumeInfo) {
    dn.RLock()
    for _, c := range dn.children {
        disk := c.(*Disk)
        ret = append(ret, disk.GetVolumes()...)
    }
    dn.RUnlock()
    return ret
}

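// GetVolumesById looks up a volume by id across all disks and returns an error
// if it is not found.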
func (dn *DataNode) GetVolumesById(id needle.VolumeId) (vInfo storage.VolumeInfo, err error) {
    dn.RLock()
    defer dn.RUnlock()
    found := false
    for _, c := range dn.children {
        disk := c.(*Disk)
        vInfo, found = disk.volumes[id]
        if found {
            break
        }
    }
    if found {
        return vInfo, nil
    }
    return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found")
}

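// GetDataCenter walks up the tree (data node -> rack -> data center) and
// returns nil if either ancestor is missing.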
func (dn *DataNode) GetDataCenter() *DataCenter {
    rack := dn.Parent()
    if rack == nil {
        return nil
    }
    dcNode := rack.Parent()
    if dcNode == nil {
        return nil
    }
    dcValue := dcNode.GetValue()
    return dcValue.(*DataCenter)
}

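// GetDataCenterId returns the id of the containing data center, or "" if it is
// not reachable.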
func (dn *DataNode) GetDataCenterId() string {
    if dc := dn.GetDataCenter(); dc != nil {
        return string(dc.Id())
    }
    return ""
}

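// GetRack returns the parent rack of this data node.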
func (dn *DataNode) GetRack() *Rack {
    return dn.Parent().(*NodeImpl).value.(*Rack)
}

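// GetTopology walks up to the root of the tree and returns it as the Topology.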
func (dn *DataNode) GetTopology() *Topology {
    p := dn.Parent()
    for p.Parent() != nil {
        p = p.Parent()
    }
    t := p.(*Topology)
    return t
}

func (dn *DataNode) MatchLocation(ip string, port int) bool {
    return dn.Ip == ip && dn.Port == port
}

func (dn *DataNode) Url() string {
    return util.JoinHostPort(dn.Ip, dn.Port)
}

func (dn *DataNode) ServerAddress() pb.ServerAddress {
    return pb.NewServerAddress(dn.Ip, dn.Port, dn.GrpcPort)
}

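// DataNodeInfo is a JSON-serializable summary of a data node, aggregated across
// its disks.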
type DataNodeInfo struct {
    Url       string `json:"Url"`
    PublicUrl string `json:"PublicUrl"`
    Volumes   int64  `json:"Volumes"`
    EcShards  int64  `json:"EcShards"`
    Max       int64  `json:"Max"`
    VolumeIds string `json:"VolumeIds"`
}

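// ToInfo aggregates the volume, EC shard, and max volume counts across all
// disks into a DataNodeInfo.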
func (dn *DataNode) ToInfo() (info DataNodeInfo) {
    info.Url = dn.Url()
    info.PublicUrl = dn.PublicUrl

    // aggregated volume info
    var volumeCount, ecShardCount, maxVolumeCount int64
    var volumeIds string
    for _, diskUsage := range dn.diskUsages.usages {
        volumeCount += diskUsage.volumeCount
        ecShardCount += diskUsage.ecShardCount
        maxVolumeCount += diskUsage.maxVolumeCount
    }

    for _, disk := range dn.Children() {
        d := disk.(*Disk)
        volumeIds += " " + d.GetVolumeIds()
    }

    info.Volumes = volumeCount
    info.EcShards = ecShardCount
    info.Max = maxVolumeCount
    info.VolumeIds = volumeIds
    return
}

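// ToDataNodeInfo converts this node and its disks into the master_pb protobuf
// representation.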
func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
    m := &master_pb.DataNodeInfo{
        Id:        string(dn.Id()),
        DiskInfos: make(map[string]*master_pb.DiskInfo),
        GrpcPort:  uint32(dn.GrpcPort),
    }
    for _, c := range dn.Children() {
        disk := c.(*Disk)
        m.DiskInfos[string(disk.Id())] = disk.ToDiskInfo()
    }
    return m
}

// GetVolumeIds returns the human readable volume ids, limited to at most 100 ids.
func (dn *DataNode) GetVolumeIds() string {
    dn.RLock()
    defer dn.RUnlock()
    existingVolumes := dn.getVolumes()
    ids := make([]int, 0, len(existingVolumes))

    for _, vi := range existingVolumes {
        ids = append(ids, int(vi.Id))
    }

    return util.HumanReadableIntsMax(100, ids...)
}

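// getVolumes collects volume info from all disks without locking; callers are
// expected to hold at least a read lock.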
func (dn *DataNode) getVolumes() []storage.VolumeInfo {
    var existingVolumes []storage.VolumeInfo
    for _, c := range dn.children {
        disk := c.(*Disk)
        existingVolumes = append(existingVolumes, disk.GetVolumes()...)
    }
    return existingVolumes
}