topology_event_handling.go

package topology

import (
    "math/rand/v2"
    "time"

    "google.golang.org/grpc"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/stats"
    "github.com/seaweedfs/seaweedfs/weed/storage"
    "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
    "github.com/seaweedfs/seaweedfs/weed/storage/types"
)
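
// StartRefreshWritableVolumes launches three background loops on the master:
// one that, while this node is leader, detects dead nodes and flags full or
// crowded volumes; one that periodically vacuums volumes whose garbage ratio
// exceeds garbageThreshold; and one that drains the full/crowded volume
// channels and applies the corresponding volume layout updates.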
func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, garbageThreshold float64, concurrentVacuumLimitPerVolumeServer int, growThreshold float64, preallocate int64) {
    go func() {
        for {
            if t.IsLeader() {
                freshThreshHold := time.Now().Unix() - 3*t.pulse // 3 times of sleep interval
                t.CollectDeadNodeAndFullVolumes(freshThreshHold, t.volumeSizeLimit, growThreshold)
            }
            time.Sleep(time.Duration(float32(t.pulse*1e3)*(1+rand.Float32())) * time.Millisecond)
        }
    }()
    go func(garbageThreshold float64) {
        for {
            if t.IsLeader() {
                if !t.isDisableVacuum {
                    t.Vacuum(grpcDialOption, garbageThreshold, concurrentVacuumLimitPerVolumeServer, 0, "", preallocate)
                }
            } else {
                stats.MasterReplicaPlacementMismatch.Reset()
            }
            time.Sleep(14*time.Minute + time.Duration(120*rand.Float32())*time.Second)
        }
    }(garbageThreshold)
    go func() {
        for {
            select {
            case fv := <-t.chanFullVolumes:
                t.SetVolumeCapacityFull(fv)
            case cv := <-t.chanCrowdedVolumes:
                t.SetVolumeCrowded(cv)
            }
        }
    }()
}
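
// SetVolumeCapacityFull marks a volume as full in its volume layout and, for a
// writable volume, decrements the active volume count on each disk that hosts
// a replica. It returns false if the volume was already marked full or has no
// known locations.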
func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
    diskType := types.ToDiskType(volumeInfo.DiskType)
    vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
    if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
        return false
    }

    vl.accessLock.RLock()
    defer vl.accessLock.RUnlock()

    vidLocations, found := vl.vid2location[volumeInfo.Id]
    if !found {
        return false
    }

    for _, dn := range vidLocations.list {
        if !volumeInfo.ReadOnly {
            disk := dn.getOrCreateDisk(volumeInfo.DiskType)
            deltaDiskUsages := newDiskUsages()
            deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(volumeInfo.DiskType))
            deltaDiskUsage.activeVolumeCount = -1
            disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
        }
    }
    return true
}
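
// SetVolumeCrowded records a volume as crowded in its volume layout, signaling
// that it is approaching the volume size limit.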
func (t *Topology) SetVolumeCrowded(volumeInfo storage.VolumeInfo) {
    diskType := types.ToDiskType(volumeInfo.DiskType)
    vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
    vl.SetVolumeCrowded(volumeInfo.Id)
}
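
// UnRegisterDataNode removes a disconnected volume server from the topology:
// its volumes are marked unavailable in their layouts, its EC shards are
// unregistered, its disk usage contribution is subtracted back up the tree,
// and the node is unlinked from its parent.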
func (t *Topology) UnRegisterDataNode(dn *DataNode) {
    dn.IsTerminating = true
    for _, v := range dn.GetVolumes() {
        glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
        diskType := types.ToDiskType(v.DiskType)
        vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
        vl.SetVolumeUnavailable(dn, v.Id)
    }

    // unregister ec shards when volume server disconnected
    for _, s := range dn.GetEcShards() {
        t.UnRegisterEcShards(s, dn)
    }

    negativeUsages := dn.GetDiskUsages().negative()
    dn.UpAdjustDiskUsageDelta(negativeUsages)
    dn.DeltaUpdateVolumes([]storage.VolumeInfo{}, dn.GetVolumes())
    dn.DeltaUpdateEcShards([]*erasure_coding.EcVolumeInfo{}, dn.GetEcShards())
    if dn.Parent() != nil {
        dn.Parent().UnlinkChildNode(dn.Id())
    }
}