topology_ec.go

package topology

import (
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
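
// EcShardLocations records, for a single erasure-coded volume, which data nodes
// hold each of its shards. Locations is indexed by shard id, and each entry may
// list several data nodes holding a copy of that shard.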
type EcShardLocations struct {
	Collection string
	Locations  [erasure_coding.TotalShardsCount][]*DataNode
}
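
// SyncDataNodeEcShards reconciles the full list of EC shards reported by a data
// node with the topology's view, returning the shards newly seen on that node and
// the shards that are no longer present there.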
func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
	// convert the protobuf messages into in-memory erasure_coding.EcVolumeInfo structs
	var shards []*erasure_coding.EcVolumeInfo
	for _, shardInfo := range shardInfos {
		shards = append(shards,
			erasure_coding.NewEcVolumeInfo(
				shardInfo.DiskType,
				shardInfo.Collection,
				needle.VolumeId(shardInfo.Id),
				erasure_coding.ShardBits(shardInfo.EcIndexBits)))
	}

	// find out the delta shards on this data node
	newShards, deletedShards = dn.UpdateEcShards(shards)
	for _, v := range newShards {
		t.RegisterEcShards(v, dn)
	}
	for _, v := range deletedShards {
		t.UnRegisterEcShards(v, dn)
	}
	return
}
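
// IncrementalSyncDataNodeEcShards applies an incremental report from a data node:
// only the newly added and freshly deleted EC shards are sent, so the topology is
// updated without re-processing the node's full shard list.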
func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) {
	// convert the protobuf messages into in-memory erasure_coding.EcVolumeInfo structs
	var newShards, deletedShards []*erasure_coding.EcVolumeInfo
	for _, shardInfo := range newEcShards {
		newShards = append(newShards,
			erasure_coding.NewEcVolumeInfo(
				shardInfo.DiskType,
				shardInfo.Collection,
				needle.VolumeId(shardInfo.Id),
				erasure_coding.ShardBits(shardInfo.EcIndexBits)))
	}
	for _, shardInfo := range deletedEcShards {
		deletedShards = append(deletedShards,
			erasure_coding.NewEcVolumeInfo(
				shardInfo.DiskType,
				shardInfo.Collection,
				needle.VolumeId(shardInfo.Id),
				erasure_coding.ShardBits(shardInfo.EcIndexBits)))
	}

	dn.DeltaUpdateEcShards(newShards, deletedShards)

	for _, v := range newShards {
		t.RegisterEcShards(v, dn)
	}
	for _, v := range deletedShards {
		t.UnRegisterEcShards(v, dn)
	}
}
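
// NewEcShardLocations creates an empty shard-location table for one EC volume in
// the given collection.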
func NewEcShardLocations(collection string) *EcShardLocations {
	return &EcShardLocations{
		Collection: collection,
	}
}
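
// AddShard records that dn holds the given shard. It is idempotent: adding a data
// node that is already listed for the shard returns false and changes nothing.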
func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNode) (added bool) {
	dataNodes := loc.Locations[shardId]
	for _, n := range dataNodes {
		if n.Id() == dn.Id() {
			return false
		}
	}
	loc.Locations[shardId] = append(dataNodes, dn)
	return true
}
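
// DeleteShard removes dn from the location list of the given shard, returning
// false if the data node was not listed for that shard.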
func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *DataNode) (deleted bool) {
	dataNodes := loc.Locations[shardId]
	foundIndex := -1
	for index, n := range dataNodes {
		if n.Id() == dn.Id() {
			foundIndex = index
		}
	}
	if foundIndex < 0 {
		return false
	}
	loc.Locations[shardId] = append(dataNodes[:foundIndex], dataNodes[foundIndex+1:]...)
	return true
}
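
// RegisterEcShards adds all shards described by ecShardInfos to the topology-wide
// ecShardMap, creating the per-volume location table on first sight of the volume.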
func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
	t.ecShardMapLock.Lock()
	defer t.ecShardMapLock.Unlock()

	locations, found := t.ecShardMap[ecShardInfos.VolumeId]
	if !found {
		locations = NewEcShardLocations(ecShardInfos.Collection)
		t.ecShardMap[ecShardInfos.VolumeId] = locations
	}
	for _, shardId := range ecShardInfos.ShardIds() {
		locations.AddShard(shardId, dn)
	}
}
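
// UnRegisterEcShards removes the shards described by ecShardInfos for this data
// node from the topology-wide ecShardMap.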
func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
	glog.Infof("removing ec shard info:%+v", ecShardInfos)
	t.ecShardMapLock.Lock()
	defer t.ecShardMapLock.Unlock()

	locations, found := t.ecShardMap[ecShardInfos.VolumeId]
	if !found {
		return
	}
	for _, shardId := range ecShardInfos.ShardIds() {
		locations.DeleteShard(shardId, dn)
	}
}
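
// LookupEcShards returns the shard locations recorded for the given EC volume id.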
func (t *Topology) LookupEcShards(vid needle.VolumeId) (locations *EcShardLocations, found bool) {
	t.ecShardMapLock.RLock()
	defer t.ecShardMapLock.RUnlock()

	locations, found = t.ecShardMap[vid]
	return
}
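
// ListEcServersByCollection returns the addresses of all data nodes holding at
// least one EC shard of the given collection, without duplicates.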
func (t *Topology) ListEcServersByCollection(collection string) (dataNodes []pb.ServerAddress) {
	t.ecShardMapLock.RLock()
	defer t.ecShardMapLock.RUnlock()

	// deduplicate server addresses across all shards of all matching volumes
	dataNodeMap := make(map[pb.ServerAddress]bool)
	for _, ecVolumeLocation := range t.ecShardMap {
		if ecVolumeLocation.Collection == collection {
			for _, locations := range ecVolumeLocation.Locations {
				for _, loc := range locations {
					dataNodeMap[loc.ServerAddress()] = true
				}
			}
		}
	}
	for k := range dataNodeMap {
		dataNodes = append(dataNodes, k)
	}
	return
}
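
// DeleteEcCollection drops every EC volume belonging to the given collection from
// the topology's ecShardMap.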
func (t *Topology) DeleteEcCollection(collection string) {
	t.ecShardMapLock.Lock()
	defer t.ecShardMapLock.Unlock()

	// collect the matching volume ids first, then remove them from the map
	var vids []needle.VolumeId
	for vid, ecVolumeLocation := range t.ecShardMap {
		if ecVolumeLocation.Collection == collection {
			vids = append(vids, vid)
		}
	}
	for _, vid := range vids {
		delete(t.ecShardMap, vid)
	}
}
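
// Illustrative usage (a sketch, not part of the original file). A master-side
// heartbeat handler would typically call the helpers above roughly as below; the
// heartbeat field names (EcShards, NewEcShards, DeletedEcShards) and the handler
// context are assumptions for illustration only:
//
//	// full shard report from a volume server:
//	newShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn)
//	glog.V(4).Infof("ec sync: %d new, %d deleted", len(newShards), len(deletedShards))
//
//	// incremental shard report:
//	t.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)
//
//	// serving a read: find which servers hold shards of volume vid
//	if locations, found := t.LookupEcShards(vid); found {
//		for shardId, dataNodes := range locations.Locations {
//			glog.V(4).Infof("shard %d is on %d servers", shardId, len(dataNodes))
//		}
//	}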