// command_volume_balance_test.go
package shell

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/stretchr/testify/assert"
)
// testMoveCase describes one isGoodMove scenario: a replication policy,
// the volume's current replica locations, and a proposed move whose
// goodness is asserted via expected.
type testMoveCase struct {
	name           string           // human-readable description of the case
	replication    string           // replication string parsed by super_block.NewReplicaPlacementFromString, e.g. "100"
	replicas       []*VolumeReplica // existing replica locations of the volume
	sourceLocation location         // location the replica would move from
	targetLocation location         // location the replica would move to
	expected       bool             // expected result of isGoodMove for this move
}
  17. func TestIsGoodMove(t *testing.T) {
  18. var tests = []testMoveCase{
  19. {
  20. name: "test 100 move to wrong data centers",
  21. replication: "100",
  22. replicas: []*VolumeReplica{
  23. {
  24. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  25. },
  26. {
  27. location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  28. },
  29. },
  30. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  31. targetLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  32. expected: false,
  33. },
  34. {
  35. name: "test 100 move to spread into proper data centers",
  36. replication: "100",
  37. replicas: []*VolumeReplica{
  38. {
  39. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  40. },
  41. {
  42. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  43. },
  44. },
  45. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  46. targetLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  47. expected: true,
  48. },
  49. {
  50. name: "test move to the same node",
  51. replication: "001",
  52. replicas: []*VolumeReplica{
  53. {
  54. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  55. },
  56. {
  57. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  58. },
  59. },
  60. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  61. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  62. expected: false,
  63. },
  64. {
  65. name: "test move to the same rack, but existing node",
  66. replication: "001",
  67. replicas: []*VolumeReplica{
  68. {
  69. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  70. },
  71. {
  72. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  73. },
  74. },
  75. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  76. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  77. expected: false,
  78. },
  79. {
  80. name: "test move to the same rack, a new node",
  81. replication: "001",
  82. replicas: []*VolumeReplica{
  83. {
  84. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  85. },
  86. {
  87. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  88. },
  89. },
  90. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  91. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  92. expected: true,
  93. },
  94. {
  95. name: "test 010 move all to the same rack",
  96. replication: "010",
  97. replicas: []*VolumeReplica{
  98. {
  99. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  100. },
  101. {
  102. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  103. },
  104. },
  105. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  106. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  107. expected: false,
  108. },
  109. {
  110. name: "test 010 move to spread racks",
  111. replication: "010",
  112. replicas: []*VolumeReplica{
  113. {
  114. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  115. },
  116. {
  117. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  118. },
  119. },
  120. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  121. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  122. expected: true,
  123. },
  124. {
  125. name: "test 010 move to spread racks",
  126. replication: "010",
  127. replicas: []*VolumeReplica{
  128. {
  129. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  130. },
  131. {
  132. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  133. },
  134. },
  135. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  136. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  137. expected: true,
  138. },
  139. }
  140. for _, tt := range tests {
  141. replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
  142. println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
  143. sourceNode := &Node{
  144. info: tt.sourceLocation.dataNode,
  145. dc: tt.sourceLocation.dc,
  146. rack: tt.sourceLocation.rack,
  147. }
  148. targetNode := &Node{
  149. info: tt.targetLocation.dataNode,
  150. dc: tt.targetLocation.dc,
  151. rack: tt.targetLocation.rack,
  152. }
  153. if isGoodMove(replicaPlacement, tt.replicas, sourceNode, targetNode) != tt.expected {
  154. t.Errorf("%s: expect %v move from %v to %s, replication:%v",
  155. tt.name, tt.expected, tt.sourceLocation, tt.targetLocation, tt.replication)
  156. }
  157. }
  158. }
  159. func TestBalance(t *testing.T) {
  160. topologyInfo := parseOutput(topoData)
  161. volumeServers := collectVolumeServersByDc(topologyInfo, "")
  162. volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
  163. diskTypes := collectVolumeDiskTypes(topologyInfo)
  164. if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, 30*1024*1024*1024, "ALL_COLLECTIONS", false); err != nil {
  165. t.Errorf("balance: %v", err)
  166. }
  167. }
  168. func TestVolumeSelection(t *testing.T) {
  169. topologyInfo := parseOutput(topoData)
  170. vids, err := collectVolumeIdsForTierChange(nil, topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0)
  171. if err != nil {
  172. t.Errorf("collectVolumeIdsForTierChange: %v", err)
  173. }
  174. assert.Equal(t, 378, len(vids))
  175. }