// command_volume_balance_test.go
  1. package shell
  2. import (
  3. "testing"
  4. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  5. "github.com/stretchr/testify/assert"
  6. "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
  7. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  8. )
// testMoveCase describes one isGoodMove scenario: a volume's current
// replica layout plus a proposed source->target move, and whether the
// move should be judged acceptable under the given replica placement.
type testMoveCase struct {
	name           string           // human-readable description of the scenario
	replication    string           // replica placement string, e.g. "100", "010", "011"
	replicas       []*VolumeReplica // current locations of the volume's replicas
	sourceLocation location         // where the replica would be moved from
	targetLocation location         // where the replica would be moved to
	expected       bool             // expected isGoodMove verdict for this move
}
  17. func TestIsGoodMove(t *testing.T) {
  18. var tests = []testMoveCase{
  19. {
  20. name: "test 100 move to wrong data centers",
  21. replication: "100",
  22. replicas: []*VolumeReplica{
  23. {
  24. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  25. },
  26. {
  27. location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  28. },
  29. },
  30. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  31. targetLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  32. expected: false,
  33. },
  34. {
  35. name: "test 100 move to spread into proper data centers",
  36. replication: "100",
  37. replicas: []*VolumeReplica{
  38. {
  39. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  40. },
  41. {
  42. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  43. },
  44. },
  45. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  46. targetLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  47. expected: true,
  48. },
  49. {
  50. name: "test move to the same node",
  51. replication: "001",
  52. replicas: []*VolumeReplica{
  53. {
  54. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  55. },
  56. {
  57. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  58. },
  59. },
  60. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  61. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  62. expected: false,
  63. },
  64. {
  65. name: "test move to the same rack, but existing node",
  66. replication: "001",
  67. replicas: []*VolumeReplica{
  68. {
  69. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  70. },
  71. {
  72. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  73. },
  74. },
  75. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  76. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  77. expected: false,
  78. },
  79. {
  80. name: "test move to the same rack, a new node",
  81. replication: "001",
  82. replicas: []*VolumeReplica{
  83. {
  84. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  85. },
  86. {
  87. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  88. },
  89. },
  90. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  91. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  92. expected: true,
  93. },
  94. {
  95. name: "test 010 move all to the same rack",
  96. replication: "010",
  97. replicas: []*VolumeReplica{
  98. {
  99. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  100. },
  101. {
  102. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  103. },
  104. },
  105. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  106. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  107. expected: false,
  108. },
  109. {
  110. name: "test 010 move to spread racks",
  111. replication: "010",
  112. replicas: []*VolumeReplica{
  113. {
  114. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  115. },
  116. {
  117. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  118. },
  119. },
  120. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  121. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  122. expected: true,
  123. },
  124. {
  125. name: "test 010 move to spread racks",
  126. replication: "010",
  127. replicas: []*VolumeReplica{
  128. {
  129. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  130. },
  131. {
  132. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  133. },
  134. },
  135. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  136. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  137. expected: true,
  138. },
  139. {
  140. name: "test 011 switch which rack has more replicas",
  141. replication: "011",
  142. replicas: []*VolumeReplica{
  143. {
  144. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  145. },
  146. {
  147. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  148. },
  149. {
  150. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  151. },
  152. },
  153. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  154. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn4"}},
  155. expected: true,
  156. },
  157. {
  158. name: "test 011 move the lonely replica to another racks",
  159. replication: "011",
  160. replicas: []*VolumeReplica{
  161. {
  162. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  163. },
  164. {
  165. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  166. },
  167. {
  168. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  169. },
  170. },
  171. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  172. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn4"}},
  173. expected: true,
  174. },
  175. {
  176. name: "test 011 move to wrong racks",
  177. replication: "011",
  178. replicas: []*VolumeReplica{
  179. {
  180. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  181. },
  182. {
  183. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  184. },
  185. {
  186. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  187. },
  188. },
  189. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  190. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn4"}},
  191. expected: false,
  192. },
  193. {
  194. name: "test 011 move all to the same rack",
  195. replication: "011",
  196. replicas: []*VolumeReplica{
  197. {
  198. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  199. },
  200. {
  201. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  202. },
  203. {
  204. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  205. },
  206. },
  207. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  208. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
  209. expected: false,
  210. },
  211. }
  212. for _, tt := range tests {
  213. replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
  214. println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
  215. sourceNode := &Node{
  216. info: tt.sourceLocation.dataNode,
  217. dc: tt.sourceLocation.dc,
  218. rack: tt.sourceLocation.rack,
  219. }
  220. targetNode := &Node{
  221. info: tt.targetLocation.dataNode,
  222. dc: tt.targetLocation.dc,
  223. rack: tt.targetLocation.rack,
  224. }
  225. if isGoodMove(replicaPlacement, tt.replicas, sourceNode, targetNode) != tt.expected {
  226. t.Errorf("%s: expect %v move from %v to %s, replication:%v",
  227. tt.name, tt.expected, tt.sourceLocation, tt.targetLocation, tt.replication)
  228. }
  229. }
  230. }
  231. func TestBalance(t *testing.T) {
  232. topologyInfo := parseOutput(topoData)
  233. volumeServers := collectVolumeServersByDc(topologyInfo, "")
  234. volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
  235. diskTypes := collectVolumeDiskTypes(topologyInfo)
  236. if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, 30*1024*1024*1024, "ALL_COLLECTIONS", false); err != nil {
  237. t.Errorf("balance: %v", err)
  238. }
  239. }
  240. func TestVolumeSelection(t *testing.T) {
  241. topologyInfo := parseOutput(topoData)
  242. vids, err := collectVolumeIdsForTierChange(nil, topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0)
  243. if err != nil {
  244. t.Errorf("collectVolumeIdsForTierChange: %v", err)
  245. }
  246. assert.Equal(t, 378, len(vids))
  247. }