// command_volume_list.go

package shell
import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"path/filepath"
	"sort"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"golang.org/x/exp/slices"
)
  12. func init() {
  13. Commands = append(Commands, &commandVolumeList{})
  14. }
// commandVolumeList lists all volumes in the cluster as a tree of
// dataCenter > rack > dataNode > volume. Its fields hold the flag values
// parsed in Do and consulted by the filter in isNotMatchDiskInfo.
type commandVolumeList struct {
	collectionPattern *string // wildcard pattern ('*' and '?') matched against a volume's collection
	readonly          *bool   // when true, only read-only volumes are listed
	volumeId          *uint64 // when > 0, only the volume with this id is listed
}
// Name returns the command name used to invoke this command from the shell.
func (c *commandVolumeList) Name() string {
	return "volume.list"
}
// Help returns the usage text shown by the shell's help facility.
func (c *commandVolumeList) Help() string {
	return `list all volumes
This command list all volumes as a tree of dataCenter > rack > dataNode > volume.
`
}
  28. func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
  29. volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
  30. verbosityLevel := volumeListCommand.Int("v", 5, "verbose mode: 0, 1, 2, 3, 4, 5")
  31. c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
  32. c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
  33. c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
  34. if err = volumeListCommand.Parse(args); err != nil {
  35. return nil
  36. }
  37. // collect topology information
  38. topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)
  39. if err != nil {
  40. return err
  41. }
  42. c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
  43. return nil
  44. }
  45. func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
  46. var buf bytes.Buffer
  47. for diskType, diskInfo := range diskInfos {
  48. if diskType == "" {
  49. diskType = "hdd"
  50. }
  51. fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
  52. }
  53. return buf.String()
  54. }
  55. func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
  56. var buf bytes.Buffer
  57. fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
  58. return buf.String()
  59. }
  60. func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
  61. output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
  62. slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
  63. return a.Id < b.Id
  64. })
  65. var s statistics
  66. for _, dc := range t.DataCenterInfos {
  67. s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
  68. }
  69. output(verbosityLevel >= 0, writer, "%+v \n", s)
  70. return s
  71. }
  72. func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
  73. output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
  74. var s statistics
  75. slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
  76. return a.Id < b.Id
  77. })
  78. for _, r := range t.RackInfos {
  79. s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
  80. }
  81. output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
  82. return s
  83. }
  84. func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
  85. output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
  86. var s statistics
  87. slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
  88. return a.Id < b.Id
  89. })
  90. for _, dn := range t.DataNodeInfos {
  91. s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
  92. }
  93. output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)
  94. return s
  95. }
  96. func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
  97. output(verbosityLevel >= 3, writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
  98. var s statistics
  99. for _, diskInfo := range t.DiskInfos {
  100. s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
  101. }
  102. output(verbosityLevel >= 3, writer, " DataNode %s %+v \n", t.Id, s)
  103. return s
  104. }
  105. func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
  106. if *c.readonly && !readOnly {
  107. return true
  108. }
  109. if *c.collectionPattern != "" {
  110. if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
  111. return true
  112. }
  113. }
  114. if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
  115. return true
  116. }
  117. return false
  118. }
  119. func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
  120. var s statistics
  121. diskType := t.Type
  122. if diskType == "" {
  123. diskType = "hdd"
  124. }
  125. output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
  126. slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) bool {
  127. return a.Id < b.Id
  128. })
  129. for _, vi := range t.VolumeInfos {
  130. if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
  131. continue
  132. }
  133. s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
  134. }
  135. for _, ecShardInfo := range t.EcShardInfos {
  136. if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
  137. continue
  138. }
  139. output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
  140. }
  141. output(verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)
  142. return s
  143. }
// writeVolumeInformationMessage prints a single volume at the highest
// verbosity level and converts it into per-volume statistics.
func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage, verbosityLevel int) statistics {
	output(verbosityLevel >= 5, writer, " volume %+v \n", t)
	return newStatistics(t)
}
  148. func output(condition bool, w io.Writer, format string, a ...interface{}) {
  149. if condition {
  150. fmt.Fprintf(w, format, a...)
  151. }
  152. }
// statistics aggregates per-volume counters while walking the topology tree.
// Its %+v rendering is used directly in the listing output.
type statistics struct {
	Size             uint64 // sum of VolumeInformationMessage.Size
	FileCount        uint64 // sum of VolumeInformationMessage.FileCount
	DeletedFileCount uint64 // sum of VolumeInformationMessage.DeleteCount
	DeletedBytes     uint64 // sum of VolumeInformationMessage.DeletedByteCount
}
  159. func newStatistics(t *master_pb.VolumeInformationMessage) statistics {
  160. return statistics{
  161. Size: t.Size,
  162. FileCount: t.FileCount,
  163. DeletedFileCount: t.DeleteCount,
  164. DeletedBytes: t.DeletedByteCount,
  165. }
  166. }
  167. func (s statistics) plus(t statistics) statistics {
  168. return statistics{
  169. Size: s.Size + t.Size,
  170. FileCount: s.FileCount + t.FileCount,
  171. DeletedFileCount: s.DeletedFileCount + t.DeletedFileCount,
  172. DeletedBytes: s.DeletedBytes + t.DeletedBytes,
  173. }
  174. }
  175. func (s statistics) String() string {
  176. if s.DeletedFileCount > 0 {
  177. return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes)
  178. }
  179. return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount)
  180. }