volume_grpc_admin.go

package weed_server

import (
	"context"
	"fmt"
	"path/filepath"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
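
// DeleteCollection removes all volumes that belong to the requested
// collection from this volume server's store.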
func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) {
	resp := &volume_server_pb.DeleteCollectionResponse{}
	err := vs.store.DeleteCollection(req.Collection)
	if err != nil {
		glog.Errorf("delete collection %s: %v", req.Collection, err)
	} else {
		glog.V(2).Infof("delete collection %v", req)
	}
	return resp, err
}
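
// AllocateVolume creates a new volume on this server with the requested
// volume id, collection, replication, TTL, preallocation size, and
// memory-map limit.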
func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_pb.AllocateVolumeRequest) (*volume_server_pb.AllocateVolumeResponse, error) {
	resp := &volume_server_pb.AllocateVolumeResponse{}
	err := vs.store.AddVolume(
		needle.VolumeId(req.VolumeId),
		req.Collection,
		vs.needleMapKind,
		req.Replication,
		req.Ttl,
		req.Preallocate,
		req.MemoryMapMaxSizeMb,
	)
	if err != nil {
		glog.Errorf("assign volume %v: %v", req, err)
	} else {
		glog.V(2).Infof("assign volume %v", req)
	}
	return resp, err
}
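
// VolumeMount mounts the given volume so it is served by this volume server.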
func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.VolumeMountRequest) (*volume_server_pb.VolumeMountResponse, error) {
	resp := &volume_server_pb.VolumeMountResponse{}
	err := vs.store.MountVolume(needle.VolumeId(req.VolumeId))
	if err != nil {
		glog.Errorf("volume mount %v: %v", req, err)
	} else {
		glog.V(2).Infof("volume mount %v", req)
	}
	return resp, err
}
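
// VolumeUnmount unmounts the given volume so it is no longer served by this
// volume server.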
func (vs *VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb.VolumeUnmountRequest) (*volume_server_pb.VolumeUnmountResponse, error) {
	resp := &volume_server_pb.VolumeUnmountResponse{}
	err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
	if err != nil {
		glog.Errorf("volume unmount %v: %v", req, err)
	} else {
		glog.V(2).Infof("volume unmount %v", req)
	}
	return resp, err
}
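
// VolumeDelete deletes the given volume from this volume server's store.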
func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb.VolumeDeleteRequest) (*volume_server_pb.VolumeDeleteResponse, error) {
	resp := &volume_server_pb.VolumeDeleteResponse{}
	err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
	if err != nil {
		glog.Errorf("volume delete %v: %v", req, err)
	} else {
		glog.V(2).Infof("volume delete %v", req)
	}
	return resp, err
}
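
// VolumeConfigure changes the replication setting of a volume: it validates
// the requested replica placement, unmounts the volume, rewrites the volume
// info, and mounts it again. Failures are reported through resp.Error rather
// than as a gRPC error.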
func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) {
	resp := &volume_server_pb.VolumeConfigureResponse{}

	// check replication format
	if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil {
		resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err)
		return resp, nil
	}

	// unmount
	if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil {
		glog.Errorf("volume configure unmount %v: %v", req, err)
		resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err)
		return resp, nil
	}

	// modify the volume info file
	if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil {
		glog.Errorf("volume configure %v: %v", req, err)
		resp.Error = fmt.Sprintf("volume configure %v: %v", req, err)
		return resp, nil
	}

	// mount
	if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil {
		glog.Errorf("volume configure mount %v: %v", req, err)
		resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err)
		return resp, nil
	}

	return resp, nil
}
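
// VolumeMarkReadonly marks the given volume as read-only in this volume
// server's store.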
func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) {
	resp := &volume_server_pb.VolumeMarkReadonlyResponse{}
	err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId))
	if err != nil {
		glog.Errorf("volume mark readonly %v: %v", req, err)
	} else {
		glog.V(2).Infof("volume mark readonly %v", req)
	}
	return resp, err
}
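
// VolumeServerStatus reports the disk status of every configured storage
// location and the current memory statistics of this volume server.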
func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) {
	resp := &volume_server_pb.VolumeServerStatusResponse{}

	for _, loc := range vs.store.Locations {
		if dir, e := filepath.Abs(loc.Directory); e == nil {
			resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir))
		}
	}

	resp.MemoryStatus = stats.MemStat()

	return resp, nil
}