master_grpc_server_volume.go

package weed_server

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"sync"
	"time"

	"github.com/seaweedfs/raft"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
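
// ProcessGrowRequest starts a background goroutine that consumes volume grow
// requests from ms.vgCh. Requests received while this master is not the leader
// are discarded, identical requests already in flight are filtered out, and
// qualifying requests trigger an automatic volume grow whose new volume
// locations are broadcast to connected clients.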
func (ms *MasterServer) ProcessGrowRequest() {
	go func() {
		filter := sync.Map{}
		for {
			req, ok := <-ms.vgCh
			if !ok {
				break
			}

			if !ms.Topo.IsLeader() {
				// discard buffered requests
				time.Sleep(time.Second * 1)
				continue
			}

			// filter out identical requests being processed
			found := false
			filter.Range(func(k, v interface{}) bool {
				if reflect.DeepEqual(k, req) {
					found = true
				}
				return !found
			})

			option := req.Option
			vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)

			// not atomic but it's okay
			if !found && vl.ShouldGrowVolumes(option) {
				filter.Store(req, nil)
				// the volume growth lock is acquired inside vg
				go func() {
					glog.V(1).Infoln("starting automatic volume grow")
					start := time.Now()
					newVidLocations, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
					glog.V(1).Infoln("finished automatic volume grow, cost ", time.Since(start))

					if err == nil {
						for _, newVidLocation := range newVidLocations {
							ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: newVidLocation})
						}
					}
					vl.DoneGrowRequest()

					if req.ErrCh != nil {
						req.ErrCh <- err
						close(req.ErrCh)
					}

					filter.Delete(req)
				}()
			} else {
				glog.V(4).Infoln("discard volume grow request")
			}
		}
	}()
}
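
// LookupVolume resolves each requested volume or file id to its current
// locations. When the id is a file id (it contains a comma), a JWT for the
// volume server is attached to the response entry.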
func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupVolumeRequest) (*master_pb.LookupVolumeResponse, error) {
	resp := &master_pb.LookupVolumeResponse{}
	volumeLocations := ms.lookupVolumeId(req.VolumeOrFileIds, req.Collection)

	for _, volumeOrFileId := range req.VolumeOrFileIds {
		vid := volumeOrFileId
		commaSep := strings.Index(vid, ",")
		if commaSep > 0 {
			vid = vid[0:commaSep]
		}
		if result, found := volumeLocations[vid]; found {
			var locations []*master_pb.Location
			for _, loc := range result.Locations {
				locations = append(locations, &master_pb.Location{
					Url:        loc.Url,
					PublicUrl:  loc.PublicUrl,
					DataCenter: loc.DataCenter,
				})
			}
			var auth string
			if commaSep > 0 { // this is a file id
				auth = string(security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, result.VolumeOrFileId))
			}
			resp.VolumeIdLocations = append(resp.VolumeIdLocations, &master_pb.LookupVolumeResponse_VolumeIdLocation{
				VolumeOrFileId: result.VolumeOrFileId,
				Locations:      locations,
				Error:          result.Error,
				Auth:           auth,
			})
		}
	}

	return resp, nil
}
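
// Statistics reports the used size and file count of the volume layout that
// matches the requested collection, replication, ttl and disk type, plus the
// total capacity derived from the topology's max volume count and the
// configured volume size limit.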
func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.StatisticsRequest) (*master_pb.StatisticsResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	if req.Replication == "" {
		req.Replication = ms.option.DefaultReplicaPlacement
	}
	replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication)
	if err != nil {
		return nil, err
	}
	ttl, err := needle.ReadTTL(req.Ttl)
	if err != nil {
		return nil, err
	}

	volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, types.ToDiskType(req.DiskType))
	stats := volumeLayout.Stats()
	totalSize := ms.Topo.GetDiskUsages().GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024

	resp := &master_pb.StatisticsResponse{
		TotalSize: uint64(totalSize),
		UsedSize:  stats.UsedSize,
		FileCount: stats.FileCount,
	}

	return resp, nil
}
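
// VolumeList returns the full topology info along with the configured volume
// size limit in MB.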
func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VolumeListResponse{
		TopologyInfo:      ms.Topo.ToTopologyInfo(),
		VolumeSizeLimitMb: uint64(ms.option.VolumeSizeLimitMB),
	}

	return resp, nil
}
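
// LookupEcVolume returns the locations of every shard of an erasure-coded
// volume, or an error if the volume's shards are not found in the topology.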
func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.LookupEcVolumeRequest) (*master_pb.LookupEcVolumeResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.LookupEcVolumeResponse{}

	ecLocations, found := ms.Topo.LookupEcShards(needle.VolumeId(req.VolumeId))
	if !found {
		return resp, fmt.Errorf("ec volume %d not found", req.VolumeId)
	}

	resp.VolumeId = req.VolumeId

	for shardId, shardLocations := range ecLocations.Locations {
		var locations []*master_pb.Location
		for _, dn := range shardLocations {
			locations = append(locations, &master_pb.Location{
				Url:        string(dn.Id()),
				PublicUrl:  dn.PublicUrl,
				DataCenter: dn.GetDataCenterId(),
			})
		}
		resp.ShardIdLocations = append(resp.ShardIdLocations, &master_pb.LookupEcVolumeResponse_EcShardIdLocation{
			ShardId:   uint32(shardId),
			Locations: locations,
		})
	}

	return resp, nil
}
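
// VacuumVolume triggers a vacuum (garbage compaction) pass on the topology
// using the requested garbage threshold, volume id and collection.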
func (ms *MasterServer) VacuumVolume(ctx context.Context, req *master_pb.VacuumVolumeRequest) (*master_pb.VacuumVolumeResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VacuumVolumeResponse{}

	ms.Topo.Vacuum(ms.grpcDialOption, float64(req.GarbageThreshold), req.VolumeId, req.Collection, ms.preallocateSize)

	return resp, nil
}
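
// DisableVacuum turns off vacuuming on the topology.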
func (ms *MasterServer) DisableVacuum(ctx context.Context, req *master_pb.DisableVacuumRequest) (*master_pb.DisableVacuumResponse, error) {
	ms.Topo.DisableVacuum()
	resp := &master_pb.DisableVacuumResponse{}
	return resp, nil
}
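
// EnableVacuum turns vacuuming on the topology back on.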
func (ms *MasterServer) EnableVacuum(ctx context.Context, req *master_pb.EnableVacuumRequest) (*master_pb.EnableVacuumResponse, error) {
	ms.Topo.EnableVacuum()
	resp := &master_pb.EnableVacuumResponse{}
	return resp, nil
}
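
// VolumeMarkReadonly marks the replica of the given volume on the data node
// matching req.Ip and req.Port as read-only, or writable again when
// req.IsReadonly is false.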
func (ms *MasterServer) VolumeMarkReadonly(ctx context.Context, req *master_pb.VolumeMarkReadonlyRequest) (*master_pb.VolumeMarkReadonlyResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VolumeMarkReadonlyResponse{}

	replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(req.ReplicaPlacement))
	vl := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, needle.LoadTTLFromUint32(req.Ttl), types.ToDiskType(req.DiskType))
	dataNodes := ms.Topo.Lookup(req.Collection, needle.VolumeId(req.VolumeId))
	for _, dn := range dataNodes {
		if dn.Ip == req.Ip && dn.Port == int(req.Port) {
			if req.IsReadonly {
				vl.SetVolumeReadOnly(dn, needle.VolumeId(req.VolumeId))
			} else {
				vl.SetVolumeWritable(dn, needle.VolumeId(req.VolumeId))
			}
		}
	}

	return resp, nil
}