// filer_grpc_server_dlm.go
  1. package weed_server
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager"
  6. "github.com/seaweedfs/seaweedfs/weed/glog"
  7. "github.com/seaweedfs/seaweedfs/weed/pb"
  8. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  9. "google.golang.org/grpc/codes"
  10. "google.golang.org/grpc/status"
  11. "time"
  12. )
  13. // DistributedLock is a grpc handler to handle FilerServer's LockRequest
  14. func (fs *FilerServer) DistributedLock(ctx context.Context, req *filer_pb.LockRequest) (resp *filer_pb.LockResponse, err error) {
  15. resp = &filer_pb.LockResponse{}
  16. var movedTo pb.ServerAddress
  17. expiredAtNs := time.Now().Add(time.Duration(req.SecondsToLock) * time.Second).UnixNano()
  18. resp.LockOwner, resp.RenewToken, movedTo, err = fs.filer.Dlm.LockWithTimeout(req.Name, expiredAtNs, req.RenewToken, req.Owner)
  19. glog.V(3).Infof("lock %s %v %v %v, isMoved=%v %v", req.Name, req.SecondsToLock, req.RenewToken, req.Owner, req.IsMoved, movedTo)
  20. if movedTo != "" && movedTo != fs.option.Host && !req.IsMoved {
  21. err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
  22. secondResp, err := client.DistributedLock(context.Background(), &filer_pb.LockRequest{
  23. Name: req.Name,
  24. SecondsToLock: req.SecondsToLock,
  25. RenewToken: req.RenewToken,
  26. IsMoved: true,
  27. Owner: req.Owner,
  28. })
  29. if err == nil {
  30. resp.RenewToken = secondResp.RenewToken
  31. resp.LockOwner = secondResp.LockOwner
  32. resp.Error = secondResp.Error
  33. }
  34. return err
  35. })
  36. }
  37. if err != nil {
  38. resp.Error = fmt.Sprintf("%v", err)
  39. }
  40. if movedTo != "" {
  41. resp.LockHostMovedTo = string(movedTo)
  42. }
  43. return resp, nil
  44. }
  45. // Unlock is a grpc handler to handle FilerServer's UnlockRequest
  46. func (fs *FilerServer) DistributedUnlock(ctx context.Context, req *filer_pb.UnlockRequest) (resp *filer_pb.UnlockResponse, err error) {
  47. resp = &filer_pb.UnlockResponse{}
  48. var movedTo pb.ServerAddress
  49. movedTo, err = fs.filer.Dlm.Unlock(req.Name, req.RenewToken)
  50. if !req.IsMoved && movedTo != "" {
  51. err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
  52. secondResp, err := client.DistributedUnlock(context.Background(), &filer_pb.UnlockRequest{
  53. Name: req.Name,
  54. RenewToken: req.RenewToken,
  55. IsMoved: true,
  56. })
  57. resp.Error = secondResp.Error
  58. return err
  59. })
  60. }
  61. if err != nil {
  62. resp.Error = fmt.Sprintf("%v", err)
  63. }
  64. if movedTo != "" {
  65. resp.MovedTo = string(movedTo)
  66. }
  67. return resp, nil
  68. }
  69. func (fs *FilerServer) FindLockOwner(ctx context.Context, req *filer_pb.FindLockOwnerRequest) (*filer_pb.FindLockOwnerResponse, error) {
  70. owner, movedTo, err := fs.filer.Dlm.FindLockOwner(req.Name)
  71. if !req.IsMoved && movedTo != "" || err == lock_manager.LockNotFound {
  72. err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
  73. secondResp, err := client.FindLockOwner(context.Background(), &filer_pb.FindLockOwnerRequest{
  74. Name: req.Name,
  75. IsMoved: true,
  76. })
  77. if err != nil {
  78. return err
  79. }
  80. owner = secondResp.Owner
  81. return nil
  82. })
  83. if err != nil {
  84. return nil, err
  85. }
  86. }
  87. if owner == "" {
  88. glog.V(0).Infof("find lock %s moved to %v: %v", req.Name, movedTo, err)
  89. return nil, status.Error(codes.NotFound, fmt.Sprintf("lock %s not found", req.Name))
  90. }
  91. if err != nil {
  92. return nil, status.Error(codes.Internal, err.Error())
  93. }
  94. return &filer_pb.FindLockOwnerResponse{
  95. Owner: owner,
  96. }, nil
  97. }
  98. // TransferLocks is a grpc handler to handle FilerServer's TransferLocksRequest
  99. func (fs *FilerServer) TransferLocks(ctx context.Context, req *filer_pb.TransferLocksRequest) (*filer_pb.TransferLocksResponse, error) {
  100. for _, lock := range req.Locks {
  101. fs.filer.Dlm.InsertLock(lock.Name, lock.ExpiredAtNs, lock.RenewToken, lock.Owner)
  102. }
  103. return &filer_pb.TransferLocksResponse{}, nil
  104. }
  105. func (fs *FilerServer) OnDlmChangeSnapshot(snapshot []pb.ServerAddress) {
  106. locks := fs.filer.Dlm.SelectNotOwnedLocks(snapshot)
  107. if len(locks) == 0 {
  108. return
  109. }
  110. for _, lock := range locks {
  111. server := fs.filer.Dlm.CalculateTargetServer(lock.Key, snapshot)
  112. if err := pb.WithFilerClient(false, 0, server, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
  113. _, err := client.TransferLocks(context.Background(), &filer_pb.TransferLocksRequest{
  114. Locks: []*filer_pb.Lock{
  115. {
  116. Name: lock.Key,
  117. RenewToken: lock.Token,
  118. ExpiredAtNs: lock.ExpiredAtNs,
  119. Owner: lock.Owner,
  120. },
  121. },
  122. })
  123. return err
  124. }); err != nil {
  125. // it may not be worth retrying, since the lock may have expired
  126. glog.Errorf("transfer lock %v to %v: %v", lock.Key, server, err)
  127. }
  128. }
  129. }