broker_grpc_server_discovery.go

package broker

import (
	"context"
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
/*
Topic discovery:

When a publisher or subscriber connects, it asks for the whole broker list and runs consistent hashing to pick a broker.
That broker checks its peers to see whether the topic is already hosted by some other broker; if that broker is alive and acknowledged as alive, it redirects to it.
Otherwise, it just hosts the topic itself.

So if the publishers and subscribers connect around the same time, they end up on the same broker. Everyone is happy.
If one of them connects much later, after the cluster topology has changed quite a bit (new servers added, old servers died), checking peers keeps them consistent.

(An illustrative sketch of such a consistent-hashing pick follows FindBroker below.)
*/
// FindBroker returns the gRPC address of the broker for the requested topic
// partition: it first asks the filers whether some broker already hosts it,
// and otherwise picks one deterministically from the known candidates.
func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) {

	t := &messaging_pb.FindBrokerResponse{}
	var peers []string

	targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition)

	for _, filer := range broker.option.Filers {
		err := broker.withFilerClient(false, filer, func(client filer_pb.SeaweedFilerClient) error {
			// ask the filer whether this topic partition is already hosted somewhere
			resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{
				Resource: targetTopicPartition,
			})
			if err != nil {
				return err
			}
			if resp.Found && len(resp.Resources) > 0 {
				// a broker already hosts this topic partition; report it
				t.Broker = resp.Resources[0].GrpcAddresses
				return nil
			}
			// not hosted yet: collect the candidate brokers reported by this filer
			for _, b := range resp.Resources {
				peers = append(peers, b.GrpcAddresses)
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	// deterministically pick a broker for this topic partition from the collected peers
	t.Broker = PickMember(peers, []byte(targetTopicPartition))

	return t, nil
}
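// The package comment above says clients "run consistent hashing to pick a
// broker"; the actual selection is done by PickMember, which is defined
// elsewhere in this package and is not shown in this file. The function below
// is an illustrative sketch only, under the hypothetical name pickMemberSketch:
// it uses rendezvous (highest-random-weight) hashing with an inlined FNV-1a
// hash, so that any client seeing the same candidate list and the same
// topic-partition key picks the same broker. It is not the actual PickMember
// implementation.
func pickMemberSketch(members []string, key []byte) string {
	if len(members) == 0 {
		return ""
	}
	// FNV-1a (32-bit), inlined so this sketch needs no extra imports.
	fnv32a := func(b []byte) uint32 {
		h := uint32(2166136261)
		for _, c := range b {
			h ^= uint32(c)
			h *= 16777619
		}
		return h
	}
	best, bestScore := "", uint32(0)
	for _, m := range members {
		// score each candidate by hashing key+member; the highest score wins
		score := fnv32a(append(append([]byte{}, key...), m...))
		if best == "" || score > bestScore {
			best, bestScore = m, score
		}
	}
	return best
}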
// checkFilers refreshes the broker's filer list: it asks any reachable filer
// for the master list, then asks a master for the full list of filers.
func (broker *MessageBroker) checkFilers() {

	// contact a filer to learn the masters
	var masters []pb.ServerAddress
	found := false
	for !found {
		for _, filer := range broker.option.Filers {
			err := broker.withFilerClient(false, filer, func(client filer_pb.SeaweedFilerClient) error {
				resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
				if err != nil {
					return err
				}
				for _, m := range resp.Masters {
					masters = append(masters, pb.ServerAddress(m))
				}
				return nil
			})
			if err == nil {
				found = true
				break
			}
			glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
			time.Sleep(time.Second)
		}
	}
	glog.V(0).Infof("received master list: %s", masters)

	// contact each master for the list of filers
	var filers []pb.ServerAddress
	found = false
	for !found {
		for _, master := range masters {
			err := broker.withMasterClient(false, master, func(client master_pb.SeaweedClient) error {
				resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
					ClientType: cluster.FilerType,
				})
				if err != nil {
					return err
				}
				for _, clusterNode := range resp.ClusterNodes {
					filers = append(filers, pb.ServerAddress(clusterNode.Address))
				}
				return nil
			})
			if err == nil {
				found = true
				break
			}
			glog.V(0).Infof("failed to list filers: %v", err)
			time.Sleep(time.Second)
		}
	}
	glog.V(0).Infof("received filer list: %s", filers)

	broker.option.Filers = filers
}
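// Client-side usage sketch (not part of this file): a publisher or subscriber
// could resolve its broker for a topic partition roughly as follows, assuming
// the standard generated gRPC client constructor NewSeaweedMessagingClient in
// messaging_pb and the request/response fields used by FindBroker above. The
// helper name resolveBroker and the plaintext dial option are illustrative;
// error handling is abbreviated.
//
//	func resolveBroker(ctx context.Context, anyBroker, ns, topic string, partition int32) (string, error) {
//		conn, err := grpc.Dial(anyBroker, grpc.WithInsecure())
//		if err != nil {
//			return "", err
//		}
//		defer conn.Close()
//
//		client := messaging_pb.NewSeaweedMessagingClient(conn)
//		resp, err := client.FindBroker(ctx, &messaging_pb.FindBrokerRequest{
//			Namespace: ns,
//			Topic:     topic,
//			Parition:  partition, // field name as spelled in the generated type
//		})
//		if err != nil {
//			return "", err
//		}
//		return resp.Broker, nil
//	}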