package weed_server

// https://yusufs.medium.com/creating-distributed-kv-database-by-implementing-raft-consensus-using-golang-d0884eef2e28
// https://github.com/Jille/raft-grpc-example/blob/cd5bcab0218f008e044fbeee4facdd01b06018ad/application.go#L18

import (
	"fmt"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"time"

	transport "github.com/Jille/raft-grpc-transport"
	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
	"github.com/hashicorp/raft"
	boltdb "github.com/hashicorp/raft-boltdb/v2"
	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
)

const (
	ldbFile            = "logs.dat"
	sdbFile            = "stable.dat"
	updatePeersTimeout = 15 * time.Minute
)
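
// getPeerIdx returns the position of self in the lexicographically sorted
// list of peer addresses, or -1 if self is not among the peers. Every server
// computes the same ordering, which is used to stagger cluster bootstrap.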
func getPeerIdx(self pb.ServerAddress, mapPeers map[string]pb.ServerAddress) int {
	peers := make([]pb.ServerAddress, 0, len(mapPeers))
	for _, peer := range mapPeers {
		peers = append(peers, peer)
	}
	sort.Slice(peers, func(i, j int) bool {
		return strings.Compare(string(peers[i]), string(peers[j])) < 0
	})
	for i, peer := range peers {
		if string(peer) == string(self) {
			return i
		}
	}
	return -1
}
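
// AddPeersConfiguration builds a raft.Configuration that lists every
// configured peer as a voting member, addressed by its gRPC endpoint.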
func (s *RaftServer) AddPeersConfiguration() (cfg raft.Configuration) {
	for _, peer := range s.peers {
		cfg.Servers = append(cfg.Servers, raft.Server{
			Suffrage: raft.Voter,
			ID:       raft.ServerID(peer),
			Address:  raft.ServerAddress(peer.ToGrpcAddress()),
		})
	}
	return cfg
}
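
// UpdatePeers blocks until this server is elected leader (or gives up after
// updatePeersTimeout) and then reconciles the raft membership with s.peers:
// missing peers are added as voters, and servers no longer listed in s.peers
// are removed (including this server itself, if it is not listed).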
func (s *RaftServer) UpdatePeers() {
	for {
		select {
		case isLeader := <-s.RaftHashicorp.LeaderCh():
			if isLeader {
				peerLeader := string(s.serverAddr)
				existsPeerName := make(map[string]bool)
				for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
					if string(server.ID) == peerLeader {
						continue
					}
					existsPeerName[string(server.ID)] = true
				}
				for _, peer := range s.peers {
					peerName := string(peer)
					if peerName == peerLeader || existsPeerName[peerName] {
						continue
					}
					glog.V(0).Infof("adding new peer: %s", peerName)
					s.RaftHashicorp.AddVoter(
						raft.ServerID(peerName), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
				}
				for peer := range existsPeerName {
					if _, found := s.peers[peer]; !found {
						glog.V(0).Infof("removing old peer: %s", peer)
						s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
					}
				}
				if _, found := s.peers[peerLeader]; !found {
					glog.V(0).Infof("removing old leader peer: %s", peerLeader)
					s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
				}
			}
			return
		case <-time.After(updatePeersTimeout):
			return
		}
	}
}
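
// NewHashicorpRaftServer wires up a hashicorp/raft node: BoltDB-backed log
// and stable stores, a file snapshot store, a gRPC transport, and a
// Prometheus sink for raft metrics.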
func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
	s := &RaftServer{
		peers:      option.Peers,
		serverAddr: option.ServerAddr,
		dataDir:    option.DataDir,
		topo:       option.Topo,
	}

	c := raft.DefaultConfig()
	c.LocalID = raft.ServerID(s.serverAddr) // TODO: maybe the IP:port address will change
	// Randomize the heartbeat timeout slightly so servers do not time out in lockstep.
	c.HeartbeatTimeout = time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
	c.ElectionTimeout = option.ElectionTimeout
	if c.LeaderLeaseTimeout > c.HeartbeatTimeout {
		c.LeaderLeaseTimeout = c.HeartbeatTimeout
	}
	// Map glog verbosity onto hashicorp/raft log levels.
	if glog.V(4) {
		c.LogLevel = "Debug"
	} else if glog.V(2) {
		c.LogLevel = "Info"
	} else if glog.V(1) {
		c.LogLevel = "Warn"
	} else if glog.V(0) {
		c.LogLevel = "Error"
	}
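
	// When re-bootstrapping, wipe any previously persisted raft state so the
	// node starts from the freshly built configuration below.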
	if option.RaftBootstrap {
		os.RemoveAll(path.Join(s.dataDir, ldbFile))
		os.RemoveAll(path.Join(s.dataDir, sdbFile))
		os.RemoveAll(path.Join(s.dataDir, "snapshots"))
	}
	if err := os.MkdirAll(path.Join(s.dataDir, "snapshots"), os.ModePerm); err != nil {
		return nil, err
	}

	baseDir := s.dataDir

	ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, ldbFile))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, ldbFile), err)
	}

	sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, sdbFile))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, sdbFile), err)
	}

	fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
	if err != nil {
		return nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
	}
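
	// Raft RPCs are carried over gRPC via the raft-grpc-transport manager,
	// reusing this server's gRPC dial option.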
	s.TransportManager = transport.New(raft.ServerAddress(s.serverAddr), []grpc.DialOption{option.GrpcDialOption})

	stateMachine := StateMachine{topo: option.Topo}
	s.RaftHashicorp, err = raft.NewRaft(c, &stateMachine, ldb, sdb, fss, s.TransportManager.Transport())
	if err != nil {
		return nil, fmt.Errorf("raft.NewRaft: %v", err)
	}
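
	// Bootstrap when explicitly requested or when no configuration has been
	// persisted yet; otherwise reconcile membership in the background once
	// this node becomes leader.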
	if option.RaftBootstrap || len(s.RaftHashicorp.GetConfiguration().Configuration().Servers) == 0 {
		cfg := s.AddPeersConfiguration()
		// Stagger bootstrap by peer index, in case all servers do this at the same time.
		peerIdx := getPeerIdx(s.serverAddr, s.peers)
		timeSleep := time.Duration(float64(c.LeaderLeaseTimeout) * (rand.Float64()*0.25 + 1) * float64(peerIdx))
		glog.V(0).Infof("Bootstrapping idx: %d sleep: %v new cluster: %+v", peerIdx, timeSleep, cfg)
		time.Sleep(timeSleep)
		f := s.RaftHashicorp.BootstrapCluster(cfg)
		if err := f.Error(); err != nil {
			return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
		}
	} else {
		go s.UpdatePeers()
	}
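
	// At high verbosity, periodically log the peers this node currently knows about.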
	ticker := time.NewTicker(c.HeartbeatTimeout * 10)
	if glog.V(4) {
		go func() {
			for {
				select {
				case <-ticker.C:
					cfuture := s.RaftHashicorp.GetConfiguration()
					if err = cfuture.Error(); err != nil {
						glog.Fatalf("error getting config: %s", err)
					}
					configuration := cfuture.Configuration()
					glog.V(4).Infof("Showing peers known by %s:\n%+v", s.RaftHashicorp.String(), configuration.Servers)
				}
			}
		}()
	}

	// Configure a prometheus sink as the raft metrics sink
	if sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
		Registerer: stats.Gather,
	}); err != nil {
		return nil, fmt.Errorf("NewPrometheusSink: %v", err)
	} else {
		metricsConf := metrics.DefaultConfig(stats.Namespace)
		metricsConf.EnableRuntimeMetrics = false
		if _, err = metrics.NewGlobal(metricsConf, sink); err != nil {
			return nil, fmt.Errorf("metrics.NewGlobal: %v", err)
		}
	}

	return s, nil
}