peer_server.go

package server

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/third_party/github.com/goraft/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/pkg/btrfs"
	"github.com/coreos/etcd/store"
)
const (
	// ThresholdMonitorTimeout is the time between log notifications that the
	// Raft heartbeat is too close to the election timeout.
	ThresholdMonitorTimeout = 5 * time.Second

	// ActiveMonitorTimeout is the time between checks on the active size of
	// the cluster. If the cluster has more active peers than the configured
	// active size, etcd demotes peers to bring it back to the correct number.
	ActiveMonitorTimeout = 1 * time.Second

	// PeerActivityMonitorTimeout is the time between checks for dead nodes in
	// the cluster.
	PeerActivityMonitorTimeout = 1 * time.Second
)
type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}

type PeerServer struct {
	Config         PeerServerConfig
	client         *Client
	clusterConfig  *ClusterConfig
	raftServer     raft.Server
	server         *Server
	joinIndex      uint64
	followersStats *raftFollowersStats
	serverStats    *raftServerStats
	registry       *Registry
	store          store.Store
	snapConf       *snapshotConf

	stopNotify           chan bool
	removeNotify         chan bool
	started              bool
	closeChan            chan bool
	routineGroup         sync.WaitGroup
	timeoutThresholdChan chan interface{}

	metrics *metrics.Bucket

	sync.Mutex
}

// TODO: find a good policy for taking snapshots.
type snapshotConf struct {
	// Etcd checks whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration

	// The index at which the last snapshot happened.
	lastIndex uint64

	// If the number of entries committed since the last snapshot
	// exceeds snapshotThr, etcd takes a new snapshot.
	snapshotThr uint64
}
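// A minimal wiring sketch (hypothetical surrounding setup; the real
// construction lives in the daemon's startup code):
//
//	ps := NewPeerServer(psConfig, client, registry, st, mb, followersStats, serverStats)
//	ps.SetRaftServer(raftServer)
//	ps.SetServer(clientServer)
//	if err := ps.Start(true, discoverURL, peers); err != nil {
//		log.Fatal(err)
//	}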
func NewPeerServer(psConfig PeerServerConfig, client *Client, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:         psConfig,
		client:         client,
		clusterConfig:  NewClusterConfig(),
		registry:       registry,
		store:          store,
		followersStats: followersStats,
		serverStats:    serverStats,

		timeoutThresholdChan: make(chan interface{}, 1),

		metrics: mb,
	}

	return s
}
func (s *PeerServer) SetRaftServer(raftServer raft.Server) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// This is not accurate; we will update raft to provide an API for it.
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}
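	// Route all cluster-state changes through a single event logger and
	// record heartbeat latencies in the metrics bucket.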
	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	s.raftServer = raftServer
}
// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	return s.clusterConfig
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the PeerServer to demote peers or
// promote standbys to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
	// Set minimums.
	if c.ActiveSize < MinActiveSize {
		c.ActiveSize = MinActiveSize
	}
	if c.PromoteDelay < MinPromoteDelay {
		c.PromoteDelay = MinPromoteDelay
	}

	s.clusterConfig = c
}
// findCluster tries all possible ways to find a cluster to join,
// including log data in -data-dir, -discovery, and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
//
// TODO(yichengq): RaftServer should be started as late as possible.
// The current way of starting it is not good, and should be
// refactored later.
func (s *PeerServer) findCluster(discoverURL string, peers []string) {
	name := s.Config.Name
	isNewNode := s.raftServer.IsLogEmpty()

	// Try its best to find possible peers, and connect with them.
	if !isNewNode {
		// It is not allowed to join the cluster with an existing peer address.
		// This prevents an old node from joining with a different name by mistake.
		if !s.checkPeerAddressNonconflict() {
			log.Fatalf("%v is not allowed to join the cluster with existing URL %v", s.Config.Name, s.Config.URL)
		}

		// Take old nodes into account.
		allPeers := s.getKnownPeers()
		// Discover registered peers.
		// TODO(yichengq): It may mess up discoverURL if this is
		// set wrong by mistake. This may need a refactor of the
		// discovery module. Fix it later.
		if discoverURL != "" {
			discoverPeers, _ := s.handleDiscovery(discoverURL)
			allPeers = append(allPeers, discoverPeers...)
		}
		allPeers = append(allPeers, peers...)
		allPeers = s.removeSelfFromList(allPeers)

		// If there is a possible peer list, use it to find the cluster.
		if len(allPeers) > 0 {
			// TODO(yichengq): joinCluster may fail if there's no leader for
			// the current cluster. It should wait if the cluster is under
			// leader election, or the node with a changed IP cannot join
			// the cluster then.
			if err := s.startAsFollower(allPeers, 1); err == nil {
				log.Debugf("%s joins the previous cluster %v", name, allPeers)
				return
			}

			log.Warnf("%s cannot connect to previous cluster %v", name, allPeers)
		}

		// TODO(yichengq): Think about the action that should be taken
		// if it cannot connect to any of the previously known nodes.
		s.raftServer.Start()
		log.Debugf("%s is restarting the cluster %v", name, allPeers)
		return
	}

	// Attempt cluster discovery.
	if discoverURL != "" {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The node is registered in the discovery URL.
		if discoverErr == nil {
			// Start as a leader in a new cluster.
			if len(discoverPeers) == 0 {
				log.Debugf("%s is starting a new cluster via discovery service", name)
				s.startAsLeader()
			} else {
				log.Debugf("%s is joining a cluster %v via discovery service", name, discoverPeers)
				if err := s.startAsFollower(discoverPeers, s.Config.RetryTimes); err != nil {
					log.Fatal(err)
				}
			}
			return
		}

		log.Warnf("%s failed to connect to discovery service[%v]: %v", name, discoverURL, discoverErr)

		if len(peers) == 0 {
			log.Fatalf("%s, the new leader, must register itself to the discovery service as required", name)
		}
	}

	if len(peers) > 0 {
		if err := s.startAsFollower(peers, s.Config.RetryTimes); err != nil {
			log.Fatalf("%s cannot connect to the existing cluster %v", name, peers)
		}
		return
	}

	log.Infof("%s is starting a new cluster.", s.Config.Name)
	s.startAsLeader()
}
// Start starts the raft server.
// The function assumes that the join has been accepted successfully.
func (s *PeerServer) Start(snapshot bool, discoverURL string, peers []string) error {
	s.Lock()
	defer s.Unlock()
	if s.started {
		return nil
	}
	s.started = true

	// Load the snapshot, if requested.
	if snapshot {
		err := s.raftServer.LoadSnapshot()

		if err == nil {
			log.Debugf("%s finished loading snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.Init()

	// Set NOCOW for the data directory on btrfs.
	if btrfs.IsBtrfs(s.raftServer.LogPath()) {
		if err := btrfs.SetNOCOWFile(s.raftServer.LogPath()); err != nil {
			log.Warnf("Failed setting NOCOW: %v", err)
		}
	}

	s.findCluster(discoverURL, peers)

	s.stopNotify = make(chan bool)
	s.removeNotify = make(chan bool)
	s.closeChan = make(chan bool)

	s.startRoutine(s.monitorSync)
	s.startRoutine(s.monitorTimeoutThreshold)
	s.startRoutine(s.monitorActiveSize)
	s.startRoutine(s.monitorPeerActivity)

	// Start the snapshot monitor if snapshotting is enabled.
	if snapshot {
		s.startRoutine(s.monitorSnapshot)
	}

	return nil
}
// Stop stops the server gracefully.
func (s *PeerServer) Stop() {
	s.Lock()
	defer s.Unlock()
	if !s.started {
		return
	}
	s.started = false

	close(s.closeChan)
	// TODO(yichengq): it should also call an async stop for the raft server,
	// but this functionality has not been implemented.
	s.raftServer.Stop()
	s.routineGroup.Wait()
	close(s.stopNotify)
}
// asyncRemove stops the server in peer mode.
// It is called to stop the server internally when it has been removed
// from the cluster.
// It triggers the stop action first to notify the server that it
// should not continue, then waits for the stop in a separate goroutine
// because the caller should also exit.
func (s *PeerServer) asyncRemove() {
	s.Lock()
	if !s.started {
		s.Unlock()
		return
	}
	s.started = false

	close(s.closeChan)
	// TODO(yichengq): it should also call an async stop for the raft server,
	// but this functionality has not been implemented.
	go func() {
		s.raftServer.Stop()
		s.routineGroup.Wait()
		close(s.removeNotify)
		s.Unlock()
	}()
}
// StopNotify returns a channel that is closed when the server stops.
func (s *PeerServer) StopNotify() <-chan bool {
	return s.stopNotify
}

// RemoveNotify returns a channel that is closed when the server leaves
// peer mode due to removal from the cluster.
func (s *PeerServer) RemoveNotify() <-chan bool {
	return s.removeNotify
}
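// HTTPHandler returns a router covering the internal Raft endpoints and
// the v2 admin endpoints. A minimal serving sketch (hypothetical
// listener address):
//
//	srv := &http.Server{Addr: ":7001", Handler: s.HTTPHandler()}
//	log.Fatal(srv.ListenAndServe())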
func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// Internal commands.
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")

	return router
}
// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}
func (s *PeerServer) startAsLeader() {
	s.raftServer.Start()
	// The leader needs to join itself as a peer.
	for {
		c := &JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       s.raftServer.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		}
		if _, err := s.raftServer.Do(c); err == nil {
			break
		}
	}
	log.Debugf("%s starts as a leader", s.Config.Name)
}
func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) error {
	// Start as a follower in an existing cluster.
	for i := 0; ; i++ {
		ok := s.joinCluster(cluster)
		if ok {
			break
		}
		if i == retryTimes-1 {
			return fmt.Errorf("Cannot join the cluster via given peers after %d retries", retryTimes)
		}
		log.Warnf("%v is unable to join the cluster using any of the peers %v (attempt %d). Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		time.Sleep(time.Second * time.Duration(s.Config.RetryInterval))
	}

	s.raftServer.Start()
	return nil
}
// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}
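		// Rebuild the URL with this server's peer scheme so the version
		// check is made over the configured transport.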
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme}).String()
		ok, err := s.client.CheckVersion(checkURL, nextVersion)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}
// checkPeerAddressNonconflict checks whether the peer address is already
// in use by a peer with a different name.
func (s *PeerServer) checkPeerAddressNonconflict() bool {
	// The (name, peer address) pair already exists.
	if peerURL, ok := s.registry.PeerURL(s.Config.Name); ok {
		if peerURL == s.Config.URL {
			return true
		}
	}

	// Check all existing peer addresses.
	peerURLs := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for _, peerURL := range peerURLs {
		if peerURL == s.Config.URL {
			return false
		}
	}
	return true
}
// handleDiscovery is a helper that runs discovery and returns the
// results in the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL, s.closeChan, s.startRoutine)

	// Warn about errors coming from discovery; this isn't fatal
	// since the user might have provided a peer list elsewhere,
	// or there is some log in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)

	return
}
// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
	peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	log.Infof("Peer URLs in log: %s / %s (%s)", s.raftServer.Leader(), s.Config.Name, strings.Join(peers, ","))

	for i := range peers {
		u, err := url.Parse(peers[i])
		if err != nil {
			log.Debugf("getKnownPeers cannot parse url %v", peers[i])
			continue
		}
		peers[i] = u.Host
	}
	return peers
}
// removeSelfFromList removes the URL of the PeerServer from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
	// Remove its own peer address from the peer list to join.
	u, err := url.Parse(s.Config.URL)
	if err != nil {
		log.Fatalf("removeSelfFromList cannot parse peer address %v", s.Config.URL)
	}

	newPeers := make([]string, 0)
	for _, v := range peers {
		if v != u.Host {
			newPeers = append(newPeers, v)
		}
	}
	return newPeers
}
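// joinCluster tries each candidate peer in turn until one accepts the
// join request. A structured etcd error aborts the process, while plain
// transport errors fall through to the next peer.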
func (s *PeerServer) joinCluster(cluster []string) bool {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme)
		if err == nil {
			log.Debugf("%s joined the cluster via peer %s", s.Config.Name, peer)
			return true
		}

		if _, ok := err.(etcdErr.Error); ok {
			log.Fatal(err)
		}

		log.Warnf("Attempt to join via %s failed: %s", peer, err)
	}

	return false
}
// joinByPeer sends a join request to the given peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
	u := (&url.URL{Host: peer, Scheme: scheme}).String()

	// Our version must match the leader's version.
	version, err := s.client.GetVersion(u)
	if err != nil {
		return fmt.Errorf("fail checking join version: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return fmt.Errorf("fail passing version compatibility(%d-%d) using %d", store.MinVersion(), store.MaxVersion(), version)
	}

	// Fetch the current peer list.
	machines, err := s.client.GetMachines(u)
	if err != nil {
		return fmt.Errorf("fail getting machine messages: %v", err)
	}
	exist := false
	for _, machine := range machines {
		if machine.Name == server.Name() {
			exist = true
			break
		}
	}

	// Fetch the cluster config to check whether the cluster has room for us.
	clusterConfig, err := s.client.GetClusterConfig(u)
	if err != nil {
		return fmt.Errorf("fail getting cluster config: %v", err)
	}
	if !exist && clusterConfig.ActiveSize <= len(machines) {
		return fmt.Errorf("stop joining because the cluster is full with %d nodes", len(machines))
	}

	joinIndex, err := s.client.AddMachine(u,
		&JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       server.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		})
	if err != nil {
		return fmt.Errorf("fail on join request: %v", err)
	}
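	// Remember the raft index at which this node's join command was applied.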
	s.joinIndex = joinIndex

	return nil
}
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register a state listener with raft to update this field,
	// rather than comparing the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}
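	// Compute packet and bandwidth rates from the send/receive rate queues.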
	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)

	return b
}
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}
// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		name := "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}
// logSnapshot logs the result of a snapshot attempt.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}
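// startRoutine runs f in a goroutine tracked by routineGroup so that
// Stop and asyncRemove can wait for every background monitor to exit.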
func (s *PeerServer) startRoutine(f func()) {
	s.routineGroup.Add(1)
	go func() {
		defer s.routineGroup.Done()
		f()
	}()
}
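// monitorSnapshot periodically compares the commit index against the
// index of the last snapshot and takes a new snapshot once the
// threshold is exceeded.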
func (s *PeerServer) monitorSnapshot() {
	for {
		// Stop the timer explicitly on exit; a defer here would
		// accumulate one pending call per iteration.
		timer := time.NewTimer(s.snapConf.checkingInterval)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}
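// monitorSync has the leader periodically propose a sync command carrying
// the current time, giving the cluster a consistent clock (used, for
// example, to expire TTLs).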
func (s *PeerServer) monitorSync() {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case now := <-ticker.C:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}
// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold() {
	for {
		select {
		case <-s.closeChan:
			return
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		}
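		// Swallow further threshold events until the monitor timeout
		// elapses, so the warning is logged at most once per interval.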
		timer := time.NewTimer(ThresholdMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}
// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
	for {
		timer := time.NewTimer(ActiveMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve the target active size and the actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peers := s.registry.Names()
		peerCount := s.registry.Count()
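		// Drop this server's own name from the candidates so the leader
		// never removes itself; sort.SearchStrings relies on Names()
		// returning a sorted slice.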
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should, remove one at random.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: removing: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
			}
			continue
		}
	}
}
// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity() {
	for {
		timer := time.NewTimer(PeerActivityMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check the last activity for all peers.
		now := time.Now()
		promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is older than the promote
			// delay then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
				log.Infof("%s: removing node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&RemoveCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
			}
		}
	}
}