peer_server.go

package server

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/third_party/github.com/goraft/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/pkg/btrfs"
	"github.com/coreos/etcd/store"
)
const (
	// ThresholdMonitorTimeout is the time between log notifications that the
	// Raft heartbeat is too close to the election timeout.
	ThresholdMonitorTimeout = 5 * time.Second

	// ActiveMonitorTimeout is the time between checks on the active size of
	// the cluster. If the active size is bigger than the actual size then
	// etcd attempts to demote to bring it to the correct number.
	ActiveMonitorTimeout = 1 * time.Second

	// PeerActivityMonitorTimeout is the time between checks for dead nodes in
	// the cluster.
	PeerActivityMonitorTimeout = 1 * time.Second
)
type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}
type PeerServer struct {
	Config         PeerServerConfig
	client         *Client
	clusterConfig  *ClusterConfig
	raftServer     raft.Server
	server         *Server
	joinIndex      uint64
	followersStats *raftFollowersStats
	serverStats    *raftServerStats
	registry       *Registry
	store          store.Store
	snapConf       *snapshotConf

	closeChan            chan bool
	routineGroup         sync.WaitGroup
	timeoutThresholdChan chan interface{}

	metrics *metrics.Bucket

	sync.Mutex
}
// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd checks whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration

	// The index at which the last snapshot was taken.
	lastIndex uint64

	// If the number of entries committed since the last snapshot exceeds
	// snapshotThr, etcd takes a snapshot.
	snapshotThr uint64
}
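
// NewPeerServer creates a PeerServer from the given configuration and
// dependencies. The Raft server must be attached via SetRaftServer before
// Start is called.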
func NewPeerServer(psConfig PeerServerConfig, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:         psConfig,
		clusterConfig:  NewClusterConfig(),
		registry:       registry,
		store:          store,
		followersStats: followersStats,
		serverStats:    serverStats,

		timeoutThresholdChan: make(chan interface{}, 1),

		metrics: mb,
	}

	return s
}
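
// SetRaftServer attaches the Raft server to the PeerServer, initializes the
// snapshot configuration, and registers the event listeners used for logging
// and metrics.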
func (s *PeerServer) SetRaftServer(raftServer raft.Server) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// This is not accurate; we will update raft to provide an API.
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}

	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	s.raftServer = raftServer
}
// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	return s.clusterConfig
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the PeerServer to demote peers or
// promote standbys to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
	// Set minimums.
	if c.ActiveSize < MinActiveSize {
		c.ActiveSize = MinActiveSize
	}
	if c.PromoteDelay < MinPromoteDelay {
		c.PromoteDelay = MinPromoteDelay
	}

	s.clusterConfig = c
}
// findCluster tries all possible ways to find a cluster to join, including
// the log data in -data-dir, -discovery, and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
//
// TODO(yichengq): RaftServer should be started as late as possible.
// The current way of starting it is not that good, and should be
// refactored later.
func (s *PeerServer) findCluster(discoverURL string, peers []string) {
	name := s.Config.Name
	isNewNode := s.raftServer.IsLogEmpty()

	// Try its best to find possible peers, and connect with them.
	if !isNewNode {
		// A node is not allowed to join the cluster with an existing peer address.
		// This prevents an old node from joining under a different name by mistake.
		if !s.checkPeerAddressNonconflict() {
			log.Fatalf("%v is not allowed to join the cluster with existing URL %v", s.Config.Name, s.Config.URL)
		}

		// Take old nodes into account.
		allPeers := s.getKnownPeers()

		// Discover registered peers.
		// TODO(yichengq): It may mess up discoverURL if this is
		// set wrong by mistake. This may need to refactor discovery
		// module. Fix it later.
		if discoverURL != "" {
			discoverPeers, _ := s.handleDiscovery(discoverURL)
			allPeers = append(allPeers, discoverPeers...)
		}
		allPeers = append(allPeers, peers...)
		allPeers = s.removeSelfFromList(allPeers)

		// If there is a possible peer list, use it to find the cluster.
		if len(allPeers) > 0 {
			// TODO(yichengq): joinCluster may fail if there's no leader for
			// the current cluster. It should wait if the cluster is under
			// leader election, or the node with changed IP cannot join
			// the cluster then.
			if err := s.startAsFollower(allPeers, 1); err == nil {
				log.Debugf("%s joined the previous cluster %v", name, allPeers)
				return
			}

			log.Warnf("%s cannot connect to previous cluster %v", name, allPeers)
		}

		// TODO(yichengq): Think about the action that should be done
		// if it cannot connect to any of the previously known nodes.
		s.raftServer.Start()
		log.Debugf("%s is restarting the cluster %v", name, allPeers)
		return
	}

	// Attempt cluster discovery.
	if discoverURL != "" {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The node is registered in the discovery URL.
		if discoverErr == nil {
			// Start as a leader in a new cluster.
			if len(discoverPeers) == 0 {
				log.Debugf("%s is starting a new cluster via discovery service", name)
				s.startAsLeader()
			} else {
				log.Debugf("%s is joining a cluster %v via discovery service", name, discoverPeers)
				if err := s.startAsFollower(discoverPeers, s.Config.RetryTimes); err != nil {
					log.Fatal(err)
				}
			}
			return
		}

		log.Warnf("%s failed to connect to discovery service [%v]: %v", name, discoverURL, discoverErr)

		if len(peers) == 0 {
			log.Fatalf("%s, the new leader, must register itself with the discovery service as required", name)
		}
	}

	if len(peers) > 0 {
		if err := s.startAsFollower(peers, s.Config.RetryTimes); err != nil {
			log.Fatalf("%s cannot connect to existing cluster %v", name, peers)
		}
		return
	}

	log.Infof("%s is starting a new cluster.", s.Config.Name)
	s.startAsLeader()
	return
}
// Start the raft server
func (s *PeerServer) Start(snapshot bool, discoverURL string, peers []string) error {
	s.Lock()
	defer s.Unlock()

	// Load the snapshot, if requested.
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished loading snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	// TODO(yichengq): the client for HTTP API usage could use a transport
	// other than the raft one. That transport should have a longer timeout
	// because it doesn't have the fault tolerance of the raft protocol.
	s.client = NewClient(s.raftServer.Transporter().(*transporter).transport)

	s.raftServer.Init()

	// Set NOCOW for the data directory in btrfs.
	if btrfs.IsBtrfs(s.raftServer.LogPath()) {
		if err := btrfs.SetNOCOWFile(s.raftServer.LogPath()); err != nil {
			log.Warnf("Failed setting NOCOW: %v", err)
		}
	}

	s.findCluster(discoverURL, peers)

	s.closeChan = make(chan bool)

	s.startRoutine(s.monitorSync)
	s.startRoutine(s.monitorTimeoutThreshold)
	s.startRoutine(s.monitorActiveSize)
	s.startRoutine(s.monitorPeerActivity)

	// Start snapshot monitoring if snapshotting is enabled.
	if snapshot {
		s.startRoutine(s.monitorSnapshot)
	}

	return nil
}
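
// Stop signals all background routines to exit, stops the Raft server, and
// waits for the routines to finish.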
func (s *PeerServer) Stop() {
	s.Lock()
	defer s.Unlock()

	if s.closeChan != nil {
		close(s.closeChan)
	}
	s.raftServer.Stop()
	s.routineGroup.Wait()
	s.closeChan = nil
}
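
// HTTPHandler returns the router for the peer API: the internal Raft
// endpoints (vote, log append, snapshots, join/remove) plus the v2 admin
// endpoints.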
func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// Internal commands.
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")

	return router
}
// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}
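
// startAsLeader starts the Raft server and bootstraps a new cluster by
// committing a join command for this node itself.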
func (s *PeerServer) startAsLeader() {
	s.raftServer.Start()
	// The leader needs to join itself as a peer.
	for {
		c := &JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       s.raftServer.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		}
		if _, err := s.raftServer.Do(c); err == nil {
			break
		}
	}
	log.Debugf("%s starts as a leader", s.Config.Name)
}
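
// startAsFollower tries to join an existing cluster through the given peers,
// retrying up to retryTimes before giving up.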
func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) error {
	// Start as a follower in an existing cluster.
	for i := 0; ; i++ {
		if ok := s.joinCluster(cluster); ok {
			break
		}
		if i == retryTimes-1 {
			return fmt.Errorf("Cannot join the cluster via given peers after %d retries", retryTimes)
		}
		log.Warnf("%v is unable to join the cluster using any of the peers %v (attempt %d). Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		// Multiply before converting so that fractional intervals are not
		// truncated to zero.
		time.Sleep(time.Duration(s.Config.RetryInterval * float64(time.Second)))
	}

	s.raftServer.Start()
	return nil
}
// Upgradable checks whether all peers in the cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		// Rebuild the URL using the configured peer scheme.
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme}).String()
		ok, err := s.client.CheckVersion(checkURL, nextVersion)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}
// checkPeerAddressNonconflict checks whether the peer address is already
// registered under a different name.
func (s *PeerServer) checkPeerAddressNonconflict() bool {
	// The (name, peer address) pair already exists.
	if peerURL, ok := s.registry.PeerURL(s.Config.Name); ok {
		if peerURL == s.Config.URL {
			return true
		}
	}

	// Check all existing peer addresses.
	peerURLs := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for _, peerURL := range peerURLs {
		if peerURL == s.Config.URL {
			return false
		}
	}
	return true
}
// handleDiscovery runs discovery and returns the results in the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL, s.closeChan, s.startRoutine)

	// Warn about errors coming from discovery. This isn't fatal
	// since the user might have provided a peer list elsewhere,
	// or there may be a log in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)

	return
}
// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
	peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	log.Infof("Peer URLs in log: %s / %s (%s)", s.raftServer.Leader(), s.Config.Name, strings.Join(peers, ","))

	for i := range peers {
		u, err := url.Parse(peers[i])
		if err != nil {
			log.Debugf("getKnownPeers cannot parse url %v", peers[i])
			continue
		}
		peers[i] = u.Host
	}
	return peers
}
// removeSelfFromList removes the PeerServer's own URL from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
	// Remove its own peer address from the peer list to join.
	u, err := url.Parse(s.Config.URL)
	if err != nil {
		log.Fatalf("removeSelfFromList cannot parse peer address %v", s.Config.URL)
	}

	newPeers := make([]string, 0)
	for _, v := range peers {
		if v != u.Host {
			newPeers = append(newPeers, v)
		}
	}
	return newPeers
}
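
// joinCluster tries each peer in the given list until a join attempt
// succeeds. It returns false if no peer accepts the join request.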
func (s *PeerServer) joinCluster(cluster []string) bool {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme)
		if err == nil {
			log.Debugf("%s joined the cluster via peer %s", s.Config.Name, peer)
			return true
		}

		if _, ok := err.(etcdErr.Error); ok {
			log.Fatal(err)
		}

		log.Warnf("Attempt to join via %s failed: %s", peer, err)
	}

	return false
}
// joinByPeer sends a join request to the given peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
	u := (&url.URL{Host: peer, Scheme: scheme}).String()

	// Our version must match the leader's version.
	version, err := s.client.GetVersion(u)
	if err != nil {
		log.Debugf("failed to check join version")
		return err
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		log.Infof("failed version compatibility check (%d-%d) using %d", store.MinVersion(), store.MaxVersion(), version)
		return fmt.Errorf("incompatible version")
	}

	// Fetch the current peer list.
	machines, err := s.client.GetMachines(u)
	if err != nil {
		log.Debugf("failed to get machine messages")
		return err
	}
	exist := false
	for _, machine := range machines {
		if machine.Name == server.Name() {
			exist = true
			// TODO(yichengq): cannot set join index for it.
			// Need discussion about the best way to do it.
			//
			// if machine.PeerURL == s.Config.URL {
			// 	log.Infof("has joined the cluster(%v) before", machines)
			// 	return nil
			// }
			break
		}
	}

	// Fetch the cluster config to see whether there is room to join.
	clusterConfig, err := s.client.GetClusterConfig(u)
	if err != nil {
		log.Debugf("failed to get cluster config")
		return err
	}
	if !exist && clusterConfig.ActiveSize <= len(machines) {
		log.Infof("stop joining because the cluster is full with %d nodes", len(machines))
		return fmt.Errorf("out of quota")
	}

	joinIndex, err := s.client.AddMachine(u,
		&JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       server.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		})
	if err != nil {
		log.Debugf("join request failed")
		return err
	}

	s.joinIndex = joinIndex
	return nil
}
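
// Stats returns the server statistics (leader info and send/receive rates)
// encoded as JSON.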
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register a state listener to raft to change this field
	// rather than comparing the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)

	return b
}
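
// PeerStats returns follower statistics encoded as JSON when this node is
// the leader, and nil otherwise.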
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}
// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		var name = "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}
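
// recordMetricEvent records the duration carried by a Raft event in the
// timer metric named after the event type.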
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}
// logSnapshot logs about the snapshot that was taken.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}
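
// startRoutine runs f in a goroutine tracked by the routine group so that
// Stop can wait for it to exit.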
func (s *PeerServer) startRoutine(f func()) {
	s.routineGroup.Add(1)
	go func() {
		defer s.routineGroup.Done()
		f()
	}()
}
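
// monitorSnapshot periodically checks how many entries have been committed
// since the last snapshot and takes a new snapshot once the count exceeds
// the configured threshold.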
func (s *PeerServer) monitorSnapshot() {
	for {
		// Stop the timer explicitly; a deferred Stop inside the loop would
		// not run until the function returns.
		timer := time.NewTimer(s.snapConf.checkingInterval)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}
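
// monitorSync has the leader periodically commit a sync command carrying the
// current time.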
func (s *PeerServer) monitorSync() {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case now := <-ticker.C:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}
// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold() {
	for {
		select {
		case <-s.closeChan:
			return
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		}

		timer := time.NewTimer(ThresholdMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}
// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
	for {
		timer := time.NewTimer(ActiveMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peerCount := s.registry.Count()
		peers := s.registry.Names()
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should, remove one at random.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: removing: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
			}
			continue
		}
	}
}
// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity() {
	for {
		timer := time.NewTimer(PeerActivityMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is older than the promote
			// delay then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
				log.Infof("%s: removing node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&RemoveCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
			}
		}
	}
}
  657. }