peer_server.go
package server

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/third_party/github.com/goraft/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/pkg/btrfs"
	"github.com/coreos/etcd/store"
)
const (
	// ThresholdMonitorTimeout is the time between log notifications that the
	// Raft heartbeat is too close to the election timeout.
	ThresholdMonitorTimeout = 5 * time.Second

	// ActiveMonitorTimeout is the time between checks on the active size of
	// the cluster. If the actual size is bigger than the target active size
	// then etcd attempts to demote a peer to reach the correct number.
	ActiveMonitorTimeout = 1 * time.Second

	// PeerActivityMonitorTimeout is the time between checks for dead nodes in
	// the cluster.
	PeerActivityMonitorTimeout = 1 * time.Second
)
type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}
type PeerServer struct {
	Config         PeerServerConfig
	clusterConfig  *ClusterConfig
	raftServer     raft.Server
	server         *Server
	joinIndex      uint64
	followersStats *raftFollowersStats
	serverStats    *raftServerStats
	registry       *Registry
	store          store.Store
	snapConf       *snapshotConf

	closeChan            chan bool
	routineGroup         sync.WaitGroup
	timeoutThresholdChan chan interface{}

	metrics *metrics.Bucket

	sync.Mutex
}
// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd will check whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration
	// The index at which the last snapshot happened.
	lastIndex uint64
	// If the number of entries committed since the last snapshot
	// exceeds snapshotThr, etcd will take a snapshot.
	snapshotThr uint64
}
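// NewPeerServer creates a PeerServer from the given configuration,
// registry, store, metrics bucket, and stats collectors.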
func NewPeerServer(psConfig PeerServerConfig, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:               psConfig,
		clusterConfig:        NewClusterConfig(),
		registry:             registry,
		store:                store,
		followersStats:       followersStats,
		serverStats:          serverStats,
		timeoutThresholdChan: make(chan interface{}, 1),

		metrics: mb,
	}

	return s
}
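// SetRaftServer attaches the underlying Raft server, configures the
// snapshot policy, and registers the event listeners used for logging
// and metrics.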
func (s *PeerServer) SetRaftServer(raftServer raft.Server) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// This is not accurate; we will update raft to provide an API.
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}

	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	s.raftServer = raftServer
}
// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	return s.clusterConfig
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the PeerServer to demote peers or
// promote standbys to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
	// Set minimums.
	if c.ActiveSize < MinActiveSize {
		c.ActiveSize = MinActiveSize
	}
	if c.PromoteDelay < MinPromoteDelay {
		c.PromoteDelay = MinPromoteDelay
	}

	s.clusterConfig = c
}
// findCluster tries all possible ways to find a cluster to join,
// including log data in -data-dir, -discovery, and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
//
// TODO(yichengq): RaftServer should be started as late as possible.
// The current way of starting it is not that good,
// and should be refactored later.
func (s *PeerServer) findCluster(discoverURL string, peers []string) {
	name := s.Config.Name
	isNewNode := s.raftServer.IsLogEmpty()

	// Try its best to find possible peers, and connect with them.
	if !isNewNode {
		// A node is not allowed to join the cluster with an existing peer address.
		// This prevents an old node from rejoining with a different name by mistake.
		if !s.checkPeerAddressNonconflict() {
			log.Fatalf("%v is not allowed to join the cluster with existing URL %v", s.Config.Name, s.Config.URL)
		}

		// Take old nodes into account.
		allPeers := s.getKnownPeers()
		// Discover registered peers.
		// TODO(yichengq): It may mess up discoverURL if this is
		// set wrong by mistake. This may need to refactor discovery
		// module. Fix it later.
		if discoverURL != "" {
			discoverPeers, _ := s.handleDiscovery(discoverURL)
			allPeers = append(allPeers, discoverPeers...)
		}
		allPeers = append(allPeers, peers...)
		allPeers = s.removeSelfFromList(allPeers)

		// If there is a possible peer list, use it to find the cluster.
		if len(allPeers) > 0 {
			// TODO(yichengq): joinCluster may fail if there's no leader for
			// the current cluster. It should wait if the cluster is under
			// leader election, or the node with changed IP cannot join
			// the cluster then.
			if err := s.startAsFollower(allPeers, 1); err == nil {
				log.Debugf("%s joins the previous cluster %v", name, allPeers)
				return
			}

			log.Warnf("%s cannot connect to previous cluster %v", name, allPeers)
		}

		// TODO(yichengq): Think about the action that should be done
		// if it cannot connect to any of the previously known nodes.
		s.raftServer.Start()
		log.Debugf("%s is restarting the cluster %v", name, allPeers)
		return
	}

	// Attempt cluster discovery.
	if discoverURL != "" {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The node is registered at the discovery URL.
		if discoverErr == nil {
			// Start as a leader in a new cluster.
			if len(discoverPeers) == 0 {
				log.Debugf("%s is starting a new cluster via discovery service", name)
				s.startAsLeader()
			} else {
				log.Debugf("%s is joining a cluster %v via discovery service", name, discoverPeers)
				if err := s.startAsFollower(discoverPeers, s.Config.RetryTimes); err != nil {
					log.Fatal(err)
				}
			}
			return
		}
		log.Warnf("%s failed to connect to discovery service [%v]: %v", name, discoverURL, discoverErr)

		if len(peers) == 0 {
			log.Fatalf("%s, the new leader, must register itself to discovery service as required", name)
		}
	}

	if len(peers) > 0 {
		if err := s.startAsFollower(peers, s.Config.RetryTimes); err != nil {
			log.Fatalf("%s cannot connect to existing cluster %v", name, peers)
		}
		return
	}

	log.Infof("%s is starting a new cluster.", s.Config.Name)
	s.startAsLeader()
	return
}
// Start starts the raft server: it optionally loads a snapshot, finds a
// cluster to join, and launches the background monitoring routines.
func (s *PeerServer) Start(snapshot bool, discoverURL string, peers []string) error {
	s.Lock()
	defer s.Unlock()

	// Load the snapshot, if requested.
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished loading snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.Init()

	// Set NOCOW for the data directory on btrfs.
	if btrfs.IsBtrfs(s.raftServer.LogPath()) {
		if err := btrfs.SetNOCOWFile(s.raftServer.LogPath()); err != nil {
			log.Warnf("Failed setting NOCOW: %v", err)
		}
	}

	s.findCluster(discoverURL, peers)

	s.closeChan = make(chan bool)

	s.startRoutine(s.monitorSync)
	s.startRoutine(s.monitorTimeoutThreshold)
	s.startRoutine(s.monitorActiveSize)
	s.startRoutine(s.monitorPeerActivity)

	// Periodically take snapshots, if enabled.
	if snapshot {
		s.startRoutine(s.monitorSnapshot)
	}

	return nil
}
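// Stop closes the close channel, stops the raft server, and waits for
// all background routines to exit.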
func (s *PeerServer) Stop() {
	s.Lock()
	defer s.Unlock()

	if s.closeChan != nil {
		close(s.closeChan)
	}
	s.raftServer.Stop()
	s.routineGroup.Wait()
	s.closeChan = nil
}
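// HTTPHandler returns the router that serves the internal Raft endpoints
// and the v2 admin API.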
func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// Internal commands.
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")

	return router
}
// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}
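// startAsLeader starts the raft server and joins this node to the new
// cluster, retrying until the join command succeeds.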
func (s *PeerServer) startAsLeader() {
	s.raftServer.Start()
	// The leader needs to join itself as a peer.
	for {
		c := &JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       s.raftServer.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		}
		if _, err := s.raftServer.Do(c); err == nil {
			break
		}
	}
	log.Debugf("%s starts as a leader", s.Config.Name)
}
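// startAsFollower tries to join an existing cluster, retrying up to
// retryTimes before giving up, and starts the raft server on success.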
func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) error {
	// Start as a follower in an existing cluster.
	for i := 0; ; i++ {
		ok := s.joinCluster(cluster)
		if ok {
			break
		}
		if i == retryTimes-1 {
			return fmt.Errorf("Cannot join the cluster via given peers after %d retries", retryTimes)
		}
		log.Warnf("%v is unable to join the cluster using any of the peers %v (attempt %d). Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		time.Sleep(time.Second * time.Duration(s.Config.RetryInterval))
	}

	s.raftServer.Start()
	return nil
}
// getVersion fetches the peer version of a cluster.
func getVersion(t *transporter, versionURL url.URL) (int, error) {
	resp, _, err := t.Get(versionURL.String())
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, err
	}

	// Parse the version number.
	version, err := strconv.Atoi(string(body))
	if err != nil {
		return 0, err
	}
	return version, nil
}
// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		t, _ := s.raftServer.Transporter().(*transporter)
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme, Path: fmt.Sprintf("/version/%d/check", nextVersion)}).String()
		resp, _, err := t.Get(checkURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot check version compatibility: %s", u.Host)
		}
		if resp.StatusCode != 200 {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}
// checkPeerAddressNonconflict checks whether the peer address is already
// registered under a different name.
func (s *PeerServer) checkPeerAddressNonconflict() bool {
	// The (name, peer address) pair already exists.
	if peerURL, ok := s.registry.PeerURL(s.Config.Name); ok {
		if peerURL == s.Config.URL {
			return true
		}
	}

	// Check all existing peer addresses.
	peerURLs := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for _, peerURL := range peerURLs {
		if peerURL == s.Config.URL {
			return false
		}
	}
	return true
}
// handleDiscovery performs discovery and returns the results in the
// expected format (a list of host:port peer addresses).
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL, s.closeChan, s.startRoutine)

	// Warn about errors coming from discovery; this isn't fatal
	// since the user might have provided a peer list elsewhere,
	// or there is some log in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)

	return
}
// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
	peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	log.Infof("Peer URLs in log: %s / %s (%s)", s.raftServer.Leader(), s.Config.Name, strings.Join(peers, ","))

	for i := range peers {
		u, err := url.Parse(peers[i])
		if err != nil {
			log.Debugf("getKnownPeers cannot parse url %v", peers[i])
			continue
		}
		peers[i] = u.Host
	}
	return peers
}
// removeSelfFromList removes the peerServer's own URL from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
	// Remove its own peer address from the peer list to join.
	u, err := url.Parse(s.Config.URL)
	if err != nil {
		log.Fatalf("removeSelfFromList cannot parse peer address %v", s.Config.URL)
	}

	newPeers := make([]string, 0)
	for _, v := range peers {
		if v != u.Host {
			newPeers = append(newPeers, v)
		}
	}
	return newPeers
}
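// joinCluster attempts to join the cluster via each peer in turn,
// returning true as soon as one join attempt succeeds.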
func (s *PeerServer) joinCluster(cluster []string) bool {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme)
		if err == nil {
			log.Debugf("%s joined the cluster via peer %s", s.Config.Name, peer)
			return true
		}

		if _, ok := err.(etcdErr.Error); ok {
			log.Fatal(err)
		}

		log.Warnf("Attempt to join via %s failed: %s", peer, err)
	}

	return false
}
// joinByPeer sends a join request to the given peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
	// t must be ok
	t, _ := server.Transporter().(*transporter)

	// Our version must match the leader's version.
	versionURL := url.URL{Host: peer, Scheme: scheme, Path: "/version"}
	version, err := getVersion(t, versionURL)
	if err != nil {
		return fmt.Errorf("Error during join version check: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return fmt.Errorf("Unable to join: cluster version is %d; version compatibility is %d - %d", version, store.MinVersion(), store.MaxVersion())
	}

	var b bytes.Buffer
	c := &JoinCommand{
		MinVersion: store.MinVersion(),
		MaxVersion: store.MaxVersion(),
		Name:       server.Name(),
		RaftURL:    s.Config.URL,
		EtcdURL:    s.server.URL(),
	}
	json.NewEncoder(&b).Encode(c)

	joinURL := url.URL{Host: peer, Scheme: scheme, Path: "/join"}

	log.Infof("Send Join Request to %s", joinURL.String())

	req, _ := http.NewRequest("PUT", joinURL.String(), &b)
	resp, err := t.client.Do(req)

	for {
		if err != nil {
			return fmt.Errorf("Unable to join: %v", err)
		}
		if resp != nil {
			defer resp.Body.Close()

			log.Infof("Join response status code: %d", resp.StatusCode)

			if resp.StatusCode == http.StatusOK {
				b, _ := ioutil.ReadAll(resp.Body)
				s.joinIndex, _ = binary.Uvarint(b)
				return nil
			}
			if resp.StatusCode == http.StatusTemporaryRedirect {
				address := resp.Header.Get("Location")
				log.Debugf("Send Join Request to %s", address)

				c := &JoinCommand{
					MinVersion: store.MinVersion(),
					MaxVersion: store.MaxVersion(),
					Name:       server.Name(),
					RaftURL:    s.Config.URL,
					EtcdURL:    s.server.URL(),
				}
				json.NewEncoder(&b).Encode(c)
				resp, _, err = t.Put(address, &b)
			} else if resp.StatusCode == http.StatusBadRequest {
				log.Debug("Reached the max number of peers in the cluster")
				decoder := json.NewDecoder(resp.Body)
				err := &etcdErr.Error{}
				decoder.Decode(err)
				return *err
			} else {
				return fmt.Errorf("Unable to join")
			}
		}
	}
}
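// Stats returns the raft server statistics, encoded as JSON.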
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register a state listener to raft to change this field
	// rather than comparing the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)

	return b
}
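// PeerStats returns the follower statistics as JSON when this node is the
// leader, or nil otherwise.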
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}
// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		var name = "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}
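// recordMetricEvent records the duration carried by a Raft event (e.g. a
// heartbeat) in the corresponding metrics timer.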
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}
// logSnapshot logs the result of a snapshot attempt.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}
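// startRoutine runs f in a goroutine tracked by the routine group so that
// Stop can wait for it to finish.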
func (s *PeerServer) startRoutine(f func()) {
	s.routineGroup.Add(1)
	go func() {
		defer s.routineGroup.Done()
		f()
	}()
}
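// monitorSnapshot periodically checks the commit index and takes a
// snapshot once enough new entries have accumulated since the last one.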
func (s *PeerServer) monitorSnapshot() {
	for {
		// Stop the timer inline rather than deferring it, since defers
		// would otherwise accumulate on every loop iteration.
		timer := time.NewTimer(s.snapConf.checkingInterval)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}
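// monitorSync has the leader periodically propose a sync command carrying
// the current time through the replicated log.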
func (s *PeerServer) monitorSync() {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case now := <-ticker.C:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}
// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold() {
	for {
		select {
		case <-s.closeChan:
			return
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		}

		// Wait out the monitor interval before logging again; stop the
		// timer inline to avoid accumulating defers in the loop.
		timer := time.NewTimer(ThresholdMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}
// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
	for {
		timer := time.NewTimer(ActiveMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve the target active size and the actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peers := s.registry.Names()
		peerCount := s.registry.Count()
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should, then remove one.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: removing: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
			}
			continue
		}
	}
}
// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity() {
	for {
		timer := time.NewTimer(PeerActivityMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check the last activity for all peers.
		now := time.Now()
		promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is older than the promote
			// delay then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
				log.Infof("%s: removing node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&RemoveCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
				continue
			}
		}
	}
}