peer_server.go

package server

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/third_party/github.com/goraft/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/pkg/btrfs"
	"github.com/coreos/etcd/store"
)
const (
	// MaxHeartbeatTimeoutBackoff is the maximum amount of time before we warn
	// the user again about a peer not accepting heartbeats.
	MaxHeartbeatTimeoutBackoff = 15 * time.Second

	// ThresholdMonitorTimeout is the time between log notifications that the
	// Raft heartbeat is too close to the election timeout.
	ThresholdMonitorTimeout = 5 * time.Second

	// ActiveMonitorTimeout is the time between checks on the active size of
	// the cluster. If the actual number of peers is larger than the configured
	// active size then etcd attempts to demote machines to bring it back to
	// the correct number.
	ActiveMonitorTimeout = 1 * time.Second

	// PeerActivityMonitorTimeout is the time between checks for dead nodes in
	// the cluster.
	PeerActivityMonitorTimeout = 1 * time.Second

	// ClusterConfigKey is the location of the cluster config in the key space.
	ClusterConfigKey = "/_etcd/config"
)
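// PeerServerConfig holds the static configuration used to build a PeerServer:
// the node name, the scheme and URL it advertises to peers, the snapshot
// threshold, and the join retry policy.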
type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}
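// PeerServer is the Raft-facing side of an etcd node. It owns the Raft
// server, serves the peer and admin HTTP endpoints, and runs the background
// monitors (sync, snapshot, active size, and peer activity).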
type PeerServer struct {
	Config         PeerServerConfig
	client         *Client
	raftServer     raft.Server
	server         *Server
	followersStats *raftFollowersStats
	serverStats    *raftServerStats
	registry       *Registry
	store          store.Store
	snapConf       *snapshotConf

	joinIndex    uint64
	isNewCluster bool
	removedInLog bool

	removeNotify         chan bool
	started              bool
	closeChan            chan bool
	routineGroup         sync.WaitGroup
	timeoutThresholdChan chan interface{}

	logBackoffs map[string]*logBackoff

	metrics *metrics.Bucket
	sync.Mutex
}
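// logBackoff tracks per-peer heartbeat-timeout logging: the earliest time the
// next warning may be emitted, the current backoff between warnings, and the
// number of timeouts observed since the last warning.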
type logBackoff struct {
	next    time.Time
	backoff time.Duration
	count   int
}
// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd will check whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration

	// The index at which the last snapshot happened.
	lastIndex uint64

	// If the number of entries committed since the last snapshot exceeds the
	// snapshot threshold, etcd will take a new snapshot.
	snapshotThr uint64
}
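// NewPeerServer creates a PeerServer from the given configuration and
// collaborators. The Raft server itself is attached later via SetRaftServer.
//
// Typical wiring (a sketch; the variable names below are illustrative and not
// defined in this package):
//
//	ps := NewPeerServer(psConfig, client, registry, st, &bucket, followersStats, serverStats)
//	ps.SetServer(etcdServer)
//	ps.SetRaftServer(raftServer, useSnapshot)
//	ps.Start(useSnapshot, clusterConfig)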
func NewPeerServer(psConfig PeerServerConfig, client *Client, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:         psConfig,
		client:         client,
		registry:       registry,
		store:          store,
		followersStats: followersStats,
		serverStats:    serverStats,

		timeoutThresholdChan: make(chan interface{}, 1),

		logBackoffs: make(map[string]*logBackoff),

		metrics: mb,
	}

	return s
}
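// SetRaftServer attaches the underlying Raft server, wires up event listeners
// for logging and metrics, optionally loads the latest snapshot, and
// initializes the Raft log (marking the data directory NOCOW when on btrfs).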
func (s *PeerServer) SetRaftServer(raftServer raft.Server, snapshot bool) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// this is not accurate, we will update raft to provide an api
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}

	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	raftServer.AddEventListener(raft.RemovedEventType, s.removedEvent)

	s.raftServer = raftServer
	s.removedInLog = false

	// Load the latest snapshot if requested.
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished loading snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.Init()

	// Set NOCOW for the data directory when it lives on btrfs.
	if btrfs.IsBtrfs(s.raftServer.LogPath()) {
		if err := btrfs.SetNOCOWFile(s.raftServer.LogPath()); err != nil {
			log.Warnf("Failed setting NOCOW: %v", err)
		}
	}
}
func (s *PeerServer) SetRegistry(registry *Registry) {
	s.registry = registry
}

func (s *PeerServer) SetStore(store store.Store) {
	s.store = store
}
// FindCluster tries all possible ways to find a cluster to join, including
// the log data in -data-dir, -discovery and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
func (s *PeerServer) FindCluster(discoverURL string, peers []string) (toStart bool, possiblePeers []string, err error) {
	name := s.Config.Name
	isNewNode := s.raftServer.IsLogEmpty()

	// Try its best to find possible peers, and connect with them.
	if !isNewNode {
		// It is not allowed to join the cluster with an existing peer address.
		// This prevents an old node from joining with a different name by mistake.
		if !s.checkPeerAddressNonconflict() {
			err = fmt.Errorf("%v is not allowed to join the cluster with existing URL %v", s.Config.Name, s.Config.URL)
			return
		}

		// Take old nodes into account.
		possiblePeers = s.getKnownPeers()
		// Discover registered peers.
		// TODO(yichengq): It may mess up discoverURL if this is
		// set wrong by mistake. This may need to refactor discovery
		// module. Fix it later.
		if discoverURL != "" {
			discoverPeers, _ := s.handleDiscovery(discoverURL)
			possiblePeers = append(possiblePeers, discoverPeers...)
		}
		possiblePeers = append(possiblePeers, peers...)
		possiblePeers = s.removeSelfFromList(possiblePeers)

		if s.removedInLog {
			return
		}

		// If there is a possible peer list, use it to find the cluster.
		if len(possiblePeers) > 0 {
			// TODO(yichengq): joinCluster may fail if there's no leader for
			// current cluster. It should wait if the cluster is under
			// leader election, or the node with changed IP cannot join
			// the cluster then.
			if rejected, ierr := s.startAsFollower(possiblePeers, 1); rejected {
				log.Debugf("%s should work as standby for the cluster %v: %v", name, possiblePeers, ierr)
				return
			} else if ierr != nil {
				log.Warnf("%s cannot connect to previous cluster %v: %v", name, possiblePeers, ierr)
			} else {
				log.Debugf("%s joined the previous cluster %v", name, possiblePeers)
				toStart = true
				return
			}
		}

		// TODO(yichengq): Think about the action that should be done
		// if it cannot connect to any of the previously known nodes.
		log.Debugf("%s is restarting the cluster %v", name, possiblePeers)
		s.SetJoinIndex(s.raftServer.CommitIndex())
		toStart = true
		return
	}

	// Attempt cluster discovery.
	if discoverURL != "" {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The instance is not registered in the discovery URL.
		if discoverErr != nil {
			log.Warnf("%s failed to connect to discovery service [%v]: %v", name, discoverURL, discoverErr)

			if len(peers) == 0 {
				err = fmt.Errorf("%s, the new instance, must register itself to discovery service as required", name)
				return
			}
			log.Debugf("%s is joining peers %v from -peers flag", name, peers)
		} else {
			log.Debugf("%s is joining a cluster %v via discovery service", name, discoverPeers)
			peers = discoverPeers
		}
	}
	possiblePeers = peers

	if len(possiblePeers) > 0 {
		if rejected, ierr := s.startAsFollower(possiblePeers, s.Config.RetryTimes); rejected {
			log.Debugf("%s should work as standby for the cluster %v: %v", name, possiblePeers, ierr)
		} else if ierr != nil {
			log.Warnf("%s cannot connect to existing peers %v: %v", name, possiblePeers, ierr)
			err = ierr
		} else {
			toStart = true
		}
		return
	}

	// Start as the leader of a new cluster.
	s.isNewCluster = true
	log.Infof("%s is starting a new cluster", s.Config.Name)
	toStart = true
	return
}
// Start starts the raft server.
// The function assumes that join has been accepted successfully.
func (s *PeerServer) Start(snapshot bool, clusterConfig *ClusterConfig) error {
	s.Lock()
	defer s.Unlock()
	if s.started {
		return nil
	}
	s.started = true

	s.removeNotify = make(chan bool)
	s.closeChan = make(chan bool)

	s.raftServer.Start()
	if s.isNewCluster {
		s.InitNewCluster(clusterConfig)
		s.isNewCluster = false
	}

	s.startRoutine(s.monitorSync)
	s.startRoutine(s.monitorTimeoutThreshold)
	s.startRoutine(s.monitorActiveSize)
	s.startRoutine(s.monitorPeerActivity)

	// Start the snapshot monitor if snapshotting is enabled.
	if snapshot {
		s.startRoutine(s.monitorSnapshot)
	}

	return nil
}
// Stop stops the server gracefully.
func (s *PeerServer) Stop() {
	s.Lock()
	defer s.Unlock()
	if !s.started {
		return
	}
	s.started = false

	close(s.closeChan)
	// TODO(yichengq): it should also call async stop for raft server,
	// but this functionality has not been implemented.
	s.raftServer.Stop()
	s.routineGroup.Wait()
}
// asyncRemove stops the server in peer mode.
// It is called to stop the server internally when it has been removed
// from the cluster.
// The function triggers the stop action first to notify the server that it
// should not continue, and waits for the stop to finish in a separate
// goroutine because the caller should also exit.
func (s *PeerServer) asyncRemove() {
	s.Lock()
	if !s.started {
		s.Unlock()
		return
	}
	s.started = false

	close(s.closeChan)
	// TODO(yichengq): it should also call async stop for raft server,
	// but this functionality has not been implemented.
	go func() {
		s.raftServer.Stop()
		s.routineGroup.Wait()
		close(s.removeNotify)
		s.Unlock()
	}()
}

// RemoveNotify notifies when the server is removed from peer mode due to
// removal from the cluster.
func (s *PeerServer) RemoveNotify() <-chan bool {
	return s.removeNotify
}
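// HTTPHandler returns the router for the peer (Raft) endpoints such as /join,
// /vote, /log/append and /snapshot, plus the /v2/admin configuration and
// machine management endpoints.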
func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// internal commands
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.RemoveHttpHandler).Methods("DELETE")

	return router
}
func (s *PeerServer) SetJoinIndex(joinIndex uint64) {
	s.joinIndex = joinIndex
}

// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	e, err := s.store.Get(ClusterConfigKey, false, false)
	// Fall back to the default config for backward compatibility:
	// older versions do not set the cluster config key.
	if err != nil {
		log.Debugf("failed getting cluster config key: %v", err)
		return NewClusterConfig()
	}

	var c ClusterConfig
	if err = json.Unmarshal([]byte(*e.Node.Value), &c); err != nil {
		log.Debugf("failed unmarshaling cluster config: %v", err)
		return NewClusterConfig()
	}
	return &c
}
// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the cluster to add or remove machines
// to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
	// Set minimums.
	if c.ActiveSize < MinActiveSize {
		c.ActiveSize = MinActiveSize
	}
	if c.RemoveDelay < MinRemoveDelay {
		c.RemoveDelay = MinRemoveDelay
	}
	if c.SyncInterval < MinSyncInterval {
		c.SyncInterval = MinSyncInterval
	}

	log.Debugf("set cluster config as %v", c)
	b, _ := json.Marshal(c)
	s.store.Set(ClusterConfigKey, false, string(b), store.Permanent)
}
// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}
func (s *PeerServer) InitNewCluster(clusterConfig *ClusterConfig) {
	// The leader needs to join itself as a peer.
	s.doCommand(&JoinCommand{
		MinVersion: store.MinVersion(),
		MaxVersion: store.MaxVersion(),
		Name:       s.raftServer.Name(),
		RaftURL:    s.Config.URL,
		EtcdURL:    s.server.URL(),
	})
	log.Debugf("%s starts as a leader", s.Config.Name)
	s.joinIndex = 1

	s.doCommand(&SetClusterConfigCommand{Config: clusterConfig})
	log.Debugf("%s sets cluster config as %v", s.Config.Name, clusterConfig)
}
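// doCommand retries the given command against the local Raft server until it
// is applied successfully. Within this file it is only used by InitNewCluster,
// where this node is expected to be the leader of a brand-new cluster.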
func (s *PeerServer) doCommand(cmd raft.Command) {
	for {
		if _, err := s.raftServer.Do(cmd); err == nil {
			break
		}
	}
	log.Debugf("%s executed command %s", s.Config.Name, cmd.CommandName())
}
func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) (bool, error) {
	// Start as a follower in an existing cluster.
	for i := 0; ; i++ {
		if rejected, err := s.joinCluster(cluster); rejected {
			return true, err
		} else if err == nil {
			return false, nil
		}
		if i == retryTimes-1 {
			break
		}
		log.Infof("%v is unable to join the cluster using any of the peers %v (attempt %d). Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		time.Sleep(time.Second * time.Duration(s.Config.RetryInterval))
	}

	return false, fmt.Errorf("fail joining the cluster via given peers after %d retries", retryTimes)
}
// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		// Rebuild the URL with the peer server's scheme (named checkURL to
		// avoid shadowing the url package).
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme}).String()
		ok, err := s.client.CheckVersion(checkURL, nextVersion)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}
// checkPeerAddressNonconflict checks whether the peer address is already
// registered under a different name.
func (s *PeerServer) checkPeerAddressNonconflict() bool {
	// The (name, peer address) pair is already registered for this node.
	if peerURL, ok := s.registry.PeerURL(s.Config.Name); ok {
		if peerURL == s.Config.URL {
			return true
		}
	}

	// Check all existing peer addresses.
	peerURLs := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for _, peerURL := range peerURLs {
		if peerURL == s.Config.URL {
			return false
		}
	}
	return true
}
// handleDiscovery is a helper that runs discovery and returns the results in
// the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL, s.closeChan, s.startRoutine)

	// Warn about errors coming from discovery, this isn't fatal
	// since the user might have provided a peer list elsewhere,
	// or there is some log in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)

	return
}
// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
	peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	log.Infof("Peer URLs in log: %s / %s (%s)", s.raftServer.Leader(), s.Config.Name, strings.Join(peers, ","))

	for i := range peers {
		u, err := url.Parse(peers[i])
		if err != nil {
			// Skip unparseable URLs instead of dereferencing a nil *url.URL.
			log.Debugf("getKnownPeers cannot parse url %v", peers[i])
			continue
		}
		peers[i] = u.Host
	}
	return peers
}
// removeSelfFromList removes the peer server's own URL from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
	// Remove its own peer address from the peer list to join.
	u, err := url.Parse(s.Config.URL)
	if err != nil {
		log.Warnf("failed parsing self peer address %v", s.Config.URL)
		u = nil
	}

	newPeers := make([]string, 0)
	for _, v := range peers {
		if u == nil || v != u.Host {
			newPeers = append(newPeers, v)
		}
	}
	return newPeers
}
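// joinCluster tries to join an existing cluster through each of the given
// peers in turn. The first return value reports whether the join was
// explicitly rejected (for example, because the cluster is already full), in
// which case the caller falls back to standby behavior.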
func (s *PeerServer) joinCluster(cluster []string) (bool, error) {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		if rejected, err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme); rejected {
			return true, fmt.Errorf("rejected by peer %s: %v", peer, err)
		} else if err == nil {
			log.Infof("%s joined the cluster via peer %s", s.Config.Name, peer)
			return false, nil
		} else {
			log.Infof("%s's attempt to join via %s failed: %v", s.Config.Name, peer, err)
		}
	}

	return false, fmt.Errorf("unreachable cluster")
}
// joinByPeer sends a join request to the given peer.
// The first return value tells whether the join was rejected by the cluster directly.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) (bool, error) {
	u := (&url.URL{Host: peer, Scheme: scheme}).String()

	// Our version must be compatible with the leader's version.
	version, err := s.client.GetVersion(u)
	if err != nil {
		return false, fmt.Errorf("fail checking join version: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return true, fmt.Errorf("fail passing version compatibility(%d-%d) using %d", store.MinVersion(), store.MaxVersion(), version)
	}

	// Fetch the current peer list.
	machines, err := s.client.GetMachines(u)
	if err != nil {
		return false, fmt.Errorf("fail getting machine messages: %v", err)
	}
	exist := false
	for _, machine := range machines {
		if machine.Name == server.Name() {
			exist = true
			break
		}
	}

	// Fetch the cluster config to see whether there is room to join.
	clusterConfig, err := s.client.GetClusterConfig(u)
	if err != nil {
		return false, fmt.Errorf("fail getting cluster config: %v", err)
	}
	if !exist && clusterConfig.ActiveSize <= len(machines) {
		return true, fmt.Errorf("stop joining because the cluster is full with %d nodes", len(machines))
	}

	joinIndex, err := s.client.AddMachine(u,
		&JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       server.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		})
	if err != nil {
		return err.ErrorCode == etcdErr.EcodeNoMorePeer, fmt.Errorf("fail on join request: %v", err)
	}

	s.joinIndex = joinIndex
	return false, nil
}
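// Stats returns the current server statistics, encoded as JSON.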
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.StartTime).String()

	// TODO: register state listener to raft to change this field
	// rather than compare the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)

	return b
}
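// PeerStats returns the follower statistics as JSON when this node is the
// leader, and nil otherwise.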
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}
// removedEvent handles the case where a machine has been removed from the
// cluster and finds out about it when it tries to become a candidate.
func (s *PeerServer) removedEvent(event raft.Event) {
	// HACK(philips): we need to find a better notification for this.
	log.Infof("removed during cluster re-configuration")
	s.asyncRemove()
}
// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		peer, ok := value.(*raft.Peer)
		if !ok {
			log.Warnf("%s: heartbeat timeout from unknown peer", s.Config.Name)
			return
		}
		s.logHeartbeatTimeout(peer)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}
// logHeartbeatTimeout logs the edge-triggered heartbeat timeout event,
// but only if we haven't warned within a reasonable interval.
func (s *PeerServer) logHeartbeatTimeout(peer *raft.Peer) {
	b, ok := s.logBackoffs[peer.Name]
	if !ok {
		b = &logBackoff{time.Time{}, time.Second, 1}
		s.logBackoffs[peer.Name] = b
	}

	// Reset the backoff if the peer has been active since the last warning.
	if peer.LastActivity().After(b.next) {
		b.next = time.Time{}
		b.backoff = time.Second
		b.count = 1
	}

	if b.next.After(time.Now()) {
		b.count++
		return
	}

	b.backoff = 2 * b.backoff
	if b.backoff > MaxHeartbeatTimeoutBackoff {
		b.backoff = MaxHeartbeatTimeoutBackoff
	}
	b.next = time.Now().Add(b.backoff)

	log.Infof("%s: warning: heartbeat timed out: peer=%q missed=%d backoff=%q", s.Config.Name, peer.Name, b.count, b.backoff)
}
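// recordMetricEvent records the duration carried by a Raft event (currently
// only heartbeat events) in the metrics timer named after the event type.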
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}
// logSnapshot logs the snapshot that was taken.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}
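// startRoutine runs f in a goroutine tracked by routineGroup so that Stop and
// asyncRemove can wait for all background routines to finish.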
func (s *PeerServer) startRoutine(f func()) {
	s.routineGroup.Add(1)
	go func() {
		defer s.routineGroup.Done()
		f()
	}()
}
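// monitorSnapshot periodically checks how many entries have been committed
// since the last snapshot and asks Raft to take a new snapshot once the
// configured threshold is exceeded.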
func (s *PeerServer) monitorSnapshot() {
	for {
		timer := time.NewTimer(s.snapConf.checkingInterval)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}
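// monitorSync has the leader commit a sync command carrying the current time
// every 500ms; the store uses this time for time-based operations such as key
// expiration.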
func (s *PeerServer) monitorSync() {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case now := <-ticker.C:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}
// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold() {
	ticker := time.NewTicker(ThresholdMonitorTimeout)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		}

		select {
		case <-s.closeChan:
			return
		case <-ticker.C:
		}
	}
}
// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
	ticker := time.NewTicker(ActiveMonitorTimeout)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case <-ticker.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peers := s.registry.Names()
		peerCount := len(peers)
		// Exclude this node from the list of removal candidates.
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should then remove one at random.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: removing node: %v; peer number %d > expected size %d", s.Config.Name, peer, peerCount, activeSize)
			if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
			}
			continue
		}
	}
}
// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity() {
	ticker := time.NewTicker(PeerActivityMonitorTimeout)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case <-ticker.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		removeDelay := time.Duration(int64(s.ClusterConfig().RemoveDelay * float64(time.Second)))
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is longer ago than the remove
			// delay then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > removeDelay {
				log.Infof("%s: removing node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&RemoveCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
				continue
			}
		}
	}
}