peer_server.go

package server

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/third_party/github.com/goraft/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/pkg/btrfs"
	"github.com/coreos/etcd/store"
)

const (
	// ThresholdMonitorTimeout is the time between log notifications that the
	// Raft heartbeat is too close to the election timeout.
	ThresholdMonitorTimeout = 5 * time.Second

	// ActiveMonitorTimeout is the time between checks on the active size of
	// the cluster. If the actual cluster size is bigger than the target
	// active size then etcd attempts to demote a peer to bring the cluster
	// back to the correct size.
	ActiveMonitorTimeout = 1 * time.Second

	// PeerActivityMonitorTimeout is the time between checks for dead nodes in
	// the cluster.
	PeerActivityMonitorTimeout = 1 * time.Second

	// ClusterConfigKey is the location of the cluster config in the key space.
	ClusterConfigKey = "/_etcd/config"
)

type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}

type PeerServer struct {
	Config               PeerServerConfig
	client               *Client
	raftServer           raft.Server
	server               *Server
	followersStats       *raftFollowersStats
	serverStats          *raftServerStats
	registry             *Registry
	store                store.Store
	snapConf             *snapshotConf
	joinIndex            uint64
	isNewCluster         bool
	removedInLog         bool
	removeNotify         chan bool
	started              bool
	closeChan            chan bool
	routineGroup         sync.WaitGroup
	timeoutThresholdChan chan interface{}
	metrics              *metrics.Bucket
	sync.Mutex
}

// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd will check whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration
	// lastIndex is the index when the last snapshot happened.
	lastIndex uint64
	// If the number of new indexes since the last snapshot exceeds the
	// snapshot threshold, etcd will take a snapshot.
	snapshotThr uint64
}

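// NewPeerServer creates a new peer server with the given configuration and
// dependencies. The Raft server is attached separately via SetRaftServer.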
func NewPeerServer(psConfig PeerServerConfig, client *Client, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:               psConfig,
		client:               client,
		registry:             registry,
		store:                store,
		followersStats:       followersStats,
		serverStats:          serverStats,
		timeoutThresholdChan: make(chan interface{}, 1),
		metrics:              mb,
	}
	return s
}

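// SetRaftServer attaches the Raft server to the peer server, registers the
// event listeners used for logging and metrics, and optionally loads the
// latest snapshot before initializing the Raft server.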
func (s *PeerServer) SetRaftServer(raftServer raft.Server, snapshot bool) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// This is not accurate; we will update raft to provide an API.
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}

	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)
	raftServer.AddEventListener(raft.RemovedEventType, s.removedEvent)

	s.raftServer = raftServer
	s.removedInLog = false

	// Load the latest snapshot, if requested.
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished loading snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.Init()

	// Set NOCOW for the data directory when it lives on btrfs.
	if btrfs.IsBtrfs(s.raftServer.LogPath()) {
		if err := btrfs.SetNOCOWFile(s.raftServer.LogPath()); err != nil {
			log.Warnf("Failed setting NOCOW: %v", err)
		}
	}
}

func (s *PeerServer) SetRegistry(registry *Registry) {
	s.registry = registry
}

func (s *PeerServer) SetStore(store store.Store) {
	s.store = store
}

// FindCluster tries all possible ways to find clusters to join, including the
// log data in -data-dir, -discovery, and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
func (s *PeerServer) FindCluster(discoverURL string, peers []string) (toStart bool, possiblePeers []string, err error) {
	name := s.Config.Name
	isNewNode := s.raftServer.IsLogEmpty()

	// Try its best to find possible peers, and connect with them.
	if !isNewNode {
		// Joining the cluster with an existing peer address is not allowed.
		// This prevents an old node from rejoining with a different name by
		// mistake.
		if !s.checkPeerAddressNonconflict() {
			err = fmt.Errorf("%v is not allowed to join the cluster with existing URL %v", s.Config.Name, s.Config.URL)
			return
		}

		// Take old nodes into account.
		possiblePeers = s.getKnownPeers()

		// Discover registered peers.
		// TODO(yichengq): It may mess up discoverURL if this is
		// set wrong by mistake. This may need to refactor discovery
		// module. Fix it later.
		if discoverURL != "" {
			discoverPeers, _ := s.handleDiscovery(discoverURL)
			possiblePeers = append(possiblePeers, discoverPeers...)
		}
		possiblePeers = append(possiblePeers, peers...)
		possiblePeers = s.removeSelfFromList(possiblePeers)

		if s.removedInLog {
			return
		}

		// If there is a possible peer list, use it to find the cluster.
		if len(possiblePeers) > 0 {
			// TODO(yichengq): joinCluster may fail if there's no leader for
			// current cluster. It should wait if the cluster is under
			// leader election, or the node with changed IP cannot join
			// the cluster then.
			if rejected, ierr := s.startAsFollower(possiblePeers, 1); rejected {
				log.Debugf("%s should work as standby for the cluster %v: %v", name, possiblePeers, ierr)
				return
			} else if ierr != nil {
				log.Warnf("%s cannot connect to previous cluster %v: %v", name, possiblePeers, ierr)
			} else {
				log.Debugf("%s joins to the previous cluster %v", name, possiblePeers)
				toStart = true
				return
			}
		}

		// TODO(yichengq): Think about the action that should be done
		// if it cannot connect to any of the previously known nodes.
		log.Debugf("%s is restarting the cluster %v", name, possiblePeers)
		s.SetJoinIndex(s.raftServer.CommitIndex())
		toStart = true
		return
	}

	// Attempt cluster discovery.
	if discoverURL != "" {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The node is not registered at the discovery URL.
		if discoverErr != nil {
			log.Warnf("%s failed to connect discovery service[%v]: %v", name, discoverURL, discoverErr)
			if len(peers) == 0 {
				err = fmt.Errorf("%s, the new instance, must register itself to discovery service as required", name)
				return
			}
			log.Debugf("%s is joining peers %v from -peers flag", name, peers)
		} else {
			log.Debugf("%s is joining a cluster %v via discovery service", name, discoverPeers)
			peers = discoverPeers
		}
	}
	possiblePeers = peers

	if len(possiblePeers) > 0 {
		if rejected, ierr := s.startAsFollower(possiblePeers, s.Config.RetryTimes); rejected {
			log.Debugf("%s should work as standby for the cluster %v: %v", name, possiblePeers, ierr)
		} else if ierr != nil {
			log.Warnf("%s cannot connect to existing peers %v: %v", name, possiblePeers, ierr)
			err = ierr
		} else {
			toStart = true
		}
		return
	}

	// Start as the leader of a new cluster.
	s.isNewCluster = true
	log.Infof("%s is starting a new cluster", s.Config.Name)
	toStart = true
	return
}

// Start starts the raft server.
// The function assumes that join has been accepted successfully.
func (s *PeerServer) Start(snapshot bool, clusterConfig *ClusterConfig) error {
	s.Lock()
	defer s.Unlock()
	if s.started {
		return nil
	}
	s.started = true

	s.removeNotify = make(chan bool)
	s.closeChan = make(chan bool)

	s.raftServer.Start()
	if s.isNewCluster {
		s.InitNewCluster(clusterConfig)
		s.isNewCluster = false
	}

	s.startRoutine(s.monitorSync)
	s.startRoutine(s.monitorTimeoutThreshold)
	s.startRoutine(s.monitorActiveSize)
	s.startRoutine(s.monitorPeerActivity)

	// Start the snapshot monitor if snapshotting is enabled.
	if snapshot {
		s.startRoutine(s.monitorSnapshot)
	}

	return nil
}

// Stop stops the server gracefully.
func (s *PeerServer) Stop() {
	s.Lock()
	defer s.Unlock()
	if !s.started {
		return
	}
	s.started = false

	close(s.closeChan)
	// TODO(yichengq): it should also call async stop for raft server,
	// but this functionality has not been implemented.
	s.raftServer.Stop()
	s.routineGroup.Wait()
}

// asyncRemove stops the server in peer mode.
// It is called to stop the server internally when it has been removed
// from the cluster.
// The function triggers the stop action first to notify the server that it
// should not continue, and waits for the stop to finish in a separate
// goroutine because the caller should also exit.
func (s *PeerServer) asyncRemove() {
	s.Lock()
	if !s.started {
		s.Unlock()
		return
	}
	s.started = false

	close(s.closeChan)
	// TODO(yichengq): it should also call async stop for raft server,
	// but this functionality has not been implemented.
	go func() {
		s.raftServer.Stop()
		s.routineGroup.Wait()
		close(s.removeNotify)
		s.Unlock()
	}()
}

// RemoveNotify returns a channel that is closed when the server leaves peer
// mode due to removal from the cluster.
func (s *PeerServer) RemoveNotify() <-chan bool {
	return s.removeNotify
}

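// HTTPHandler returns the handler that serves the internal Raft RPCs and the
// admin API for cluster configuration and machine management.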
func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// Internal commands.
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.RemoveHttpHandler).Methods("DELETE")

	return router
}

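// SetJoinIndex sets the index at which this node joined the cluster.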
func (s *PeerServer) SetJoinIndex(joinIndex uint64) {
	s.joinIndex = joinIndex
}

// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	e, err := s.store.Get(ClusterConfigKey, false, false)
	// This is useful for backward compatibility because older versions
	// do not set the cluster config key.
	if err != nil {
		log.Debugf("failed getting cluster config key: %v", err)
		return NewClusterConfig()
	}

	var c ClusterConfig
	if err = json.Unmarshal([]byte(*e.Node.Value), &c); err != nil {
		log.Debugf("failed unmarshaling cluster config: %v", err)
		return NewClusterConfig()
	}
	return &c
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the cluster to add or remove machines
// to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
	// Set minimums.
	if c.ActiveSize < MinActiveSize {
		c.ActiveSize = MinActiveSize
	}
	if c.RemoveDelay < MinRemoveDelay {
		c.RemoveDelay = MinRemoveDelay
	}
	if c.SyncInterval < MinSyncInterval {
		c.SyncInterval = MinSyncInterval
	}

	log.Debugf("set cluster config as %v", c)
	b, _ := json.Marshal(c)
	s.store.Set(ClusterConfigKey, false, string(b), store.Permanent)
}

// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}

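// InitNewCluster bootstraps this node as the leader of a brand new cluster
// by joining itself as a peer and committing the initial cluster config.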
func (s *PeerServer) InitNewCluster(clusterConfig *ClusterConfig) {
	// The leader needs to join itself as a peer.
	s.doCommand(&JoinCommand{
		MinVersion: store.MinVersion(),
		MaxVersion: store.MaxVersion(),
		Name:       s.raftServer.Name(),
		RaftURL:    s.Config.URL,
		EtcdURL:    s.server.URL(),
	})
	log.Debugf("%s start as a leader", s.Config.Name)
	s.joinIndex = 1

	s.doCommand(&SetClusterConfigCommand{Config: clusterConfig})
	log.Debugf("%s sets cluster config as %v", s.Config.Name, clusterConfig)
}

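// doCommand applies the given command through the Raft server, retrying
// until it succeeds.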
func (s *PeerServer) doCommand(cmd raft.Command) {
	for {
		if _, err := s.raftServer.Do(cmd); err == nil {
			break
		}
	}
	log.Debugf("%s finished applying command %s", s.Config.Name, cmd.CommandName())
}

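// startAsFollower attempts to join an existing cluster as a follower,
// retrying up to retryTimes. The first return value reports whether the
// cluster rejected the join outright.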
func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) (bool, error) {
	// Start as a follower in an existing cluster.
	for i := 0; ; i++ {
		if rejected, err := s.joinCluster(cluster); rejected {
			return true, err
		} else if err == nil {
			return false, nil
		}
		if i == retryTimes-1 {
			break
		}
		log.Infof("%v is unable to join the cluster using any of the peers %v on attempt %d. Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		time.Sleep(time.Duration(s.Config.RetryInterval * float64(time.Second)))
	}
	return false, fmt.Errorf("fail joining the cluster via given peers after %d retries", retryTimes)
}

// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		// Rebuild the URL using the configured peer scheme.
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme}).String()
		ok, err := s.client.CheckVersion(checkURL, nextVersion)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}
	return nil
}

// checkPeerAddressNonconflict checks whether the peer address is already
// registered under a different name.
func (s *PeerServer) checkPeerAddressNonconflict() bool {
	// The (name, peer address) pair is already registered for this node.
	if peerURL, ok := s.registry.PeerURL(s.Config.Name); ok {
		if peerURL == s.Config.URL {
			return true
		}
	}

	// Check all existing peer addresses.
	peerURLs := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for _, peerURL := range peerURLs {
		if peerURL == s.Config.URL {
			return false
		}
	}
	return true
}

// handleDiscovery runs discovery and returns the results in the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL, s.closeChan, s.startRoutine)

	// Warn about errors coming from discovery. This isn't fatal since the
	// user might have provided a peer list elsewhere, or there may be some
	// log in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)
	return
}

// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
	peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	log.Infof("Peer URLs in log: %s / %s (%s)", s.raftServer.Leader(), s.Config.Name, strings.Join(peers, ","))

	for i := range peers {
		u, err := url.Parse(peers[i])
		if err != nil {
			log.Debugf("getKnownPeers cannot parse url %v", peers[i])
			continue
		}
		peers[i] = u.Host
	}
	return peers
}

// removeSelfFromList removes the URL of this peer server from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
	// Remove its own peer address from the peer list to join.
	u, err := url.Parse(s.Config.URL)
	if err != nil {
		log.Warnf("failed parsing self peer address %v", s.Config.URL)
		u = nil
	}

	newPeers := make([]string, 0)
	for _, v := range peers {
		if u == nil || v != u.Host {
			newPeers = append(newPeers, v)
		}
	}
	return newPeers
}

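// joinCluster tries to join the cluster through each peer in turn. The first
// return value reports whether a peer rejected the join outright.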
func (s *PeerServer) joinCluster(cluster []string) (bool, error) {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		if rejected, err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme); rejected {
			return true, fmt.Errorf("rejected by peer %s: %v", peer, err)
		} else if err == nil {
			log.Infof("%s joined the cluster via peer %s", s.Config.Name, peer)
			return false, nil
		} else {
			log.Infof("%s attempt to join via %s failed: %v", s.Config.Name, peer, err)
		}
	}
	return false, fmt.Errorf("unreachable cluster")
}

// joinByPeer sends a join request to the given peer.
// The first return value tells whether the join was rejected by the cluster directly.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) (bool, error) {
	u := (&url.URL{Host: peer, Scheme: scheme}).String()

	// Our version must match the leader's version.
	version, err := s.client.GetVersion(u)
	if err != nil {
		return false, fmt.Errorf("fail checking join version: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return true, fmt.Errorf("fail passing version compatibility(%d-%d) using %d", store.MinVersion(), store.MaxVersion(), version)
	}

	// Fetch the current peer list to check whether this node is already a member.
	machines, err := s.client.GetMachines(u)
	if err != nil {
		return false, fmt.Errorf("fail getting machine messages: %v", err)
	}
	exist := false
	for _, machine := range machines {
		if machine.Name == server.Name() {
			exist = true
			break
		}
	}

	// Fetch the cluster config to check whether there is room for this node.
	clusterConfig, err := s.client.GetClusterConfig(u)
	if err != nil {
		return false, fmt.Errorf("fail getting cluster config: %v", err)
	}
	if !exist && clusterConfig.ActiveSize <= len(machines) {
		return true, fmt.Errorf("stop joining because the cluster is full with %d nodes", len(machines))
	}

	joinIndex, err := s.client.AddMachine(u,
		&JoinCommand{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       server.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		})
	if err != nil {
		return err.ErrorCode == etcdErr.EcodeNoMorePeer, fmt.Errorf("fail on join request: %v", err)
	}

	s.joinIndex = joinIndex
	return false, nil
}

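// Stats returns the current server statistics, JSON-encoded.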
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Since(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register state listener to raft to change this field
	// rather than compare the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()
	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)
	return b
}

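// PeerStats returns the follower statistics, JSON-encoded, when this node is
// the leader; it returns nil otherwise.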
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}

// removedEvent handles the case where a machine has been removed from the
// cluster and is notified when it tries to become a candidate.
func (s *PeerServer) removedEvent(event raft.Event) {
	// HACK(philips): we need to find a better notification for this.
	log.Infof("removed during cluster re-configuration")
	s.asyncRemove()
}

// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		var name = "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}

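// recordMetricEvent records the duration carried by a Raft event in the
// corresponding metrics timer.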
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}

// logSnapshot logs the result of a snapshot attempt.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)
	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}

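// startRoutine runs f in a goroutine that is tracked by the routine group,
// so that Stop can wait for it to finish.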
func (s *PeerServer) startRoutine(f func()) {
	s.routineGroup.Add(1)
	go func() {
		defer s.routineGroup.Done()
		f()
	}()
}

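// monitorSnapshot periodically checks the commit index and takes a snapshot
// once enough new entries have accumulated since the last one.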
func (s *PeerServer) monitorSnapshot() {
	for {
		// Stop the timer explicitly; deferring inside the loop would leak
		// timers until the function returns.
		timer := time.NewTimer(s.snapConf.checkingInterval)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}

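// monitorSync issues a sync command with the current time twice per second
// while this node is the leader, so the store can clean up expired keys.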
func (s *PeerServer) monitorSync() {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop()
	for {
		select {
		case <-s.closeChan:
			return
		case now := <-ticker.C:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}

// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold() {
	for {
		select {
		case <-s.closeChan:
			return
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		}

		timer := time.NewTimer(ThresholdMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}
	}
}

// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swaps them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
	for {
		timer := time.NewTimer(ActiveMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peers := s.registry.Names()
		peerCount := len(peers)
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should, remove one of the
		// other peers at random.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: removing: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
			}
			continue
		}
	}
}

// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity() {
	for {
		timer := time.NewTimer(PeerActivityMonitorTimeout)
		select {
		case <-s.closeChan:
			timer.Stop()
			return
		case <-timer.C:
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		removeDelay := time.Duration(int64(s.ClusterConfig().RemoveDelay * float64(time.Second)))
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is older than the remove
			// delay then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > removeDelay {
				log.Infof("%s: removing node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&RemoveCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
				continue
			}
		}
	}
}