peer_server.go

package server

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/url"
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/coreos/etcd/third_party/github.com/goraft/raft"
    "github.com/coreos/etcd/third_party/github.com/gorilla/mux"

    "github.com/coreos/etcd/discovery"
    etcdErr "github.com/coreos/etcd/error"
    "github.com/coreos/etcd/log"
    "github.com/coreos/etcd/metrics"
    "github.com/coreos/etcd/pkg/btrfs"
    "github.com/coreos/etcd/store"
)

const (
    // ThresholdMonitorTimeout is the time between log notifications that the
    // Raft heartbeat is too close to the election timeout.
    ThresholdMonitorTimeout = 5 * time.Second

    // ActiveMonitorTimeout is the time between checks on the active size of
    // the cluster. If the active size is different than the actual size then
    // etcd attempts to promote/demote to bring it to the correct number.
    ActiveMonitorTimeout = 1 * time.Second

    // PeerActivityMonitorTimeout is the time between checks for dead nodes in
    // the cluster.
    PeerActivityMonitorTimeout = 1 * time.Second
)

const (
    peerModeFlag    = 0
    standbyModeFlag = 1
)

type PeerServerConfig struct {
    Name          string
    Scheme        string
    URL           string
    SnapshotCount int
    RetryTimes    int
    RetryInterval float64
}

type PeerServer struct {
    Config         PeerServerConfig
    clusterConfig  *ClusterConfig
    raftServer     raft.Server
    server         *Server
    joinIndex      uint64
    followersStats *raftFollowersStats
    serverStats    *raftServerStats
    registry       *Registry
    store          store.Store
    snapConf       *snapshotConf
    mode           Mode

    closeChan            chan bool
    routineGroup         sync.WaitGroup
    timeoutThresholdChan chan interface{}

    standbyPeerURL   string
    standbyClientURL string

    metrics *metrics.Bucket
    sync.Mutex
}

// TODO: find a good policy for taking snapshots.
type snapshotConf struct {
    // Etcd checks whether a snapshot is needed every checkingInterval.
    checkingInterval time.Duration

    // The index at which the last snapshot happened.
    lastIndex uint64

    // If the number of entries committed since the last snapshot exceeds the
    // snapshot threshold, etcd takes a snapshot.
    snapshotThr uint64
}

func NewPeerServer(psConfig PeerServerConfig, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
    s := &PeerServer{
        Config:               psConfig,
        clusterConfig:        NewClusterConfig(),
        registry:             registry,
        store:                store,
        followersStats:       followersStats,
        serverStats:          serverStats,
        timeoutThresholdChan: make(chan interface{}, 1),

        metrics: mb,
    }
    return s
}

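// SetRaftServer attaches the Raft server to the peer server, seeds the
// snapshot configuration from the current commit index, and registers the
// event listeners used for logging and metrics.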
func (s *PeerServer) SetRaftServer(raftServer raft.Server) {
    s.snapConf = &snapshotConf{
        checkingInterval: time.Second * 3,
        // this is not accurate, we will update raft to provide an api
        lastIndex:   raftServer.CommitIndex(),
        snapshotThr: uint64(s.Config.SnapshotCount),
    }

    raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
    raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
    raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
    raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
    raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
    raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
    raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

    raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

    s.raftServer = raftServer
}

// Mode retrieves the current mode of the server.
func (s *PeerServer) Mode() Mode {
    return s.mode
}

// setMode updates the current mode of the server.
// Switching to peer mode will start the Raft server.
// Switching to standby mode will stop the Raft server.
func (s *PeerServer) setMode(mode Mode) {
    s.mode = mode

    switch mode {
    case PeerMode:
        if !s.raftServer.Running() {
            s.raftServer.Start()
        }
    case StandbyMode:
        if s.raftServer.Running() {
            s.raftServer.Stop()
        }
    }
}

// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
    return s.clusterConfig
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the PeerServer to demote peers or
// promote standbys to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
    // Set minimums.
    if c.ActiveSize < MinActiveSize {
        c.ActiveSize = MinActiveSize
    }
    if c.PromoteDelay < MinPromoteDelay {
        c.PromoteDelay = MinPromoteDelay
    }

    s.clusterConfig = c
}

// findCluster tries all possible ways to find a cluster to join, using the
// log data in -data-dir, -discovery, and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
//
// TODO(yichengq): RaftServer should be started as late as possible.
// The current way of starting it is not ideal and should be
// refactored later.
func (s *PeerServer) findCluster(discoverURL string, peers []string) {
    name := s.Config.Name
    isNewNode := s.raftServer.IsLogEmpty()

    // Try its best to find possible peers, and connect with them.
    if !isNewNode {
        // A node is not allowed to join the cluster using an existing peer
        // address. This prevents an old node from rejoining under a
        // different name by mistake.
        if !s.checkPeerAddressNonconflict() {
            log.Fatalf("%v is not allowed to join the cluster with existing URL %v", s.Config.Name, s.Config.URL)
        }

        // Take old nodes into account.
        allPeers := s.getKnownPeers()
        // Discover registered peers.
        // TODO(yichengq): It may mess up discoverURL if this is
        // set wrong by mistake. This may need to refactor discovery
        // module. Fix it later.
        if discoverURL != "" {
            discoverPeers, _ := s.handleDiscovery(discoverURL)
            allPeers = append(allPeers, discoverPeers...)
        }
        allPeers = append(allPeers, peers...)
        allPeers = s.removeSelfFromList(allPeers)

        // If there is a possible peer list, use it to find the cluster.
        if len(allPeers) > 0 {
            // TODO(yichengq): joinCluster may fail if there's no leader for
            // the current cluster. It should wait if the cluster is under
            // leader election, or the node with a changed IP cannot join
            // the cluster then.
            if err := s.startAsFollower(allPeers, 1); err == nil {
                log.Debugf("%s joined the previous cluster %v", name, allPeers)
                return
            }
            log.Warnf("%s cannot connect to previous cluster %v", name, allPeers)
        }

        // TODO(yichengq): Think about the action that should be taken
        // if it cannot connect to any of the previously known nodes.
        s.raftServer.Start()
        log.Debugf("%s is restarting the cluster %v", name, allPeers)
        return
    }

    // Attempt cluster discovery.
    if discoverURL != "" {
        discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
        // The node is registered at the discovery URL.
        if discoverErr == nil {
            // Start as a leader in a new cluster.
            if len(discoverPeers) == 0 {
                log.Debugf("%s is starting a new cluster via discover service", name)
                s.startAsLeader()
            } else {
                log.Debugf("%s is joining a cluster %v via discover service", name, discoverPeers)
                if err := s.startAsFollower(discoverPeers, s.Config.RetryTimes); err != nil {
                    log.Fatal(err)
                }
            }
            return
        }
        log.Warnf("%s failed to connect to discovery service [%v]: %v", name, discoverURL, discoverErr)

        if len(peers) == 0 {
            log.Fatalf("%s cannot start: it failed to register with the discovery service and no backup peers were given", name)
        }
    }

    if len(peers) > 0 {
        if err := s.startAsFollower(peers, s.Config.RetryTimes); err != nil {
            log.Fatalf("%s cannot connect to existing cluster %v", name, peers)
        }
        return
    }

    log.Infof("%s is starting a new cluster.", s.Config.Name)
    s.startAsLeader()
}

// Start starts the Raft server and the monitoring routines.
func (s *PeerServer) Start(snapshot bool, discoverURL string, peers []string) error {
    s.Lock()
    defer s.Unlock()

    // Load the snapshot, if requested.
    if snapshot {
        err := s.raftServer.LoadSnapshot()
        if err == nil {
            log.Debugf("%s finished loading snapshot", s.Config.Name)
        } else {
            log.Debug(err)
        }
    }

    s.raftServer.Init()

    // Set NOCOW for the data directory on btrfs.
    if btrfs.IsBtrfs(s.raftServer.LogPath()) {
        if err := btrfs.SetNOCOWFile(s.raftServer.LogPath()); err != nil {
            log.Warnf("Failed setting NOCOW: %v", err)
        }
    }

    s.findCluster(discoverURL, peers)

    s.closeChan = make(chan bool)

    s.startRoutine(s.monitorSync)
    s.startRoutine(s.monitorTimeoutThreshold)
    s.startRoutine(s.monitorActiveSize)
    s.startRoutine(s.monitorPeerActivity)

    // Start the snapshot monitor if snapshotting is enabled.
    if snapshot {
        s.startRoutine(s.monitorSnapshot)
    }

    return nil
}

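// Stop signals the monitoring goroutines to exit via closeChan, stops the
// Raft server, and blocks until every routine has finished.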
func (s *PeerServer) Stop() {
    s.Lock()
    defer s.Unlock()

    if s.closeChan != nil {
        close(s.closeChan)
    }
    s.raftServer.Stop()
    s.routineGroup.Wait()
    s.closeChan = nil
}

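// HTTPHandler returns the router that serves the internal peer endpoints
// (Raft RPCs, join/remove, version checks) and the v2 admin API.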
func (s *PeerServer) HTTPHandler() http.Handler {
    router := mux.NewRouter()

    // internal commands
    router.HandleFunc("/name", s.NameHttpHandler)
    router.HandleFunc("/version", s.VersionHttpHandler)
    router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
    router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
    router.HandleFunc("/join", s.JoinHttpHandler)
    router.HandleFunc("/promote", s.PromoteHttpHandler).Methods("POST")
    router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
    router.HandleFunc("/vote", s.VoteHttpHandler)
    router.HandleFunc("/log", s.GetLogHttpHandler)
    router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
    router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
    router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
    router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

    router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
    router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
    router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
    router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")
    router.HandleFunc("/v2/admin/machines/{name}", s.addMachineHttpHandler).Methods("PUT")
    router.HandleFunc("/v2/admin/machines/{name}", s.removeMachineHttpHandler).Methods("DELETE")

    return router
}

// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
    return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
    s.server = server
}

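// startAsLeader starts the Raft server and keeps retrying a join command for
// this node until it commits, so the fresh single-node cluster contains
// itself as a peer.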
func (s *PeerServer) startAsLeader() {
    s.raftServer.Start()
    // The leader needs to join itself as a peer.
    for {
        c := &JoinCommandV1{
            MinVersion: store.MinVersion(),
            MaxVersion: store.MaxVersion(),
            Name:       s.raftServer.Name(),
            RaftURL:    s.Config.URL,
            EtcdURL:    s.server.URL(),
        }
        _, err := s.raftServer.Do(c)
        if err == nil {
            break
        }
    }
    log.Debugf("%s started as a leader", s.Config.Name)
}

func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) error {
    // Start as a follower in an existing cluster.
    for i := 0; ; i++ {
        ok := s.joinCluster(cluster)
        if ok {
            break
        }
        if i == retryTimes-1 {
            return fmt.Errorf("Cannot join the cluster via given peers after %d retries", retryTimes)
        }
        log.Warnf("%v is unable to join the cluster using any of the peers %v (attempt %d). Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
        time.Sleep(time.Second * time.Duration(s.Config.RetryInterval))
    }

    s.raftServer.Start()
    return nil
}

// getVersion fetches the peer version of a cluster.
func getVersion(t *transporter, versionURL url.URL) (int, error) {
    resp, _, err := t.Get(versionURL.String())
    if err != nil {
        return 0, err
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return 0, err
    }

    // Parse the version number and report a malformed response to the caller.
    version, err := strconv.Atoi(string(body))
    if err != nil {
        return 0, fmt.Errorf("cannot parse version response %q: %v", string(body), err)
    }
    return version, nil
}

// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
    nextVersion := s.store.Version() + 1
    for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
        u, err := url.Parse(peerURL)
        if err != nil {
            return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
        }

        t, _ := s.raftServer.Transporter().(*transporter)
        checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme, Path: fmt.Sprintf("/version/%d/check", nextVersion)}).String()
        resp, _, err := t.Get(checkURL)
        if err != nil {
            return fmt.Errorf("PeerServer: Cannot check version compatibility: %s", u.Host)
        }
        if resp.StatusCode != 200 {
            return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
        }
    }
    return nil
}

// checkPeerAddressNonconflict checks whether the peer address is already
// registered under a different name.
func (s *PeerServer) checkPeerAddressNonconflict() bool {
    // The (name, peer address) pair is already registered.
    if peerURL, ok := s.registry.PeerURL(s.Config.Name); ok {
        if peerURL == s.Config.URL {
            return true
        }
    }

    // Check all existing peer addresses.
    peerURLs := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
    for _, peerURL := range peerURLs {
        if peerURL == s.Config.URL {
            return false
        }
    }
    return true
}

// handleDiscovery is a helper that runs discovery and returns the results in
// the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
    peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL, s.closeChan, s.startRoutine)

    // Warn about errors coming from discovery; this isn't fatal since the
    // user might have provided a peer list elsewhere, or there is some log
    // in the data dir.
    if err != nil {
        log.Warnf("Discovery encountered an error: %v", err)
        return
    }

    for i := range peers {
        // Strip the scheme off of the peer if it has one.
        // TODO(bp): clean this up!
        purl, err := url.Parse(peers[i])
        if err == nil {
            peers[i] = purl.Host
        }
    }

    log.Infof("Discovery fetched back peer list: %v", peers)
    return
}

// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
    peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
    log.Infof("Peer URLs in log: %s / %s (%s)", s.raftServer.Leader(), s.Config.Name, strings.Join(peers, ","))

    for i := range peers {
        u, err := url.Parse(peers[i])
        if err != nil {
            log.Debugf("getKnownPeers cannot parse url %v", peers[i])
            continue
        }
        peers[i] = u.Host
    }
    return peers
}

// removeSelfFromList removes the URL of this peerServer from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
    // Remove its own peer address from the peer list to join.
    u, err := url.Parse(s.Config.URL)
    if err != nil {
        log.Fatalf("removeSelfFromList cannot parse peer address %v", s.Config.URL)
    }

    newPeers := make([]string, 0)
    for _, v := range peers {
        if v != u.Host {
            newPeers = append(newPeers, v)
        }
    }
    return newPeers
}

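// joinCluster tries each peer in the given cluster in turn until one accepts
// the join request. A structured etcd error means the cluster actively
// rejected the join, which is fatal.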
func (s *PeerServer) joinCluster(cluster []string) bool {
    for _, peer := range cluster {
        if len(peer) == 0 {
            continue
        }

        err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme)
        if err == nil {
            log.Debugf("%s joined the cluster via peer %s", s.Config.Name, peer)
            return true
        }

        if _, ok := err.(etcdErr.Error); ok {
            log.Fatal(err)
        }

        log.Warnf("Attempt to join via %s failed: %s", peer, err)
    }
    return false
}

// joinByPeer sends a join request to the given peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
    // The transporter is always a *transporter, so the assertion cannot fail.
    t, _ := server.Transporter().(*transporter)

    // Our version must match the leader's version.
    versionURL := url.URL{Host: peer, Scheme: scheme, Path: "/version"}
    version, err := getVersion(t, versionURL)
    if err != nil {
        return fmt.Errorf("Error during join version check: %v", err)
    }
    if version < store.MinVersion() || version > store.MaxVersion() {
        return fmt.Errorf("Unable to join: cluster version is %d; version compatibility is %d - %d", version, store.MinVersion(), store.MaxVersion())
    }

    var b bytes.Buffer
    c := &JoinCommandV2{
        MinVersion: store.MinVersion(),
        MaxVersion: store.MaxVersion(),
        Name:       server.Name(),
        PeerURL:    s.Config.URL,
        ClientURL:  s.server.URL(),
    }
    json.NewEncoder(&b).Encode(c)

    joinURL := url.URL{Host: peer, Scheme: scheme, Path: "/v2/admin/machines/" + server.Name()}
    log.Infof("Send Join Request to %s", joinURL.String())

    req, _ := http.NewRequest("PUT", joinURL.String(), &b)
    resp, err := t.client.Do(req)

    for {
        if err != nil {
            return fmt.Errorf("Unable to join: %v", err)
        }
        if resp != nil {
            defer resp.Body.Close()

            log.Infof("Join response status code: %d", resp.StatusCode)
            if resp.StatusCode == http.StatusOK {
                var msg joinMessageV2
                if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil {
                    log.Debugf("Error reading join response: %v", err)
                    return err
                }
                s.joinIndex = msg.CommitIndex
                s.setMode(msg.Mode)

                if msg.Mode == StandbyMode {
                    s.standbyClientURL = resp.Header.Get("X-Leader-Client-URL")
                    s.standbyPeerURL = resp.Header.Get("X-Leader-Peer-URL")
                }
                return nil
            }
            if resp.StatusCode == http.StatusTemporaryRedirect {
                address := resp.Header.Get("Location")
                log.Debugf("Send Join Request to %s", address)

                c := &JoinCommandV2{
                    MinVersion: store.MinVersion(),
                    MaxVersion: store.MaxVersion(),
                    Name:       server.Name(),
                    PeerURL:    s.Config.URL,
                    ClientURL:  s.server.URL(),
                }
                json.NewEncoder(&b).Encode(c)

                resp, _, err = t.Put(address, &b)
            } else if resp.StatusCode == http.StatusBadRequest {
                log.Debug("Reached max number of peers in the cluster")
                decoder := json.NewDecoder(resp.Body)
                err := &etcdErr.Error{}
                decoder.Decode(err)
                return *err
            } else {
                return fmt.Errorf("Unable to join")
            }
        }
    }
}

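// Stats marshals the current server statistics, including leader info and
// send/receive rates, to JSON.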
func (s *PeerServer) Stats() []byte {
    s.serverStats.LeaderInfo.Uptime = time.Since(s.serverStats.LeaderInfo.startTime).String()

    // TODO: register a state listener with raft to update this field
    // rather than comparing the state each time Stats() is called.
    if s.RaftServer().State() == raft.Leader {
        s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
    }

    queue := s.serverStats.sendRateQueue
    s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

    queue = s.serverStats.recvRateQueue
    s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

    b, _ := json.Marshal(s.serverStats)
    return b
}

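// PeerStats marshals the follower statistics to JSON. Only the leader
// reports them; every other node returns nil.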
func (s *PeerServer) PeerStats() []byte {
    if s.raftServer.State() == raft.Leader {
        b, _ := json.Marshal(s.followersStats)
        return b
    }
    return nil
}

// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
    value := event.Value()
    prevValue := event.PrevValue()
    if value == nil {
        value = "<nil>"
    }
    if prevValue == nil {
        prevValue = "<nil>"
    }

    switch event.Type() {
    case raft.StateChangeEventType:
        log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
    case raft.TermChangeEventType:
        log.Infof("%s: term #%v started.", s.Config.Name, value)
    case raft.LeaderChangeEventType:
        log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
    case raft.AddPeerEventType:
        log.Infof("%s: peer added: '%v'", s.Config.Name, value)
    case raft.RemovePeerEventType:
        log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
    case raft.HeartbeatIntervalEventType:
        var name = "<unknown>"
        if peer, ok := value.(*raft.Peer); ok {
            name = peer.Name
        }
        log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
    case raft.ElectionTimeoutThresholdEventType:
        select {
        case s.timeoutThresholdChan <- value:
        default:
        }
    }
}

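// recordMetricEvent feeds the duration carried by a Raft event into the
// corresponding timer metric.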
func (s *PeerServer) recordMetricEvent(event raft.Event) {
    name := fmt.Sprintf("raft.event.%s", event.Type())
    value := event.Value().(time.Duration)
    (*s.metrics).Timer(name).Update(value)
}

// logSnapshot logs information about the snapshot that was taken.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
    info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

    if err != nil {
        log.Infof("%s attempted and failed: %v", info, err)
    } else {
        log.Infof("%s completed", info)
    }
}

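// startRoutine runs f in a goroutine tracked by routineGroup so that Stop
// can wait for it to finish.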
func (s *PeerServer) startRoutine(f func()) {
    s.routineGroup.Add(1)
    go func() {
        defer s.routineGroup.Done()
        f()
    }()
}

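// monitorSnapshot wakes up every checkingInterval and takes a snapshot once
// the number of entries committed since the last snapshot exceeds the
// configured threshold.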
func (s *PeerServer) monitorSnapshot() {
    for {
        // Stop the timer explicitly rather than deferring: a defer inside
        // the loop would accumulate until the goroutine exits.
        timer := time.NewTimer(s.snapConf.checkingInterval)
        select {
        case <-s.closeChan:
            timer.Stop()
            return
        case <-timer.C:
        }

        currentIndex := s.RaftServer().CommitIndex()
        count := currentIndex - s.snapConf.lastIndex
        if count > s.snapConf.snapshotThr {
            err := s.raftServer.TakeSnapshot()
            s.logSnapshot(err, currentIndex, count)
            s.snapConf.lastIndex = currentIndex
        }
    }
}

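// monitorSync has the leader periodically commit a sync command carrying the
// current time through Raft.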
func (s *PeerServer) monitorSync() {
    ticker := time.NewTicker(time.Millisecond * 500)
    defer ticker.Stop()
    for {
        select {
        case <-s.closeChan:
            return
        case now := <-ticker.C:
            if s.raftServer.State() == raft.Leader {
                s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
            }
        }
    }
}

// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold() {
    for {
        select {
        case <-s.closeChan:
            return
        case value := <-s.timeoutThresholdChan:
            log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
        }

        // Stop the timer explicitly to avoid piling up deferred calls.
        timer := time.NewTimer(ThresholdMonitorTimeout)
        select {
        case <-s.closeChan:
            timer.Stop()
            return
        case <-timer.C:
        }
    }
}

// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for standbys as needed.
func (s *PeerServer) monitorActiveSize() {
    for {
        timer := time.NewTimer(ActiveMonitorTimeout)
        select {
        case <-s.closeChan:
            timer.Stop()
            return
        case <-timer.C:
        }

        // Ignore while this peer is not a leader.
        if s.raftServer.State() != raft.Leader {
            continue
        }

        // Retrieve the target active size and the actual active size.
        activeSize := s.ClusterConfig().ActiveSize
        peerCount := s.registry.PeerCount()
        standbys := s.registry.Standbys()
        peers := s.registry.Peers()
        if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
            peers = append(peers[:index], peers[index+1:]...)
        }

        // If we have more active nodes than we should then demote one.
        if peerCount > activeSize {
            peer := peers[rand.Intn(len(peers))]
            log.Infof("%s: demoting: %v", s.Config.Name, peer)
            if _, err := s.raftServer.Do(&DemoteCommand{Name: peer}); err != nil {
                log.Infof("%s: warning: demotion error: %v", s.Config.Name, err)
            }
            continue
        }

        // If we don't have enough active nodes then try to promote a standby.
        if peerCount < activeSize && len(standbys) > 0 {
            for _, i := range rand.Perm(len(standbys)) {
                standby := standbys[i]
                standbyPeerURL, _ := s.registry.StandbyPeerURL(standby)
                log.Infof("%s: attempting to promote: %v (%s)", s.Config.Name, standby, standbyPeerURL)

                // Notify the standby to promote itself.
                client := &http.Client{
                    Transport: &http.Transport{
                        DisableKeepAlives:     false,
                        ResponseHeaderTimeout: ActiveMonitorTimeout,
                    },
                }
                resp, err := client.Post(fmt.Sprintf("%s/promote", standbyPeerURL), "application/json", nil)
                if err != nil {
                    log.Infof("%s: warning: promotion error: %v", s.Config.Name, err)
                    continue
                } else if resp.StatusCode != http.StatusOK {
                    log.Infof("%s: warning: promotion failure: %v", s.Config.Name, resp.StatusCode)
                    continue
                }
                break
            }
        }
    }
}

// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity() {
    for {
        timer := time.NewTimer(PeerActivityMonitorTimeout)
        select {
        case <-s.closeChan:
            timer.Stop()
            return
        case <-timer.C:
        }

        // Ignore while this peer is not a leader.
        if s.raftServer.State() != raft.Leader {
            continue
        }

        // Check the last activity for all peers.
        now := time.Now()
        promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
        peers := s.raftServer.Peers()
        for _, peer := range peers {
            // If the last response from the peer is older than the promote
            // delay then automatically demote the peer.
            if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
                log.Infof("%s: demoting node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
                if _, err := s.raftServer.Do(&DemoteCommand{Name: peer.Name}); err != nil {
                    log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
                }
                continue
            }
        }
    }
}

// Mode represents whether the server is an active peer or if the server is
// simply acting as a standby.
type Mode string

const (
    // PeerMode is when the server is an active node in Raft.
    PeerMode = Mode("peer")

    // StandbyMode is when the server is an inactive, request-forwarding node.
    StandbyMode = Mode("standby")
)