peer_server.go

package server

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"time"

	"github.com/coreos/etcd/third_party/github.com/goraft/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/store"
)

const (
	// ThresholdMonitorTimeout is the time between log notifications that the
	// Raft heartbeat is too close to the election timeout.
	ThresholdMonitorTimeout = 5 * time.Second

	// ActiveMonitorTimeout is the time between checks on the active size of
	// the cluster. If the active size is different from the actual size then
	// etcd attempts to promote or demote members to bring it to the correct number.
	ActiveMonitorTimeout = 1 * time.Second

	// PeerActivityMonitorTimeout is the time between checks for dead nodes in
	// the cluster.
	PeerActivityMonitorTimeout = 1 * time.Second
)

const (
	peerModeFlag  = 0
	proxyModeFlag = 1
)

type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}

type PeerServer struct {
	Config               PeerServerConfig
	clusterConfig        *ClusterConfig
	raftServer           raft.Server
	server               *Server
	joinIndex            uint64
	followersStats       *raftFollowersStats
	serverStats          *raftServerStats
	registry             *Registry
	store                store.Store
	snapConf             *snapshotConf
	mode                 Mode
	closeChan            chan bool
	timeoutThresholdChan chan interface{}
	proxyPeerURL         string
	proxyClientURL       string
	metrics              *metrics.Bucket
}

// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd checks whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration

	// The index at which the last snapshot was taken.
	lastIndex uint64

	// If the number of entries committed since the last snapshot
	// exceeds snapshotThr, etcd takes a snapshot.
	snapshotThr uint64
}

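// NewPeerServer creates a PeerServer with the given configuration, wiring in
// the registry, store, metrics bucket, and stats collectors it depends on.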
func NewPeerServer(psConfig PeerServerConfig, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:               psConfig,
		clusterConfig:        NewClusterConfig(),
		registry:             registry,
		store:                store,
		followersStats:       followersStats,
		serverStats:          serverStats,
		timeoutThresholdChan: make(chan interface{}, 1),
		metrics:              mb,
	}

	return s
}

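// SetRaftServer attaches the underlying Raft server, initializes the snapshot
// configuration, and registers the event listeners used for logging and metrics.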
func (s *PeerServer) SetRaftServer(raftServer raft.Server) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// This is not accurate; we will update raft to provide an API.
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}

	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	s.raftServer = raftServer
}

// Mode retrieves the current mode of the server.
func (s *PeerServer) Mode() Mode {
	return s.mode
}

// setMode updates the current mode of the server.
// Switching to peer mode will start the Raft server.
// Switching to proxy mode will stop the Raft server.
func (s *PeerServer) setMode(mode Mode) {
	s.mode = mode

	switch mode {
	case PeerMode:
		if !s.raftServer.Running() {
			s.raftServer.Start()
		}
	case ProxyMode:
		if s.raftServer.Running() {
			s.raftServer.Stop()
		}
	}
}

// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	return s.clusterConfig
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the PeerServer to demote peers or
// promote proxies to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) {
	// Set minimums.
	if c.ActiveSize < MinActiveSize {
		c.ActiveSize = MinActiveSize
	}
	if c.PromoteDelay < MinPromoteDelay {
		c.PromoteDelay = MinPromoteDelay
	}

	s.clusterConfig = c
}

// findCluster tries all possible ways to find a cluster to join, including
// log data in -data-dir, -discovery, and -peers.
//
// Peer discovery follows this order:
// 1. previous peers in -data-dir
// 2. -discovery
// 3. -peers
//
// TODO(yichengq): RaftServer should be started as late as possible.
// The current way of starting it is not ideal and should be refactored later.
func (s *PeerServer) findCluster(discoverURL string, peers []string) {
	name := s.Config.Name
	isNewNode := s.raftServer.IsLogEmpty()

	// Try our best to find possible peers, and connect with them.
	if !isNewNode {
		// Take old nodes into account.
		allPeers := s.getKnownPeers()
		// Discover registered peers.
		// TODO(yichengq): A mistakenly set discoverURL may mess things up.
		// This may require refactoring the discovery module. Fix it later.
		if discoverURL != "" {
			discoverPeers, _ := s.handleDiscovery(discoverURL)
			allPeers = append(allPeers, discoverPeers...)
		}
		allPeers = append(allPeers, peers...)
		allPeers = s.removeSelfFromList(allPeers)

		// If there is a possible peer list, use it to find the cluster.
		if len(allPeers) > 0 {
			// TODO(yichengq): joinCluster may fail if there is no leader for
			// the current cluster. It should wait if the cluster is under
			// leader election, or the node with a changed IP cannot join
			// the cluster then.
			if err := s.startAsFollower(allPeers, 1); err == nil {
				log.Debugf("%s joined the previous cluster %v", name, allPeers)
				return
			}
			log.Warnf("%s cannot connect to previous cluster %v", name, allPeers)
		}

		// TODO(yichengq): Think about the action that should be taken
		// if it cannot connect to any of the previously known nodes.
		s.raftServer.Start()
		log.Debugf("%s is restarting the cluster %v", name, allPeers)
		return
	}

	// Attempt cluster discovery.
	if discoverURL != "" {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The node is registered at the discovery URL.
		if discoverErr == nil {
			// Start as a leader in a new cluster.
			if len(discoverPeers) == 0 {
				log.Debugf("%s is starting a new cluster via discovery service", name)
				s.startAsLeader()
			} else {
				log.Debugf("%s is joining a cluster %v via discovery service", name, discoverPeers)
				if err := s.startAsFollower(discoverPeers, s.Config.RetryTimes); err != nil {
					log.Fatal(err)
				}
			}
			return
		}
		log.Warnf("%s failed to connect to discovery service [%v]: %v", name, discoverURL, discoverErr)

		if len(peers) == 0 {
			log.Fatalf("%s, the new leader, must register itself with the discovery service as required", name)
		}
	}

	if len(peers) > 0 {
		if err := s.startAsFollower(peers, s.Config.RetryTimes); err != nil {
			log.Fatalf("%s cannot connect to existing cluster %v", name, peers)
		}
		return
	}

	log.Infof("%s is starting a new cluster.", s.Config.Name)
	s.startAsLeader()
}

// Start starts the Raft server and the background monitor goroutines.
func (s *PeerServer) Start(snapshot bool, discoverURL string, peers []string) error {
	// Load the previous snapshot if requested.
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished loading snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.Init()

	s.findCluster(discoverURL, peers)

	s.closeChan = make(chan bool)

	go s.monitorSync()
	go s.monitorTimeoutThreshold(s.closeChan)
	go s.monitorActiveSize(s.closeChan)
	go s.monitorPeerActivity(s.closeChan)

	// Start the snapshot monitor if snapshotting is enabled.
	if snapshot {
		go s.monitorSnapshot()
	}

	return nil
}

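// Stop signals the monitor goroutines to exit and stops the underlying
// Raft server.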
func (s *PeerServer) Stop() {
	if s.closeChan != nil {
		close(s.closeChan)
		s.closeChan = nil
	}
	s.raftServer.Stop()
}

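// HTTPHandler returns the router for the internal peer-to-peer Raft endpoints
// and the v2 admin API.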
func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// Internal commands.
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/promote", s.PromoteHttpHandler).Methods("POST")
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	router.HandleFunc("/v2/admin/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")
	router.HandleFunc("/v2/admin/machines/{name}", s.addMachineHttpHandler).Methods("PUT")
	router.HandleFunc("/v2/admin/machines/{name}", s.removeMachineHttpHandler).Methods("DELETE")

	return router
}

// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}

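// startAsLeader starts the Raft server and then joins this node to itself as
// the first peer, retrying the join command until it succeeds.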
func (s *PeerServer) startAsLeader() {
	s.raftServer.Start()
	// The leader needs to join itself as a peer.
	for {
		c := &JoinCommandV1{
			MinVersion: store.MinVersion(),
			MaxVersion: store.MaxVersion(),
			Name:       s.raftServer.Name(),
			RaftURL:    s.Config.URL,
			EtcdURL:    s.server.URL(),
		}
		_, err := s.raftServer.Do(c)
		if err == nil {
			break
		}
	}
	log.Debugf("%s started as a leader", s.Config.Name)
}

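// startAsFollower joins an existing cluster through the given peers, retrying
// up to retryTimes with RetryInterval seconds between attempts.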
func (s *PeerServer) startAsFollower(cluster []string, retryTimes int) error {
	// Start as a follower in an existing cluster.
	for i := 0; ; i++ {
		ok := s.joinCluster(cluster)
		if ok {
			break
		}
		if i == retryTimes-1 {
			return fmt.Errorf("Cannot join the cluster via given peers after %d retries", retryTimes)
		}
		log.Warnf("%v is unable to join the cluster using any of the peers %v (attempt %d). Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		time.Sleep(time.Second * time.Duration(s.Config.RetryInterval))
	}

	s.raftServer.Start()
	return nil
}

// getVersion fetches the peer version of a cluster.
func getVersion(t *transporter, versionURL url.URL) (int, error) {
	resp, _, err := t.Get(versionURL.String())
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, err
	}

	// Parse the version number.
	version, err := strconv.Atoi(string(body))
	if err != nil {
		return 0, err
	}
	return version, nil
}

// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		t, _ := s.raftServer.Transporter().(*transporter)
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme, Path: fmt.Sprintf("/version/%d/check", nextVersion)}).String()
		resp, _, err := t.Get(checkURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot check version compatibility: %s", u.Host)
		}
		if resp.StatusCode != 200 {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}

// handleDiscovery does discovery and returns the results in the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL)

	// Warn about errors coming from discovery; this isn't fatal
	// since the user might have provided a peer list elsewhere,
	// or there is some log in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)

	return
}

// getKnownPeers gets the previous peers from the log.
func (s *PeerServer) getKnownPeers() []string {
	peers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for i := range peers {
		u, err := url.Parse(peers[i])
		if err != nil {
			log.Debugf("getKnownPeers cannot parse url %v", peers[i])
			continue
		}
		peers[i] = u.Host
	}
	return peers
}

// removeSelfFromList removes the URL of the peer server from the peer list.
func (s *PeerServer) removeSelfFromList(peers []string) []string {
	// Remove its own peer address from the peer list to join.
	u, err := url.Parse(s.Config.URL)
	if err != nil {
		log.Fatalf("removeSelfFromList cannot parse peer address %v", s.Config.URL)
	}

	newPeers := make([]string, 0)
	for _, v := range peers {
		if v != u.Host {
			newPeers = append(newPeers, v)
		}
	}
	return newPeers
}

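// joinCluster attempts to join the cluster through each peer in turn,
// returning true as soon as one join succeeds.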
func (s *PeerServer) joinCluster(cluster []string) bool {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme)
		if err == nil {
			log.Debugf("%s joined the cluster via peer %s", s.Config.Name, peer)
			return true
		}

		if _, ok := err.(etcdErr.Error); ok {
			log.Fatal(err)
		}

		log.Warnf("Attempt to join via %s failed: %s", peer, err)
	}

	return false
}

// joinByPeer sends a join request to the given peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
	// The type assertion must succeed.
	t, _ := server.Transporter().(*transporter)

	// Our version must match the leader's version.
	versionURL := url.URL{Host: peer, Scheme: scheme, Path: "/version"}
	version, err := getVersion(t, versionURL)
	if err != nil {
		return fmt.Errorf("Error during join version check: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return fmt.Errorf("Unable to join: cluster version is %d; version compatibility is %d - %d", version, store.MinVersion(), store.MaxVersion())
	}

	var b bytes.Buffer
	c := &JoinCommandV2{
		MinVersion: store.MinVersion(),
		MaxVersion: store.MaxVersion(),
		Name:       server.Name(),
		PeerURL:    s.Config.URL,
		ClientURL:  s.server.URL(),
	}
	json.NewEncoder(&b).Encode(c)

	joinURL := url.URL{Host: peer, Scheme: scheme, Path: "/v2/admin/machines/" + server.Name()}
	log.Infof("Send Join Request to %s", joinURL.String())

	req, _ := http.NewRequest("PUT", joinURL.String(), &b)
	resp, err := t.client.Do(req)

	for {
		if err != nil {
			return fmt.Errorf("Unable to join: %v", err)
		}
		if resp != nil {
			defer resp.Body.Close()

			log.Infof("Join response status: %d", resp.StatusCode)
			if resp.StatusCode == http.StatusOK {
				var msg joinMessageV2
				if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil {
					log.Debugf("Error reading join response: %v", err)
					return err
				}
				s.joinIndex = msg.CommitIndex
				s.setMode(msg.Mode)

				if msg.Mode == ProxyMode {
					s.proxyClientURL = resp.Header.Get("X-Leader-Client-URL")
					s.proxyPeerURL = resp.Header.Get("X-Leader-Peer-URL")
				}

				return nil
			}
			if resp.StatusCode == http.StatusTemporaryRedirect {
				address := resp.Header.Get("Location")
				log.Debugf("Send Join Request to %s", address)

				c := &JoinCommandV1{
					MinVersion: store.MinVersion(),
					MaxVersion: store.MaxVersion(),
					Name:       server.Name(),
					RaftURL:    s.Config.URL,
					EtcdURL:    s.server.URL(),
				}
				json.NewEncoder(&b).Encode(c)
				resp, _, err = t.Post(address, &b)
			} else if resp.StatusCode == http.StatusBadRequest {
				log.Debug("Reached max number of peers in the cluster")
				decoder := json.NewDecoder(resp.Body)
				err := &etcdErr.Error{}
				decoder.Decode(err)
				return *err
			} else {
				return fmt.Errorf("Unable to join")
			}
		}
	}
}

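// Stats marshals the current Raft server statistics to JSON.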
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register a state listener to raft to change this field
	// rather than compare the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)

	return b
}

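// PeerStats marshals the follower statistics to JSON when this node is the
// leader; it returns nil otherwise.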
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}

// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		var name = "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}

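// recordMetricEvent records the duration carried by a Raft event in the
// metrics bucket, keyed by the event type.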
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}

// logSnapshot logs the result of a snapshot attempt.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}

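// monitorSnapshot periodically compares the commit index against the index of
// the last snapshot and takes a new snapshot once the threshold is exceeded.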
func (s *PeerServer) monitorSnapshot() {
	for {
		time.Sleep(s.snapConf.checkingInterval)
		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}

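// monitorSync issues a sync command through Raft every 500ms while this node
// is the leader.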
func (s *PeerServer) monitorSync() {
	ticker := time.Tick(time.Millisecond * 500)
	for {
		select {
		case now := <-ticker:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}

// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold(closeChan chan bool) {
	for {
		select {
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		case <-closeChan:
			return
		}

		time.Sleep(ThresholdMonitorTimeout)
	}
}

// monitorActiveSize has the leader periodically check the status of cluster
// nodes and swap them out for proxies as needed.
func (s *PeerServer) monitorActiveSize(closeChan chan bool) {
	for {
		select {
		case <-time.After(ActiveMonitorTimeout):
		case <-closeChan:
			return
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peerCount := s.registry.PeerCount()
		proxies := s.registry.Proxies()
		peers := s.registry.Peers()
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should then demote.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: demoting: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&DemoteCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: demotion error: %v", s.Config.Name, err)
			}
			continue
		}

		// If we don't have enough active nodes then try to promote a proxy.
		if peerCount < activeSize && len(proxies) > 0 {
		loop:
			for _, i := range rand.Perm(len(proxies)) {
				proxy := proxies[i]
				proxyPeerURL, _ := s.registry.ProxyPeerURL(proxy)
				log.Infof("%s: attempting to promote: %v (%s)", s.Config.Name, proxy, proxyPeerURL)

				// Notify the proxy to promote itself.
				client := &http.Client{
					Transport: &http.Transport{
						DisableKeepAlives:     false,
						ResponseHeaderTimeout: ActiveMonitorTimeout,
					},
				}
				resp, err := client.Post(fmt.Sprintf("%s/promote", proxyPeerURL), "application/json", nil)
				if err != nil {
					log.Infof("%s: warning: promotion error: %v", s.Config.Name, err)
					continue
				} else if resp.StatusCode != http.StatusOK {
					log.Infof("%s: warning: promotion failure: %v", s.Config.Name, resp.StatusCode)
					continue
				}
				break loop
			}
		}
	}
}

// monitorPeerActivity has the leader periodically check for dead nodes and demote them.
func (s *PeerServer) monitorPeerActivity(closeChan chan bool) {
	for {
		select {
		case <-time.After(PeerActivityMonitorTimeout):
		case <-closeChan:
			return
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the last response from the peer is longer ago than the promote
			// delay then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
				log.Infof("%s: demoting node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&DemoteCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
				continue
			}
		}
	}
}

// Mode represents whether the server is an active peer or if the server is
// simply acting as a proxy.
type Mode string

const (
	// PeerMode is when the server is an active node in Raft.
	PeerMode = Mode("peer")

	// ProxyMode is when the server is an inactive, request-forwarding node.
	ProxyMode = Mode("proxy")
)