peer_server.go

package server

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"time"

	"github.com/coreos/etcd/third_party/github.com/coreos/raft"
	"github.com/coreos/etcd/third_party/github.com/gorilla/mux"

	"github.com/coreos/etcd/discovery"
	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/store"
)

const (
	ThresholdMonitorTimeout    = 5 * time.Second
	ActiveMonitorTimeout       = 1 * time.Second
	PeerActivityMonitorTimeout = 1 * time.Second
)

type PeerServerConfig struct {
	Name          string
	Scheme        string
	URL           string
	SnapshotCount int
	RetryTimes    int
	RetryInterval float64
}

type PeerServer struct {
	Config               PeerServerConfig
	clusterConfig        *ClusterConfig
	raftServer           raft.Server
	server               *Server
	joinIndex            uint64
	followersStats       *raftFollowersStats
	serverStats          *raftServerStats
	registry             *Registry
	store                store.Store
	snapConf             *snapshotConf
	mode                 Mode
	closeChan            chan bool
	timeoutThresholdChan chan interface{}
	proxyPeerURL         string
	proxyClientURL       string
	metrics              *metrics.Bucket
}

// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd will check whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration
	// The index at which the last snapshot happened.
	lastIndex uint64
	// If the number of indexes committed since the last snapshot
	// exceeds the snapshot threshold, etcd will take a snapshot.
	snapshotThr uint64
}

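// NewPeerServer creates a peer server from the given configuration and its
// supporting registry, store, and stats objects. The raft server must be
// attached separately via SetRaftServer before the server is started.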
func NewPeerServer(psConfig PeerServerConfig, registry *Registry, store store.Store, mb *metrics.Bucket, followersStats *raftFollowersStats, serverStats *raftServerStats) *PeerServer {
	s := &PeerServer{
		Config:               psConfig,
		clusterConfig:        NewClusterConfig(),
		registry:             registry,
		store:                store,
		followersStats:       followersStats,
		serverStats:          serverStats,
		timeoutThresholdChan: make(chan interface{}, 1),
		metrics:              mb,
	}
	return s
}

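// SetRaftServer attaches the underlying raft server, initializes the snapshot
// configuration, and registers the event listeners used for logging and metrics.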
func (s *PeerServer) SetRaftServer(raftServer raft.Server) {
	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// this is not accurate, we will update raft to provide an api
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(s.Config.SnapshotCount),
	}

	raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.HeartbeatIntervalEventType, s.raftEventLogger)
	raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)

	raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	s.raftServer = raftServer
}

// Mode retrieves the current mode of the server.
func (s *PeerServer) Mode() Mode {
	return s.mode
}

// setMode updates the current mode of the server.
// Switching to peer mode will start the Raft server.
// Switching to proxy mode will stop the Raft server.
func (s *PeerServer) setMode(mode Mode) {
	s.mode = mode

	switch mode {
	case PeerMode:
		if !s.raftServer.Running() {
			s.raftServer.Start()
		}
	case ProxyMode:
		if s.raftServer.Running() {
			s.raftServer.Stop()
		}
	}
}

// ClusterConfig retrieves the current cluster configuration.
func (s *PeerServer) ClusterConfig() *ClusterConfig {
	return s.clusterConfig
}

// SetClusterConfig updates the current cluster configuration.
// Adjusting the active size will cause the PeerServer to demote peers or
// promote proxies to match the new size.
func (s *PeerServer) SetClusterConfig(c *ClusterConfig) error {
	// Validate configuration.
	if c.ActiveSize < 1 {
		return etcdErr.NewError(etcdErr.EcodeInvalidActiveSize, "Post", 0)
	} else if c.PromoteDelay < 0 {
		return etcdErr.NewError(etcdErr.EcodeInvalidPromoteDelay, "Post", 0)
	}

	s.clusterConfig = c
	return nil
}

// handleDiscovery performs discovery and returns the results in the expected format.
func (s *PeerServer) handleDiscovery(discoverURL string) (peers []string, err error) {
	peers, err = discovery.Do(discoverURL, s.Config.Name, s.Config.URL)

	// Warn about errors coming from discovery. This isn't fatal since the
	// user might have provided a peer list elsewhere, or there may be log
	// data in the data dir.
	if err != nil {
		log.Warnf("Discovery encountered an error: %v", err)
		return
	}

	for i := range peers {
		// Strip the scheme off of the peer if it has one.
		// TODO(bp): clean this up!
		purl, err := url.Parse(peers[i])
		if err == nil {
			peers[i] = purl.Host
		}
	}

	log.Infof("Discovery fetched back peer list: %v", peers)

	return
}

// findCluster tries all possible ways to find a cluster to join,
// including -discovery, -peers, and log data in -data-dir.
//
// Peer discovery follows this order:
// 1. -discovery
// 2. -peers
// 3. previous peers in -data-dir
func (s *PeerServer) findCluster(discoverURL string, peers []string) {
	// Attempt cluster discovery.
	toDiscover := discoverURL != ""
	if toDiscover {
		discoverPeers, discoverErr := s.handleDiscovery(discoverURL)
		// The peer is registered in the discovery URL.
		if discoverErr == nil {
			// Start as the leader of a new cluster.
			if len(discoverPeers) == 0 {
				log.Debug("This peer is starting a brand new cluster based on discover URL.")
				s.startAsLeader()
			} else {
				s.startAsFollower(discoverPeers)
			}
			return
		}
	}

	hasPeerList := len(peers) > 0

	// If there is a log in the data dir, append the previous peers to the
	// configured peers to find the cluster.
	prevPeers := s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
	for i := 0; i < len(prevPeers); i++ {
		u, err := url.Parse(prevPeers[i])
		if err != nil {
			log.Debug("rejoin cannot parse url: ", err)
			continue
		}
		prevPeers[i] = u.Host
	}
	peers = append(peers, prevPeers...)

	// If there is a backup peer list, use it to find the cluster.
	if len(peers) > 0 {
		ok := s.joinCluster(peers)
		if !ok {
			log.Warn("No living peers are found!")
		} else {
			log.Debugf("%s restarts as a follower based on peers %v", s.Config.Name, peers)
			return
		}
	}

	if !s.raftServer.IsLogEmpty() {
		log.Debugf("Entire cluster is down! %v will restart the cluster.", s.Config.Name)
		return
	}

	if toDiscover {
		log.Fatalf("Discovery failed, no available peers in backup list, and no log data")
	}

	if hasPeerList {
		log.Fatalf("No available peers in backup list, and no log data")
	}

	log.Infof("This peer is starting a brand new cluster now.")
	s.startAsLeader()
}

// Start starts the raft server: it optionally loads a snapshot, finds or
// creates a cluster, and launches the background monitor goroutines.
func (s *PeerServer) Start(snapshot bool, discoverURL string, peers []string) error {
	// Load a previous snapshot if one is available.
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished load snapshot", s.Config.Name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.Start()
	s.findCluster(discoverURL, peers)

	s.closeChan = make(chan bool)

	go s.monitorSync(s.closeChan)
	go s.monitorTimeoutThreshold(s.closeChan)
	go s.monitorActive(s.closeChan)
	go s.monitorPeerActivity(s.closeChan)

	// Start taking snapshots if snapshotting is enabled.
	if snapshot {
		go s.monitorSnapshot(s.closeChan)
	}

	return nil
}

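// Stop signals the background monitors to exit and stops the raft server.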
func (s *PeerServer) Stop() {
	if s.closeChan != nil {
		close(s.closeChan)
		s.closeChan = nil
	}
	s.raftServer.Stop()
}

func (s *PeerServer) HTTPHandler() http.Handler {
	router := mux.NewRouter()

	// internal commands
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/promote", s.PromoteHttpHandler).Methods("POST")
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/config", s.getClusterConfigHttpHandler).Methods("GET")
	router.HandleFunc("/config", s.setClusterConfigHttpHandler).Methods("PUT")
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	return router
}

// RaftServer retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// SetServer associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}

func (s *PeerServer) startAsLeader() {
	// The leader needs to join itself as a peer.
	for {
		_, err := s.raftServer.Do(NewJoinCommand(store.MinVersion(), store.MaxVersion(), s.raftServer.Name(), s.Config.URL, s.server.URL()))
		if err == nil {
			break
		}
	}
	log.Debugf("%s starts as a leader", s.Config.Name)
}

func (s *PeerServer) startAsFollower(cluster []string) {
	// Start as a follower in an existing cluster.
	for i := 0; i < s.Config.RetryTimes; i++ {
		ok := s.joinCluster(cluster)
		if ok {
			return
		}
		log.Warnf("%v is unable to join the cluster using any of the peers %v on attempt %d. Retrying in %.1f seconds", s.Config.Name, cluster, i+1, s.Config.RetryInterval)
		time.Sleep(time.Second * time.Duration(s.Config.RetryInterval))
	}

	log.Fatalf("Cannot join the cluster via given peers after %d retries", s.Config.RetryTimes)
}

// getVersion fetches the peer version of a cluster.
func getVersion(t *transporter, versionURL url.URL) (int, error) {
	resp, req, err := t.Get(versionURL.String())
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	t.CancelWhenTimeout(req)

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, err
	}

	// Parse the version number.
	version, err := strconv.Atoi(string(body))
	if err != nil {
		return 0, err
	}
	return version, nil
}

// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		t, _ := s.raftServer.Transporter().(*transporter)
		checkURL := (&url.URL{Host: u.Host, Scheme: s.Config.Scheme, Path: fmt.Sprintf("/version/%d/check", nextVersion)}).String()
		resp, _, err := t.Get(checkURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot check version compatibility: %s", u.Host)
		}
		if resp.StatusCode != 200 {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}

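// joinCluster attempts to join the cluster through each of the given peers
// in turn, returning true on the first success.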
func (s *PeerServer) joinCluster(cluster []string) bool {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		err := s.joinByPeer(s.raftServer, peer, s.Config.Scheme)
		if err == nil {
			log.Debugf("%s joined the cluster via peer %s", s.Config.Name, peer)
			return true
		}

		if _, ok := err.(etcdErr.Error); ok {
			log.Fatal(err)
		}

		log.Warnf("Attempt to join via %s failed: %s", peer, err)
	}

	return false
}

// joinByPeer sends a join request to the given peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
	var b bytes.Buffer

	// t must be ok
	t, _ := server.Transporter().(*transporter)

	// Our version must match the leader's version.
	versionURL := url.URL{Host: peer, Scheme: scheme, Path: "/version"}
	version, err := getVersion(t, versionURL)
	if err != nil {
		return fmt.Errorf("Error during join version check: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return fmt.Errorf("Unable to join: cluster version is %d; version compatibility is %d - %d", version, store.MinVersion(), store.MaxVersion())
	}

	json.NewEncoder(&b).Encode(NewJoinCommand(store.MinVersion(), store.MaxVersion(), server.Name(), s.Config.URL, s.server.URL()))

	joinURL := url.URL{Host: peer, Scheme: scheme, Path: "/join"}

	log.Debugf("Send Join Request to %s", joinURL.String())

	resp, req, err := t.Post(joinURL.String(), &b)

	for {
		if err != nil {
			return fmt.Errorf("Unable to join: %v", err)
		}
		if resp != nil {
			defer resp.Body.Close()

			t.CancelWhenTimeout(req)

			if resp.StatusCode == http.StatusOK {
				r := bufio.NewReader(resp.Body)
				s.joinIndex, _ = binary.ReadUvarint(r)

				// Determine whether the server joined as a proxy or peer.
				var mode uint64
				if mode, err = binary.ReadUvarint(r); err == io.EOF {
					mode = 0
				} else if err != nil {
					log.Debugf("Error reading join mode: %v", err)
					return err
				}

				switch mode {
				case 0:
					s.setMode(PeerMode)
				case 1:
					s.setMode(ProxyMode)
					s.proxyClientURL = resp.Header.Get("X-Leader-Client-URL")
					s.proxyPeerURL = resp.Header.Get("X-Leader-Peer-URL")
				default:
					log.Debugf("Invalid join mode: %d", mode)
					return fmt.Errorf("Invalid join mode (%d)", mode)
				}

				return nil
			}
			if resp.StatusCode == http.StatusTemporaryRedirect {
				address := resp.Header.Get("Location")
				log.Debugf("Send Join Request to %s", address)
				json.NewEncoder(&b).Encode(NewJoinCommand(store.MinVersion(), store.MaxVersion(), server.Name(), s.Config.URL, s.server.URL()))
				resp, req, err = t.Post(address, &b)
			} else if resp.StatusCode == http.StatusBadRequest {
				log.Debug("Reached max number of peers in the cluster")
				decoder := json.NewDecoder(resp.Body)
				err := &etcdErr.Error{}
				decoder.Decode(err)
				return *err
			} else {
				return fmt.Errorf("Unable to join")
			}
		}
	}
}

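// Stats returns the current server statistics as JSON, refreshing the leader
// info and the send/receive rate measurements first.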
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register state listener to raft to change this field
	// rather than compare the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()
	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)
	return b
}

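// PeerStats returns follower statistics as JSON when this server is the
// leader; otherwise it returns nil.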
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}

// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.Config.Name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.Config.Name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
	case raft.HeartbeatIntervalEventType:
		var name = "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}

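// recordMetricEvent records the duration carried by a raft event (e.g. a
// heartbeat) in the corresponding timer of the metrics bucket.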
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}

// logSnapshot logs the result of the snapshot that was attempted.
func (s *PeerServer) logSnapshot(err error, currentIndex, count uint64) {
	info := fmt.Sprintf("%s: snapshot of %d events at index %d", s.Config.Name, count, currentIndex)

	if err != nil {
		log.Infof("%s attempted and failed: %v", info, err)
	} else {
		log.Infof("%s completed", info)
	}
}

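// monitorSnapshot periodically checks how many entries have been committed
// since the last snapshot and takes a new snapshot once that count exceeds
// the configured threshold.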
func (s *PeerServer) monitorSnapshot(closeChan chan bool) {
	for {
		select {
		case <-time.After(s.snapConf.checkingInterval):
		case <-closeChan:
			return
		}

		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if count > s.snapConf.snapshotThr {
			err := s.raftServer.TakeSnapshot()
			s.logSnapshot(err, currentIndex, count)
			s.snapConf.lastIndex = currentIndex
		}
	}
}

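// monitorSync has the leader periodically issue a sync command through raft
// so that time-based housekeeping (such as TTL expiration) is driven by the
// leader's clock consistently across the cluster.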
func (s *PeerServer) monitorSync(closeChan chan bool) {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop()
	for {
		select {
		case now := <-ticker.C:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		case <-closeChan:
			return
		}
	}
}

// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold(closeChan chan bool) {
	for {
		select {
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
		case <-closeChan:
			return
		}

		time.Sleep(ThresholdMonitorTimeout)
	}
}

// monitorActive has the leader periodically check the status of cluster
// nodes and swap them out for proxies as needed.
func (s *PeerServer) monitorActive(closeChan chan bool) {
	for {
		select {
		case <-time.After(ActiveMonitorTimeout):
		case <-closeChan:
			return
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Retrieve target active size and actual active size.
		activeSize := s.ClusterConfig().ActiveSize
		peerCount := s.registry.PeerCount()
		proxies := s.registry.Proxies()
		peers := s.registry.Peers()

		// Remove this peer's own name from the candidate list so it never
		// demotes itself. The binary search assumes the registry returns
		// the peer names in sorted order.
		if index := sort.SearchStrings(peers, s.Config.Name); index < len(peers) && peers[index] == s.Config.Name {
			peers = append(peers[:index], peers[index+1:]...)
		}

		// If we have more active nodes than we should then demote.
		if peerCount > activeSize {
			peer := peers[rand.Intn(len(peers))]
			log.Infof("%s: demoting: %v", s.Config.Name, peer)
			if _, err := s.raftServer.Do(&DemoteCommand{Name: peer}); err != nil {
				log.Infof("%s: warning: demotion error: %v", s.Config.Name, err)
			}
			continue
		}

		// If we don't have enough active nodes then try to promote a proxy.
		if peerCount < activeSize && len(proxies) > 0 {
		loop:
			for _, i := range rand.Perm(len(proxies)) {
				proxy := proxies[i]
				proxyPeerURL, _ := s.registry.ProxyPeerURL(proxy)
				log.Infof("%s: attempting to promote: %v (%s)", s.Config.Name, proxy, proxyPeerURL)

				// Notify the proxy to promote itself.
				client := &http.Client{
					Transport: &http.Transport{
						DisableKeepAlives:     false,
						ResponseHeaderTimeout: ActiveMonitorTimeout,
					},
				}
				resp, err := client.Post(fmt.Sprintf("%s/promote", proxyPeerURL), "application/json", nil)
				if err != nil {
					log.Infof("%s: warning: promotion error: %v", s.Config.Name, err)
					continue
				} else if resp.StatusCode != http.StatusOK {
					log.Infof("%s: warning: promotion failure: %v", s.Config.Name, resp.StatusCode)
					continue
				}
				break loop
			}
		}
	}
}

// monitorPeerActivity has the leader periodically check for dead nodes and
// demote them.
func (s *PeerServer) monitorPeerActivity(closeChan chan bool) {
	for {
		select {
		case <-time.After(PeerActivityMonitorTimeout):
		case <-closeChan:
			return
		}

		// Ignore while this peer is not a leader.
		if s.raftServer.State() != raft.Leader {
			continue
		}

		// Check last activity for all peers.
		now := time.Now()
		promoteDelay := time.Duration(s.ClusterConfig().PromoteDelay) * time.Second
		peers := s.raftServer.Peers()
		for _, peer := range peers {
			// If the peer's last response is older than the promote delay
			// then automatically demote the peer.
			if !peer.LastActivity().IsZero() && now.Sub(peer.LastActivity()) > promoteDelay {
				log.Infof("%s: demoting node: %v; last activity %v ago", s.Config.Name, peer.Name, now.Sub(peer.LastActivity()))
				if _, err := s.raftServer.Do(&DemoteCommand{Name: peer.Name}); err != nil {
					log.Infof("%s: warning: autodemotion error: %v", s.Config.Name, err)
				}
			}
		}
	}
}

// Mode represents whether the server is an active peer or if the server is
// simply acting as a proxy.
type Mode string

const (
	// PeerMode is when the server is an active node in Raft.
	PeerMode = Mode("peer")

	// ProxyMode is when the server is an inactive, request-forwarding node.
	ProxyMode = Mode("proxy")
)