peer_server.go

package server

import (
    "bytes"
    "crypto/tls"
    "encoding/binary"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "strconv"
    "time"

    etcdErr "github.com/coreos/etcd/error"
    "github.com/coreos/etcd/log"
    "github.com/coreos/etcd/metrics"
    "github.com/coreos/etcd/store"
    "github.com/coreos/raft"
    "github.com/gorilla/mux"
)

const retryInterval = 10

const ThresholdMonitorTimeout = 5 * time.Second

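// PeerServerConfig holds the configuration used to construct a PeerServer.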
type PeerServerConfig struct {
    Name             string
    Path             string
    URL              string
    BindAddr         string
    SnapshotCount    int
    HeartbeatTimeout time.Duration
    ElectionTimeout  time.Duration
    MaxClusterSize   int
    RetryTimes       int
    CORS             *corsInfo
}

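// PeerServer manages the raft side of an etcd node: it owns the raft.Server,
// its HTTP transport, and the associated stats and snapshot state.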
type PeerServer struct {
    Config         PeerServerConfig
    raftServer     raft.Server
    server         *Server
    httpServer     *http.Server
    listener       net.Listener
    joinIndex      uint64
    tlsConf        *TLSConfig
    tlsInfo        *TLSInfo
    followersStats *raftFollowersStats
    serverStats    *raftServerStats
    registry       *Registry
    store          store.Store
    snapConf       *snapshotConf
    closeChan      chan bool

    timeoutThresholdChan chan interface{}

    metrics *metrics.Bucket
}

// TODO: find a good policy for taking snapshots
type snapshotConf struct {
    // Etcd checks whether a snapshot is needed every checkingInterval
    checkingInterval time.Duration
    // The index at which the last snapshot happened
    lastIndex uint64
    // If the number of entries committed since the last snapshot
    // exceeds snapshotThr, etcd takes a snapshot
    snapshotThr uint64
}

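// NewPeerServer creates a PeerServer from the given configuration, builds the
// raft transporter and raft server, and registers the event listeners used for
// logging and metrics.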
func NewPeerServer(psConfig PeerServerConfig, tlsConf *TLSConfig, tlsInfo *TLSInfo, registry *Registry, store store.Store, mb *metrics.Bucket) *PeerServer {
    s := &PeerServer{
        Config:   psConfig,
        tlsConf:  tlsConf,
        tlsInfo:  tlsInfo,
        registry: registry,
        store:    store,
        followersStats: &raftFollowersStats{
            Leader:    psConfig.Name,
            Followers: make(map[string]*raftFollowerStats),
        },
        serverStats: &raftServerStats{
            Name:      psConfig.Name,
            StartTime: time.Now(),
            sendRateQueue: &statsQueue{
                back: -1,
            },
            recvRateQueue: &statsQueue{
                back: -1,
            },
        },
        timeoutThresholdChan: make(chan interface{}, 1),
        metrics:              mb,
    }

    // Create transporter for raft
    raftTransporter := newTransporter(tlsConf.Scheme, tlsConf.Client, s)

    // Create raft server
    raftServer, err := raft.NewServer(psConfig.Name, psConfig.Path, raftTransporter, s.store, s, "")
    if err != nil {
        log.Fatal(err)
    }

    s.snapConf = &snapshotConf{
        checkingInterval: time.Second * 3,
        // this is not accurate, we will update raft to provide an api
        lastIndex:   raftServer.CommitIndex(),
        snapshotThr: uint64(psConfig.SnapshotCount),
    }

    s.raftServer = raftServer
    s.raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.HeartbeatTimeoutEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)
    s.raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

    return s
}

// ListenAndServe starts the raft server and begins serving peer requests.
func (s *PeerServer) ListenAndServe(snapshot bool, cluster []string) error {
    // Load the snapshot, if requested
    if snapshot {
        err := s.raftServer.LoadSnapshot()
        if err == nil {
            log.Debugf("%s finished load snapshot", s.Config.Name)
        } else {
            log.Debug(err)
        }
    }

    s.raftServer.SetElectionTimeout(s.Config.ElectionTimeout)
    s.raftServer.SetHeartbeatTimeout(s.Config.HeartbeatTimeout)
    s.raftServer.Start()

    if s.raftServer.IsLogEmpty() {
        // start as a leader in a new cluster
        if len(cluster) == 0 {
            s.startAsLeader()
        } else {
            s.startAsFollower(cluster)
        }
    } else {
        // Rejoin the previous cluster
        cluster = s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name)
        for i := 0; i < len(cluster); i++ {
            u, err := url.Parse(cluster[i])
            if err != nil {
                log.Debug("rejoin cannot parse url: ", err)
            }
            cluster[i] = u.Host
        }
        ok := s.joinCluster(cluster)
        if !ok {
            log.Warn("the entire cluster is down! this peer will restart the cluster.")
        }
        log.Debugf("%s restart as a follower", s.Config.Name)
    }

    s.closeChan = make(chan bool)

    go s.monitorSync()
    go s.monitorTimeoutThreshold(s.closeChan)

    // enable periodic snapshots
    if snapshot {
        go s.monitorSnapshot()
    }

    // start responding to raft requests
    return s.startTransport(s.tlsConf.Scheme, s.tlsConf.Server)
}

// Overridden version of net/http added so we can manage the listener.
func (s *PeerServer) listenAndServe() error {
    addr := s.httpServer.Addr
    if addr == "" {
        addr = ":http"
    }
    l, e := net.Listen("tcp", addr)
    if e != nil {
        return e
    }
    s.listener = l
    return s.httpServer.Serve(l)
}

// Overridden version of net/http added so we can manage the listener.
func (s *PeerServer) listenAndServeTLS(certFile, keyFile string) error {
    addr := s.httpServer.Addr
    if addr == "" {
        addr = ":https"
    }
    config := &tls.Config{}
    if s.httpServer.TLSConfig != nil {
        *config = *s.httpServer.TLSConfig
    }
    if config.NextProtos == nil {
        config.NextProtos = []string{"http/1.1"}
    }

    var err error
    config.Certificates = make([]tls.Certificate, 1)
    config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
    if err != nil {
        return err
    }

    conn, err := net.Listen("tcp", addr)
    if err != nil {
        return err
    }

    tlsListener := tls.NewListener(conn, config)
    s.listener = tlsListener
    return s.httpServer.Serve(tlsListener)
}

// Stops the server.
func (s *PeerServer) Close() {
    if s.closeChan != nil {
        close(s.closeChan)
        s.closeChan = nil
    }
    if s.listener != nil {
        s.listener.Close()
        s.listener = nil
    }
}

// Retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
    return s.raftServer
}

// Associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
    s.server = server
}

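// startAsLeader bootstraps a new cluster by committing a join command for
// this peer, retrying until it succeeds.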
func (s *PeerServer) startAsLeader() {
    // the leader needs to join itself as a peer
    for {
        _, err := s.raftServer.Do(NewJoinCommand(store.MinVersion(), store.MaxVersion(), s.raftServer.Name(), s.Config.URL, s.server.URL()))
        if err == nil {
            break
        }
    }
    log.Debugf("%s start as a leader", s.Config.Name)
}

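// startAsFollower attempts to join an existing cluster through the given
// peers, retrying up to Config.RetryTimes before exiting fatally.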
func (s *PeerServer) startAsFollower(cluster []string) {
    // start as a follower in an existing cluster
    for i := 0; i < s.Config.RetryTimes; i++ {
        ok := s.joinCluster(cluster)
        if ok {
            return
        }
        log.Warnf("cannot join to cluster via given peers, retry in %d seconds", retryInterval)
        time.Sleep(time.Second * retryInterval)
    }

    log.Fatalf("Cannot join the cluster via given peers after %d retries", s.Config.RetryTimes)
}

// Start listening for and responding to raft commands.
func (s *PeerServer) startTransport(scheme string, tlsConf tls.Config) error {
    log.Infof("raft server [name %s, listen on %s, advertised url %s]", s.Config.Name, s.Config.BindAddr, s.Config.URL)

    router := mux.NewRouter()

    s.httpServer = &http.Server{
        Handler:   router,
        TLSConfig: &tlsConf,
        Addr:      s.Config.BindAddr,
    }

    // internal commands
    router.HandleFunc("/name", s.NameHttpHandler)
    router.HandleFunc("/version", s.VersionHttpHandler)
    router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
    router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
    router.HandleFunc("/join", s.JoinHttpHandler)
    router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
    router.HandleFunc("/vote", s.VoteHttpHandler)
    router.HandleFunc("/log", s.GetLogHttpHandler)
    router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
    router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
    router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
    router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

    if scheme == "http" {
        return s.listenAndServe()
    } else {
        return s.listenAndServeTLS(s.tlsInfo.CertFile, s.tlsInfo.KeyFile)
    }
}

// getVersion fetches the peer version of a cluster.
func getVersion(t *transporter, versionURL url.URL) (int, error) {
    resp, req, err := t.Get(versionURL.String())
    if err != nil {
        return 0, err
    }
    defer resp.Body.Close()

    t.CancelWhenTimeout(req)

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return 0, err
    }

    // Parse version number.
    version, _ := strconv.Atoi(string(body))
    return version, nil
}

// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
    nextVersion := s.store.Version() + 1
    for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.Config.Name) {
        u, err := url.Parse(peerURL)
        if err != nil {
            return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
        }

        t, _ := s.raftServer.Transporter().(*transporter)
        checkURL := (&url.URL{Host: u.Host, Scheme: s.tlsConf.Scheme, Path: fmt.Sprintf("/version/%d/check", nextVersion)}).String()
        resp, _, err := t.Get(checkURL)
        if err != nil {
            return fmt.Errorf("PeerServer: Cannot check version compatibility: %s", u.Host)
        }
        if resp.StatusCode != 200 {
            return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
        }
    }

    return nil
}

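// joinCluster tries each of the given peers in turn and returns true as soon
// as a join succeeds; a join rejected with an etcdErr.Error is fatal.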
func (s *PeerServer) joinCluster(cluster []string) bool {
    for _, peer := range cluster {
        if len(peer) == 0 {
            continue
        }

        err := s.joinByPeer(s.raftServer, peer, s.tlsConf.Scheme)
        if err == nil {
            log.Debugf("%s success join to the cluster via peer %s", s.Config.Name, peer)
            return true
        } else {
            if _, ok := err.(etcdErr.Error); ok {
                log.Fatal(err)
            }
            log.Debugf("cannot join to cluster via peer %s %s", peer, err)
        }
    }

    return false
}

// Send join requests to peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
    var b bytes.Buffer

    // t must be ok
    t, _ := server.Transporter().(*transporter)

    // Our version must match the leader's version
    versionURL := url.URL{Host: peer, Scheme: scheme, Path: "/version"}
    version, err := getVersion(t, versionURL)
    if err != nil {
        return fmt.Errorf("Error during join version check: %v", err)
    }
    if version < store.MinVersion() || version > store.MaxVersion() {
        return fmt.Errorf("Unable to join: cluster version is %d; version compatibility is %d - %d", version, store.MinVersion(), store.MaxVersion())
    }

    json.NewEncoder(&b).Encode(NewJoinCommand(store.MinVersion(), store.MaxVersion(), server.Name(), s.Config.URL, s.server.URL()))

    joinURL := url.URL{Host: peer, Scheme: scheme, Path: "/join"}

    log.Debugf("Send Join Request to %s", joinURL.String())

    resp, req, err := t.Post(joinURL.String(), &b)

    for {
        if err != nil {
            return fmt.Errorf("Unable to join: %v", err)
        }
        if resp != nil {
            defer resp.Body.Close()

            t.CancelWhenTimeout(req)

            if resp.StatusCode == http.StatusOK {
                b, _ := ioutil.ReadAll(resp.Body)
                s.joinIndex, _ = binary.Uvarint(b)
                return nil
            }
            if resp.StatusCode == http.StatusTemporaryRedirect {
                address := resp.Header.Get("Location")
                log.Debugf("Send Join Request to %s", address)
                json.NewEncoder(&b).Encode(NewJoinCommand(store.MinVersion(), store.MaxVersion(), server.Name(), s.Config.URL, s.server.URL()))
                resp, req, err = t.Post(address, &b)
            } else if resp.StatusCode == http.StatusBadRequest {
                log.Debug("Reach max number peers in the cluster")
                decoder := json.NewDecoder(resp.Body)
                err := &etcdErr.Error{}
                decoder.Decode(err)
                return *err
            } else {
                return fmt.Errorf("Unable to join")
            }
        }
    }
}

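// Stats returns the current raft server statistics encoded as JSON.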
func (s *PeerServer) Stats() []byte {
    s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

    // TODO: register state listener to raft to change this field
    // rather than compare the state each time Stats() is called.
    if s.RaftServer().State() == raft.Leader {
        s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
    }

    queue := s.serverStats.sendRateQueue
    s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

    queue = s.serverStats.recvRateQueue
    s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

    b, _ := json.Marshal(s.serverStats)
    return b
}

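// PeerStats returns follower statistics as JSON when this peer is the leader,
// and nil otherwise.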
func (s *PeerServer) PeerStats() []byte {
    if s.raftServer.State() == raft.Leader {
        b, _ := json.Marshal(s.followersStats)
        return b
    }
    return nil
}

// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
    value := event.Value()
    prevValue := event.PrevValue()
    if value == nil {
        value = "<nil>"
    }
    if prevValue == nil {
        prevValue = "<nil>"
    }

    switch event.Type() {
    case raft.StateChangeEventType:
        log.Infof("%s: state changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
    case raft.TermChangeEventType:
        log.Infof("%s: term #%v started.", s.Config.Name, value)
    case raft.LeaderChangeEventType:
        log.Infof("%s: leader changed from '%v' to '%v'.", s.Config.Name, prevValue, value)
    case raft.AddPeerEventType:
        log.Infof("%s: peer added: '%v'", s.Config.Name, value)
    case raft.RemovePeerEventType:
        log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
    case raft.HeartbeatTimeoutEventType:
        var name = "<unknown>"
        if peer, ok := value.(*raft.Peer); ok {
            name = peer.Name
        }
        log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
    case raft.ElectionTimeoutThresholdEventType:
        select {
        case s.timeoutThresholdChan <- value:
        default:
        }
    }
}

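// recordMetricEvent records the duration attached to a raft event (heartbeat
// timing) in the metrics bucket.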
func (s *PeerServer) recordMetricEvent(event raft.Event) {
    name := fmt.Sprintf("raft.event.%s", event.Type())
    value := event.Value().(time.Duration)
    (*s.metrics).Timer(name).Update(value)
}

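// monitorSnapshot periodically compares the current commit index against the
// last snapshot index and takes a new snapshot once the threshold is exceeded.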
func (s *PeerServer) monitorSnapshot() {
    for {
        time.Sleep(s.snapConf.checkingInterval)
        currentIndex := s.RaftServer().CommitIndex()
        count := currentIndex - s.snapConf.lastIndex
        if uint64(count) > s.snapConf.snapshotThr {
            s.raftServer.TakeSnapshot()
            s.snapConf.lastIndex = currentIndex
        }
    }
}

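// monitorSync commits a sync command through raft every 500ms while this peer
// is the leader.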
func (s *PeerServer) monitorSync() {
    ticker := time.Tick(time.Millisecond * 500)
    for {
        select {
        case now := <-ticker:
            if s.raftServer.State() == raft.Leader {
                s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
            }
        }
    }
}

// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold(closeChan chan bool) {
    for {
        select {
        case value := <-s.timeoutThresholdChan:
            log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
        case <-closeChan:
            return
        }

        time.Sleep(ThresholdMonitorTimeout)
    }
}