peer_server.go

package server

import (
	"bytes"
	"crypto/tls"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"time"

	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/log"
	"github.com/coreos/etcd/metrics"
	"github.com/coreos/etcd/store"
	"github.com/coreos/raft"
	"github.com/gorilla/mux"
)

const retryInterval = 10

const ThresholdMonitorTimeout = 5 * time.Second

type PeerServer struct {
	raftServer       raft.Server
	server           *Server
	httpServer       *http.Server
	listener         net.Listener
	joinIndex        uint64
	name             string
	url              string
	bindAddr         string
	tlsConf          *TLSConfig
	tlsInfo          *TLSInfo
	followersStats   *raftFollowersStats
	serverStats      *raftServerStats
	registry         *Registry
	store            store.Store
	snapConf         *snapshotConf
	MaxClusterSize   int
	RetryTimes       int
	HeartbeatTimeout time.Duration
	ElectionTimeout  time.Duration

	closeChan            chan bool
	timeoutThresholdChan chan interface{}

	metrics *metrics.Bucket
}

// TODO: find a good policy to do snapshot
type snapshotConf struct {
	// Etcd checks whether a snapshot is needed every checkingInterval.
	checkingInterval time.Duration

	// The index at which the last snapshot happened.
	lastIndex uint64

	// If the number of entries committed since the last snapshot
	// exceeds snapshotThr, etcd takes a snapshot.
	snapshotThr uint64
}
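
// NewPeerServer creates a peer server and wires it up to a raft server backed
// by the given store, registry, and TLS configuration.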
func NewPeerServer(name string, path string, url string, bindAddr string, tlsConf *TLSConfig, tlsInfo *TLSInfo, registry *Registry, store store.Store, snapshotCount int, heartbeatTimeout, electionTimeout time.Duration, mb *metrics.Bucket) *PeerServer {
	s := &PeerServer{
		name:     name,
		url:      url,
		bindAddr: bindAddr,
		tlsConf:  tlsConf,
		tlsInfo:  tlsInfo,
		registry: registry,
		store:    store,
		followersStats: &raftFollowersStats{
			Leader:    name,
			Followers: make(map[string]*raftFollowerStats),
		},
		serverStats: &raftServerStats{
			Name:      name,
			StartTime: time.Now(),
			sendRateQueue: &statsQueue{
				back: -1,
			},
			recvRateQueue: &statsQueue{
				back: -1,
			},
		},
		HeartbeatTimeout:     heartbeatTimeout,
		ElectionTimeout:      electionTimeout,
		timeoutThresholdChan: make(chan interface{}, 1),
		metrics:              mb,
	}

	// Create transporter for raft
	raftTransporter := newTransporter(tlsConf.Scheme, tlsConf.Client, s)

	// Create raft server
	raftServer, err := raft.NewServer(name, path, raftTransporter, s.store, s, "")
	if err != nil {
		log.Fatal(err)
	}

	s.snapConf = &snapshotConf{
		checkingInterval: time.Second * 3,
		// this is not accurate, we will update raft to provide an api
		lastIndex:   raftServer.CommitIndex(),
		snapshotThr: uint64(snapshotCount),
	}

	s.raftServer = raftServer
	s.raftServer.AddEventListener(raft.StateChangeEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.LeaderChangeEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.TermChangeEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.AddPeerEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.RemovePeerEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.HeartbeatTimeoutEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.ElectionTimeoutThresholdEventType, s.raftEventLogger)
	s.raftServer.AddEventListener(raft.HeartbeatEventType, s.recordMetricEvent)

	return s
}

// Start the raft server
func (s *PeerServer) ListenAndServe(snapshot bool, cluster []string) error {
	// LoadSnapshot
	if snapshot {
		err := s.raftServer.LoadSnapshot()
		if err == nil {
			log.Debugf("%s finished load snapshot", s.name)
		} else {
			log.Debug(err)
		}
	}

	s.raftServer.SetElectionTimeout(s.ElectionTimeout)
	s.raftServer.SetHeartbeatTimeout(s.HeartbeatTimeout)
	s.raftServer.Start()

	if s.raftServer.IsLogEmpty() {
		// start as a leader in a new cluster
		if len(cluster) == 0 {
			s.startAsLeader()
		} else {
			s.startAsFollower(cluster)
		}
	} else {
		// Rejoin the previous cluster
		cluster = s.registry.PeerURLs(s.raftServer.Leader(), s.name)
		for i := 0; i < len(cluster); i++ {
			u, err := url.Parse(cluster[i])
			if err != nil {
				log.Debug("rejoin cannot parse url: ", err)
			}
			cluster[i] = u.Host
		}
		ok := s.joinCluster(cluster)
		if !ok {
			log.Warn("the entire cluster is down! this peer will restart the cluster.")
		}

		log.Debugf("%s restart as a follower", s.name)
	}

	s.closeChan = make(chan bool)

	go s.monitorSync()
	go s.monitorTimeoutThreshold(s.closeChan)

	// open the snapshot
	if snapshot {
		go s.monitorSnapshot()
	}

	// start responding to raft requests
	return s.startTransport(s.tlsConf.Scheme, s.tlsConf.Server)
}

// Overridden version of net/http added so we can manage the listener.
func (s *PeerServer) listenAndServe() error {
	addr := s.httpServer.Addr
	if addr == "" {
		addr = ":http"
	}
	l, e := net.Listen("tcp", addr)
	if e != nil {
		return e
	}
	s.listener = l
	return s.httpServer.Serve(l)
}

// Overridden version of net/http added so we can manage the listener.
func (s *PeerServer) listenAndServeTLS(certFile, keyFile string) error {
	addr := s.httpServer.Addr
	if addr == "" {
		addr = ":https"
	}
	config := &tls.Config{}
	if s.httpServer.TLSConfig != nil {
		*config = *s.httpServer.TLSConfig
	}
	if config.NextProtos == nil {
		config.NextProtos = []string{"http/1.1"}
	}

	var err error
	config.Certificates = make([]tls.Certificate, 1)
	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return err
	}

	conn, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	tlsListener := tls.NewListener(conn, config)
	s.listener = tlsListener
	return s.httpServer.Serve(tlsListener)
}

// Stops the server.
func (s *PeerServer) Close() {
	if s.closeChan != nil {
		close(s.closeChan)
		s.closeChan = nil
	}
	if s.listener != nil {
		s.listener.Close()
		s.listener = nil
	}
}

// Retrieves the underlying Raft server.
func (s *PeerServer) RaftServer() raft.Server {
	return s.raftServer
}

// Associates the client server with the peer server.
func (s *PeerServer) SetServer(server *Server) {
	s.server = server
}
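
// startAsLeader bootstraps a single-node cluster by repeatedly issuing a join
// command for this peer until it succeeds.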
func (s *PeerServer) startAsLeader() {
	// The leader needs to join itself as a peer.
	for {
		_, err := s.raftServer.Do(NewJoinCommand(store.MinVersion(), store.MaxVersion(), s.raftServer.Name(), s.url, s.server.URL()))
		if err == nil {
			break
		}
	}
	log.Debugf("%s start as a leader", s.name)
}
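
// startAsFollower tries to join an existing cluster through the given peers,
// retrying up to RetryTimes before exiting fatally.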
func (s *PeerServer) startAsFollower(cluster []string) {
	// Start as a follower in an existing cluster.
	for i := 0; i < s.RetryTimes; i++ {
		ok := s.joinCluster(cluster)
		if ok {
			return
		}
		log.Warnf("cannot join to cluster via given peers, retry in %d seconds", retryInterval)
		time.Sleep(time.Second * retryInterval)
	}

	log.Fatalf("Cannot join the cluster via given peers after %d retries", s.RetryTimes)
}

// Start listening for and responding to raft commands.
func (s *PeerServer) startTransport(scheme string, tlsConf tls.Config) error {
	log.Infof("raft server [name %s, listen on %s, advertised url %s]", s.name, s.bindAddr, s.url)

	router := mux.NewRouter()

	s.httpServer = &http.Server{
		Handler:   router,
		TLSConfig: &tlsConf,
		Addr:      s.bindAddr,
	}

	// internal commands
	router.HandleFunc("/name", s.NameHttpHandler)
	router.HandleFunc("/version", s.VersionHttpHandler)
	router.HandleFunc("/version/{version:[0-9]+}/check", s.VersionCheckHttpHandler)
	router.HandleFunc("/upgrade", s.UpgradeHttpHandler)
	router.HandleFunc("/join", s.JoinHttpHandler)
	router.HandleFunc("/remove/{name:.+}", s.RemoveHttpHandler)
	router.HandleFunc("/vote", s.VoteHttpHandler)
	router.HandleFunc("/log", s.GetLogHttpHandler)
	router.HandleFunc("/log/append", s.AppendEntriesHttpHandler)
	router.HandleFunc("/snapshot", s.SnapshotHttpHandler)
	router.HandleFunc("/snapshotRecovery", s.SnapshotRecoveryHttpHandler)
	router.HandleFunc("/etcdURL", s.EtcdURLHttpHandler)

	if scheme == "http" {
		return s.listenAndServe()
	} else {
		return s.listenAndServeTLS(s.tlsInfo.CertFile, s.tlsInfo.KeyFile)
	}
}

// getVersion fetches the peer version of a cluster.
func getVersion(t *transporter, versionURL url.URL) (int, error) {
	resp, req, err := t.Get(versionURL.String())
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	t.CancelWhenTimeout(req)

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, err
	}

	// Parse version number.
	version, _ := strconv.Atoi(string(body))
	return version, nil
}

// Upgradable checks whether all peers in a cluster support an upgrade to the next store version.
func (s *PeerServer) Upgradable() error {
	nextVersion := s.store.Version() + 1
	for _, peerURL := range s.registry.PeerURLs(s.raftServer.Leader(), s.name) {
		u, err := url.Parse(peerURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot parse URL: '%s' (%s)", peerURL, err)
		}

		t, _ := s.raftServer.Transporter().(*transporter)
		checkURL := (&url.URL{Host: u.Host, Scheme: s.tlsConf.Scheme, Path: fmt.Sprintf("/version/%d/check", nextVersion)}).String()
		resp, _, err := t.Get(checkURL)
		if err != nil {
			return fmt.Errorf("PeerServer: Cannot check version compatibility: %s", u.Host)
		}
		if resp.StatusCode != 200 {
			return fmt.Errorf("PeerServer: Version %d is not compatible with peer: %s", nextVersion, u.Host)
		}
	}

	return nil
}
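
// joinCluster tries each of the given peers in turn and returns true as soon
// as a join succeeds; it returns false if no peer could be joined.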
func (s *PeerServer) joinCluster(cluster []string) bool {
	for _, peer := range cluster {
		if len(peer) == 0 {
			continue
		}

		err := s.joinByPeer(s.raftServer, peer, s.tlsConf.Scheme)
		if err == nil {
			log.Debugf("%s success join to the cluster via peer %s", s.name, peer)
			return true
		} else {
			if _, ok := err.(etcdErr.Error); ok {
				log.Fatal(err)
			}
			log.Debugf("cannot join to cluster via peer %s %s", peer, err)
		}
	}

	return false
}

// Send join requests to peer.
func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string) error {
	var b bytes.Buffer

	// t must be ok
	t, _ := server.Transporter().(*transporter)

	// Our version must match the leader's version
	versionURL := url.URL{Host: peer, Scheme: scheme, Path: "/version"}
	version, err := getVersion(t, versionURL)
	if err != nil {
		return fmt.Errorf("Error during join version check: %v", err)
	}
	if version < store.MinVersion() || version > store.MaxVersion() {
		return fmt.Errorf("Unable to join: cluster version is %d; version compatibility is %d - %d", version, store.MinVersion(), store.MaxVersion())
	}

	json.NewEncoder(&b).Encode(NewJoinCommand(store.MinVersion(), store.MaxVersion(), server.Name(), s.url, s.server.URL()))

	joinURL := url.URL{Host: peer, Scheme: scheme, Path: "/join"}

	log.Debugf("Send Join Request to %s", joinURL.String())

	resp, req, err := t.Post(joinURL.String(), &b)

	for {
		if err != nil {
			return fmt.Errorf("Unable to join: %v", err)
		}
		if resp != nil {
			defer resp.Body.Close()

			t.CancelWhenTimeout(req)

			if resp.StatusCode == http.StatusOK {
				b, _ := ioutil.ReadAll(resp.Body)
				s.joinIndex, _ = binary.Uvarint(b)
				return nil
			}
			if resp.StatusCode == http.StatusTemporaryRedirect {
				address := resp.Header.Get("Location")
				log.Debugf("Send Join Request to %s", address)
				json.NewEncoder(&b).Encode(NewJoinCommand(store.MinVersion(), store.MaxVersion(), server.Name(), s.url, s.server.URL()))
				resp, req, err = t.Post(address, &b)
			} else if resp.StatusCode == http.StatusBadRequest {
				log.Debug("Reach max number peers in the cluster")
				decoder := json.NewDecoder(resp.Body)
				err := &etcdErr.Error{}
				decoder.Decode(err)
				return *err
			} else {
				return fmt.Errorf("Unable to join")
			}
		}
	}
}
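
// Stats updates and returns the server statistics, marshaled as JSON.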
func (s *PeerServer) Stats() []byte {
	s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()

	// TODO: register state listener to raft to change this field
	// rather than compare the state each time Stats() is called.
	if s.RaftServer().State() == raft.Leader {
		s.serverStats.LeaderInfo.Name = s.RaftServer().Name()
	}

	queue := s.serverStats.sendRateQueue
	s.serverStats.SendingPkgRate, s.serverStats.SendingBandwidthRate = queue.Rate()

	queue = s.serverStats.recvRateQueue
	s.serverStats.RecvingPkgRate, s.serverStats.RecvingBandwidthRate = queue.Rate()

	b, _ := json.Marshal(s.serverStats)
	return b
}

// PeerStats returns follower statistics, marshaled as JSON, if this peer is
// the leader; otherwise it returns nil.
func (s *PeerServer) PeerStats() []byte {
	if s.raftServer.State() == raft.Leader {
		b, _ := json.Marshal(s.followersStats)
		return b
	}
	return nil
}

// raftEventLogger converts events from the Raft server into log messages.
func (s *PeerServer) raftEventLogger(event raft.Event) {
	value := event.Value()
	prevValue := event.PrevValue()
	if value == nil {
		value = "<nil>"
	}
	if prevValue == nil {
		prevValue = "<nil>"
	}

	switch event.Type() {
	case raft.StateChangeEventType:
		log.Infof("%s: state changed from '%v' to '%v'.", s.name, prevValue, value)
	case raft.TermChangeEventType:
		log.Infof("%s: term #%v started.", s.name, value)
	case raft.LeaderChangeEventType:
		log.Infof("%s: leader changed from '%v' to '%v'.", s.name, prevValue, value)
	case raft.AddPeerEventType:
		log.Infof("%s: peer added: '%v'", s.name, value)
	case raft.RemovePeerEventType:
		log.Infof("%s: peer removed: '%v'", s.name, value)
	case raft.HeartbeatTimeoutEventType:
		var name = "<unknown>"
		if peer, ok := value.(*raft.Peer); ok {
			name = peer.Name
		}
		log.Infof("%s: warning: heartbeat timed out: '%v'", s.name, name)
	case raft.ElectionTimeoutThresholdEventType:
		select {
		case s.timeoutThresholdChan <- value:
		default:
		}
	}
}
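
// recordMetricEvent records the duration carried by a raft event in the
// metrics timer named after the event type.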
func (s *PeerServer) recordMetricEvent(event raft.Event) {
	name := fmt.Sprintf("raft.event.%s", event.Type())
	value := event.Value().(time.Duration)
	(*s.metrics).Timer(name).Update(value)
}
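
// monitorSnapshot periodically checks how far the commit index has advanced
// since the last snapshot and takes a new snapshot once the threshold is exceeded.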
func (s *PeerServer) monitorSnapshot() {
	for {
		time.Sleep(s.snapConf.checkingInterval)
		currentIndex := s.RaftServer().CommitIndex()
		count := currentIndex - s.snapConf.lastIndex
		if uint64(count) > s.snapConf.snapshotThr {
			s.raftServer.TakeSnapshot()
			s.snapConf.lastIndex = currentIndex
		}
	}
}
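
// monitorSync issues a sync command through raft every 500ms while this peer
// is the leader.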
func (s *PeerServer) monitorSync() {
	ticker := time.Tick(time.Millisecond * 500)
	for {
		select {
		case now := <-ticker:
			if s.raftServer.State() == raft.Leader {
				s.raftServer.Do(s.store.CommandFactory().CreateSyncCommand(now))
			}
		}
	}
}

// monitorTimeoutThreshold groups timeout threshold events together and prints
// them as a single log line.
func (s *PeerServer) monitorTimeoutThreshold(closeChan chan bool) {
	for {
		select {
		case value := <-s.timeoutThresholdChan:
			log.Infof("%s: warning: heartbeat near election timeout: %v", s.name, value)
		case <-closeChan:
			return
		}

		time.Sleep(ThresholdMonitorTimeout)
	}
}