// server.go

package etcdserver

import (
	"encoding/json"
	"errors"
	"log"
	"math/rand"
	"os"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/code.google.com/p/go.net/context"
	"github.com/coreos/etcd/discovery"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/wait"
	"github.com/coreos/etcd/wal"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode     = 0700
	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
	ErrIDRemoved     = errors.New("etcdserver: ID removed")
	ErrIDExists      = errors.New("etcdserver: ID exists")
	ErrIDNotFound    = errors.New("etcdserver: ID not found")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}
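
// sendFunc delivers raft messages to the cluster members; implementations
// must not block (see the send field of EtcdServer).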
type sendFunc func(m []raftpb.Message)
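
// Response is the result of applying a request: at most one of Event or
// Watcher is set, and err records any error hit while applying the request.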
type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}

type Storage interface {
	// Save saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry)
	// SaveSnap saves the snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot)

	// TODO: WAL should be able to control cut itself. After implementing
	// self-controlled cut, remove Cut from this interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
}

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// Do takes a request and attempts to fulfil it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if the member ID has been removed from the cluster, or
	// ErrIDExists if the member ID already exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if the member ID has been removed from the cluster,
	// or ErrIDNotFound if the member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
}

type ServerStats interface {
	// SelfStats returns the statistics of this server.
	SelfStats() *stats.ServerStats
	// LeaderStats returns the statistics of all followers in the cluster
	// if this server is leader. Otherwise, nil is returned.
	LeaderStats() *stats.LeaderStats
}

type StoreStats interface {
	// JSON returns statistics of the underlying Store used by the
	// EtcdServer, in JSON format.
	JSON() []byte
}
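
// RaftTimer reports the latest raft index and term the server has seen.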
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// EtcdServer is the production implementation of the Server interface.
type EtcdServer struct {
	w          wait.Wait
	done       chan struct{}
	id         uint64
	attributes Attributes

	ClusterStore ClusterStore

	node  raft.Node
	store store.Store

	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	// send specifies the send function for sending msgs to members. send
	// MUST NOT block. It is okay to drop messages, since clients should
	// timeout and reissue their messages. If send is nil, server will
	// panic.
	send sendFunc

	storage Storage

	ticker     <-chan time.Time
	syncTicker <-chan time.Time

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	if err := cfg.Verify(); err != nil {
		log.Fatalln(err)
	}
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	if !wal.Exist(cfg.WALDir()) {
		if !cfg.IsBootstrap() {
			log.Fatalf("etcdserver: initial cluster state unset and no wal or discovery URL found")
		}
		if cfg.ShouldDiscover() {
			d, err := discovery.New(cfg.DiscoveryURL, cfg.ID(), cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcdserver: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
			if err = cfg.Cluster.Set(s); err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
		}
		n, w = startNode(cfg)
	} else {
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: warn: ignoring discovery: etcd has already been initialized and has a valid log in %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: recovering from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		n, w = restartNode(cfg, index, snapshot)
	}
	cls := &clusterStore{Store: st}
	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   strconv.FormatUint(cfg.ID(), 16),
	}
	lstats := stats.NewLeaderStats(strconv.FormatUint(cfg.ID(), 16))
	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         cfg.ID(),
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		stats:        sstats,
		lstats:       lstats,
		send:         Sender(cfg.Transport, cls, sstats, lstats),
		ticker:       time.Tick(100 * time.Millisecond),
		syncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}
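
// A minimal sketch of constructing and running a server (assumes a fully
// populated ServerConfig; configuration fields are elided here):
//
//	s := NewServer(cfg)
//	s.Start()
//	defer s.Stop()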

// Start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after Start has been called.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}

// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after start has been called.
// This function is just used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	s.stats.Initialize()
	// TODO: if this is an empty log, write all peer info
	// into the first entry
	go s.run()
}

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	return s.node.Step(ctx, m)
}
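
// run is the main server loop. It ticks the raft node, handles each Ready
// update (persisting state, sending messages, applying committed entries),
// proposes periodic SYNC requests while this server is leader, and triggers
// a snapshot once snapCount entries have been applied since the last one.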
func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi uint64
	var nodes, removedNodes []uint64
	for {
		select {
		case <-s.ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				removedNodes = rd.SoftState.RemovedNodes
				if rd.RaftState == raft.StateLeader {
					syncC = s.syncTicker
				} else {
					syncC = nil
				}
				if rd.SoftState.ShouldStop {
					s.Stop()
					return
				}
			}
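			// Persist the hard state and entries before handing messages to
			// the transport; raft requires state to be stable before it can
			// be observed by peers.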
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			if len(rd.CommittedEntries) != 0 {
				appliedi = s.apply(rd.CommittedEntries, nodes, removedNodes)
			}

			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}

			// recover from snapshot if it is more recent than the currently
			// applied index
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}

			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			return
		}
	}
}

// Stop stops the server and shuts down the running goroutine. Stop should
// only be called after Start; otherwise it will block forever.
func (s *EtcdServer) Stop() {
	s.node.Stop()
	close(s.done)
}

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
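//
// A typical proposed write looks like this (an illustrative sketch, using
// only request fields that appear elsewhere in this file):
//
//	resp, err := s.Do(ctx, pb.Request{ID: GenID(), Method: "PUT", Path: "/foo", Val: "bar"})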
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		panic("r.ID cannot be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, ctx.Err()
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	default:
		return Response{}, ErrUnknownMethod
	}
}

func (s *EtcdServer) SelfStats() *stats.ServerStats {
	s.stats.LeaderInfo.Uptime = time.Since(s.stats.LeaderInfo.StartTime).String()
	s.stats.SendingPkgRate, s.stats.SendingBandwidthRate = s.stats.SendRates()
	s.stats.RecvingPkgRate, s.stats.RecvingBandwidthRate = s.stats.RecvRates()
	return s.stats
}

func (s *EtcdServer) LeaderStats() *stats.LeaderStats {
	// TODO(jonboulle): need to lock access to lstats, set it to nil when not leader, ...
	return s.lstats
}

func (s *EtcdServer) StoreStats() []byte {
	return s.store.JsonStats()
}

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  memb.ID,
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

// Index implements the RaftTimer interface.
func (s *EtcdServer) Index() uint64 {
	return atomic.LoadUint64(&s.raftIndex)
}

// Term implements the RaftTimer interface.
func (s *EtcdServer) Term() uint64 {
	return atomic.LoadUint64(&s.raftTerm)
}

// configure sends a configuration change through consensus and then performs
// it. It will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		log.Printf("configure error: %v", err)
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("unexpected return type")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no guarantee that the node has a leader when a SYNC request
	// is made, so propose from a goroutine to avoid blocking this one.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   Member{ID: s.id}.storeKey() + attributesSuffix,
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to the cluster", s.attributes)
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}

// getExpirationTime returns the expiration time carried by r, or the zero
// time if r has no expiration.
func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}
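
// apply applies the given entries to the server's state machine in order,
// waking any callers blocked on the result, and returns the index of the
// last applied entry.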
func (s *EtcdServer) apply(es []raftpb.Entry, nodes, removedNodes []uint64) uint64 {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			s.w.Trigger(cc.ID, s.applyConfChange(cc, nodes, removedNodes))
		default:
			panic("unexpected entry type")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied
}

// applyRequest interprets r as a call into s.store and returns a Response
// built from the resulting store.Event.
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}
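
// applyConfChange validates cc and applies it to the raft node and the
// cluster store. If validation fails, cc is still applied with its NodeID
// set to raft.None so raft can advance its state, and the validation error
// is returned.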
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, nodes, removedNodes []uint64) error {
	if err := checkConfChange(cc, nodes, removedNodes); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return err
	}
	s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		var m Member
		if err := json.Unmarshal(cc.Context, &m); err != nil {
			panic("unexpected unmarshal error")
		}
		if cc.NodeID != m.ID {
			panic("unexpected nodeID mismatch")
		}
		s.ClusterStore.Add(m)
	case raftpb.ConfChangeRemoveNode:
		s.ClusterStore.Remove(cc.NodeID)
	}
	return nil
}
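
// checkConfChange rejects a configuration change that references a removed
// node, adds a node that already exists, or removes a node that is not in
// the cluster.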
func checkConfChange(cc raftpb.ConfChange, nodes, removedNodes []uint64) error {
	if containsUint64(removedNodes, cc.NodeID) {
		return ErrIDRemoved
	}
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		if containsUint64(nodes, cc.NodeID) {
			return ErrIDExists
		}
	case raftpb.ConfChangeRemoveNode:
		if !containsUint64(nodes, cc.NodeID) {
			return ErrIDNotFound
		}
	default:
		panic("unexpected ConfChange type")
	}
	return nil
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		panic("TODO: this is bad, what do we do about it?")
	}
	s.node.Compact(snapi, snapnodes, d)
	s.storage.Cut()
}
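
// startNode creates a fresh WAL stamped with this server's metadata and
// starts a raft node with the configured cluster peers. It is used on first
// boot, when no previous WAL exists.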
func startNode(cfg *ServerConfig) (n raft.Node, w *wal.WAL) {
	var err error
	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: cfg.ID()})
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatal(err)
	}
	ids := cfg.Cluster.IDs()
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster)[id])
		if err != nil {
			log.Fatal(err)
		}
		peers[i] = raft.Peer{ID: id, Context: ctx}
	}
	log.Printf("etcdserver: start node %d", cfg.ID())
	n = raft.StartNode(cfg.ID(), peers, 10, 1)
	return
}
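
// restartNode reopens the WAL at the given index, replays its entries, and
// restarts the raft node from the recovered hard state and snapshot.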
func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (n raft.Node, w *wal.WAL) {
	var err error
	// restart a node from the previous wal
	if w, err = wal.OpenAtIndex(cfg.WALDir(), index); err != nil {
		log.Fatal(err)
	}
	wmetadata, st, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata pb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	log.Printf("etcdserver: restart node %d at commit index %d", metadata.NodeID, st.Commit)
	n = raft.RestartNode(metadata.NodeID, 10, 1, snapshot, st, ents)
	return
}

// TODO: move the function to /id pkg maybe?
// GenID generates a random id that is not equal to 0.
func GenID() (n uint64) {
	for n == 0 {
		n = uint64(rand.Int63())
	}
	return
}
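
// getBool dereferences v, additionally reporting whether it was set at all.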
func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}
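
// containsUint64 reports whether a contains x.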
func containsUint64(a []uint64, x uint64) bool {
	for _, v := range a {
		if v == x {
			return true
		}
	}
	return false
}