server.go 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718
  1. /*
  2. Copyright 2014 CoreOS, Inc.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package etcdserver
  14. import (
  15. "encoding/json"
  16. "errors"
  17. "log"
  18. "math/rand"
  19. "os"
  20. "path"
  21. "strconv"
  22. "sync/atomic"
  23. "time"
  24. "github.com/coreos/etcd/Godeps/_workspace/src/code.google.com/p/go.net/context"
  25. "github.com/coreos/etcd/discovery"
  26. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  27. "github.com/coreos/etcd/etcdserver/stats"
  28. "github.com/coreos/etcd/pkg/pbutil"
  29. "github.com/coreos/etcd/raft"
  30. "github.com/coreos/etcd/raft/raftpb"
  31. "github.com/coreos/etcd/snap"
  32. "github.com/coreos/etcd/store"
  33. "github.com/coreos/etcd/wait"
  34. "github.com/coreos/etcd/wal"
  35. )
const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	// defaultSyncTimeout bounds how long a SYNC proposal may run (see sync).
	defaultSyncTimeout = time.Second
	// DefaultSnapCount is the snapshot trigger used when the configured
	// SnapCount is zero (see start).
	DefaultSnapCount = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second

	// Store key prefixes separating cluster-administration data ("/0")
	// from user keys ("/1").
	StoreAdminPrefix = "/0"
	StoreKeysPrefix  = "/1"
)
var (
	// Sentinel errors returned by the Server interface methods.
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
	ErrRemoved       = errors.New("etcdserver: server removed")
	ErrIDRemoved     = errors.New("etcdserver: ID removed")
	ErrIDExists      = errors.New("etcdserver: ID exists")
	ErrIDNotFound    = errors.New("etcdserver: ID not found")

	// Store paths under which cluster membership is recorded.
	storeMembersPrefix        = path.Join(StoreAdminPrefix, "members")
	storeRemovedMembersPrefix = path.Join(StoreAdminPrefix, "removed_members")
)
// init seeds math/rand so GenID produces different ID sequences across
// process restarts.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// sendFunc delivers a batch of raft messages to their destinations.
// See the EtcdServer.send field for the non-blocking contract.
type sendFunc func(m []raftpb.Message)

// Response is the result of a Do request: an Event for completed store
// operations, or a Watcher for watch-style GETs. Exactly one of the two
// is set on success.
type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	// err carries the store-level error; Do surfaces it to the caller.
	err error
}
// Storage is the stable-storage abstraction the server persists raft
// state through (satisfied in NewServer by an anonymous struct embedding
// *wal.WAL and *snap.Snapshotter).
type Storage interface {
	// Save function saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry)
	// SaveSnap function saves snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot)
	// TODO: WAL should be able to control cut itself. After implement self-controled cut,
	// remove it in this interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
}
// Server is the set of operations a consumer of this package may perform
// against a running etcd server.
type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// Do takes a request and attempts to fulfil it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if member ID is removed from the cluster, or return
	// ErrIDExists if member ID exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if member ID is removed from the cluster, or return
	// ErrIDNotFound if member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
}
// Stats exposes the server's runtime statistics as serialized JSON.
type Stats interface {
	// SelfStats returns the struct representing statistics of this server
	SelfStats() []byte
	// LeaderStats returns the statistics of all followers in the cluster
	// if this server is leader. Otherwise, nil is returned.
	LeaderStats() []byte
	// StoreStats returns statistics of the store backing this EtcdServer
	StoreStats() []byte
	// UpdateRecvApp updates the underlying statistics in response to a receiving an Append request
	UpdateRecvApp(from uint64, length int64)
}
// RaftTimer reports the latest raft index and term the server has seen
// (cached in apply and read with atomic loads).
type RaftTimer interface {
	Index() uint64
	Term() uint64
}
// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	w       wait.Wait     // matches proposal IDs to waiting Do/configure callers
	done    chan struct{} // closed by Stop to tell run to exit
	stopped chan struct{} // closed by run once it has exited

	id         uint64
	clusterID  uint64
	attributes Attributes // this member's name/clientURLs, registered via publish

	ClusterStore ClusterStore

	node   raft.Node
	store  store.Store
	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	// send specifies the send function for sending msgs to members. send
	// MUST NOT block. It is okay to drop messages, since clients should
	// timeout and reissue their messages. If send is nil, server will
	// panic.
	send sendFunc

	storage Storage

	Ticker     <-chan time.Time // drives raft node ticks
	SyncTicker <-chan time.Time // drives periodic SYNC proposals (leader only)

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
}
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
//
// Two paths exist: if no WAL is present this is a brand-new member and the
// node is bootstrapped (optionally via discovery); otherwise the store is
// recovered from the latest snapshot and the WAL is replayed. Any failure
// in either path is fatal to the process.
func NewServer(cfg *ServerConfig) *EtcdServer {
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var id, cid uint64
	if !wal.Exist(cfg.WALDir()) {
		// Fresh member: validate bootstrap config and, when a discovery
		// URL is configured, learn the cluster membership from it.
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			log.Fatalf("etcdserver: %v", err)
		}
		m := cfg.Cluster.FindName(cfg.Name)
		if cfg.ShouldDiscover() {
			d, err := discovery.New(cfg.DiscoveryURL, m.ID, cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcdserver: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
			if err = cfg.Cluster.SetMembersFromString(s); err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
		}
		id, cid, n, w = startNode(cfg)
	} else {
		// Existing member: discovery is meaningless once a WAL exists.
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: warn: ignoring discovery: etcd has already been initialized and has a valid log in %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: recovering from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		id, cid, n, w = restartNode(cfg, index, snapshot)
	}
	cls := &clusterStore{Store: st, id: cid}
	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   idAsHex(id),
	}
	lstats := stats.NewLeaderStats(idAsHex(id))
	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         id,
		clusterID:  cid,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		// Storage is the WAL (entries/state) combined with the Snapshotter.
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		stats:        sstats,
		lstats:       lstats,
		send:         Sender(cfg.Transport, cls, sstats, lstats),
		Ticker:       time.Tick(100 * time.Millisecond),
		SyncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}
// Start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}
// start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// This function is just used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	// Fresh wait registry and lifecycle channels for this run of the server.
	s.w = wait.New()
	s.done = make(chan struct{})
	s.stopped = make(chan struct{})
	s.stats.Initialize()
	// TODO: if this is an empty log, writes all peer infos
	// into the first entry
	go s.run()
}
// Process feeds a raft message into the server's raft state machine.
// Messages from members recorded as removed are rejected with ErrRemoved.
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.ClusterStore.IsRemoved(m.From) {
		return ErrRemoved
	}
	return s.node.Step(ctx, m)
}
// run is the server's main loop. It multiplexes raft ticks, Ready batches
// from the raft node, periodic SYNC proposals (leader only), and shutdown.
// It owns the snapshot/apply progress markers (snapi, appliedi) and exits
// only when s.done is closed, acknowledging by closing s.stopped.
func (s *EtcdServer) run() {
	// syncC is nil (and so never fires) unless this node is the leader.
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi uint64
	var nodes []uint64
	for {
		select {
		case <-s.Ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				// Only the leader drives periodic SYNC proposals.
				if rd.RaftState == raft.StateLeader {
					syncC = s.SyncTicker
				} else {
					syncC = nil
				}
			}
			// Persist state/entries and snapshot before sending messages
			// and applying committed entries.
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)
			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			if len(rd.CommittedEntries) != 0 {
				appliedi = s.apply(rd.CommittedEntries, nodes)
			}
			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}
			// recover from snapshot if it is more updated than current applied
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}
			// NOTE(review): uint64 subtraction — this assumes snapi <= appliedi
			// by the time we get here; if snapi ever ran ahead the difference
			// would wrap and trigger a spurious snapshot. TODO confirm invariant.
			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			close(s.stopped)
			return
		}
	}
}
// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
func (s *EtcdServer) Stop() {
	s.node.Stop()
	// Closing done tells run to exit; wait until run closes stopped to
	// acknowledge before returning.
	close(s.done)
	<-s.stopped
}
// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		// IDs key the wait registry; 0 would collide with the "no waiter"
		// Trigger calls below.
		panic("r.ID cannot be 0")
	}
	if r.Method == "GET" && r.Quorum {
		// Quorum reads are routed through raft like writes.
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		// Register the waiter before proposing so the apply path can
		// deliver the result via s.w.Trigger(r.ID, ...).
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, ctx.Err()
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		// Non-quorum reads are served from the local store without consensus.
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	default:
		return Response{}, ErrUnknownMethod
	}
}
// SelfStats returns this server's statistics serialized as JSON.
func (s *EtcdServer) SelfStats() []byte {
	return s.stats.JSON()
}

// LeaderStats returns the follower statistics serialized as JSON.
func (s *EtcdServer) LeaderStats() []byte {
	// TODO(jonboulle): need to lock access to lstats, set it to nil when not leader, ...
	return s.lstats.JSON()
}

// StoreStats returns the backing store's statistics serialized as JSON.
func (s *EtcdServer) StoreStats() []byte {
	return s.store.JsonStats()
}

// UpdateRecvApp records receipt of an Append request of the given length
// from the member identified by from.
func (s *EtcdServer) UpdateRecvApp(from uint64, length int64) {
	s.stats.RecvAppendReq(idAsHex(from), int(length))
}
// AddMember proposes adding memb to the cluster through consensus
// (see configure). Validation errors such as ErrIDRemoved or ErrIDExists
// surface from checkConfChange when the change is applied.
func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  memb.ID,
		Context: b, // serialized Member, decoded again in applyConfChange
	}
	return s.configure(ctx, cc)
}
// RemoveMember proposes removing the member with the given id through
// consensus (see configure). Validation errors such as ErrIDRemoved or
// ErrIDNotFound surface from checkConfChange when the change is applied.
func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}
// Implement the RaftTimer interface

// Index returns the latest raft index the server has seen (atomic read,
// safe to call from any goroutine; written in apply).
func (s *EtcdServer) Index() uint64 {
	return atomic.LoadUint64(&s.raftIndex)
}

// Term returns the latest raft term the server has seen (atomic read,
// safe to call from any goroutine; written in apply).
func (s *EtcdServer) Term() uint64 {
	return atomic.LoadUint64(&s.raftTerm)
}
// configure sends configuration change through consensus then performs it.
// It will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	// Register a waiter keyed by the ConfChange ID; applyConfChange's
	// result (nil or a validation error) is delivered through it.
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		log.Printf("configure error: %v", err)
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		// Anything non-nil that is not an error indicates a programming bug.
		if x != nil {
			log.Panicf("unexpected return type")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}
// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		// Carry the proposer's clock; applyRequest's SYNC case uses it
		// to expire keys consistently on every member.
		Time: time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no promise that node has leader when do SYNC request,
	// so it uses goroutine to propose.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   memberStoreKey(s.id) + attributesSuffix,
		Val:    string(b),
	}
	for {
		// Each attempt gets retryInterval to complete; Do blocks until the
		// proposal succeeds or the context expires, pacing the retries.
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to the cluster", s.attributes)
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}
  465. func getExpirationTime(r *pb.Request) time.Time {
  466. var t time.Time
  467. if r.Expiration != 0 {
  468. t = time.Unix(0, r.Expiration)
  469. }
  470. return t
  471. }
// apply applies the committed entries es in order: normal entries are
// dispatched to applyRequest and conf changes to applyConfChange, with
// each result delivered to any waiter registered under the entry's ID.
// It also caches the latest index/term for the RaftTimer interface and
// returns the index of the last applied entry.
func (s *EtcdServer) apply(es []raftpb.Entry, nodes []uint64) uint64 {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			// Wake the Do caller (if any) waiting on this request ID.
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			// Wake the configure caller (if any) with the result error.
			s.w.Trigger(cc.ID, s.applyConfChange(cc, nodes))
		default:
			panic("unexpected entry type")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied
}
// applyRequest interprets r as a call to store.X and returns a Response interpreted
// from store.Event
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	// f wraps a store call's (event, error) pair into a Response.
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		// POST creates with unique=true.
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			// prevExist explicitly set: update-only vs. create-only.
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			// Conditional put: compare-and-swap on index/value.
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			// Conditional delete: compare-and-delete on index/value.
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		// Expire keys relative to the proposer's clock carried in r.Time,
		// so every member deletes the same set (see sync).
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}
// applyConfChange applies a committed ConfChange to the raft node and the
// cluster store. An invalid change (per checkConfChange) is neutralized by
// clearing NodeID to raft.None before applying, and the validation error
// is returned to the proposer.
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, nodes []uint64) error {
	if err := s.checkConfChange(cc, nodes); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return err
	}
	s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		// Context carries the serialized Member (see AddMember).
		var m Member
		if err := json.Unmarshal(cc.Context, &m); err != nil {
			panic("unexpected unmarshal error")
		}
		if cc.NodeID != m.ID {
			panic("unexpected nodeID mismatch")
		}
		s.ClusterStore.Add(m)
	case raftpb.ConfChangeRemoveNode:
		s.ClusterStore.Remove(cc.NodeID)
	}
	return nil
}
  556. func (s *EtcdServer) checkConfChange(cc raftpb.ConfChange, nodes []uint64) error {
  557. if s.ClusterStore.IsRemoved(cc.NodeID) {
  558. return ErrIDRemoved
  559. }
  560. switch cc.Type {
  561. case raftpb.ConfChangeAddNode:
  562. if containsUint64(nodes, cc.NodeID) {
  563. return ErrIDExists
  564. }
  565. case raftpb.ConfChangeRemoveNode:
  566. if !containsUint64(nodes, cc.NodeID) {
  567. return ErrIDNotFound
  568. }
  569. default:
  570. panic("unexpected ConfChange type")
  571. }
  572. return nil
  573. }
  574. // TODO: non-blocking snapshot
  575. func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
  576. d, err := s.store.Save()
  577. // TODO: current store will never fail to do a snapshot
  578. // what should we do if the store might fail?
  579. if err != nil {
  580. panic("TODO: this is bad, what do we do about it?")
  581. }
  582. s.node.Compact(snapi, snapnodes, d)
  583. s.storage.Cut()
  584. }
// startNode bootstraps a brand-new raft node: it generates the cluster ID,
// persists node/cluster identity as metadata in a fresh WAL, and starts
// raft with the full list of configured peers. Failures are fatal.
func startNode(cfg *ServerConfig) (id, cid uint64, n raft.Node, w *wal.WAL) {
	var err error
	// TODO: remove the discoveryURL when it becomes part of the source for
	// generating nodeID.
	member := cfg.Cluster.FindName(cfg.Name)
	cfg.Cluster.GenID([]byte(cfg.DiscoveryURL))
	// Persist identity in the WAL header so restartNode can recover it.
	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: member.ID, ClusterID: cfg.Cluster.ID()})
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatal(err)
	}
	ids := cfg.Cluster.MemberIDs()
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		// Each peer's Context carries its serialized member record.
		ctx, err := json.Marshal((*cfg.Cluster).FindID(id))
		if err != nil {
			log.Fatal(err)
		}
		peers[i] = raft.Peer{ID: id, Context: ctx}
	}
	id, cid = member.ID, cfg.Cluster.ID()
	log.Printf("etcdserver: start node %d in cluster %d", id, cid)
	// NOTE(review): 10 and 1 appear to be tick parameters for raft —
	// confirm their meaning against raft.StartNode's signature.
	n = raft.StartNode(member.ID, peers, 10, 1)
	return
}
// restartNode reopens the WAL at the given snapshot index, recovers the
// node/cluster identity from the WAL metadata, and restarts raft with the
// persisted hard state and entries. Failures are fatal.
func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (id, cid uint64, n raft.Node, w *wal.WAL) {
	var err error
	// restart a node from previous wal
	if w, err = wal.OpenAtIndex(cfg.WALDir(), index); err != nil {
		log.Fatal(err)
	}
	wmetadata, st, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	// The metadata written by startNode carries the node and cluster IDs.
	var metadata pb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	id, cid = metadata.NodeID, metadata.ClusterID
	log.Printf("etcdserver: restart node %d in cluster %d at commit index %d", id, cid, st.Commit)
	n = raft.RestartNode(id, 10, 1, snapshot, st, ents)
	return
}
  626. // TODO: move the function to /id pkg maybe?
  627. // GenID generates a random id that is not equal to 0.
  628. func GenID() (n uint64) {
  629. for n == 0 {
  630. n = uint64(rand.Int63())
  631. }
  632. return
  633. }
  634. func getBool(v *bool) (vv bool, set bool) {
  635. if v == nil {
  636. return false, false
  637. }
  638. return *v, true
  639. }
  640. func containsUint64(a []uint64, x uint64) bool {
  641. for _, v := range a {
  642. if v == x {
  643. return true
  644. }
  645. }
  646. return false
  647. }
  648. func idAsHex(id uint64) string {
  649. return strconv.FormatUint(id, 16)
  650. }