server.go

/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcdserver

import (
	"encoding/json"
	"errors"
	"log"
	"math/rand"
	"os"
	"path"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/code.google.com/p/go.net/context"
	"github.com/coreos/etcd/discovery"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/wait"
	"github.com/coreos/etcd/wal"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second

	StoreAdminPrefix = "/0"
	StoreKeysPrefix  = "/1"
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
	ErrRemoved       = errors.New("etcdserver: server removed")
	ErrIDRemoved     = errors.New("etcdserver: ID removed")
	ErrIDExists      = errors.New("etcdserver: ID exists")
	ErrIDNotFound    = errors.New("etcdserver: ID not found")

	storeMembersPrefix        = path.Join(StoreAdminPrefix, "members")
	storeRemovedMembersPrefix = path.Join(StoreAdminPrefix, "removed_members")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

type sendFunc func(m []raftpb.Message)
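
// Illustrative sketch (not part of the original file): a sendFunc MUST NOT
// block and may drop messages (see the send field of EtcdServer below), so a
// stand-in for single-node tests can simply log and discard whatever raft
// hands it:
//
//	var dropSend sendFunc = func(ms []raftpb.Message) {
//		for _, m := range ms {
//			log.Printf("etcdserver: dropping message from %x to %x", m.From, m.To)
//		}
//	}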

type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}

type Storage interface {
	// Save saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry)
	// SaveSnap saves the snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot)
	// TODO: WAL should be able to control cutting itself. After implementing
	// self-controlled cutting, remove Cut from this interface.
	// Cut cuts out a new WAL file for saving new state and entries.
	Cut() error
}
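
// NewServer below satisfies Storage by embedding *wal.WAL and
// *snap.Snapshotter in an anonymous struct. As an illustrative sketch
// (assumed name, not part of this package), a no-op Storage for tests could
// look like:
//
//	type nopStorage struct{}
//
//	func (nopStorage) Save(st raftpb.HardState, ents []raftpb.Entry) {}
//	func (nopStorage) SaveSnap(snap raftpb.Snapshot)                 {}
//	func (nopStorage) Cut() error                                    { return nil }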

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// Do takes a request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member to the cluster. It will return
	// ErrIDRemoved if the member ID has been removed from the cluster, or
	// ErrIDExists if the member ID already exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if the member ID has been removed from the cluster,
	// or ErrIDNotFound if the member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
}
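
// Illustrative usage sketch (assumed values, not part of the original file):
// a caller typically drives this interface roughly like so, where cfg is a
// populated *ServerConfig:
//
//	srv := NewServer(cfg)
//	srv.Start()
//	defer srv.Stop()
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	defer cancel()
//	resp, err := srv.Do(ctx, pb.Request{ID: GenID(), Method: "PUT", Path: "/foo", Val: "bar"})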

type Stats interface {
	// SelfStats returns the JSON-serialized statistics of this server.
	SelfStats() []byte
	// LeaderStats returns the statistics of all followers in the cluster
	// if this server is the leader. Otherwise, nil is returned.
	LeaderStats() []byte
	// StoreStats returns statistics of the store backing this EtcdServer.
	StoreStats() []byte
	// UpdateRecvApp updates the underlying statistics in response to
	// receiving an Append request from the given member.
	UpdateRecvApp(from uint64, length int64)
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	w       wait.Wait
	done    chan struct{}
	stopped chan struct{}

	id         uint64
	clusterID  uint64
	attributes Attributes

	ClusterStore ClusterStore

	node  raft.Node
	store store.Store

	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	// send specifies the send function for sending msgs to members. send
	// MUST NOT block. It is okay to drop messages, since clients should
	// time out and reissue their messages. If send is nil, the server will
	// panic.
	send sendFunc

	storage Storage

	Ticker     <-chan time.Time
	SyncTicker <-chan time.Time

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var id, cid uint64
	if !wal.Exist(cfg.WALDir()) {
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			log.Fatalf("etcdserver: %v", err)
		}
		if cfg.ShouldDiscover() {
			d, err := discovery.New(cfg.DiscoveryURL, cfg.NodeID, cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcdserver: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
			if err = cfg.Cluster.SetMembersFromString(s); err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
		}
		id, cid, n, w = startNode(cfg)
	} else {
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: warn: ignoring discovery: etcd has already been initialized and has a valid log in %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: recovering from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		id, cid, n, w = restartNode(cfg, index, snapshot)
	}
	cls := &clusterStore{Store: st, id: cid}
	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   idAsHex(cfg.NodeID),
	}
	lstats := stats.NewLeaderStats(idAsHex(cfg.NodeID))
	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         id,
		clusterID:  cid,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		stats:        sstats,
		lstats:       lstats,
		send:         Sender(cfg.Transport, cls, sstats, lstats),
		Ticker:       time.Tick(100 * time.Millisecond),
		SyncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}
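
// Boot-path summary (commentary, not code from the original file): NewServer
// picks one of three startup paths:
//
//	no WAL, discovery configured -> run discovery, then startNode (fresh cluster)
//	no WAL, static configuration -> startNode with cfg.Cluster as given
//	existing WAL                 -> load the latest snapshot (if any), then restartNode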

// Start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after it has been passed to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}

// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after it has been passed to start.
// This function is used directly only in tests.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	s.stopped = make(chan struct{})
	s.stats.Initialize()
	// TODO: if this is an empty log, write all peer infos
	// into the first entry
	go s.run()
}

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.ClusterStore.IsRemoved(m.From) {
		return ErrRemoved
	}
	return s.node.Step(ctx, m)
}

func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi uint64
	var nodes []uint64
	for {
		select {
		case <-s.Ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				if rd.RaftState == raft.StateLeader {
					syncC = s.SyncTicker
				} else {
					syncC = nil
				}
			}
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)
			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			if len(rd.CommittedEntries) != 0 {
				appliedi = s.apply(rd.CommittedEntries, nodes)
			}
			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}
			// recover from the snapshot if it is newer than the
			// currently applied index
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}
			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			close(s.stopped)
			return
		}
	}
}
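
// Ordering note (commentary, not from the original file): in the Ready case
// above, HardState and Entries are persisted via s.storage.Save before
// messages are handed to s.send; the raft library requires that state be on
// stable storage before it is communicated to peers.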

// Stop stops the server gracefully and shuts down the running goroutine.
// Stop should only be called after Start, otherwise it will block forever.
func (s *EtcdServer) Stop() {
	s.node.Stop()
	close(s.done)
	<-s.stopped
}

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		panic("r.ID cannot be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, ctx.Err()
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	default:
		return Response{}, ErrUnknownMethod
	}
}
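
// Illustrative sketch (assumed values, not part of the original file): the
// difference between a local read and a quorum read through Do:
//
//	// served directly from the local store
//	local, err := srv.Do(ctx, pb.Request{ID: GenID(), Method: "GET", Path: "/foo"})
//
//	// rewritten to "QGET" above and routed through raft consensus first
//	quorum, err := srv.Do(ctx, pb.Request{ID: GenID(), Method: "GET", Path: "/foo", Quorum: true})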

func (s *EtcdServer) SelfStats() []byte {
	return s.stats.JSON()
}

func (s *EtcdServer) LeaderStats() []byte {
	// TODO(jonboulle): need to lock access to lstats, set it to nil when not leader, ...
	return s.lstats.JSON()
}

func (s *EtcdServer) StoreStats() []byte {
	return s.store.JsonStats()
}

func (s *EtcdServer) UpdateRecvApp(from uint64, length int64) {
	s.stats.RecvAppendReq(idAsHex(from), int(length))
}

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  memb.ID,
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

// Index and Term implement the RaftTimer interface.
func (s *EtcdServer) Index() uint64 {
	return atomic.LoadUint64(&s.raftIndex)
}

func (s *EtcdServer) Term() uint64 {
	return atomic.LoadUint64(&s.raftTerm)
}

// configure sends a configuration change through consensus and then performs
// it. It will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		log.Printf("configure error: %v", err)
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("unexpected return type")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}
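
// Commentary (not from the original file): Do and configure share the same
// wait pattern: register a channel under the proposal's unique ID, propose
// through raft, and block until apply triggers that ID once the entry
// commits. The Trigger calls on the error paths deregister the ID so the
// wait registry does not leak.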

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be canceled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no promise that the node has a leader when it makes a SYNC
	// request, so the proposal is made in a goroutine to avoid blocking.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   memberStoreKey(s.id) + attributesSuffix,
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to the cluster", s.attributes)
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}

func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}

func (s *EtcdServer) apply(es []raftpb.Entry, nodes []uint64) uint64 {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			s.w.Trigger(cc.ID, s.applyConfChange(cc, nodes))
		default:
			panic("unexpected entry type")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied
}

// applyRequest interprets r as a call to store.X and returns a Response
// interpreted from store.Event
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}
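
// Commentary (not from the original file): the "PUT" arm above maps the v2
// API's preconditions onto store operations:
//
//	prevExist == true         -> Update (fails if the key is missing)
//	prevExist == false        -> Create (fails if the key already exists)
//	prevIndex/prevValue given -> CompareAndSwap
//	no preconditions          -> Set (unconditional)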

func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, nodes []uint64) error {
	if err := s.checkConfChange(cc, nodes); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return err
	}
	s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		var m Member
		if err := json.Unmarshal(cc.Context, &m); err != nil {
			panic("unexpected unmarshal error")
		}
		if cc.NodeID != m.ID {
			panic("unexpected nodeID mismatch")
		}
		s.ClusterStore.Add(m)
	case raftpb.ConfChangeRemoveNode:
		s.ClusterStore.Remove(cc.NodeID)
	}
	return nil
}

func (s *EtcdServer) checkConfChange(cc raftpb.ConfChange, nodes []uint64) error {
	if s.ClusterStore.IsRemoved(cc.NodeID) {
		return ErrIDRemoved
	}
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		if containsUint64(nodes, cc.NodeID) {
			return ErrIDExists
		}
	case raftpb.ConfChangeRemoveNode:
		if !containsUint64(nodes, cc.NodeID) {
			return ErrIDNotFound
		}
	default:
		panic("unexpected ConfChange type")
	}
	return nil
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
	d, err := s.store.Save()
	// TODO: the current store will never fail to do a snapshot;
	// what should we do if the store might fail?
	if err != nil {
		panic("TODO: this is bad, what do we do about it?")
	}
	s.node.Compact(snapi, snapnodes, d)
	s.storage.Cut()
}

func startNode(cfg *ServerConfig) (id, cid uint64, n raft.Node, w *wal.WAL) {
	var err error
	// TODO: remove the discoveryURL when it becomes part of the source for
	// generating nodeID.
	cfg.Cluster.GenID([]byte(cfg.DiscoveryURL))
	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: cfg.NodeID, ClusterID: cfg.Cluster.ID()})
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatal(err)
	}
	ids := cfg.Cluster.MemberIDs()
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).FindID(id))
		if err != nil {
			log.Fatal(err)
		}
		peers[i] = raft.Peer{ID: id, Context: ctx}
	}
	id, cid = cfg.NodeID, cfg.Cluster.ID()
	log.Printf("etcdserver: start node %d in cluster %d", id, cid)
	n = raft.StartNode(cfg.NodeID, peers, 10, 1)
	return
}

func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (id, cid uint64, n raft.Node, w *wal.WAL) {
	var err error
	// restart the node from the previous WAL
	if w, err = wal.OpenAtIndex(cfg.WALDir(), index); err != nil {
		log.Fatal(err)
	}
	wmetadata, st, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata pb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	id, cid = metadata.NodeID, metadata.ClusterID
	log.Printf("etcdserver: restart node %d in cluster %d at commit index %d", id, cid, st.Commit)
	n = raft.RestartNode(id, 10, 1, snapshot, st, ents)
	return
}

// TODO: move the function to the /id pkg maybe?
// GenID generates a random id that is not equal to 0.
func GenID() (n uint64) {
	for n == 0 {
		n = uint64(rand.Int63())
	}
	return
}

func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}

func containsUint64(a []uint64, x uint64) bool {
	for _, v := range a {
		if v == x {
			return true
		}
	}
	return false
}

func idAsHex(id uint64) string {
	return strconv.FormatUint(id, 16)
}