raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "encoding/json"
    "expvar"
    "log"
    "sort"
    "sync"
    "time"

    "go.etcd.io/etcd/etcdserver/api/membership"
    "go.etcd.io/etcd/etcdserver/api/rafthttp"
    pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
    "go.etcd.io/etcd/pkg/contention"
    "go.etcd.io/etcd/pkg/logutil"
    "go.etcd.io/etcd/pkg/pbutil"
    "go.etcd.io/etcd/pkg/types"
    "go.etcd.io/etcd/raft"
    "go.etcd.io/etcd/raft/raftpb"
    "go.etcd.io/etcd/wal"
    "go.etcd.io/etcd/wal/walpb"

    "go.uber.org/zap"
)

const (
    // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
    // Assuming the RTT is around 10ms, 1MB max size is large enough.
    maxSizePerMsg = 1 * 1024 * 1024
    // Never overflow the rafthttp buffer, which is 4096.
    // TODO: a better const?
    maxInflightMsgs = 4096 / 8
)

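// Sizing note (derived from the comments above): 100 MB/s over a ~10 ms round
// trip is roughly 1 MB per RTT, which motivates maxSizePerMsg; 4096/8 caps the
// number of in-flight append messages at 512, well below the rafthttp buffer.
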
var (
    // protects raftStatus
    raftStatusMu sync.Mutex
    // indirection for the expvar func interface;
    // expvar panics when publishing a duplicate name and has no way to
    // remove a registered name, so we register a single func that calls
    // raftStatus and swap raftStatus out as needed.
    raftStatus func() raft.Status
)

func init() {
    lcfg := logutil.DefaultZapLoggerConfig
    lg, err := logutil.NewRaftLogger(&lcfg)
    if err != nil {
        log.Fatalf("cannot create raft logger %v", err)
    }
    raft.SetLogger(lg)

    expvar.Publish("raft.status", expvar.Func(func() interface{} {
        raftStatusMu.Lock()
        defer raftStatusMu.Unlock()
        return raftStatus()
    }))
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// notifyc before assuming the raft messages are stable.
type apply struct {
    entries  []raftpb.Entry
    snapshot raftpb.Snapshot
    // notifyc synchronizes etcd server applies with the raft node
    notifyc chan struct{}
}

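// A minimal consumer sketch (assumed usage; applyEntriesAndSnapshot and
// maybeTriggerSnapshot are hypothetical helpers standing in for the server's
// applyAll logic):
//
//    for {
//        ap := <-r.apply()
//        applyEntriesAndSnapshot(ap) // apply ap.snapshot and ap.entries to the store
//        <-ap.notifyc                // wait until raft has persisted the matching log writes
//        maybeTriggerSnapshot()      // snapshot only after the raft log is stable on disk
//    }
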
type raftNode struct {
    lg *zap.Logger

    tickMu *sync.Mutex
    raftNodeConfig

    // a chan to send/receive snapshot
    msgSnapC chan raftpb.Message
    // a chan to send out apply
    applyc chan apply
    // a chan to send out readState
    readStateC chan raft.ReadState

    // utility
    ticker *time.Ticker
    // contention detectors for raft heartbeat message
    td *contention.TimeoutDetector

    stopped chan struct{}
    done    chan struct{}
}

type raftNodeConfig struct {
    lg *zap.Logger

    // to check if msg receiver is removed from cluster
    isIDRemoved func(id uint64) bool
    raft.Node
    raftStorage *raft.MemoryStorage
    storage     Storage
    heartbeat   time.Duration // for logging
    // transport specifies the transport to send and receive msgs to members.
    // Sending messages MUST NOT block. It is okay to drop messages, since
    // clients should timeout and reissue their messages.
    // If transport is nil, server will panic.
    transport rafthttp.Transporter
}

func newRaftNode(cfg raftNodeConfig) *raftNode {
    r := &raftNode{
        lg:             cfg.lg,
        tickMu:         new(sync.Mutex),
        raftNodeConfig: cfg,
        // set up contention detectors for raft heartbeat message.
        // expect to send a heartbeat within 2 heartbeat intervals.
        td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
        readStateC: make(chan raft.ReadState, 1),
        msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
        applyc:     make(chan apply),
        stopped:    make(chan struct{}),
        done:       make(chan struct{}),
    }
    if r.heartbeat == 0 {
        r.ticker = &time.Ticker{}
    } else {
        r.ticker = time.NewTicker(r.heartbeat)
    }
    return r
}

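// Example wiring (a sketch, not taken from this file; n, s, w, ss, tr, cl, and
// rh are assumed to come from startNode/readWAL and the server's transport and
// ready-handler setup):
//
//    r := newRaftNode(raftNodeConfig{
//        lg:          cfg.Logger,
//        isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
//        Node:        n,
//        raftStorage: s,
//        storage:     NewStorage(w, ss),
//        heartbeat:   time.Duration(cfg.TickMs) * time.Millisecond,
//        transport:   tr,
//    })
//    r.start(rh)
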
// raft.Node does not have locks in the raft package, so Tick calls are
// serialized with tickMu; tick can be invoked both from the ticker loop in
// start and from advanceTicks.
func (r *raftNode) tick() {
    r.tickMu.Lock()
    r.Tick()
    r.tickMu.Unlock()
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
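//
// The goroutine drives the raft.Node Ready loop: for each Ready it hands
// committed entries and the snapshot to the apply channel, persists the hard
// state, entries, and snapshot via r.storage, appends entries to the in-memory
// raft storage, sends outgoing messages (the leader sends before persisting,
// per section 10.2.1 of the Raft thesis), and finally calls Advance.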
func (r *raftNode) start(rh *raftReadyHandler) {
    internalTimeout := time.Second

    go func() {
        defer r.onStop()
        islead := false

        for {
            select {
            case <-r.ticker.C:
                r.tick()
            case rd := <-r.Ready():
                if rd.SoftState != nil {
                    newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
                    if newLeader {
                        leaderChanges.Inc()
                    }

                    if rd.SoftState.Lead == raft.None {
                        hasLeader.Set(0)
                    } else {
                        hasLeader.Set(1)
                    }

                    rh.updateLead(rd.SoftState.Lead)
                    islead = rd.RaftState == raft.StateLeader
                    if islead {
                        isLeader.Set(1)
                    } else {
                        isLeader.Set(0)
                    }
                    rh.updateLeadership(newLeader)
                    r.td.Reset()
                }

                if len(rd.ReadStates) != 0 {
                    select {
                    case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
                    case <-time.After(internalTimeout):
                        if r.lg != nil {
                            r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
                        } else {
                            plog.Warningf("timed out sending read state")
                        }
                    case <-r.stopped:
                        return
                    }
                }

                notifyc := make(chan struct{}, 1)
                ap := apply{
                    entries:  rd.CommittedEntries,
                    snapshot: rd.Snapshot,
                    notifyc:  notifyc,
                }

                updateCommittedIndex(&ap, rh)

                select {
                case r.applyc <- ap:
                case <-r.stopped:
                    return
                }

                // the leader can write to its disk in parallel with replicating to the followers
                // and the followers writing to their disks.
                // For more details, check raft thesis 10.2.1
                if islead {
                    // gofail: var raftBeforeLeaderSend struct{}
                    r.transport.Send(r.processMessages(rd.Messages))
                }

                // gofail: var raftBeforeSave struct{}
                if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
                    if r.lg != nil {
                        r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
                    } else {
                        plog.Fatalf("raft save state and entries error: %v", err)
                    }
                }
                if !raft.IsEmptyHardState(rd.HardState) {
                    proposalsCommitted.Set(float64(rd.HardState.Commit))
                }
                // gofail: var raftAfterSave struct{}

                if !raft.IsEmptySnap(rd.Snapshot) {
                    // gofail: var raftBeforeSaveSnap struct{}
                    if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
                        if r.lg != nil {
                            r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
                        } else {
                            plog.Fatalf("raft save snapshot error: %v", err)
                        }
                    }
                    // etcdserver now claims the snapshot has been persisted onto the disk
                    notifyc <- struct{}{}

                    // gofail: var raftAfterSaveSnap struct{}
                    r.raftStorage.ApplySnapshot(rd.Snapshot)
                    if r.lg != nil {
                        r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
                    } else {
                        plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
                    }
                    // gofail: var raftAfterApplySnap struct{}
                }

                r.raftStorage.Append(rd.Entries)

                if !islead {
                    // finish processing incoming messages before we signal notifyc
                    msgs := r.processMessages(rd.Messages)

                    // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
                    notifyc <- struct{}{}

                    // Candidate or follower needs to wait for all pending configuration
                    // changes to be applied before sending messages.
                    // Otherwise we might incorrectly count votes (e.g. votes from removed members).
                    // Also, a slow follower's raft layer could proceed to become the leader
                    // of its own single-node cluster before the apply layer applies the config change.
                    // We simply wait for ALL pending entries to be applied for now.
                    // We might improve this later on if it causes unnecessary long blocking issues.
                    waitApply := false
                    for _, ent := range rd.CommittedEntries {
                        if ent.Type == raftpb.EntryConfChange {
                            waitApply = true
                            break
                        }
                    }
                    if waitApply {
                        // blocks until 'applyAll' calls 'applyWait.Trigger'
                        // to be in sync with scheduled config-change job
                        // (assume notifyc has cap of 1)
                        select {
                        case notifyc <- struct{}{}:
                        case <-r.stopped:
                            return
                        }
                    }

                    // gofail: var raftBeforeFollowerSend struct{}
                    r.transport.Send(msgs)
                } else {
                    // leader already processed 'MsgSnap' and signaled
                    notifyc <- struct{}{}
                }

                r.Advance()
            case <-r.stopped:
                return
            }
        }
    }()
}

// updateCommittedIndex forwards the highest committed index carried by the
// apply (from its entries or its snapshot) to the ready handler.
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
    var ci uint64
    if len(ap.entries) != 0 {
        ci = ap.entries[len(ap.entries)-1].Index
    }
    if ap.snapshot.Metadata.Index > ci {
        ci = ap.snapshot.Metadata.Index
    }
    if ci != 0 {
        rh.updateCommittedIndex(ci)
    }
}

// processMessages inspects outgoing messages before they are handed to the
// transport: messages addressed to removed members are dropped, only the
// latest MsgAppResp is kept, MsgSnap is redirected to the server main loop,
// and slow heartbeats are reported.
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
    sentAppResp := false
    for i := len(ms) - 1; i >= 0; i-- {
        if r.isIDRemoved(ms[i].To) {
            ms[i].To = 0
        }

        if ms[i].Type == raftpb.MsgAppResp {
            if sentAppResp {
                ms[i].To = 0
            } else {
                sentAppResp = true
            }
        }

        if ms[i].Type == raftpb.MsgSnap {
            // There are two separate data stores: the store for v2, and the KV for v3.
            // The msgSnap only contains the most recent snapshot of store without KV.
            // So we need to redirect the msgSnap to etcd server main loop for merging in the
            // current store snapshot and KV snapshot.
            select {
            case r.msgSnapC <- ms[i]:
            default:
                // drop msgSnap if the inflight chan is full.
            }
            ms[i].To = 0
        }

        if ms[i].Type == raftpb.MsgHeartbeat {
            ok, exceed := r.td.Observe(ms[i].To)
            if !ok {
                // TODO: limit request rate.
                if r.lg != nil {
                    r.lg.Warn(
                        "leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk",
                        zap.Duration("heartbeat-interval", r.heartbeat),
                        zap.Duration("expected-duration", 2*r.heartbeat),
                        zap.Duration("exceeded-duration", exceed),
                    )
                } else {
                    plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
                    plog.Warningf("server is likely overloaded")
                }
                heartbeatSendFailures.Inc()
            }
        }
    }
    return ms
}

// apply returns the channel that delivers committed entries and snapshots to
// be applied by the etcd server.
func (r *raftNode) apply() chan apply {
    return r.applyc
}

// stop signals the raft goroutine to exit and blocks until it has finished.
func (r *raftNode) stop() {
    r.stopped <- struct{}{}
    <-r.done
}

func (r *raftNode) onStop() {
    r.Stop()
    r.ticker.Stop()
    r.transport.Stop()
    if err := r.storage.Close(); err != nil {
        if r.lg != nil {
            r.lg.Panic("failed to close Raft storage", zap.Error(err))
        } else {
            plog.Panicf("raft close storage error: %v", err)
        }
    }
    close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
    p := r.transport.(rafthttp.Pausable)
    p.Pause()
}

func (r *raftNode) resumeSending() {
    p := r.transport.(rafthttp.Pausable)
    p.Resume()
}

// advanceTicks advances ticks of Raft node.
// This can be used for fast-forwarding election
// ticks in multi data-center deployments, thus
// speeding up election process.
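//
// For example (an assumed call pattern, not taken from this file), a freshly
// started single-member cluster could call r.advanceTicks(electionTicks-1) so
// that far fewer ticks need to elapse before it starts its first election.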
func (r *raftNode) advanceTicks(ticks int) {
    for i := 0; i < ticks; i++ {
        r.tick()
    }
}

// startNode bootstraps a new member: it creates the WAL, starts a raft.Node
// with the given peers, and returns the member ID, node, in-memory raft
// storage, and WAL.
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
    var err error
    member := cl.MemberByName(cfg.Name)
    metadata := pbutil.MustMarshal(
        &pb.Metadata{
            NodeID:    uint64(member.ID),
            ClusterID: uint64(cl.ID()),
        },
    )
    if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
        if cfg.Logger != nil {
            cfg.Logger.Fatal("failed to create WAL", zap.Error(err))
        } else {
            plog.Fatalf("create wal error: %v", err)
        }
    }
    peers := make([]raft.Peer, len(ids))
    for i, id := range ids {
        var ctx []byte
        ctx, err = json.Marshal((*cl).Member(id))
        if err != nil {
            if cfg.Logger != nil {
                cfg.Logger.Panic("failed to marshal member", zap.Error(err))
            } else {
                plog.Panicf("marshal member should never fail: %v", err)
            }
        }
        peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
    }
    id = member.ID
    if cfg.Logger != nil {
        cfg.Logger.Info(
            "starting local member",
            zap.String("local-member-id", id.String()),
            zap.String("cluster-id", cl.ID().String()),
        )
    } else {
        plog.Infof("starting member %s in cluster %s", id, cl.ID())
    }
    s = raft.NewMemoryStorage()
    c := &raft.Config{
        ID:              uint64(id),
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
        CheckQuorum:     true,
        PreVote:         cfg.PreVote,
    }
    if cfg.Logger != nil {
        // called after capnslog setting in "init" function
        if cfg.LoggerConfig != nil {
            c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
            if err != nil {
                log.Fatalf("cannot create raft logger %v", err)
            }
        } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
            c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
        }
    }

    n = raft.StartNode(c, peers)
    raftStatusMu.Lock()
    raftStatus = n.Status
    raftStatusMu.Unlock()
    return id, n, s, w
}

// restartNode replays the WAL from the given snapshot and restarts the
// existing member's raft.Node with the restored hard state and entries.
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
    var walsnap walpb.Snapshot
    if snapshot != nil {
        walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
    }
    w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

    if cfg.Logger != nil {
        cfg.Logger.Info(
            "restarting local member",
            zap.String("cluster-id", cid.String()),
            zap.String("local-member-id", id.String()),
            zap.Uint64("commit-index", st.Commit),
        )
    } else {
        plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
    }
    cl := membership.NewCluster(cfg.Logger, "")
    cl.SetID(id, cid)
    s := raft.NewMemoryStorage()
    if snapshot != nil {
        s.ApplySnapshot(*snapshot)
    }
    s.SetHardState(st)
    s.Append(ents)
    c := &raft.Config{
        ID:              uint64(id),
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
        CheckQuorum:     true,
        PreVote:         cfg.PreVote,
    }
    if cfg.Logger != nil {
        // called after capnslog setting in "init" function
        var err error
        if cfg.LoggerConfig != nil {
            c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
            if err != nil {
                log.Fatalf("cannot create raft logger %v", err)
            }
        } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
            c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
        }
    }

    n := raft.RestartNode(c)
    raftStatusMu.Lock()
    raftStatus = n.Status
    raftStatusMu.Unlock()
    return id, cl, n, s, w
}

// restartAsStandaloneNode restarts the member as a single-node cluster: it
// discards uncommitted WAL entries and force-appends config-change entries
// that drop every other member (see createConfigChangeEnts).
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
    var walsnap walpb.Snapshot
    if snapshot != nil {
        walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
    }
    w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

    // discard the previously uncommitted entries
    for i, ent := range ents {
        if ent.Index > st.Commit {
            if cfg.Logger != nil {
                cfg.Logger.Info(
                    "discarding uncommitted WAL entries",
                    zap.Uint64("entry-index", ent.Index),
                    zap.Uint64("commit-index-from-wal", st.Commit),
                    zap.Int("number-of-discarded-entries", len(ents)-i),
                )
            } else {
                plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
            }
            ents = ents[:i]
            break
        }
    }

    // force append the configuration change entries
    toAppEnts := createConfigChangeEnts(
        cfg.Logger,
        getIDs(cfg.Logger, snapshot, ents),
        uint64(id),
        st.Term,
        st.Commit,
    )
    ents = append(ents, toAppEnts...)

    // force commit newly appended entries
    err := w.Save(raftpb.HardState{}, toAppEnts)
    if err != nil {
        if cfg.Logger != nil {
            cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
        } else {
            plog.Fatalf("%v", err)
        }
    }
    if len(ents) != 0 {
        st.Commit = ents[len(ents)-1].Index
    }

    if cfg.Logger != nil {
        cfg.Logger.Info(
            "forcing restart member",
            zap.String("cluster-id", cid.String()),
            zap.String("local-member-id", id.String()),
            zap.Uint64("commit-index", st.Commit),
        )
    } else {
        plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
    }

    cl := membership.NewCluster(cfg.Logger, "")
    cl.SetID(id, cid)
    s := raft.NewMemoryStorage()
    if snapshot != nil {
        s.ApplySnapshot(*snapshot)
    }
    s.SetHardState(st)
    s.Append(ents)
    c := &raft.Config{
        ID:              uint64(id),
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
        CheckQuorum:     true,
        PreVote:         cfg.PreVote,
    }
    if cfg.Logger != nil {
        // called after capnslog setting in "init" function
        if cfg.LoggerConfig != nil {
            c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
            if err != nil {
                log.Fatalf("cannot create raft logger %v", err)
            }
        } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
            c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
        }
    }

    n := raft.RestartNode(c)
    raftStatusMu.Lock()
    raftStatus = n.Status
    raftStatusMu.Unlock()
    return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
//   - ConfChangeAddNode, in which case the contained ID will be added into the set.
//   - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
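//
// For example, a snapshot whose ConfState holds {1, 2, 3} followed by entries
// that remove 2 and add 4 yields [1, 3, 4].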
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
    ids := make(map[uint64]bool)
    if snap != nil {
        for _, id := range snap.Metadata.ConfState.Nodes {
            ids[id] = true
        }
    }
    for _, e := range ents {
        if e.Type != raftpb.EntryConfChange {
            continue
        }
        var cc raftpb.ConfChange
        pbutil.MustUnmarshal(&cc, e.Data)
        switch cc.Type {
        case raftpb.ConfChangeAddNode:
            ids[cc.NodeID] = true
        case raftpb.ConfChangeRemoveNode:
            delete(ids, cc.NodeID)
        case raftpb.ConfChangeUpdateNode:
            // do nothing
        default:
            if lg != nil {
                lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
            } else {
                plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
            }
        }
    }
    sids := make(types.Uint64Slice, 0, len(ids))
    for id := range ids {
        sids = append(sids, id)
    }
    sort.Sort(sids)
    return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
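//
// For example, with ids = [1, 2, 3] and self = 2, it returns two
// ConfChangeRemoveNode entries (for 1 and 3) at indexes index+1 and index+2;
// with ids = [1, 3] and self = 2, it additionally appends a ConfChangeAddNode
// entry for member 2 with a default peer URL.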
func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
    ents := make([]raftpb.Entry, 0)
    next := index + 1
    found := false
    for _, id := range ids {
        if id == self {
            found = true
            continue
        }
        cc := &raftpb.ConfChange{
            Type:   raftpb.ConfChangeRemoveNode,
            NodeID: id,
        }
        e := raftpb.Entry{
            Type:  raftpb.EntryConfChange,
            Data:  pbutil.MustMarshal(cc),
            Term:  term,
            Index: next,
        }
        ents = append(ents, e)
        next++
    }
    if !found {
        m := membership.Member{
            ID:             types.ID(self),
            RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
        }
        ctx, err := json.Marshal(m)
        if err != nil {
            if lg != nil {
                lg.Panic("failed to marshal member", zap.Error(err))
            } else {
                plog.Panicf("marshal member should never fail: %v", err)
            }
        }
        cc := &raftpb.ConfChange{
            Type:    raftpb.ConfChangeAddNode,
            NodeID:  self,
            Context: ctx,
        }
        e := raftpb.Entry{
            Type:  raftpb.EntryConfChange,
            Data:  pbutil.MustMarshal(cc),
            Term:  term,
            Index: next,
        }
        ents = append(ents, e)
    }
    return ents
}