raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"log"
	"sort"
	"sync"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/logutil"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
	"github.com/coreos/pkg/capnslog"
	"go.uber.org/zap"
)

const (
	// Number of entries for a slow follower to catch up after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency with the
	// leader. The max throughput is around 10K entries/s, so keeping 5K
	// entries is enough to help a follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max message size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

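// Back-of-the-envelope check for the constants above: at ~10K entries/s,
// 5000 catch-up entries give a lagging follower roughly half a second of
// headroom before compaction outruns it; at 100MB/s with a ~10ms RTT, about
// 1MB is in flight per round trip, which is why maxSizePerMsg is capped at 1MB.
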
var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// indirection for the expvar func interface;
	// expvar panics when publishing a duplicate name
	// and does not support removing a registered name,
	// so we only register a func that calls raftStatus
	// and swap raftStatus as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// notifyc before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node
	notifyc chan struct{}
}

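// A consumer of apply is expected to follow roughly this shape (an informal
// sketch; applyEntries stands in for the server's real apply logic):
//
//	ap := <-r.apply()
//	applyEntries(ap.entries, ap.snapshot)
//	<-ap.notifyc // raft log writes are stable on disk past this point
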
type raftNode struct {
	lg *zap.Logger

	tickMu *sync.Mutex
	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detector for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

type raftNodeConfig struct {
	lg *zap.Logger

	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}

func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		lg:             cfg.lg,
		tickMu:         new(sync.Mutex),
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

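// Note: with heartbeat == 0 the zero-value time.Ticker above has a nil C
// channel, so <-r.ticker.C blocks forever and the node never ticks on its
// own; ticks can still be driven manually through advanceTicks.
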
// tick serializes calls to raft.Node.Tick: raft.Node does no locking in the
// raft package, and tick is invoked both from the ticker loop in start and
// from advanceTicks.
func (r *raftNode) tick() {
	r.tickMu.Lock()
	r.Tick()
	r.tickMu.Unlock()
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					rh.updateLead(rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					if islead {
						isLeader.Set(1)
					} else {
						isLeader.Set(0)
					}
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						if r.lg != nil {
							r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
						} else {
							plog.Warningf("timed out sending read state")
						}
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc:  notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// The leader can write to its disk in parallel with replicating
				// to the followers and them writing to their disks.
				// For more details, check raft thesis 10.2.1.
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					if r.lg != nil {
						r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
					} else {
						plog.Fatalf("raft save state and entries error: %v", err)
					}
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						if r.lg != nil {
							r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
						} else {
							plog.Fatalf("raft save snapshot error: %v", err)
						}
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}

					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					if r.lg != nil {
						r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
					} else {
						plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					}
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

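				// Followers persist entries via storage.Save above before sending
				// any messages out below; acknowledging entries with MsgAppResp
				// before they are stable on disk could lose log entries that the
				// leader already counts as replicated.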
				if !islead {
					// finish processing incoming messages before we signal notifyc chan
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also a slow machine's follower raft layer could proceed to become the leader
					// of its own single-node cluster before the apply layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessarily long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}

				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}

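// To summarize the ordering of one Ready batch above: leaders send first and
// then persist; followers persist (Save, SaveSnap, Append) and only then
// send; both finish with Advance to tell raft the batch has been handled.
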
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

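// processMessages filters a batch of outgoing messages before they reach the
// transport: messages to removed members and all but the latest MsgAppResp
// are dropped (by blanking To), MsgSnap is rerouted to the server loop via
// msgSnapC, and slow heartbeats are reported through the timeout detector.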
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store, without KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging
			// in the current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				if r.lg != nil {
					r.lg.Warn(
						"heartbeat took too long to send out; server is overloaded, likely from slow disk",
						zap.Duration("exceeded", exceed),
						zap.Duration("heartbeat-interval", r.heartbeat),
					)
				} else {
					plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
					plog.Warningf("server is likely overloaded")
				}
			}
		}
	}
	return ms
}

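// apply returns the channel on which committed entries and snapshots are
// handed to the server's apply loop.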
func (r *raftNode) apply() chan apply {
	return r.applyc
}

func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		if r.lg != nil {
			r.lg.Panic("failed to close Raft storage", zap.Error(err))
		} else {
			plog.Panicf("raft close storage error: %v", err)
		}
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicks advances ticks of the Raft node.
// This can be used for fast-forwarding election
// ticks in multi-data-center deployments, thus
// speeding up the election process.
func (r *raftNode) advanceTicks(ticks int) {
	for i := 0; i < ticks; i++ {
		r.tick()
	}
}

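// startNode bootstraps a brand-new member: it writes the cluster metadata to
// a fresh WAL, marshals every initial member into the raft peer list, and
// starts a raft.Node on an empty MemoryStorage.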
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Fatal("failed to create WAL", zap.Error(err))
		} else {
			plog.Fatalf("create wal error: %v", err)
		}
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		var ctx []byte
		ctx, err = json.Marshal((*cl).Member(id))
		if err != nil {
			if cfg.Logger != nil {
				cfg.Logger.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	if cfg.Logger != nil {
		cfg.Logger.Info(
			"starting local member",
			zap.String("local-member-id", id.String()),
			zap.String("cluster-id", cl.ID().String()),
		)
	} else {
		plog.Infof("starting member %s in cluster %s", id, cl.ID())
	}
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
		if err != nil {
			log.Fatalf("cannot create raft logger %v", err)
		}
	}
	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, n, s, w
}

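// restartNode rebuilds a member from its on-disk state: it replays the WAL
// past the given snapshot (if any), restores the hard state and entries into
// a fresh MemoryStorage, and restarts the raft.Node with no peer list, since
// membership is recovered from the log itself.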
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

	if cfg.Logger != nil {
		cfg.Logger.Info(
			"restarting local member",
			zap.String("cluster-id", cid.String()),
			zap.String("local-member-id", id.String()),
			zap.Uint64("commit-index", st.Commit),
		)
	} else {
		plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	}
	cl := membership.NewCluster(cfg.Logger, "")
	cl.SetID(id, cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		var err error
		c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
		if err != nil {
			log.Fatalf("cannot create raft logger %v", err)
		}
	}
	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

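// restartAsStandaloneNode restarts a member as a single-node cluster (the
// force-new-cluster path): it drops all uncommitted WAL entries, then
// force-appends and force-commits configuration-change entries that remove
// every other member.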
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			if cfg.Logger != nil {
				cfg.Logger.Info(
					"discarding uncommitted WAL entries",
					zap.Uint64("entry-index", ent.Index),
					zap.Uint64("commit-index-from-wal", st.Commit),
					zap.Int("number-of-discarded-entries", len(ents)-i),
				)
			} else {
				plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			}
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(
		cfg.Logger,
		getIDs(cfg.Logger, snapshot, ents),
		uint64(id),
		st.Term,
		st.Commit,
	)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
		} else {
			plog.Fatalf("%v", err)
		}
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	if cfg.Logger != nil {
		cfg.Logger.Info(
			"forcing restart member",
			zap.String("cluster-id", cid.String()),
			zap.String("local-member-id", id.String()),
			zap.Uint64("commit-index", st.Commit),
		)
	} else {
		plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	}
	cl := membership.NewCluster(cfg.Logger, "")
	cl.SetID(id, cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
		if err != nil {
			log.Fatalf("cannot create raft logger %v", err)
		}
	}
	n := raft.RestartNode(c)
	// guard raftStatus with raftStatusMu, matching startNode and restartNode
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
//   - ConfChangeAddNode, in which case the contained ID will be added into the set.
//   - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			if lg != nil {
				lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
			} else {
				plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
			}
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

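// For example: a snapshot whose ConfState.Nodes is {1, 2, 3} followed by a
// committed ConfChangeRemoveNode for 2 makes getIDs return []uint64{1, 3}.
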
// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			if lg != nil {
				lg.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}