raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"fmt"
	"log"
	"sort"
	"sync"
	"time"

	"go.etcd.io/etcd/etcdserver/api/membership"
	"go.etcd.io/etcd/etcdserver/api/rafthttp"
	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
	"go.etcd.io/etcd/pkg/contention"
	"go.etcd.io/etcd/pkg/logutil"
	"go.etcd.io/etcd/pkg/pbutil"
	"go.etcd.io/etcd/pkg/types"
	"go.etcd.io/etcd/raft"
	"go.etcd.io/etcd/raft/raftpb"
	"go.etcd.io/etcd/wal"
	"go.etcd.io/etcd/wal/walpb"

	"go.uber.org/zap"
)

const (
	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024
	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// indirection for expvar func interface
	// expvar panics when publishing a duplicate name
	// expvar does not support removing a registered name
	// so only register a func that calls raftStatus
	// and change raftStatus as we need.
	raftStatus func() raft.Status
)

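// init installs the package-level raft logger and publishes the current
// raft status under the "raft.status" expvar name.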
func init() {
	lcfg := logutil.DefaultZapLoggerConfig
	lg, err := logutil.NewRaftLogger(&lcfg)
	if err != nil {
		log.Fatalf("cannot create raft logger %v", err)
	}
	raft.SetLogger(lg)

	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// notifyc before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node
	notifyc chan struct{}
}

type raftNode struct {
	lg *zap.Logger

	tickMu *sync.Mutex
	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detectors for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

type raftNodeConfig struct {
	lg *zap.Logger

	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}

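// newRaftNode builds a raftNode from the given config, setting up the
// heartbeat ticker, the contention detector for heartbeat messages, and the
// channels used to hand applies, read states, and snapshot messages to the
// etcd server loop.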
func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		lg:             cfg.lg,
		tickMu:         new(sync.Mutex),
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

// raft.Node does not have locks in Raft package
func (r *raftNode) tick() {
	r.tickMu.Lock()
	r.Tick()
	r.tickMu.Unlock()
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					rh.updateLead(rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					if islead {
						isLeader.Set(1)
					} else {
						isLeader.Set(0)
					}
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						if r.lg != nil {
							r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
						} else {
							plog.Warningf("timed out sending read state")
						}
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc:  notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// the leader can write to its disk in parallel with replicating to the followers and them
				// writing to their disks.
				// For more details, check raft thesis 10.2.1
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					if r.lg != nil {
						r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
					} else {
						plog.Fatalf("raft save state and entries error: %v", err)
					}
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						if r.lg != nil {
							r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
						} else {
							plog.Fatalf("raft save snapshot error: %v", err)
						}
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}
					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					if r.lg != nil {
						r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
					} else {
						plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					}
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal raftdone chan
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also slow machine's follower raft-layer could proceed to become the leader
					// on its own single-node cluster, before apply-layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessary long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}

				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}

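// updateCommittedIndex reports the highest committed index carried by the
// apply (the last committed entry, or the snapshot index if larger) to the
// raftReadyHandler.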
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

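// processMessages filters outgoing Raft messages before they reach the
// transport: messages to removed members are dropped, redundant MsgAppResp
// replies are suppressed, MsgSnap is redirected to msgSnapC so the server can
// merge in the KV snapshot, and late heartbeats are counted and logged.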
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of store without KV.
			// So we need to redirect the msgSnap to etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				if r.lg != nil {
					r.lg.Warn(
						"leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk",
						zap.String("to", fmt.Sprintf("%x", ms[i].To)),
						zap.Duration("heartbeat-interval", r.heartbeat),
						zap.Duration("expected-duration", 2*r.heartbeat),
						zap.Duration("exceeded-duration", exceed),
					)
				} else {
					plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To)
					plog.Warningf("server is likely overloaded")
				}
				heartbeatSendFailures.Inc()
			}
		}
	}
	return ms
}

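// apply returns the channel on which committed entries and snapshots are
// delivered to the server's apply loop.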
func (r *raftNode) apply() chan apply {
	return r.applyc
}

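// stop signals the raft goroutine to exit and blocks until shutdown finishes.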
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

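// onStop tears down the raft node, ticker, transport, and storage, then
// closes done so stop can return.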
func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		if r.lg != nil {
			r.lg.Panic("failed to close Raft storage", zap.Error(err))
		} else {
			plog.Panicf("raft close storage error: %v", err)
		}
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicks advances ticks of Raft node.
// This can be used for fast-forwarding election
// ticks in multi data-center deployments, thus
// speeding up election process.
func (r *raftNode) advanceTicks(ticks int) {
	for i := 0; i < ticks; i++ {
		r.tick()
	}
}

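// startNode bootstraps a brand-new member: it creates a WAL stamped with the
// member and cluster IDs, builds the initial peer list from ids, and starts a
// fresh raft.Node backed by an in-memory storage.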
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Panic("failed to create WAL", zap.Error(err))
		} else {
			plog.Panicf("create wal error: %v", err)
		}
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		var ctx []byte
		ctx, err = json.Marshal((*cl).Member(id))
		if err != nil {
			if cfg.Logger != nil {
				cfg.Logger.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	if cfg.Logger != nil {
		cfg.Logger.Info(
			"starting local member",
			zap.String("local-member-id", id.String()),
			zap.String("cluster-id", cl.ID().String()),
		)
	} else {
		plog.Infof("starting member %s in cluster %s", id, cl.ID())
	}
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		if cfg.LoggerConfig != nil {
			c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
			if err != nil {
				log.Fatalf("cannot create raft logger %v", err)
			}
		} else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
			c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
		}
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, n, s, w
}

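// restartNode restarts an existing member from its WAL and the given
// snapshot, replaying the recorded hard state and entries into a new
// in-memory storage before restarting the raft.Node.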
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

	if cfg.Logger != nil {
		cfg.Logger.Info(
			"restarting local member",
			zap.String("cluster-id", cid.String()),
			zap.String("local-member-id", id.String()),
			zap.Uint64("commit-index", st.Commit),
		)
	} else {
		plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	}
	cl := membership.NewCluster(cfg.Logger, "")
	cl.SetID(id, cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		var err error
		if cfg.LoggerConfig != nil {
			c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
			if err != nil {
				log.Fatalf("cannot create raft logger %v", err)
			}
		} else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
			c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
		}
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

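// restartAsStandaloneNode restarts a member as a single-node cluster for
// disaster recovery: uncommitted WAL entries are dropped and config-change
// entries are force-appended to remove all other members before the node is
// restarted.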
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			if cfg.Logger != nil {
				cfg.Logger.Info(
					"discarding uncommitted WAL entries",
					zap.Uint64("entry-index", ent.Index),
					zap.Uint64("commit-index-from-wal", st.Commit),
					zap.Int("number-of-discarded-entries", len(ents)-i),
				)
			} else {
				plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
			}
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(
		cfg.Logger,
		getIDs(cfg.Logger, snapshot, ents),
		uint64(id),
		st.Term,
		st.Commit,
	)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
		} else {
			plog.Fatalf("%v", err)
		}
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	if cfg.Logger != nil {
		cfg.Logger.Info(
			"forcing restart member",
			zap.String("cluster-id", cid.String()),
			zap.String("local-member-id", id.String()),
			zap.Uint64("commit-index", st.Commit),
		)
	} else {
		plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	}

	cl := membership.NewCluster(cfg.Logger, "")
	cl.SetID(id, cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		if cfg.LoggerConfig != nil {
			c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
			if err != nil {
				log.Fatalf("cannot create raft logger %v", err)
			}
		} else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
			c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
		}
	}

	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			if lg != nil {
				lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
			} else {
				plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
			}
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			if lg != nil {
				lg.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}