node.go 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package raft
  15. import (
  16. "context"
  17. "errors"
  18. pb "go.etcd.io/etcd/raft/raftpb"
  19. )
// SnapshotStatus describes the outcome of sending a snapshot to a peer;
// it is reported back to raft via Node.ReportSnapshot.
type SnapshotStatus int

const (
	SnapshotFinish  SnapshotStatus = 1
	SnapshotFailure SnapshotStatus = 2
)

var (
	// emptyState is the zero HardState, used to detect "no update".
	emptyState = pb.HardState{}

	// ErrStopped is returned by methods on Nodes that have been stopped.
	ErrStopped = errors.New("raft: stopped")
)
  30. // SoftState provides state that is useful for logging and debugging.
  31. // The state is volatile and does not need to be persisted to the WAL.
  32. type SoftState struct {
  33. Lead uint64 // must use atomic operations to access; keep 64-bit aligned.
  34. RaftState StateType
  35. }
  36. func (a *SoftState) equal(b *SoftState) bool {
  37. return a.Lead == b.Lead && a.RaftState == b.RaftState
  38. }
// Ready encapsulates the entries and messages that are ready to read,
// be saved to stable storage, committed or sent to other peers.
// All fields in Ready are read-only.
type Ready struct {
	// The current volatile state of a Node.
	// SoftState will be nil if there is no update.
	// It is not required to consume or store SoftState.
	*SoftState

	// The current state of a Node to be saved to stable storage BEFORE
	// Messages are sent.
	// HardState will be equal to empty state if there is no update.
	pb.HardState

	// ReadStates can be used for node to serve linearizable read requests locally
	// when its applied index is greater than the index in ReadState.
	// Note that the readState will be returned when raft receives msgReadIndex.
	// The returned is only valid for the request that requested to read.
	ReadStates []ReadState

	// Entries specifies entries to be saved to stable storage BEFORE
	// Messages are sent.
	Entries []pb.Entry

	// Snapshot specifies the snapshot to be saved to stable storage.
	Snapshot pb.Snapshot

	// CommittedEntries specifies entries to be committed to a
	// store/state-machine. These have previously been committed to stable
	// store.
	CommittedEntries []pb.Entry

	// Messages specifies outbound messages to be sent AFTER Entries are
	// committed to stable storage.
	// If it contains a MsgSnap message, the application MUST report back to raft
	// when the snapshot has been received or has failed by calling ReportSnapshot.
	Messages []pb.Message

	// MustSync indicates whether the HardState and Entries must be synchronously
	// written to disk or if an asynchronous write is permissible.
	MustSync bool
}
  74. func isHardStateEqual(a, b pb.HardState) bool {
  75. return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
  76. }
  77. // IsEmptyHardState returns true if the given HardState is empty.
  78. func IsEmptyHardState(st pb.HardState) bool {
  79. return isHardStateEqual(st, emptyState)
  80. }
  81. // IsEmptySnap returns true if the given Snapshot is empty.
  82. func IsEmptySnap(sp pb.Snapshot) bool {
  83. return sp.Metadata.Index == 0
  84. }
  85. func (rd Ready) containsUpdates() bool {
  86. return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
  87. !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
  88. len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
  89. }
  90. // appliedCursor extracts from the Ready the highest index the client has
  91. // applied (once the Ready is confirmed via Advance). If no information is
  92. // contained in the Ready, returns zero.
  93. func (rd Ready) appliedCursor() uint64 {
  94. if n := len(rd.CommittedEntries); n > 0 {
  95. return rd.CommittedEntries[n-1].Index
  96. }
  97. if index := rd.Snapshot.Metadata.Index; index > 0 {
  98. return index
  99. }
  100. return 0
  101. }
// Node represents a node in a raft cluster.
type Node interface {
	// Tick increments the internal logical clock for the Node by a single tick. Election
	// timeouts and heartbeat timeouts are in units of ticks.
	Tick()
	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
	Campaign(ctx context.Context) error
	// Propose proposes that data be appended to the log.
	Propose(ctx context.Context, data []byte) error
	// ProposeConfChange proposes config change.
	// At most one ConfChange can be in the process of going through consensus.
	// Application needs to call ApplyConfChange when applying EntryConfChange type entry.
	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
	Step(ctx context.Context, msg pb.Message) error

	// Ready returns a channel that returns the current point-in-time state.
	// Users of the Node must call Advance after retrieving the state returned by Ready.
	//
	// NOTE: No committed entries from the next Ready may be applied until all committed entries
	// and snapshots from the previous one have finished.
	Ready() <-chan Ready

	// Advance notifies the Node that the application has saved progress up to the last Ready.
	// It prepares the node to return the next available Ready.
	//
	// The application should generally call Advance after it applies the entries in last Ready.
	//
	// However, as an optimization, the application may call Advance while it is applying the
	// commands. For example. when the last Ready contains a snapshot, the application might take
	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
	// progress, it can call Advance before finishing applying the last ready.
	Advance()
	// ApplyConfChange applies config change to the local node.
	// Returns an opaque ConfState protobuf which must be recorded
	// in snapshots. Will never return nil; it returns a pointer only
	// to match MemoryStorage.Compact.
	ApplyConfChange(cc pb.ConfChange) *pb.ConfState

	// TransferLeadership attempts to transfer leadership to the given transferee.
	TransferLeadership(ctx context.Context, lead, transferee uint64)

	// ReadIndex request a read state. The read state will be set in the ready.
	// Read state has a read index. Once the application advances further than the read
	// index, any linearizable read requests issued before the read request can be
	// processed safely. The read state will have the same rctx attached.
	ReadIndex(ctx context.Context, rctx []byte) error

	// Status returns the current status of the raft state machine.
	Status() Status
	// ReportUnreachable reports the given node is not reachable for the last send.
	ReportUnreachable(id uint64)
	// ReportSnapshot reports the status of the sent snapshot.
	ReportSnapshot(id uint64, status SnapshotStatus)
	// Stop performs any necessary termination of the Node.
	Stop()
}
// Peer identifies an initial member of the cluster, supplied to
// StartNode when bootstrapping.
type Peer struct {
	ID      uint64
	Context []byte // opaque application data carried in the bootstrap ConfChange entry.
}
// StartNode returns a new Node given configuration and a list of raft peers.
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
// It panics if a peer's ConfChange cannot be marshaled (a programmer error).
func StartNode(c *Config, peers []Peer) Node {
	r := newRaft(c)
	// become the follower at term 1 and apply initial configuration
	// entries of term 1
	r.becomeFollower(1, None)
	for _, peer := range peers {
		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
		d, err := cc.Marshal()
		if err != nil {
			panic("unexpected marshal error")
		}
		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
		r.raftLog.append(e)
	}
	// Mark these initial entries as committed.
	// TODO(bdarnell): These entries are still unstable; do we need to preserve
	// the invariant that committed < unstable?
	r.raftLog.committed = r.raftLog.lastIndex()
	// Now apply them, mainly so that the application can call Campaign
	// immediately after StartNode in tests. Note that these nodes will
	// be added to raft twice: here and when the application's Ready
	// loop calls ApplyConfChange. The calls to addNode must come after
	// all calls to raftLog.append so progress.next is set after these
	// bootstrapping entries (it is an error if we try to append these
	// entries since they have already been committed).
	// We do not set raftLog.applied so the application will be able
	// to observe all conf changes via Ready.CommittedEntries.
	for _, peer := range peers {
		r.addNode(peer.ID)
	}

	n := newNode()
	n.logger = c.Logger
	go n.run(r)
	return &n
}
  195. // RestartNode is similar to StartNode but does not take a list of peers.
  196. // The current membership of the cluster will be restored from the Storage.
  197. // If the caller has an existing state machine, pass in the last log index that
  198. // has been applied to it; otherwise use zero.
  199. func RestartNode(c *Config) Node {
  200. r := newRaft(c)
  201. n := newNode()
  202. n.logger = c.Logger
  203. go n.run(r)
  204. return &n
  205. }
// msgWithResult couples a proposal message with an optional channel on
// which the outcome of stepping the message is reported back to the
// proposer (used by stepWait).
type msgWithResult struct {
	m      pb.Message
	result chan error // nil when the proposer does not wait for the result.
}
// node is the canonical implementation of the Node interface.
// All of its methods communicate with the run() goroutine over channels.
type node struct {
	propc      chan msgWithResult // proposals (MsgProp); run() only receives while a leader is known.
	recvc      chan pb.Message    // all other inbound messages.
	confc      chan pb.ConfChange // configuration changes from ApplyConfChange.
	confstatec chan pb.ConfState  // resulting ConfState back to ApplyConfChange.
	readyc     chan Ready         // Ready handoff to the application.
	advancec   chan struct{}      // Advance acknowledgements from the application.
	tickc      chan struct{}      // logical-clock ticks (buffered; see newNode).
	done       chan struct{}      // closed by run() on exit.
	stop       chan struct{}      // Stop() -> run() signal.
	status     chan chan Status   // Status() requests; run() replies on the inner channel.

	logger Logger
}
  224. func newNode() node {
  225. return node{
  226. propc: make(chan msgWithResult),
  227. recvc: make(chan pb.Message),
  228. confc: make(chan pb.ConfChange),
  229. confstatec: make(chan pb.ConfState),
  230. readyc: make(chan Ready),
  231. advancec: make(chan struct{}),
  232. // make tickc a buffered chan, so raft node can buffer some ticks when the node
  233. // is busy processing raft messages. Raft node will resume process buffered
  234. // ticks when it becomes idle.
  235. tickc: make(chan struct{}, 128),
  236. done: make(chan struct{}),
  237. stop: make(chan struct{}),
  238. status: make(chan chan Status),
  239. }
  240. }
// Stop terminates the node's run loop and blocks until it has exited.
// It is safe to call Stop on a node that has already been stopped.
func (n *node) Stop() {
	select {
	case n.stop <- struct{}{}:
		// Not already stopped, so trigger it
	case <-n.done:
		// Node has already been stopped - no need to do anything
		return
	}
	// Block until the stop has been acknowledged by run()
	<-n.done
}
// run is the long-lived goroutine driving the raft state machine. It
// multiplexes all node API calls (arriving over the node's channels)
// with the Ready/Advance handshake the application uses to persist and
// apply raft output. It exits, closing n.done, when Stop is called.
func (n *node) run(r *raft) {
	var propc chan msgWithResult
	var readyc chan Ready
	var advancec chan struct{}
	var prevLastUnstablei, prevLastUnstablet uint64
	var havePrevLastUnstablei bool
	var prevSnapi uint64
	var applyingToI uint64
	var rd Ready

	lead := None
	prevSoftSt := r.softState()
	prevHardSt := emptyState

	for {
		if advancec != nil {
			// A Ready is outstanding with the application; do not offer
			// another until Advance is called.
			readyc = nil
		} else {
			rd = newReady(r, prevSoftSt, prevHardSt)
			if rd.containsUpdates() {
				readyc = n.readyc
			} else {
				readyc = nil
			}
		}

		if lead != r.lead {
			// Leadership changed: log the transition and enable or
			// disable the proposal channel (proposals are only accepted
			// while a leader is known).
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc
			} else {
				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.
		case pm := <-propc:
			m := pm.m
			m.From = r.id
			err := r.Step(m)
			if pm.result != nil {
				pm.result <- err
				close(pm.result)
			}
		case m := <-n.recvc:
			// filter out response message from unknown From.
			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
				r.Step(m)
			}
		case cc := <-n.confc:
			if cc.NodeID == None {
				// A conf change with no NodeID is used by ApplyConfChange
				// purely to read back the current configuration.
				select {
				case n.confstatec <- pb.ConfState{
					Nodes:    r.nodes(),
					Learners: r.learnerNodes()}:
				case <-n.done:
				}
				break
			}
			switch cc.Type {
			case pb.ConfChangeAddNode:
				r.addNode(cc.NodeID)
			case pb.ConfChangeAddLearnerNode:
				r.addLearner(cc.NodeID)
			case pb.ConfChangeRemoveNode:
				// block incoming proposal when local node is
				// removed
				if cc.NodeID == r.id {
					propc = nil
				}
				r.removeNode(cc.NodeID)
			case pb.ConfChangeUpdateNode:
			default:
				panic("unexpected conf type")
			}
			select {
			case n.confstatec <- pb.ConfState{
				Nodes:    r.nodes(),
				Learners: r.learnerNodes()}:
			case <-n.done:
			}
		case <-n.tickc:
			r.tick()
		case readyc <- rd:
			// The application accepted the Ready; remember what was
			// handed out so the following Advance can mark it
			// stable/applied.
			if rd.SoftState != nil {
				prevSoftSt = rd.SoftState
			}
			if len(rd.Entries) > 0 {
				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
				havePrevLastUnstablei = true
			}
			if !IsEmptyHardState(rd.HardState) {
				prevHardSt = rd.HardState
			}
			if !IsEmptySnap(rd.Snapshot) {
				prevSnapi = rd.Snapshot.Metadata.Index
			}
			if index := rd.appliedCursor(); index != 0 {
				applyingToI = index
			}

			r.msgs = nil
			r.readStates = nil
			r.reduceUncommittedSize(rd.CommittedEntries)
			advancec = n.advancec
		case <-advancec:
			// The application finished with the last Ready: advance the
			// raft log's applied/stable markers accordingly.
			if applyingToI != 0 {
				r.raftLog.appliedTo(applyingToI)
				applyingToI = 0
			}
			if havePrevLastUnstablei {
				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
				havePrevLastUnstablei = false
			}
			r.raftLog.stableSnapTo(prevSnapi)
			advancec = nil
		case c := <-n.status:
			c <- getStatus(r)
		case <-n.stop:
			close(n.done)
			return
		}
	}
}
// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
// The send is non-blocking: if the buffered tick channel is full, the
// tick is dropped and a warning is logged.
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
	}
}
  391. func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
  392. func (n *node) Propose(ctx context.Context, data []byte) error {
  393. return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
  394. }
  395. func (n *node) Step(ctx context.Context, m pb.Message) error {
  396. // ignore unexpected local messages receiving over network
  397. if IsLocalMsg(m.Type) {
  398. // TODO: return an error?
  399. return nil
  400. }
  401. return n.step(ctx, m)
  402. }
  403. func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
  404. data, err := cc.Marshal()
  405. if err != nil {
  406. return err
  407. }
  408. return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
  409. }
// step steps m through the node without waiting for a proposal result.
func (n *node) step(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, false)
}
// stepWait steps m and, for proposals, blocks until raft has processed
// the message, returning the result.
func (n *node) stepWait(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, true)
}
// Step advances the state machine using msgs. The ctx.Err() will be returned,
// if any.
//
// Non-proposal messages are delivered to recvc and return as soon as the
// raft loop accepts them. Proposals go to propc; when wait is true the
// caller additionally blocks until run() reports the result of stepping
// the message.
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		select {
		case n.recvc <- m:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		// Buffered so run() never blocks delivering the result.
		pm.result = make(chan error, 1)
	}
	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	// Wait for run() to step the proposal and report back.
	select {
	case rsp := <-pm.result:
		if rsp != nil {
			return rsp
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}
  456. func (n *node) Ready() <-chan Ready { return n.readyc }
// Advance notifies the raft loop that the application has saved and
// applied the last Ready; it returns immediately if the node is stopped.
func (n *node) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}
// ApplyConfChange applies a config change to the local node: it hands
// cc to the raft loop and then reads back the resulting ConfState.
// If the node stops in between, a zero ConfState is returned; the
// returned pointer is never nil.
func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc:
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec:
	case <-n.done:
	}
	return &cs
}
  475. func (n *node) Status() Status {
  476. c := make(chan Status)
  477. select {
  478. case n.status <- c:
  479. return <-c
  480. case <-n.done:
  481. return Status{}
  482. }
  483. }
// ReportUnreachable reports that the given node was not reachable for
// the last send, delivered to raft as a MsgUnreachable.
func (n *node) ReportUnreachable(id uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
	case <-n.done:
	}
}
  490. func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
  491. rej := status == SnapshotFailure
  492. select {
  493. case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
  494. case <-n.done:
  495. }
  496. }
// TransferLeadership attempts to transfer leadership from lead to
// transferee. It is best-effort: the request is dropped if the node
// stops or ctx is cancelled before the raft loop picks it up.
func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	select {
	// manually set 'from' and 'to', so that leader can voluntarily transfers its leadership
	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
	case <-n.done:
	case <-ctx.Done():
	}
}
  505. func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
  506. return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
  507. }
  508. func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
  509. rd := Ready{
  510. Entries: r.raftLog.unstableEntries(),
  511. CommittedEntries: r.raftLog.nextEnts(),
  512. Messages: r.msgs,
  513. }
  514. if softSt := r.softState(); !softSt.equal(prevSoftSt) {
  515. rd.SoftState = softSt
  516. }
  517. if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
  518. rd.HardState = hardSt
  519. }
  520. if r.raftLog.unstable.snapshot != nil {
  521. rd.Snapshot = *r.raftLog.unstable.snapshot
  522. }
  523. if len(r.readStates) != 0 {
  524. rd.ReadStates = r.readStates
  525. }
  526. rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
  527. return rd
  528. }
  529. // MustSync returns true if the hard state and count of Raft entries indicate
  530. // that a synchronous write to persistent storage is required.
  531. func MustSync(st, prevst pb.HardState, entsnum int) bool {
  532. // Persistent state on all servers:
  533. // (Updated on stable storage before responding to RPCs)
  534. // currentTerm
  535. // votedFor
  536. // log entries[]
  537. return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
  538. }