node.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package raft

import (
	"context"
	"errors"

	pb "go.etcd.io/etcd/raft/raftpb"
)
type SnapshotStatus int

const (
	SnapshotFinish  SnapshotStatus = 1
	SnapshotFailure SnapshotStatus = 2
)

var (
	emptyState = pb.HardState{}

	// ErrStopped is returned by methods on Nodes that have been stopped.
	ErrStopped = errors.New("raft: stopped")
)

// SoftState provides state that is useful for logging and debugging.
// The state is volatile and does not need to be persisted to the WAL.
type SoftState struct {
	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
	RaftState StateType
}

func (a *SoftState) equal(b *SoftState) bool {
	return a.Lead == b.Lead && a.RaftState == b.RaftState
}
// Ready encapsulates the entries and messages that are ready to read,
// be saved to stable storage, committed or sent to other peers.
// All fields in Ready are read-only.
type Ready struct {
	// The current volatile state of a Node.
	// SoftState will be nil if there is no update.
	// It is not required to consume or store SoftState.
	*SoftState

	// The current state of a Node to be saved to stable storage BEFORE
	// Messages are sent.
	// HardState will be equal to empty state if there is no update.
	pb.HardState
	// ReadStates can be used by the node to serve linearizable read requests
	// locally when its applied index is greater than the index in a ReadState.
	// Note that ReadStates are returned when raft receives MsgReadIndex.
	// Each returned ReadState is only valid for the request that asked to read.
	ReadStates []ReadState
	// Entries specifies entries to be saved to stable storage BEFORE
	// Messages are sent.
	Entries []pb.Entry

	// Snapshot specifies the snapshot to be saved to stable storage.
	Snapshot pb.Snapshot

	// CommittedEntries specifies entries to be committed to a
	// store/state-machine. These have previously been committed to stable
	// store.
	CommittedEntries []pb.Entry

	// Messages specifies outbound messages to be sent AFTER Entries are
	// committed to stable storage.
	// If it contains a MsgSnap message, the application MUST report back to raft
	// when the snapshot has been received or has failed by calling ReportSnapshot.
	Messages []pb.Message

	// MustSync indicates whether the HardState and Entries must be synchronously
	// written to disk or if an asynchronous write is permissible.
	MustSync bool
}
func isHardStateEqual(a, b pb.HardState) bool {
	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
}

// IsEmptyHardState returns true if the given HardState is empty.
func IsEmptyHardState(st pb.HardState) bool {
	return isHardStateEqual(st, emptyState)
}

// IsEmptySnap returns true if the given Snapshot is empty.
func IsEmptySnap(sp pb.Snapshot) bool {
	return sp.Metadata.Index == 0
}

func (rd Ready) containsUpdates() bool {
	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
}

// appliedCursor extracts from the Ready the highest index the client has
// applied (once the Ready is confirmed via Advance). If no information is
// contained in the Ready, returns zero.
func (rd Ready) appliedCursor() uint64 {
	if n := len(rd.CommittedEntries); n > 0 {
		return rd.CommittedEntries[n-1].Index
	}
	if index := rd.Snapshot.Metadata.Index; index > 0 {
		return index
	}
	return 0
}
// Node represents a node in a raft cluster.
type Node interface {
	// Tick increments the internal logical clock for the Node by a single tick. Election
	// timeouts and heartbeat timeouts are in units of ticks.
	Tick()

	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
	Campaign(ctx context.Context) error
	// Propose proposes that data be appended to the log. Note that proposals can be lost without
	// notice, therefore it is the user's job to ensure proposals are retried.
	Propose(ctx context.Context, data []byte) error
	// ProposeConfChange proposes a configuration change. Like any proposal, the
	// configuration change may be dropped with or without an error being
	// returned. In particular, configuration changes are dropped unless the
	// leader has certainty that there is no prior unapplied configuration
	// change in its log.
	//
	// The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
	// message. The latter allows arbitrary configuration changes via joint
	// consensus, notably including replacing a voter. Passing a ConfChangeV2
	// message is only allowed if all Nodes participating in the cluster run a
	// version of this library aware of the V2 API. See pb.ConfChangeV2 for
	// usage details and semantics.
	ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error

	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
	Step(ctx context.Context, msg pb.Message) error

	// Ready returns a channel that returns the current point-in-time state.
	// Users of the Node must call Advance after retrieving the state returned by Ready.
	//
	// NOTE: No committed entries from the next Ready may be applied until all committed entries
	// and snapshots from the previous one have finished.
	Ready() <-chan Ready

	// Advance notifies the Node that the application has saved progress up to the last Ready.
	// It prepares the node to return the next available Ready.
	//
	// The application should generally call Advance after it applies the entries in last Ready.
	//
	// However, as an optimization, the application may call Advance while it is applying the
	// commands. For example, when the last Ready contains a snapshot, the application might take
	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
	// progress, it can call Advance before it finishes applying the last Ready.
	Advance()
	// ApplyConfChange applies a config change (previously passed to
	// ProposeConfChange) to the node. This must be called whenever a config
	// change is observed in Ready.CommittedEntries.
	//
	// Returns an opaque non-nil ConfState protobuf which must be recorded in
	// snapshots.
	ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState

	// TransferLeadership attempts to transfer leadership to the given transferee.
	TransferLeadership(ctx context.Context, lead, transferee uint64)
	// ReadIndex requests a read state. The read state will be set in the Ready.
	// A read state has a read index. Once the application advances further than the read
	// index, any linearizable read requests issued before the read request can be
	// processed safely. The read state will have the same rctx attached.
	ReadIndex(ctx context.Context, rctx []byte) error
	// Status returns the current status of the raft state machine.
	Status() Status

	// ReportUnreachable reports the given node is not reachable for the last send.
	ReportUnreachable(id uint64)
	// ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
	// who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
	// Calling ReportSnapshot with SnapshotFinish is a no-op. However, any failure in applying a
	// snapshot (e.g. while streaming it from leader to follower) should be reported to the
	// leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any raft
	// log probes until the follower can apply the snapshot and advance its state. If the follower
	// cannot do that, for example because of a crash, it could end up in limbo, never getting any
	// updates from the leader. Therefore, it is crucial that the application ensures that any
	// failure in snapshot sending is caught and reported back to the leader, so that it can resume
	// raft log probing of the follower.
	ReportSnapshot(id uint64, status SnapshotStatus)
	// Stop performs any necessary termination of the Node.
	Stop()
}
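// The sketch below is not part of the original file: it is a minimal,
// illustrative version of the Ready/Advance cycle described in the Node
// documentation above. exampleReadyLoop, saveToStorage, send and apply are
// hypothetical names for application-provided persistence, transport and
// state-machine hooks; the sketch omits Tick driving and EntryConfChangeV2
// handling for brevity.
func exampleReadyLoop(
	n Node,
	saveToStorage func(pb.HardState, []pb.Entry, pb.Snapshot),
	send func([]pb.Message),
	apply func(pb.Entry),
) {
	for rd := range n.Ready() {
		// Persist HardState, Entries and Snapshot before sending Messages.
		saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
		send(rd.Messages)
		for _, ent := range rd.CommittedEntries {
			// Config changes observed here must be fed back via ApplyConfChange.
			if ent.Type == pb.EntryConfChange {
				var cc pb.ConfChange
				if err := cc.Unmarshal(ent.Data); err == nil {
					n.ApplyConfChange(cc)
				}
			}
			apply(ent)
		}
		// Tell raft this Ready has been processed so the next one can be emitted.
		n.Advance()
	}
}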
type Peer struct {
	ID      uint64
	Context []byte
}

// StartNode returns a new Node given configuration and a list of raft peers.
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
//
// Peers must not be zero length; call RestartNode in that case.
func StartNode(c *Config, peers []Peer) Node {
	if len(peers) == 0 {
		panic("no peers given; use RestartNode instead")
	}
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	rn.Bootstrap(peers)

	n := newNode(rn)
	go n.run()
	return &n
}
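// Illustrative only (not part of the original file): a hedged sketch of how an
// application might configure and start a member of a three-node cluster with
// in-memory storage. exampleStartClusterMember is a hypothetical name and the
// numeric values are placeholders, not recommendations.
func exampleStartClusterMember() Node {
	storage := NewMemoryStorage()
	c := &Config{
		ID:              0x01,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}
	// All initial cluster members, including this node, must be listed.
	peers := []Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}}
	return StartNode(c, peers)
}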
// RestartNode is similar to StartNode but does not take a list of peers.
// The current membership of the cluster will be restored from the Storage.
// If the caller has an existing state machine, pass in the last log index that
// has been applied to it; otherwise use zero.
func RestartNode(c *Config) Node {
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	n := newNode(rn)
	go n.run()
	return &n
}

type msgWithResult struct {
	m      pb.Message
	result chan error
}
// node is the canonical implementation of the Node interface
type node struct {
	propc      chan msgWithResult
	recvc      chan pb.Message
	confc      chan pb.ConfChangeV2
	confstatec chan pb.ConfState
	readyc     chan Ready
	advancec   chan struct{}
	tickc      chan struct{}
	done       chan struct{}
	stop       chan struct{}
	status     chan chan Status

	rn *RawNode
}

func newNode(rn *RawNode) node {
	return node{
		propc:      make(chan msgWithResult),
		recvc:      make(chan pb.Message),
		confc:      make(chan pb.ConfChangeV2),
		confstatec: make(chan pb.ConfState),
		readyc:     make(chan Ready),
		advancec:   make(chan struct{}),
		// make tickc a buffered chan, so the raft node can buffer some ticks when it
		// is busy processing raft messages. The raft node will resume processing
		// buffered ticks when it becomes idle.
		tickc:      make(chan struct{}, 128),
		done:       make(chan struct{}),
		stop:       make(chan struct{}),
		status:     make(chan chan Status),
		rn:         rn,
	}
}

func (n *node) Stop() {
	select {
	case n.stop <- struct{}{}:
		// Not already stopped, so trigger it
	case <-n.done:
		// Node has already been stopped - no need to do anything
		return
	}
	// Block until the stop has been acknowledged by run()
	<-n.done
}
func (n *node) run() {
	var propc chan msgWithResult
	var readyc chan Ready
	var advancec chan struct{}
	var rd Ready

	r := n.rn.raft

	lead := None

	for {
		if advancec != nil {
			readyc = nil
		} else if n.rn.HasReady() {
			// Populate a Ready. Note that this Ready is not guaranteed to
			// actually be handled. We will arm readyc, but there's no guarantee
			// that we will actually send on it. It's possible that we will
			// service another channel instead, loop around, and then populate
			// the Ready again. We could instead force the previous Ready to be
			// handled first, but it's generally good to emit larger Readys plus
			// it simplifies testing (by emitting less frequently and more
			// predictably).
			rd = n.rn.readyWithoutAccept()
			readyc = n.readyc
		}

		if lead != r.lead {
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc
			} else {
				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.
		case pm := <-propc:
			m := pm.m
			m.From = r.id
			err := r.Step(m)
			if pm.result != nil {
				pm.result <- err
				close(pm.result)
			}
		case m := <-n.recvc:
			// filter out response message from unknown From.
			if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
				r.Step(m)
			}
		case cc := <-n.confc:
			_, okBefore := r.prs.Progress[r.id]
			cs := r.applyConfChange(cc)
			// If the node was removed, block incoming proposals. Note that we
			// only do this if the node was in the config before. Nodes may be
			// a member of the group without knowing this (when they're catching
			// up on the log and don't have the latest config) and we don't want
			// to block the proposal channel in that case.
			//
			// NB: propc is reset when the leader changes, which, if we learn
			// about it, sort of implies that we got readded, maybe? This isn't
			// very sound and likely has bugs.
			if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter {
				var found bool
				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
					for _, id := range sl {
						if id == r.id {
							found = true
						}
					}
				}
				if !found {
					propc = nil
				}
			}
			select {
			case n.confstatec <- cs:
			case <-n.done:
			}
		case <-n.tickc:
			n.rn.Tick()
		case readyc <- rd:
			n.rn.acceptReady(rd)
			advancec = n.advancec
		case <-advancec:
			n.rn.Advance(rd)
			rd = Ready{}
			advancec = nil
		case c := <-n.status:
			c <- getStatus(r)
		case <-n.stop:
			close(n.done)
			return
		}
	}
}
// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead)
	}
}
func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }

func (n *node) Propose(ctx context.Context, data []byte) error {
	return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
}
func (n *node) Step(ctx context.Context, m pb.Message) error {
	// ignore unexpected local messages received over the network
	if IsLocalMsg(m.Type) {
		// TODO: return an error?
		return nil
	}
	return n.step(ctx, m)
}
func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
	typ, data, err := pb.MarshalConfChange(c)
	if err != nil {
		return pb.Message{}, err
	}
	return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
}

func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
	msg, err := confChangeToMsg(cc)
	if err != nil {
		return err
	}
	return n.Step(ctx, msg)
}

func (n *node) step(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, false)
}

func (n *node) stepWait(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, true)
}
// Step advances the state machine using msgs. The ctx.Err() will be returned,
// if any.
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		select {
		case n.recvc <- m:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		pm.result = make(chan error, 1)
	}
	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	select {
	case err := <-pm.result:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}
func (n *node) Ready() <-chan Ready { return n.readyc }

func (n *node) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}

func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc.AsV2():
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec:
	case <-n.done:
	}
	return &cs
}

func (n *node) Status() Status {
	c := make(chan Status)
	select {
	case n.status <- c:
		return <-c
	case <-n.done:
		return Status{}
	}
}

func (n *node) ReportUnreachable(id uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
	case <-n.done:
	}
}

func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
	rej := status == SnapshotFailure
	select {
	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
	case <-n.done:
	}
}
func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	select {
	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
	case <-n.done:
	case <-ctx.Done():
	}
}
func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}

func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
	rd := Ready{
		Entries:          r.raftLog.unstableEntries(),
		CommittedEntries: r.raftLog.nextEnts(),
		Messages:         r.msgs,
	}
	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
		rd.SoftState = softSt
	}
	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
		rd.HardState = hardSt
	}
	if r.raftLog.unstable.snapshot != nil {
		rd.Snapshot = *r.raftLog.unstable.snapshot
	}
	if len(r.readStates) != 0 {
		rd.ReadStates = r.readStates
	}
	rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
	return rd
}
// MustSync returns true if the hard state and count of Raft entries indicate
// that a synchronous write to persistent storage is required.
func MustSync(st, prevst pb.HardState, entsnum int) bool {
	// Persistent state on all servers:
	// (Updated on stable storage before responding to RPCs)
	// currentTerm
	// votedFor
	// log entries[]
	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
}
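// Illustrative sketch (not part of the original file) of how an application
// persisting a Ready might consult rd.MustSync; "wal" stands in for a
// hypothetical write-ahead-log abstraction with Save and Sync methods:
//
//	wal.Save(rd.HardState, rd.Entries) // append durably
//	if rd.MustSync {
//		wal.Sync() // fsync before rd.Messages are sent
//	}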