// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package raft

import (
	"context"
	"errors"

	pb "go.etcd.io/etcd/raft/raftpb"
)
type SnapshotStatus int

const (
	SnapshotFinish  SnapshotStatus = 1
	SnapshotFailure SnapshotStatus = 2
)

var (
	emptyState = pb.HardState{}

	// ErrStopped is returned by methods on Nodes that have been stopped.
	ErrStopped = errors.New("raft: stopped")
)

// SoftState provides state that is useful for logging and debugging.
// The state is volatile and does not need to be persisted to the WAL.
type SoftState struct {
	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
	RaftState StateType
}

func (a *SoftState) equal(b *SoftState) bool {
	return a.Lead == b.Lead && a.RaftState == b.RaftState
}

// Ready encapsulates the entries and messages that are ready to read,
// be saved to stable storage, committed or sent to other peers.
// All fields in Ready are read-only.
type Ready struct {
	// The current volatile state of a Node.
	// SoftState will be nil if there is no update.
	// It is not required to consume or store SoftState.
	*SoftState

	// The current state of a Node to be saved to stable storage BEFORE
	// Messages are sent.
	// HardState will be equal to empty state if there is no update.
	pb.HardState
	// ReadStates can be used for the node to serve linearizable read requests
	// locally when its applied index is greater than the index in the ReadState.
	// Note that the ReadState will be returned when raft receives MsgReadIndex.
	// The returned value is only valid for the request that requested to read.
	ReadStates []ReadState
	// Entries specifies entries to be saved to stable storage BEFORE
	// Messages are sent.
	Entries []pb.Entry

	// Snapshot specifies the snapshot to be saved to stable storage.
	Snapshot pb.Snapshot

	// CommittedEntries specifies entries to be committed to a
	// store/state-machine. These have previously been committed to stable
	// store.
	CommittedEntries []pb.Entry

	// Messages specifies outbound messages to be sent AFTER Entries are
	// committed to stable storage.
	// If it contains a MsgSnap message, the application MUST report back to raft
	// when the snapshot has been received or has failed by calling ReportSnapshot.
	Messages []pb.Message

	// MustSync indicates whether the HardState and Entries must be synchronously
	// written to disk or if an asynchronous write is permissible.
	MustSync bool
}
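
// exampleHandleReady is an illustrative sketch, not part of the upstream
// file: a minimal loop consuming Ready, under the assumption that the raft
// log lives in a MemoryStorage and that the application supplies its own send
// and apply callbacks (both hypothetical placeholders). Error handling is
// elided for brevity.
func exampleHandleReady(n Node, storage *MemoryStorage, send func([]pb.Message), apply func(pb.Entry), stop <-chan struct{}) {
	for {
		select {
		case rd := <-n.Ready():
			// Persist HardState, Entries and Snapshot BEFORE sending messages.
			if !IsEmptyHardState(rd.HardState) {
				storage.SetHardState(rd.HardState)
			}
			storage.Append(rd.Entries)
			if !IsEmptySnap(rd.Snapshot) {
				storage.ApplySnapshot(rd.Snapshot)
			}
			send(rd.Messages)
			// Apply committed entries to the application state machine.
			for _, ent := range rd.CommittedEntries {
				apply(ent)
			}
			// Signal raft that this Ready has been fully handled.
			n.Advance()
		case <-stop:
			return
		}
	}
}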
func isHardStateEqual(a, b pb.HardState) bool {
	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
}

// IsEmptyHardState returns true if the given HardState is empty.
func IsEmptyHardState(st pb.HardState) bool {
	return isHardStateEqual(st, emptyState)
}

// IsEmptySnap returns true if the given Snapshot is empty.
func IsEmptySnap(sp pb.Snapshot) bool {
	return sp.Metadata.Index == 0
}

func (rd Ready) containsUpdates() bool {
	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
}

// appliedCursor extracts from the Ready the highest index the client has
// applied (once the Ready is confirmed via Advance). If no information is
// contained in the Ready, returns zero.
func (rd Ready) appliedCursor() uint64 {
	if n := len(rd.CommittedEntries); n > 0 {
		return rd.CommittedEntries[n-1].Index
	}
	if index := rd.Snapshot.Metadata.Index; index > 0 {
		return index
	}
	return 0
}

// Node represents a node in a raft cluster.
type Node interface {
	// Tick increments the internal logical clock for the Node by a single tick. Election
	// timeouts and heartbeat timeouts are in units of ticks.
	Tick()
	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
	Campaign(ctx context.Context) error
	// Propose proposes that data be appended to the log. Note that proposals can be lost without
	// notice, therefore it is the user's job to ensure proposal retries.
	Propose(ctx context.Context, data []byte) error
	// ProposeConfChange proposes a configuration change. Like any proposal, the
	// configuration change may be dropped with or without an error being
	// returned. In particular, configuration changes are dropped unless the
	// leader has certainty that there is no prior unapplied configuration
	// change in its log.
	//
	// The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
	// message. The latter allows arbitrary configuration changes via joint
	// consensus, notably including replacing a voter. Passing a ConfChangeV2
	// message is only allowed if all Nodes participating in the cluster run a
	// version of this library aware of the V2 API. See pb.ConfChangeV2 for
	// usage details and semantics.
	ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error

	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
	Step(ctx context.Context, msg pb.Message) error

	// Ready returns a channel that returns the current point-in-time state.
	// Users of the Node must call Advance after retrieving the state returned by Ready.
	//
	// NOTE: No committed entries from the next Ready may be applied until all committed entries
	// and snapshots from the previous one have finished.
	Ready() <-chan Ready
	// Advance notifies the Node that the application has saved progress up to the last Ready.
	// It prepares the node to return the next available Ready.
	//
	// The application should generally call Advance after it applies the entries in the last Ready.
	//
	// However, as an optimization, the application may call Advance while it is applying the
	// commands. For example, when the last Ready contains a snapshot, the application might take
	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
	// progress, it can call Advance before it finishes applying the last Ready.
	Advance()
	// ApplyConfChange applies a config change (previously passed to
	// ProposeConfChange) to the node. This must be called whenever a config
	// change is observed in Ready.CommittedEntries, as shown in the sketch
	// after this interface.
	//
	// Returns an opaque non-nil ConfState protobuf which must be recorded in
	// snapshots.
	ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState

	// TransferLeadership attempts to transfer leadership to the given transferee.
	TransferLeadership(ctx context.Context, lead, transferee uint64)
	// ReadIndex requests a read state. The read state will be set in the Ready.
	// The read state has a read index. Once the application advances further than the read
	// index, any linearizable read requests issued before the read request can be
	// processed safely. The read state will have the same rctx attached.
	ReadIndex(ctx context.Context, rctx []byte) error
	// Status returns the current status of the raft state machine.
	Status() Status
	// ReportUnreachable reports that the given node is not reachable for the last send.
	ReportUnreachable(id uint64)
	// ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
	// meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
	// Calling ReportSnapshot with SnapshotFinish is a no-op. But any failure in applying a
	// snapshot (e.g., while streaming it from leader to follower) should be reported to the
	// leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any
	// raft log probes until the follower can apply the snapshot and advance its state. If the
	// follower can't do that (e.g., due to a crash), it could end up in limbo, never getting any
	// updates from the leader. Therefore, it is crucial that the application ensures that any
	// failure in snapshot sending is caught and reported back to the leader, so it can resume
	// raft log probing of the follower.
	ReportSnapshot(id uint64, status SnapshotStatus)
	// Stop performs any necessary termination of the Node.
	Stop()
}
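
// exampleApplyConfChangeEntry is an illustrative sketch, not part of the
// upstream file: how an application typically handles a committed
// configuration-change entry found in Ready.CommittedEntries. Only the legacy
// pb.EntryConfChange encoding is shown; a real application would handle
// pb.EntryConfChangeV2 analogously. The returned ConfState must be recorded
// in any snapshot taken at or after this entry's index.
func exampleApplyConfChangeEntry(n Node, ent pb.Entry) *pb.ConfState {
	if ent.Type != pb.EntryConfChange {
		return nil
	}
	var cc pb.ConfChange
	if err := cc.Unmarshal(ent.Data); err != nil {
		// Error handling elided for brevity.
		return nil
	}
	return n.ApplyConfChange(cc)
}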
type Peer struct {
	ID      uint64
	Context []byte
}

// StartNode returns a new Node given configuration and a list of raft peers.
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
//
// Peers must not be zero length; call RestartNode in that case.
func StartNode(c *Config, peers []Peer) Node {
	if len(peers) == 0 {
		panic("no peers given; use RestartNode instead")
	}
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	rn.Bootstrap(peers)

	n := newNode(rn)

	go n.run()
	return &n
}
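
// The following is an illustrative usage sketch, not part of the upstream
// file, written from the application's point of view (importing this package
// as "raft"); the three-node cluster and the concrete Config values are
// hypothetical. Each member is started the same way, with the full initial
// peer list:
//
//	storage := raft.NewMemoryStorage()
//	c := &raft.Config{
//		ID:              0x01,
//		ElectionTick:    10,
//		HeartbeatTick:   1,
//		Storage:         storage,
//		MaxSizePerMsg:   4096,
//		MaxInflightMsgs: 256,
//	}
//	n := raft.StartNode(c, []raft.Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})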
// RestartNode is similar to StartNode but does not take a list of peers.
// The current membership of the cluster will be restored from the Storage.
// If the caller has an existing state machine, pass in the last log index that
// has been applied to it; otherwise use zero.
func RestartNode(c *Config) Node {
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	n := newNode(rn)
	go n.run()
	return &n
}
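
// Illustrative restart sketch, not part of the upstream file: previously
// persisted state is loaded back into a MemoryStorage before calling
// RestartNode; snapshot, hardState and entries stand for whatever the
// application recovered from its own WAL and snapshot files.
//
//	storage := raft.NewMemoryStorage()
//	storage.ApplySnapshot(snapshot)
//	storage.SetHardState(hardState)
//	storage.Append(entries)
//	c := &raft.Config{
//		ID:              0x01,
//		ElectionTick:    10,
//		HeartbeatTick:   1,
//		Storage:         storage,
//		MaxSizePerMsg:   4096,
//		MaxInflightMsgs: 256,
//	}
//	n := raft.RestartNode(c)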
type msgWithResult struct {
	m      pb.Message
	result chan error
}

// node is the canonical implementation of the Node interface
type node struct {
	propc      chan msgWithResult
	recvc      chan pb.Message
	confc      chan pb.ConfChangeV2
	confstatec chan pb.ConfState
	readyc     chan Ready
	advancec   chan struct{}
	tickc      chan struct{}
	done       chan struct{}
	stop       chan struct{}
	status     chan chan Status

	rn *RawNode
}
func newNode(rn *RawNode) node {
	return node{
		propc:      make(chan msgWithResult),
		recvc:      make(chan pb.Message),
		confc:      make(chan pb.ConfChangeV2),
		confstatec: make(chan pb.ConfState),
		readyc:     make(chan Ready),
		advancec:   make(chan struct{}),
		// make tickc a buffered chan, so the raft node can buffer some ticks
		// when it is busy processing raft messages. The raft node will resume
		// processing buffered ticks when it becomes idle.
		tickc:  make(chan struct{}, 128),
		done:   make(chan struct{}),
		stop:   make(chan struct{}),
		status: make(chan chan Status),
		rn:     rn,
	}
}
func (n *node) Stop() {
	select {
	case n.stop <- struct{}{}:
		// Not already stopped, so trigger it
	case <-n.done:
		// Node has already been stopped - no need to do anything
		return
	}
	// Block until the stop has been acknowledged by run()
	<-n.done
}

func (n *node) run() {
	var propc chan msgWithResult
	var readyc chan Ready
	var advancec chan struct{}
	var rd Ready

	r := n.rn.raft

	lead := None

	for {
		if advancec != nil {
			readyc = nil
		} else if n.rn.HasReady() {
			// Populate a Ready. Note that this Ready is not guaranteed to
			// actually be handled. We will arm readyc, but there's no guarantee
			// that we will actually send on it. It's possible that we will
			// service another channel instead, loop around, and then populate
			// the Ready again. We could instead force the previous Ready to be
			// handled first, but it's generally good to emit larger Readys plus
			// it simplifies testing (by emitting less frequently and more
			// predictably).
			rd = n.rn.readyWithoutAccept()
			readyc = n.readyc
		}

		if lead != r.lead {
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc
			} else {
				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.
		case pm := <-propc:
			m := pm.m
			m.From = r.id
			err := r.Step(m)
			if pm.result != nil {
				pm.result <- err
				close(pm.result)
			}
		case m := <-n.recvc:
			// filter out response message from unknown From.
			if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
				r.Step(m)
			}
		case cc := <-n.confc:
			_, okBefore := r.prs.Progress[r.id]
			cs := r.applyConfChange(cc)
			// If the node was removed, block incoming proposals. Note that we
			// only do this if the node was in the config before. Nodes may be
			// a member of the group without knowing this (when they're catching
			// up on the log and don't have the latest config) and we don't want
			// to block the proposal channel in that case.
			//
			// NB: propc is reset when the leader changes, which, if we learn
			// about it, sort of implies that we got readded, maybe? This isn't
			// very sound and likely has bugs.
			if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter {
				var found bool
				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
					for _, id := range sl {
						if id == r.id {
							found = true
						}
					}
				}
				if !found {
					propc = nil
				}
			}
			select {
			case n.confstatec <- cs:
			case <-n.done:
			}
		case <-n.tickc:
			n.rn.Tick()
		case readyc <- rd:
			n.rn.acceptReady(rd)
			advancec = n.advancec
		case <-advancec:
			n.rn.Advance(rd)
			rd = Ready{}
			advancec = nil
		case c := <-n.status:
			c <- getStatus(r)
		case <-n.stop:
			close(n.done)
			return
		}
	}
}
// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead)
	}
}
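
// Illustrative sketch, not part of the upstream file: the application drives
// the logical clock from its own timer, e.g. a time.Ticker; the 100ms period
// here is a hypothetical choice that, together with Config.ElectionTick and
// Config.HeartbeatTick, determines the real-time election and heartbeat
// timeouts.
//
//	ticker := time.NewTicker(100 * time.Millisecond)
//	defer ticker.Stop()
//	for {
//		select {
//		case <-ticker.C:
//			n.Tick()
//		case rd := <-n.Ready():
//			// persist, send and apply as described above, then:
//			n.Advance()
//		}
//	}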
func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }

func (n *node) Propose(ctx context.Context, data []byte) error {
	return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
}
func (n *node) Step(ctx context.Context, m pb.Message) error {
	// Ignore unexpected local messages received over the network.
	if IsLocalMsg(m.Type) {
		// TODO: return an error?
		return nil
	}
	return n.step(ctx, m)
}
func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
	typ, data, err := pb.MarshalConfChange(c)
	if err != nil {
		return pb.Message{}, err
	}
	return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
}

func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
	msg, err := confChangeToMsg(cc)
	if err != nil {
		return err
	}
	return n.Step(ctx, msg)
}
func (n *node) step(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, false)
}

func (n *node) stepWait(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, true)
}

// stepWithWaitOption advances the state machine using the given message.
// ctx.Err() will be returned, if any.
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		select {
		case n.recvc <- m:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		pm.result = make(chan error, 1)
	}
	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	select {
	case err := <-pm.result:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}
func (n *node) Ready() <-chan Ready { return n.readyc }

func (n *node) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}

func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc.AsV2():
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec:
	case <-n.done:
	}
	return &cs
}

func (n *node) Status() Status {
	c := make(chan Status)
	select {
	case n.status <- c:
		return <-c
	case <-n.done:
		return Status{}
	}
}

func (n *node) ReportUnreachable(id uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
	case <-n.done:
	}
}

func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
	rej := status == SnapshotFailure

	select {
	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
	case <-n.done:
	}
}
func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	select {
	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
	case <-n.done:
	case <-ctx.Done():
	}
}
func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}
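
// Illustrative sketch, not part of the upstream file: the typical
// linearizable-read flow. The application issues ReadIndex with a unique
// request context, waits for the matching ReadState to appear in a later
// Ready, and serves the read once its applied index has caught up to the read
// index. waitApplied and serveRead are hypothetical application helpers.
//
//	rctx := []byte("unique-request-id")
//	if err := n.ReadIndex(ctx, rctx); err != nil {
//		return err
//	}
//	// later, while processing a Ready:
//	for _, rs := range rd.ReadStates {
//		if string(rs.RequestCtx) == string(rctx) {
//			waitApplied(rs.Index) // block until appliedIndex >= rs.Index
//			serveRead()
//		}
//	}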
func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
	rd := Ready{
		Entries:          r.raftLog.unstableEntries(),
		CommittedEntries: r.raftLog.nextEnts(),
		Messages:         r.msgs,
	}
	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
		rd.SoftState = softSt
	}
	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
		rd.HardState = hardSt
	}
	if r.raftLog.unstable.snapshot != nil {
		rd.Snapshot = *r.raftLog.unstable.snapshot
	}
	if len(r.readStates) != 0 {
		rd.ReadStates = r.readStates
	}
	rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
	return rd
}
// MustSync returns true if the hard state and count of Raft entries indicate
// that a synchronous write to persistent storage is required.
func MustSync(st, prevst pb.HardState, entsnum int) bool {
	// Persistent state on all servers:
	// (Updated on stable storage before responding to RPCs)
	// currentTerm
	// votedFor
	// log entries[]
	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
}
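
// For example (illustrative, not part of the upstream file): new entries or a
// changed Term/Vote require a synchronous write, while a HardState whose only
// change is a larger Commit index does not:
//
//	MustSync(pb.HardState{Term: 2, Vote: 1, Commit: 5},
//		pb.HardState{Term: 2, Vote: 1, Commit: 3}, 0) // false: only Commit advanced
//	MustSync(pb.HardState{Term: 3, Vote: 1, Commit: 5},
//		pb.HardState{Term: 2, Vote: 1, Commit: 5}, 0) // true: Term changed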