raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/pkg/capnslog"
)
const (
	// Number of entries for a slow follower to catch up after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency with the
	// leader. The max throughput is around 10K entries/s, so keeping 5K
	// entries is enough to help a follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)
var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// Indirection for the expvar func interface: expvar panics when
	// publishing a duplicate name and does not support removing a
	// registered name, so we register only a func that calls raftStatus
	// and swap raftStatus out as needed.
	raftStatus func() raft.Status
)
func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}
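
// RaftTimer reports the latest raft index, applied index, and term the
// server has observed.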
type RaftTimer interface {
	Index() uint64
	AppliedIndex() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once an apply
// is consumed, the entries will be persisted to raft storage
// concurrently; the application must read notifyc before assuming the
// raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node,
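	// so the apply path does not act on data (e.g. an incoming snapshot)
	// that the raft layer has not yet persisted to disk.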
	notifyc chan struct{}
}

type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These four uint64 fields must be the first elements of the struct
	// to keep 64-bit alignment for atomic access to the fields.
	index        uint64
	appliedindex uint64
	term         uint64
	lead         uint64

	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detector for raft heartbeat messages
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

type raftNodeConfig struct {
	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}
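
// newRaftNode wires up a raftNode from the given config; call start to
// run its event loop. A rough usage sketch, assuming a raft.Node n,
// memory storage s, WAL w, snapshotter ss, cluster cl, and transport tr
// already exist (the values here are illustrative, not the server's
// actual wiring):
//
//	r := newRaftNode(raftNodeConfig{
//		isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
//		Node:        n,
//		raftStorage: s,
//		storage:     NewStorage(w, ss),
//		heartbeat:   100 * time.Millisecond,
//		transport:   tr,
//	})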
func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
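	// A zero heartbeat leaves the zero-value Ticker in place; its nil C
	// channel never fires, so the loop only ticks when Tick is called
	// directly (e.g. by tests).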
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc:  notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// The leader can write to its disk in parallel with
				// replicating to the followers and with the followers
				// writing to their disks.
				// For more details, see section 10.2.1 of the Raft thesis.
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}

					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal notifyc
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// A candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also, a slow machine's follower raft layer could proceed to become the leader
					// on its own single-node cluster before the apply layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessarily long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}
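
				// Advance notifies the raft node that this Ready has been
				// fully processed, so it may return the next one.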
				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}
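
// updateCommittedIndex reports the highest index seen in this apply,
// whether from the committed entries or the snapshot, to the server's
// raftReadyHandler.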
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}
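
		// Setting To to 0 marks a message for the transport to drop.
		// Only the most recent MsgAppResp matters, since it acknowledges
		// the highest index; keep the last one and drop the rest.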
		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store, without the KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	return ms
}

func (r *raftNode) apply() chan apply {
	return r.applyc
}
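
// stop signals the raft goroutine to exit and blocks until onStop has
// finished shutting everything down.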
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicksForElection advances ticks on the node for fast election.
// This reduces the time to wait for the first leader election when
// bootstrapping a whole cluster, while leaving at least 1 heartbeat so
// a possible existing leader can still contact the node.
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}
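
// startNode bootstraps a brand-new member: it creates the WAL with the
// member/cluster metadata, starts a raft node with the initial peer set,
// and publishes its Status for expvar.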
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, n, s, w
}
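
// restartNode rebuilds the raft state machine from the WAL (and the
// given snapshot, if any) and restarts the node for an existing member.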
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
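
// restartAsStandaloneNode restarts the member as a single-node cluster
// (used when forcing a new cluster out of an existing member): it drops
// any uncommitted WAL entries and force-appends config changes that
// remove every other member.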
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	// take raftStatusMu, as elsewhere, so the expvar func never reads a
	// half-written raftStatus
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
//   - ConfChangeAddNode, in which case the contained ID will be added into the set.
//   - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}