raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/pkg/capnslog"
)
const (
	// Number of entries for a slow follower to catch up on after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency with the
	// leader. The max throughput is around 10K. Keeping 5K entries is
	// enough to help a follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)
var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// indirection for the expvar func interface;
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so we register a single func that calls
	// raftStatus and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// notifyc before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node
	notifyc chan struct{}
}
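
// The notifyc handshake (see raftNode.start below): the raft goroutine sends
// on notifyc once the snapshot has been saved to disk, and again once the
// raft entries have been persisted and outgoing messages processed. When the
// Ready carries config-change entries, the raft goroutine blocks on one more
// send until the apply layer has caught up, so a follower never sends
// messages before pending config changes take effect.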
type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access to the fields.
	index uint64
	term  uint64
	lead  uint64

	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detectors for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}
type raftNodeConfig struct {
	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}
func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
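	// A zero-value Ticker has a nil C channel, and receiving from a nil
	// channel blocks forever, so a zero heartbeat effectively disables
	// ticking (e.g. in tests that drive the node manually).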
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}
// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
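// The goroutine drives the raft state machine: for every raft.Ready it
// persists entries and hard state, sends outgoing messages, hands committed
// entries to the apply layer, and only then calls Advance to ask raft for
// the next Ready.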
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}
				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc:  notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}
				// The leader can write to its disk in parallel with replicating to the
				// followers and with them writing to their disks.
				// For more details, check section 10.2.1 in the raft thesis.
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}
				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}

					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}
				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal notifyc
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also, the raft layer of a follower on a slow machine could proceed to
					// become the leader of its own single-node cluster before the apply
					// layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessarily long blocking.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}
				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}
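
// processMessages filters outgoing messages before they reach the transport.
// Setting a message's To field to 0 marks it as intentionally dropped, so the
// transport ignores it. Only the most recent MsgAppResp is kept, since it
// supersedes the earlier ones; MsgSnap is redirected to the etcd server's
// main loop via msgSnapC; and late heartbeats trigger an overload warning.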
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store without KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	return ms
}
func (r *raftNode) apply() chan apply {
	return r.applyc
}
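
// stop signals the goroutine started by start to exit and blocks until
// onStop has finished cleaning up.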
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}
// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}
// advanceTicksForElection advances ticks to the node for fast election.
// This reduces the time to wait for the first leader election when
// bootstrapping the whole cluster, while leaving at least 1 heartbeat for a
// possible existing leader to contact it.
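// For example, with electionTicks = 10 the loop applies nine ticks, leaving
// the node a single tick short of its election timeout.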
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}
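
// startNode bootstraps a brand-new member: it creates a WAL stamped with the
// node and cluster IDs, builds the initial peer list from the given member
// IDs, and starts a fresh raft node on an empty in-memory storage.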
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, n, s, w
}
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
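
// restartAsStandaloneNode restarts a member as a single-node cluster (used by
// the --force-new-cluster option): it drops the uncommitted tail of the WAL
// and force-appends config-change entries that remove every other member.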
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}
// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entries:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}
// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}