raft.go
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/pkg/capnslog"
)

const (
	// numberOfCatchUpEntries is the number of entries kept for a slow
	// follower to catch up after the raft storage entries are compacted.
	// We expect the follower to have millisecond-level latency to the
	// leader. The max throughput is around 10K entries/s, so keeping 5K
	// entries is enough to help a follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max message size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// Indirection for the expvar func interface.
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so register a single func that calls
	// raftStatus, and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	raftDone <-chan struct{} // rx {} after raft has persisted messages
}

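// A consumer of applyc is expected to read raftDone before it treats the
// entries as stable. A minimal sketch of the consuming side (illustrative
// only; the real consumer is the etcdserver run loop, and applyEntries is a
// hypothetical helper):
//
//	for ap := range r.apply() {
//		<-ap.raftDone // raft has persisted its state and entries to disk
//		applyEntries(ap.entries)
//	}
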
type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access to the fields.
	index uint64
	term  uint64
	lead  uint64

	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detector for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

type raftNodeConfig struct {
	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool

	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}

func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

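// A minimal construction sketch, with illustrative values (these are not
// the exact arguments etcdserver passes; NewStorage here stands for the
// etcdserver Storage wrapping the WAL and snapshotter):
//
//	rn := newRaftNode(raftNodeConfig{
//		isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
//		Node:        n,
//		raftStorage: s,
//		storage:     NewStorage(w, ss),
//		heartbeat:   100 * time.Millisecond,
//		transport:   tr,
//	})
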
// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}

				raftDone := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					raftDone: raftDone,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// the leader can write to its disk in parallel with replicating to
				// the followers and with them writing to their disks.
				// For more details, check raft thesis 10.2.1
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing this Ready's messages before we signal the raftdone chan
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					raftDone <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also a slow machine's follower raft layer could proceed to become the leader
					// of its own single-node cluster, before the apply layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessary long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume raftDone has cap of 1)
						raftDone <- struct{}{}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					raftDone <- struct{}{}
				}

				// notify raft that this Ready has been fully processed (state and
				// entries persisted, snapshot applied, apply handed off), so it
				// may produce the next Ready.
				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}

func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

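// processMessages filters and mutates outbound messages before they are
// handed to the transport. Setting a message's To field to 0 marks it to be
// ignored by the rafthttp transport; this is how messages to removed
// members, redundant MsgAppResp, and redirected MsgSnap are dropped.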
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store without KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	return ms
}

func (r *raftNode) apply() chan apply {
	return r.applyc
}

func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicksForElection advances ticks to the node for fast election.
// This reduces the time to wait for the first leader election when
// bootstrapping the whole cluster, while leaving at least 1 heartbeat
// for a possible existing leader to contact it.
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}

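// For example, with ElectionTick = 10 the node is advanced 9 ticks here, so
// a freshly bootstrapped cluster skips most of the election timeout (raft
// randomizes the exact timeout, so the first election still fires after a
// few more ticks), while an existing leader still has at least one tick to
// deliver a heartbeat before this node can campaign.
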
func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}
	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}

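// Note: cfg.ElectionTicks expresses the election timeout in heartbeat
// intervals. With etcd's default flags (100ms heartbeat interval, 1000ms
// election timeout) it is 10, so together with HeartbeatTick = 1 a follower
// campaigns only after missing roughly ten heartbeats.
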
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}
	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}

// restartAsStandaloneNode restarts the node as a single-member cluster,
// discarding uncommitted WAL entries and force-appending config-change
// entries that remove every other member.
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	// hold raftStatusMu for consistency with the other writers of raftStatus
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
//   - ConfChangeAddNode, in which case the contained ID will be added into the set.
//   - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode, ConfChangeRemoveNode, or ConfChangeUpdateNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

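// For example, a snapshot whose ConfState holds nodes {1, 2, 3}, followed by
// entries with a ConfChangeRemoveNode for node 2 and a ConfChangeAddNode for
// node 4, yields [1, 3, 4].
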
// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}
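
// For example, createConfigChangeEnts([]uint64{1, 2, 3}, 2, 5, 10) returns
// two ConfChangeRemoveNode entries (for IDs 1 and 3) at term 5 with indexes
// 11 and 12; because self (2) appears in ids, no ConfChangeAddNode entry is
// appended.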