// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
	"github.com/coreos/pkg/capnslog"
)

const (
	// Number of entries for a slow follower to catch up on after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency to the leader.
	// The max throughput is around 10K entries/s, so keeping 5K entries is
	// enough to help the follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// Indirection for the expvar func interface.
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so we register a single func that calls
	// raftStatus, and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// notifyc before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node
	notifyc chan struct{}
}

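// raftNode wraps a raft.Node with the server-side plumbing around it:
// channels for handing applies, read states, and snapshot messages to the
// etcd server loop, plus a ticker and a heartbeat contention detector.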
type raftNode struct {
	tickMu *sync.Mutex
	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detectors for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

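// raftNodeConfig bundles the dependencies a raftNode needs: the raft.Node
// itself, its in-memory and durable storage, and the peer transport.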
type raftNodeConfig struct {
	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}

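// newRaftNode builds a raftNode from cfg. Note that a zero heartbeat leaves
// the ticker as a zero-value time.Ticker whose channel never fires, so ticks
// must then be driven manually (e.g. in tests, or via advanceTicks).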
func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		tickMu:         new(sync.Mutex),
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

// tick wraps raft.Node.Tick with a mutex; the raft package does not lock
// internally, so callers that tick concurrently must serialize here.
func (r *raftNode) tick() {
	r.tickMu.Lock()
	r.Tick()
	r.tickMu.Unlock()
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					rh.updateLead(rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc:  notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// The leader can write to its disk in parallel with replicating
				// to the followers and them writing to their disks.
				// For more details, see raft thesis section 10.2.1.
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}

					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal notifyc chan
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also, a slow machine's follower raft layer could proceed to become the leader
					// of its own single-node cluster before the apply layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessarily long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}

				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}

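// updateCommittedIndex reports the highest index carried by the apply
// (the last committed entry, or the snapshot index if that is higher)
// to the ready handler so the server can track commit progress.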
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

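// processMessages filters outbound messages in place before they reach the
// transport. Setting a message's To field to 0 marks it to be dropped by the
// transport: messages addressed to removed members, all but the latest
// MsgAppResp, and MsgSnap (redirected through msgSnapC instead) are cleared
// this way.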
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store without KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging
			// in the current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	return ms
}

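// apply returns the channel on which the raft loop hands ready-to-apply
// entries and snapshots to the server.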
func (r *raftNode) apply() chan apply {
	return r.applyc
}

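// stop signals the raft goroutine to exit and blocks until onStop has
// finished tearing down the node, ticker, transport, and storage.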
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicks advances the ticks of the Raft node.
// This can be used for fast-forwarding election
// ticks in multi-data-center deployments, thus
// speeding up the election process.
func (r *raftNode) advanceTicks(ticks int) {
	for i := 0; i < ticks; i++ {
		r.tick()
	}
}

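// startNode bootstraps a brand-new member: it creates the WAL with the
// member/cluster metadata, builds the initial peer list from the cluster
// membership, and starts a raft.Node backed by a fresh MemoryStorage.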
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, n, s, w
}

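// restartNode rebuilds a member from its on-disk state: it replays the WAL
// from the given snapshot (if any), restores the hard state and entries into
// a MemoryStorage, and restarts the raft.Node without changing membership.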
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

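// restartAsStandaloneNode restarts a member as a single-node cluster: it
// drops WAL entries that were never committed, then force-appends (and marks
// as committed) config-change entries that remove every other member, so the
// node can make progress on its own.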
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
//   - ConfChangeAddNode, in which case the contained ID will be added into the set.
//   - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
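//
// For example, a snapshot whose ConfState has nodes {1, 2}, followed by
// entries that remove 2 and add 3, yields [1, 3].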
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
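//
// For example, ids=[1, 2, 3] with self=1, term=5, index=10 produces two
// ConfChangeRemoveNode entries (for 2 and 3) at indexes 11 and 12, both
// at term 5.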
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}