raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
	"github.com/coreos/pkg/capnslog"
)
const (
	// numberOfCatchUpEntries is the number of entries kept after compacting
	// the raft storage entries, so that a slow follower can catch up.
	// We expect the follower to have millisecond-level latency to the leader.
	// The max throughput is around 10K entries/s, so keeping 5K entries is
	// enough for the follower to catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max message size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)
var (
	// raftStatusMu protects raftStatus.
	raftStatusMu sync.Mutex
	// Indirection for the expvar func interface:
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so we register a single func that calls
	// raftStatus, and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}
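
// RaftTimer reports the latest raft index and term the server has seen.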
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	raftDone <-chan struct{} // rx {} after raft has persisted messages
}
type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access to the fields.
	index uint64
	term  uint64
	lead  uint64

	mu sync.Mutex
	// last lead elected time
	lt time.Time

	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool

	raft.Node

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detector for raft heartbeat messages
	td          *contention.TimeoutDetector
	heartbeat   time.Duration // for logging
	raftStorage *raft.MemoryStorage
	storage     Storage

	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, the server will panic.
	transport rafthttp.Transporter

	stopped chan struct{}
	done    chan struct{}
}
// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	r.applyc = make(chan apply)
	r.stopped = make(chan struct{})
	r.done = make(chan struct{})

	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.Tick()
			case rd := <-r.Ready():
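				// A Ready batches everything raft wants done: soft/hard state
				// updates, read states, entries to persist, a snapshot to save,
				// committed entries to apply, and messages to send.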
				if rd.SoftState != nil {
					if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead {
						r.mu.Lock()
						r.lt = time.Now()
						r.mu.Unlock()
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.updateLeadership()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}
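
				// Hand the committed entries and snapshot to the apply
				// goroutine; raftDone lets the applier wait until this
				// batch has been durably persisted below.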
				raftDone := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					raftDone: raftDone,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}
				// The leader can write to its disk in parallel with replicating
				// to the followers and with them writing to their disks.
				// For more details, see section 10.2.1 of the raft thesis.
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal raftdone chan
					msgs := r.processMessages(rd.Messages)

					// now unblock 'applyAll', which waits on raft log disk writes before triggering snapshots
					raftDone <- struct{}{}

					// A candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also, the follower raft layer on a slow machine could proceed to become
					// the leader of its own single-node cluster before the apply layer applies
					// the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later if it causes unnecessarily long blocking.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						rh.waitForApply()
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					raftDone <- struct{}{}
				}

				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}
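
// updateCommittedIndex reports the highest committed index visible in the
// apply (from its entries or its snapshot) to the ready handler.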
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}
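
// processMessages filters and rewrites outgoing messages in place before
// handing them to the transport. Setting a message's To field to 0 marks it
// to be dropped: messages to removed members are dropped, and all but the
// newest MsgAppResp are dropped since only the latest response matters.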
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2 and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store, without the KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	return ms
}
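
// apply returns the channel on which apply batches are delivered to the server.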
func (r *raftNode) apply() chan apply {
	return r.applyc
}
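
// leadElectedTime returns the time of the most recently observed leader change.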
func (r *raftNode) leadElectedTime() time.Time {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.lt
}
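
// stop signals the raftNode goroutine to exit and blocks until it has
// fully shut down.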
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}
// advanceTicksForElection advances the node's ticks to speed up election.
// This reduces the time to wait for the first leader election when
// bootstrapping the whole cluster, while leaving at least one tick so a
// possible existing leader can contact the node first.
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}
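
// startNode bootstraps a brand-new member: it creates the WAL with the
// node/cluster metadata, seeds raft with the initial peer list (each peer's
// member data marshaled as its context), and starts the raft node.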
func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}
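
// restartNode rebuilds a member from its existing WAL and optional snapshot:
// it replays the persisted hard state and entries into memory storage and
// restarts the raft node with the recovered state.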
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
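
// restartAsStandaloneNode restarts the member as a single-node cluster: it
// discards entries that were never committed and force-appends config-change
// entries that remove every other member, so the node can elect itself.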
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}
// getIDs returns an ordered set of IDs included in the given snapshot and
// entries. The given snapshot/entries can contain two kinds of
// ID-related entries:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}
// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}