raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/pkg/capnslog"
)

const (
	// numberOfCatchUpEntries is the number of entries kept for a slow
	// follower to catch up after the raft storage entries are compacted.
	// We expect a follower to have millisecond-level latency to the leader.
	// The max throughput is around 10K entries/s, so keeping 5K entries is
	// enough to help a slow follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max message size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// raftStatusMu protects raftStatus.
	raftStatusMu sync.Mutex
	// raftStatus is an indirection for the expvar func interface:
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so we register a single func that calls
	// raftStatus and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}
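
// Note: once published, the raft status appears as "raft.status" in the
// output of the standard expvar HTTP handler (served at /debug/vars on the
// default net/http mux, assuming that mux is used).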

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once an apply is
// consumed, the entries will be persisted to raft storage concurrently;
// the application must read raftDone before assuming the raft messages
// are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	raftDone <-chan struct{} // rx {} after raft has persisted messages
}
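
// A consumer of applyc must receive from raftDone before it may assume the
// raft messages backing this apply have been persisted, roughly (sketch only;
// processEntries is a hypothetical helper, the real consumer is the etcd
// server's apply loop):
//
//	ap := <-r.apply()
//	processEntries(ap.entries)
//	<-ap.raftDone // raft messages are now stable on disk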

type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access: on 32-bit platforms, sync/atomic only
	// guarantees 64-bit alignment for the first word of an allocated struct.
	index uint64
	term  uint64
	lead  uint64

	mu sync.Mutex
	// last lead elected time
	lt time.Time

	// to check if the message receiver is removed from the cluster
	isIDRemoved func(id uint64) bool

	raft.Node

	// a chan to send/receive snapshots
	msgSnapC chan raftpb.Message

	// a chan to send out applies
	applyc chan apply

	// a chan to send out readStates
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detector for raft heartbeat messages
	td          *contention.TimeoutDetector
	heartbeat   time.Duration // for logging
	raftStorage *raft.MemoryStorage
	storage     Storage

	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, the server will panic.
	transport rafthttp.Transporter

	stopped chan struct{}
	done    chan struct{}
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	r.applyc = make(chan apply)
	r.stopped = make(chan struct{})
	r.done = make(chan struct{})
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false
		isCandidate := false
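
		// Each Ready is handled in a fixed order: publish the apply to
		// applyc, send messages first if leader (in parallel with the disk
		// write), save HardState and entries to the WAL, save and apply any
		// snapshot, append entries to the in-memory raft storage, send
		// messages if follower, signal raftDone, then Advance.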
		for {
			select {
			case <-r.ticker.C:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead {
						r.mu.Lock()
						r.lt = time.Now()
						r.mu.Unlock()
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					isCandidate = rd.RaftState == raft.StateCandidate
					rh.updateLeadership()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}

				raftDone := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					raftDone: raftDone,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// The leader can write to its disk in parallel with
				// replicating to the followers and the followers writing to
				// their disks. For more details, check raft thesis 10.2.1.
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.sendMessages(rd.Messages)
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// gofail: var raftBeforeFollowerSend struct{}
					r.sendMessages(rd.Messages)
				}
				raftDone <- struct{}{}
				r.Advance()

				if isCandidate {
					// A candidate needs to wait for all pending configuration
					// changes to be applied before continuing. Otherwise we
					// might incorrectly count the number of votes (e.g. count
					// a vote from a removed member).
					// We simply wait for ALL pending entries to be applied
					// for now. We might improve this later if it causes
					// unnecessarily long blocking.
					rh.waitForApply()
				}
			case <-r.stopped:
				return
			}
		}
	}()
}

func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

func (r *raftNode) sendMessages(ms []raftpb.Message) {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}
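
		// MsgAppResp is cumulative: the most recent response already covers
		// everything the earlier ones in this batch would report, so keep
		// only the last one (the loop walks backwards) and drop the rest.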
		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2 and the
			// KV for v3. The msgSnap only contains the most recent snapshot
			// of the store, without the KV. So we need to redirect the
			// msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}

	r.transport.Send(ms)
}

func (r *raftNode) apply() chan apply {
	return r.applyc
}

func (r *raftNode) leadElectedTime() time.Time {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.lt
}

func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicksForElection advances the node's ticks to speed up the first
// election. This reduces the time to wait for the first leader election when
// bootstrapping the whole cluster, while leaving at least one heartbeat
// interval for a possible existing leader to contact the node.
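//
// For example, with electionTicks = 10 this advances 9 ticks up front, so only
// the final tick(s) of the election timeout need to elapse naturally.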
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}
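
// startNode bootstraps a brand-new member: it writes the node and cluster IDs
// into a fresh WAL, builds the initial peer list from the cluster membership,
// and starts a raft.Node backed by a new in-memory storage.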
func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}
	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}

func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}
	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
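
// restartAsStandaloneNode restarts the member as a one-node cluster: it drops
// previously uncommitted WAL entries, then force-appends config-change entries
// that remove every other member. In upstream etcd this backs the
// --force-new-cluster recovery path.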
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entries:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
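//
// For example, a snapshot whose ConfState contains nodes {1, 2}, followed by
// entries adding node 3 and removing node 1, yields [2, 3].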
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}