raft.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
	"github.com/coreos/pkg/capnslog"
)

const (
	// Number of entries for a slow follower to catch up on after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency with the leader.
	// The max throughput is around 10K. Keeping 5K entries is enough for
	// a follower to catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// indirection for the expvar func interface;
	// expvar panics when publishing a duplicate name
	// and does not support removing a registered name,
	// so only register a func that calls raftStatus
	// and change raftStatus as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}
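
// RaftTimer reports the latest raft index and term the server has seen.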
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	raftDone <-chan struct{} // rx {} after raft has persisted messages
}

type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access to the fields.
	index uint64
	term  uint64
	lead  uint64

	mu sync.Mutex
	// last lead elected time
	lt time.Time

	raft.Node

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker      <-chan time.Time
	raftStorage *raft.MemoryStorage
	storage     Storage

	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter

	stopped chan struct{}
	done    chan struct{}
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	r.applyc = make(chan apply)
	r.stopped = make(chan struct{})
	r.done = make(chan struct{})

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead {
						r.mu.Lock()
						r.lt = time.Now()
						r.mu.Unlock()
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.leadershipUpdate()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-r.stopped:
						return
					}
				}

				raftDone := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					raftDone: raftDone,
				}

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// the leader can write to its disk in parallel with replicating to the followers and them
				// writing to their disks.
				// For more details, check raft thesis 10.2.1
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					rh.sendMessage(rd.Messages)
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// gofail: var raftBeforeFollowerSend struct{}
					rh.sendMessage(rd.Messages)
				}
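				// signal the reader of applyc that this Ready has been fully
				// processed: the hard state and entries are saved to storage
				// and the raft messages have been sent.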
				raftDone <- struct{}{}
				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}
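
// apply returns the channel on which the raft run loop publishes committed
// entries and snapshots for the server to apply.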
func (r *raftNode) apply() chan apply {
	return r.applyc
}
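
// leadElectedTime returns the time at which this node last observed a new
// leader being elected.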
func (r *raftNode) leadElectedTime() time.Time {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.lt
}
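
// stop signals the raft run loop to exit and blocks until shutdown completes.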
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}
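
// onStop stops the raft node and its transport, closes the storage, and
// marks shutdown as complete by closing the done channel.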
func (r *raftNode) onStop() {
	r.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicksForElection advances ticks on the node for fast election.
// This reduces the wait for the first leader election when bootstrapping the
// whole cluster, while leaving at least one heartbeat interval for a possible
// existing leader to contact the node.
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}
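
// startNode bootstraps a raft node for a new member: it creates a WAL stamped
// with the node and cluster IDs, builds the initial peer list from the cluster
// membership, and starts raft on a fresh in-memory storage.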
func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}
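
// restartNode rebuilds a raft node for an existing member from its WAL and an
// optional snapshot, restoring the recovered hard state and entries before
// restarting raft.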
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
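
// restartAsStandaloneNode recovers the WAL like restartNode, but discards
// uncommitted entries and force-appends configuration change entries that
// remove every other member, so the node restarts as a single-member cluster.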
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}