raft.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"os"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/pkg/capnslog"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
)

const (
	// Number of entries for a slow follower to catch up on after the raft
	// storage entries have been compacted.
	// We expect the follower to have millisecond-level latency to the leader.
	// The max throughput is around 10K entries/s, so keeping 5K entries is
	// enough to help a slow follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K ops/s * 1KB values).
	// Assuming the RTT is around 10ms, a 1MB max message size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// raftStatusMu protects raftStatus.
	raftStatusMu sync.Mutex
	// Indirection for the expvar func interface:
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so we register a single func that calls
	// raftStatus and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	raftDone <-chan struct{} // rx {} after raft has persisted messages
}
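
// A minimal consumer sketch (hypothetical; in etcd the real consumer is the
// EtcdServer run loop): drain the apply channel, process the batch, then wait
// on raftDone before acting on the assumption that the entries are on stable
// storage (e.g. before triggering a snapshot).
//
//	for ap := range r.apply() {
//		process(ap.entries, ap.snapshot) // hypothetical helper
//		<-ap.raftDone                    // entries are now durable
//	}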

type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access to the fields.
	index uint64
	term  uint64
	lead  uint64

	mu sync.Mutex
	// last lead elected time
	lt time.Time

	raft.Node

	// a chan to send out apply
	applyc chan apply

	// TODO: remove the etcdserver related logic from raftNode
	// TODO: add a state machine interface to apply the committed entries
	// and do snapshot/recover
	s *EtcdServer

	// utility
	ticker      <-chan time.Time
	raftStorage *raft.MemoryStorage
	storage     Storage
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should time out and reissue their messages.
	// If transport is nil, the server will panic.
	transport rafthttp.Transporter

	stopped chan struct{}
	done    chan struct{}
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
// TODO: Ideally raftNode should get rid of the passed in server structure.
func (r *raftNode) start(s *EtcdServer) {
	r.s = s
	r.applyc = make(chan apply)
	r.stopped = make(chan struct{})
	r.done = make(chan struct{})

	go func() {
		var syncC <-chan time.Time

		defer r.onStop()
		for {
			select {
			case <-r.ticker:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead {
						r.mu.Lock()
						r.lt = time.Now()
						r.mu.Unlock()
					}
					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					if rd.RaftState == raft.StateLeader {
						syncC = r.s.SyncTicker
						// TODO: remove the nil checking
						// current test utility does not provide the stats
						if r.s.stats != nil {
							r.s.stats.BecomeLeader()
					}
					} else {
						syncC = nil
					}
				}

				raftDone := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					raftDone: raftDone,
				}

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				if !raft.IsEmptySnap(rd.Snapshot) {
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
				}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				r.raftStorage.Append(rd.Entries)

				r.s.send(rd.Messages)

				raftDone <- struct{}{}
				r.Advance()
			case <-syncC:
				r.s.sync(r.s.cfg.ReqTimeout())
			case <-r.stopped:
				return
			}
		}
	}()
}
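
// Note on ordering: raft's contract is to persist rd.HardState and rd.Entries
// (plus rd.Snapshot) to stable storage, send rd.Messages, and only then call
// Advance. The loop above additionally hands rd.CommittedEntries to the
// server *before* persisting, so applying can proceed concurrently; raftDone
// is signalled only after Save and send, which is the durability barrier the
// apply consumer waits on. A condensed view of one iteration:
//
//	rd := <-r.Ready()
//	r.applyc <- apply{...}   // let the server apply concurrently
//	r.storage.Save(...)      // persist hard state and entries
//	r.s.send(rd.Messages)
//	raftDone <- struct{}{}   // batch is now durable
//	r.Advance()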

func (r *raftNode) apply() chan apply {
	return r.applyc
}

func (r *raftNode) leadElectedTime() time.Time {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.lt
}

func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicksForElection advances ticks on the node for fast election.
// This reduces the time to wait for the first leader election when
// bootstrapping the whole cluster, while leaving at least one heartbeat's
// worth of ticks for a possible existing leader to contact the node.
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}
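
// For example, with ElectionTick = 10 a fresh node is advanced by 9 ticks, so
// when bootstrapping a new cluster only one real tick remains before a
// campaign can start; a node restarted into an existing cluster still has
// that one tick of headroom in which the current leader's heartbeat can
// arrive and reset its election timer.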

func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if err = os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}

func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *cluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := newCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}

func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *cluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := newCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	// hold raftStatusMu, which the package-level comment documents as
	// protecting raftStatus (the original assignment was unsynchronized)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}
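
// Worked example (hypothetical numbers): if the WAL holds entries up to index
// 8 but st.Commit is 5, and the last known membership is {1, 2, 3} with the
// local id = 1, then entries 6-8 are discarded, ConfChangeRemoveNode entries
// for 2 and 3 are appended at indexes 6 and 7, and st.Commit is forced to 7,
// leaving a one-member cluster that can elect itself leader.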

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0)
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}
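
// For example, given a snapshot whose ConfState.Nodes is {1, 2} and entries
// that add node 3 and then remove node 1, getIDs returns [2, 3].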

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := Member{
			ID:             types.ID(self),
			RaftAttributes: RaftAttributes{PeerURLs: []string{"http://localhost:7001", "http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}
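
// Worked example (hypothetical values): createConfigChangeEnts([]uint64{1, 2, 3}, 2, 7, 9)
// yields two ConfChangeRemoveNode entries, for IDs 1 and 3, at term 7 and
// indexes 10 and 11. Because self = 2 is already in the set, no
// ConfChangeAddNode entry is appended.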