// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"os"
	"sort"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/pkg/capnslog"
)

const (
	// Number of entries kept for a slow follower to catch up on after
	// the raft storage entries are compacted.
	// We expect the follower to be within millisecond-level latency of the
	// leader. The max throughput is around 10K entries/s, so keeping 5K
	// entries is enough for the follower to catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024
	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// indirection for the expvar func interface;
	// expvar panics when publishing a duplicate name
	// and does not support removing a registered name,
	// so only register a func that calls raftStatus
	// and change raftStatus as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} { return raftStatus() }))
}
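
// RaftTimer reports the latest raft index and term the server has seen.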
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. After
// applying all the items, the application needs to send a
// notification on the done chan.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	done     chan struct{}
}

type raftNode struct {
	raft.Node

	// a chan to send out apply
	applyc chan apply

	// TODO: remove the etcdserver related logic from raftNode
	// TODO: add a state machine interface to apply the committed entries
	// and do snapshot/recover
	s *EtcdServer

	// utility
	ticker      <-chan time.Time
	raftStorage *raft.MemoryStorage
	storage     Storage
	// transport specifies the transport to send and receive messages
	// to and from members. Sending messages MUST NOT block. It is okay
	// to drop messages, since clients should time out and reissue their
	// messages. If transport is nil, the server will panic.
	transport rafthttp.Transporter

	// Cache of the latest raft index and raft term the server has seen
	index uint64
	term  uint64
	lead  uint64

	stopped chan struct{}
	done    chan struct{}
}
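
// run is the raft event loop. It drives clock ticks into the raft state
// machine and, for every Ready batch, persists state, sends messages to
// peers, and hands committed entries to the apply loop before calling
// Advance to request the next batch.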
func (r *raftNode) run() {
	var syncC <-chan time.Time

	defer r.stop()
	for {
		select {
		case <-r.ticker:
			r.Tick()
		case rd := <-r.Ready():
			if rd.SoftState != nil {
				atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
				if rd.RaftState == raft.StateLeader {
					syncC = r.s.SyncTicker
					// TODO: remove the nil checking
					// current test utility does not provide the stats
					if r.s.stats != nil {
						r.s.stats.BecomeLeader()
					}
				} else {
					syncC = nil
				}
			}
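
			// Publish the committed entries and snapshot to the apply
			// loop before persisting; run waits on apply.done below, so
			// raft is not advanced until they have been applied.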
			apply := apply{
				entries:  rd.CommittedEntries,
				snapshot: rd.Snapshot,
				done:     make(chan struct{}),
			}

			select {
			case r.applyc <- apply:
			case <-r.stopped:
				return
			}
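
			// Save the incoming snapshot to stable storage and apply it
			// to the in-memory raft storage before persisting the new
			// hard state and entries.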
			if !raft.IsEmptySnap(rd.Snapshot) {
				if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
					plog.Fatalf("raft save snapshot error: %v", err)
				}
				r.raftStorage.ApplySnapshot(rd.Snapshot)
				plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
			}
			if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
				plog.Fatalf("raft save state and entries error: %v", err)
			}
			r.raftStorage.Append(rd.Entries)

			r.s.send(rd.Messages)

			select {
			case <-apply.done:
			case <-r.stopped:
				return
			}
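
			// The Ready has been fully persisted, sent, and applied;
			// let raft prepare the next batch.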
			r.Advance()
		case <-syncC:
			r.s.sync(defaultSyncTimeout)
		case <-r.stopped:
			return
		}
	}
}

func (r *raftNode) apply() chan apply {
	return r.applyc
}

func (r *raftNode) stop() {
	r.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}
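
// startNode bootstraps a brand-new member: it creates the snapshot directory
// and a WAL stamped with the member and cluster IDs, builds the initial peer
// list from the given member IDs, and starts a fresh raft node backed by an
// empty in-memory storage.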
func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n = raft.StartNode(c, peers)
	raftStatus = n.Status
	return
}
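
// restartNode recovers a previously running member: it replays the WAL
// entries taken after the given snapshot (if any) into a fresh in-memory
// storage and restarts raft with the existing membership unchanged.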
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *cluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := newCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}
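
// restartAsStandaloneNode restarts a member as a single-node cluster (the
// force-new-cluster path): it drops uncommitted WAL entries, force-appends
// config-change entries that remove every other member, and marks those
// entries committed before restarting raft.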
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *cluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit the newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := newCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0)
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
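//
// For example, calling this with ids={1, 2, 3} and self=1 yields
// ConfChangeRemoveNode entries for members 2 and 3 at indexes index+1 and
// index+2, leaving member 1 as the only member.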
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := Member{
			ID:             types.ID(self),
			RaftAttributes: RaftAttributes{PeerURLs: []string{"http://localhost:7001", "http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}