raft.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"os"
	"sort"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/pkg/capnslog"
)

const (
	// Number of entries for a slow follower to catch up on after the raft
	// storage entries have been compacted.
	// We expect the follower to have millisecond-level latency to the
	// leader. The max throughput is around 10K entries/s, so keeping 5K
	// entries is enough to help a slow follower catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, a 1MB max message size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024

	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// Indirection for the expvar func interface:
	// expvar panics when publishing a duplicate name and does not support
	// removing a registered name, so we register a single func that calls
	// raftStatus and swap raftStatus out as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} { return raftStatus() }))
}
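
// RaftTimer exposes the latest raft index and term the server has seen.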
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries and a snapshot to be applied. Once the application
// has applied all of the items, it must notify the done channel.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	done     chan struct{}
}

type raftNode struct {
	raft.Node

	// a chan to send out apply
	applyc chan apply

	// TODO: remove the etcdserver related logic from raftNode
	// TODO: add a state machine interface to apply the committed entries
	// and do snapshot/recover
	s *EtcdServer

	// utility
	ticker      <-chan time.Time
	raftStorage *raft.MemoryStorage
	storage     Storage
	// transport specifies the transport used to send and receive messages
	// to and from cluster members. Sending messages MUST NOT block; it is
	// okay to drop messages, since clients should time out and reissue
	// their messages. If transport is nil, the server will panic.
	transport rafthttp.Transporter

	// cache of the latest raft index and raft term the server has seen
	index uint64
	term  uint64
	lead  uint64

	stopped chan struct{}
	done    chan struct{}
}
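
// run is the raft goroutine's main loop: it forwards ticks to the raft
// state machine and, for every Ready, hands the committed entries and any
// snapshot to the apply channel, persists the snapshot, hard state and
// entries, appends them to the in-memory raft storage, sends the outgoing
// messages, and calls Advance once the apply batch has been acknowledged.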
func (r *raftNode) run() {
	r.stopped = make(chan struct{})
	r.done = make(chan struct{})
	var syncC <-chan time.Time

	defer r.stop()
	for {
		select {
		case <-r.ticker:
			r.Tick()
		case rd := <-r.Ready():
			if rd.SoftState != nil {
				atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
				if rd.RaftState == raft.StateLeader {
					syncC = r.s.SyncTicker
					// TODO: remove the nil checking
					// current test utility does not provide the stats
					if r.s.stats != nil {
						r.s.stats.BecomeLeader()
					}
				} else {
					syncC = nil
				}
			}

			apply := apply{
				entries:  rd.CommittedEntries,
				snapshot: rd.Snapshot,
				done:     make(chan struct{}),
			}

			select {
			case r.applyc <- apply:
			case <-r.stopped:
				return
			}

			if !raft.IsEmptySnap(rd.Snapshot) {
				if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
					plog.Fatalf("raft save snapshot error: %v", err)
				}
				r.raftStorage.ApplySnapshot(rd.Snapshot)
				plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
			}
			if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
				plog.Fatalf("raft save state and entries error: %v", err)
			}
			r.raftStorage.Append(rd.Entries)
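
			// Per the raft Ready contract, the snapshot, hard state and
			// entries above are persisted before outgoing messages are
			// sent, so a restarted member cannot contradict state its
			// peers have already observed.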
			r.s.send(rd.Messages)

			select {
			case <-apply.done:
			case <-r.stopped:
				return
			}
			r.Advance()
		case <-syncC:
			r.s.sync(defaultSyncTimeout)
		case <-r.stopped:
			return
		}
	}
}
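
// apply returns the channel on which run delivers batches of committed
// entries and snapshots for the server to apply.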
func (r *raftNode) apply() chan apply {
	return r.applyc
}
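
// stop shuts the raft node and its transport down, closes the underlying
// storage, and signals completion by closing the done channel.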
func (r *raftNode) stop() {
	r.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// pauseSending and resumeSending are for testing only; they require the
// transport to implement rafthttp.Pausable.
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}
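
// startNode bootstraps a brand-new member: it records the node and cluster
// IDs as WAL metadata, creates the snapshot directory and the WAL, and
// starts a raft node with the full initial peer list.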
func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n = raft.StartNode(c, peers)
	raftStatus = n.Status
	return
}
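
// restartNode recovers an existing member: it replays the WAL entries
// recorded after the given snapshot into a fresh memory storage, restores
// the hard state, and restarts raft with the recovered configuration.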
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *cluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := newCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}
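
// restartAsStandaloneNode recovers a member like restartNode, but rewrites
// its history so it comes back as a one-member cluster: uncommitted WAL
// entries are discarded, and config-change entries that remove every other
// member (and add this one if it is missing) are appended and force-committed.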
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *cluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
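	// Advance the commit index past the forced entries so they are treated
	// as committed when the node restarts.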
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := newCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
//   - ConfChangeAddNode, in which case the contained ID will be added into the set.
//   - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0)
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := Member{
			ID:             types.ID(self),
			RaftAttributes: RaftAttributes{PeerURLs: []string{"http://localhost:7001", "http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}
  367. }