raft.go 9.7 KB

  1. // Copyright 2015 CoreOS, Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package etcdserver
  15. import (
  16. "encoding/json"
  17. "expvar"
  18. "log"
  19. "os"
  20. "sort"
  21. "sync/atomic"
  22. "time"
  23. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  24. "github.com/coreos/etcd/pkg/pbutil"
  25. "github.com/coreos/etcd/pkg/types"
  26. "github.com/coreos/etcd/raft"
  27. "github.com/coreos/etcd/raft/raftpb"
  28. "github.com/coreos/etcd/rafthttp"
  29. "github.com/coreos/etcd/wal"
  30. "github.com/coreos/etcd/wal/walpb"
  31. )
const (
	// Number of entries for slow follower to catch-up after compacting
	// the raft storage entries.
	// We expect the follower has a millisecond level latency with the leader.
	// The max throughput is around 10K. Keep a 5K entries is enough for helping
	// follower to catch up.
	numberOfCatchUpEntries = 5000
)
var (
	// indirection for expvar func interface
	// expvar panics when publishing duplicate name
	// expvar does not support remove a registered name
	// so only register a func that calls raftStatus
	// and change raftStatus as we need.
	raftStatus func() raft.Status
)
  48. func init() {
  49. expvar.Publish("raft.status", expvar.Func(func() interface{} { return raftStatus() }))
  50. }
// RaftTimer reports the latest raft index and raft term the server has seen.
type RaftTimer interface {
	// Index returns the latest raft index.
	Index() uint64
	// Term returns the latest raft term.
	Term() uint64
}
// apply contains the entries and snapshot to be applied.
// After applying all the items, the application needs
// to send a notification to the done chan.
type apply struct {
	// committed entries to be applied by the server
	entries []raftpb.Entry
	// snapshot to be applied; may be empty (no snapshot)
	snapshot raftpb.Snapshot
	// done is closed by the consumer once entries and snapshot are applied
	done chan struct{}
}
// raftNode wraps a raft.Node together with the channels, storage and
// transport the etcd server uses to drive it (see run).
type raftNode struct {
	raft.Node

	// a chan to send out apply
	applyc chan apply

	// TODO: remove the etcdserver related logic from raftNode
	// TODO: add a state machine interface to apply the commit entries
	// and do snapshot/recover
	s *EtcdServer

	// utility
	ticker      <-chan time.Time
	raftStorage *raft.MemoryStorage
	storage     Storage
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter

	// Cache of the latest raft index and raft term the server has seen.
	// lead is written atomically in run; index/term updates are not
	// visible in this file — presumably set elsewhere (verify at callers).
	index uint64
	term  uint64
	lead  uint64
}
// run is the raft node's main loop. In a single goroutine it multiplexes:
// the periodic tick, processing of raft Ready batches, the leader-only
// sync ticker, and server shutdown. It returns (stopping the node via the
// deferred stop) when r.s.done is closed.
//
// The order of operations inside the Ready branch is deliberate: hand the
// committed entries to the applier, persist snapshot then hard state and
// entries, send outgoing messages, wait for the applier to finish, and
// only then call Advance to request the next Ready.
func (r *raftNode) run() {
	var syncC <-chan time.Time

	defer r.stop()
	for {
		select {
		case <-r.ticker:
			r.Tick()
		case rd := <-r.Ready():
			if rd.SoftState != nil {
				atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
				if rd.RaftState == raft.StateLeader {
					syncC = r.s.SyncTicker
					// TODO: remove the nil checking
					// current test utility does not provide the stats
					if r.s.stats != nil {
						r.s.stats.BecomeLeader()
					}
				} else {
					// receiving on a nil channel blocks forever, which
					// disables the sync ticker on non-leaders
					syncC = nil
				}
			}

			apply := apply{
				entries:  rd.CommittedEntries,
				snapshot: rd.Snapshot,
				done:     make(chan struct{}),
			}

			// hand the batch to the applier, or bail out on shutdown
			select {
			case r.applyc <- apply:
			case <-r.s.done:
				return
			}

			// persist an incoming snapshot before applying it to the
			// in-memory raft storage
			if !raft.IsEmptySnap(rd.Snapshot) {
				if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
					log.Fatalf("etcdraft: save snapshot error: %v", err)
				}
				r.raftStorage.ApplySnapshot(rd.Snapshot)
				log.Printf("etcdraft: applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
			}
			if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
				log.Fatalf("etcdraft: save state and entries error: %v", err)
			}
			r.raftStorage.Append(rd.Entries)

			r.s.send(rd.Messages)

			// wait until the applier has finished before telling raft it
			// may produce the next Ready
			<-apply.done
			r.Advance()
		case <-syncC:
			r.s.sync(defaultSyncTimeout)
		case <-r.s.done:
			return
		}
	}
}
// apply returns the channel on which run delivers apply batches
// (committed entries and snapshots) to the consumer.
func (r *raftNode) apply() chan apply {
	return r.applyc
}
  140. func (r *raftNode) stop() {
  141. r.Stop()
  142. r.transport.Stop()
  143. if err := r.storage.Close(); err != nil {
  144. log.Panicf("etcdraft: close storage error: %v", err)
  145. }
  146. }
  147. // for testing
  148. func (r *raftNode) pauseSending() {
  149. p := r.transport.(rafthttp.Pausable)
  150. p.Pause()
  151. }
  152. func (r *raftNode) resumeSending() {
  153. p := r.transport.(rafthttp.Pausable)
  154. p.Resume()
  155. }
// startNode bootstraps a brand-new raft node for this member: it creates
// the snapshot directory and the WAL (seeded with the node/cluster
// metadata), builds the initial peer list, and starts raft.
// ids is the full set of initial member IDs, including this member.
// It returns the local member ID, the running raft.Node, the backing
// in-memory storage, and the newly created WAL.
// Any setup failure terminates the process via log.Fatalf.
func startNode(cfg *ServerConfig, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error

	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cfg.Cluster.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatalf("etcdserver: create wal error: %v", err)
	}
	// each peer carries its marshaled Member as ConfChange context so the
	// other members can learn about it when applying the entry
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start member %s in cluster %s", id, cfg.Cluster.ID())
	s = raft.NewMemoryStorage()
	n = raft.StartNode(uint64(id), peers, cfg.ElectionTicks, 1, s)
	raftStatus = n.Status
	return
}
  186. func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
  187. var walsnap walpb.Snapshot
  188. if snapshot != nil {
  189. walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
  190. }
  191. w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
  192. cfg.Cluster.SetID(cid)
  193. log.Printf("etcdserver: restart member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
  194. s := raft.NewMemoryStorage()
  195. if snapshot != nil {
  196. s.ApplySnapshot(*snapshot)
  197. }
  198. s.SetHardState(st)
  199. s.Append(ents)
  200. n := raft.RestartNode(uint64(id), cfg.ElectionTicks, 1, s, 0)
  201. raftStatus = n.Status
  202. return id, n, s, w
  203. }
// restartAsStandaloneNode restarts this member forcing it into a
// single-node cluster: it reads the WAL, drops any entries that were
// never committed, force-appends configuration-change entries that remove
// every other member (adding this one back if it is missing), and marks
// those entries as committed before restarting raft.
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
	cfg.Cluster.SetID(cid)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			log.Printf("etcdserver: discarding %d uncommitted WAL entries ", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		log.Fatalf("etcdserver: %v", err)
	}
	if len(ents) != 0 {
		// advance the commit index past the forced entries
		st.Commit = ents[len(ents)-1].Index
	}

	log.Printf("etcdserver: forcing restart of member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	n := raft.RestartNode(uint64(id), cfg.ElectionTicks, 1, s, 0)
	raftStatus = n.Status
	return id, n, s, w
}
// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	// seed the set with the membership recorded in the snapshot, if any
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	// replay the conf-change entries on top of the snapshot membership
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		default:
			log.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	// return the surviving IDs in ascending order for determinism
	sids := make(types.Uint64Slice, 0)
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}
  275. // createConfigChangeEnts creates a series of Raft entries (i.e.
  276. // EntryConfChange) to remove the set of given IDs from the cluster. The ID
  277. // `self` is _not_ removed, even if present in the set.
  278. // If `self` is not inside the given ids, it creates a Raft entry to add a
  279. // default member with the given `self`.
  280. func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
  281. ents := make([]raftpb.Entry, 0)
  282. next := index + 1
  283. found := false
  284. for _, id := range ids {
  285. if id == self {
  286. found = true
  287. continue
  288. }
  289. cc := &raftpb.ConfChange{
  290. Type: raftpb.ConfChangeRemoveNode,
  291. NodeID: id,
  292. }
  293. e := raftpb.Entry{
  294. Type: raftpb.EntryConfChange,
  295. Data: pbutil.MustMarshal(cc),
  296. Term: term,
  297. Index: next,
  298. }
  299. ents = append(ents, e)
  300. next++
  301. }
  302. if !found {
  303. m := Member{
  304. ID: types.ID(self),
  305. RaftAttributes: RaftAttributes{PeerURLs: []string{"http://localhost:7001", "http://localhost:2380"}},
  306. }
  307. ctx, err := json.Marshal(m)
  308. if err != nil {
  309. log.Panicf("marshal member should never fail: %v", err)
  310. }
  311. cc := &raftpb.ConfChange{
  312. Type: raftpb.ConfChangeAddNode,
  313. NodeID: self,
  314. Context: ctx,
  315. }
  316. e := raftpb.Entry{
  317. Type: raftpb.EntryConfChange,
  318. Data: pbutil.MustMarshal(cc),
  319. Term: term,
  320. Index: next,
  321. }
  322. ents = append(ents, e)
  323. }
  324. return ents
  325. }