// raft.go — a key-value stream backed by raft (etcd raftexample).
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package main
  15. import (
  16. "fmt"
  17. "log"
  18. "os"
  19. "strconv"
  20. "time"
  21. "net/http"
  22. "net/url"
  23. "github.com/coreos/etcd/etcdserver/stats"
  24. "github.com/coreos/etcd/pkg/fileutil"
  25. "github.com/coreos/etcd/pkg/types"
  26. "github.com/coreos/etcd/raft"
  27. "github.com/coreos/etcd/raft/raftpb"
  28. "github.com/coreos/etcd/rafthttp"
  29. "github.com/coreos/etcd/snap"
  30. "github.com/coreos/etcd/wal"
  31. "github.com/coreos/etcd/wal/walpb"
  32. "golang.org/x/net/context"
  33. )
// A key-value stream backed by raft
type raftNode struct {
	proposeC    <-chan string            // proposed messages (k,v)
	confChangeC <-chan raftpb.ConfChange // proposed cluster config changes
	commitC     chan<- *string           // entries committed to log (k,v)
	errorC      chan<- error             // errors from raft session

	id          int      // client ID for raft session
	peers       []string // raft peer URLs
	join        bool     // node is joining an existing cluster
	waldir      string   // path to WAL directory
	snapdir     string   // path to snapshot directory
	getSnapshot func() ([]byte, error)   // callback producing a serialized app snapshot
	lastIndex   uint64   // index of log at start

	confState     raftpb.ConfState
	snapshotIndex uint64 // index of the most recent snapshot
	appliedIndex  uint64 // index of the last entry published to commitC

	// raft backing for the commit/error channel
	node        raft.Node
	raftStorage *raft.MemoryStorage
	wal         *wal.WAL

	snapshotter      *snap.Snapshotter
	snapshotterReady chan *snap.Snapshotter // signals when snapshotter is ready

	snapCount uint64 // trigger a snapshot after this many applied entries
	transport *rafthttp.Transport
	stopc     chan struct{} // signals proposal channel closed
	httpstopc chan struct{} // signals http server to shutdown
	httpdonec chan struct{} // signals http server shutdown complete
}
  62. var defaultSnapCount uint64 = 10000
  63. // newRaftNode initiates a raft instance and returns a committed log entry
  64. // channel and error channel. Proposals for log updates are sent over the
  65. // provided the proposal channel. All log entries are replayed over the
  66. // commit channel, followed by a nil message (to indicate the channel is
  67. // current), then new log entries. To shutdown, close proposeC and read errorC.
  68. func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), proposeC <-chan string,
  69. confChangeC <-chan raftpb.ConfChange) (<-chan *string, <-chan error, <-chan *snap.Snapshotter) {
  70. commitC := make(chan *string)
  71. errorC := make(chan error)
  72. rc := &raftNode{
  73. proposeC: proposeC,
  74. confChangeC: confChangeC,
  75. commitC: commitC,
  76. errorC: errorC,
  77. id: id,
  78. peers: peers,
  79. join: join,
  80. waldir: fmt.Sprintf("raftexample-%d", id),
  81. snapdir: fmt.Sprintf("raftexample-%d-snap", id),
  82. getSnapshot: getSnapshot,
  83. raftStorage: raft.NewMemoryStorage(),
  84. snapCount: defaultSnapCount,
  85. stopc: make(chan struct{}),
  86. httpstopc: make(chan struct{}),
  87. httpdonec: make(chan struct{}),
  88. snapshotterReady: make(chan *snap.Snapshotter, 1),
  89. // rest of structure populated after WAL replay
  90. }
  91. go rc.startRaft()
  92. return commitC, errorC, rc.snapshotterReady
  93. }
  94. func (rc *raftNode) saveSnap(snap raftpb.Snapshot) error {
  95. if err := rc.snapshotter.SaveSnap(snap); err != nil {
  96. return err
  97. }
  98. walSnap := walpb.Snapshot{
  99. Index: snap.Metadata.Index,
  100. Term: snap.Metadata.Term,
  101. }
  102. if err := rc.wal.SaveSnapshot(walSnap); err != nil {
  103. return err
  104. }
  105. return rc.wal.ReleaseLockTo(snap.Metadata.Index)
  106. }
  107. func (rc *raftNode) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) {
  108. if len(ents) == 0 {
  109. return
  110. }
  111. firstIdx := ents[0].Index
  112. if firstIdx > rc.appliedIndex+1 {
  113. log.Fatalf("first index of committed entry[%d] should <= progress.appliedIndex[%d] 1", firstIdx, rc.appliedIndex)
  114. }
  115. if rc.appliedIndex-firstIdx+1 < uint64(len(ents)) {
  116. nents = ents[rc.appliedIndex-firstIdx+1:]
  117. }
  118. return
  119. }
// publishEntries writes committed log entries to commit channel and returns
// whether all entries could be published.
func (rc *raftNode) publishEntries(ents []raftpb.Entry) bool {
	for i := range ents {
		switch ents[i].Type {
		case raftpb.EntryNormal:
			if len(ents[i].Data) == 0 {
				// ignore empty messages
				break
			}
			s := string(ents[i].Data)
			select {
			case rc.commitC <- &s:
			case <-rc.stopc:
				// shutting down; abandon publishing
				return false
			}

		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			cc.Unmarshal(ents[i].Data)
			rc.node.ApplyConfChange(cc)
			switch cc.Type {
			case raftpb.ConfChangeAddNode:
				if len(cc.Context) > 0 {
					// cc.Context carries the new peer's URL
					rc.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)})
				}
			case raftpb.ConfChangeRemoveNode:
				if cc.NodeID == uint64(rc.id) {
					log.Println("I've been removed from the cluster! Shutting down.")
					return false
				}
				rc.transport.RemovePeer(types.ID(cc.NodeID))
			}
		}

		// after commit, update appliedIndex
		rc.appliedIndex = ents[i].Index

		// special nil commit to signal replay has finished
		if ents[i].Index == rc.lastIndex {
			select {
			case rc.commitC <- nil:
			case <-rc.stopc:
				return false
			}
		}
	}
	return true
}
  166. // openWAL returns a WAL ready for reading.
  167. func (rc *raftNode) openWAL() *wal.WAL {
  168. if !wal.Exist(rc.waldir) {
  169. if err := os.Mkdir(rc.waldir, 0750); err != nil {
  170. log.Fatalf("raftexample: cannot create dir for wal (%v)", err)
  171. }
  172. w, err := wal.Create(rc.waldir, nil)
  173. if err != nil {
  174. log.Fatalf("raftexample: create wal error (%v)", err)
  175. }
  176. w.Close()
  177. }
  178. w, err := wal.Open(rc.waldir, walpb.Snapshot{})
  179. if err != nil {
  180. log.Fatalf("raftexample: error loading wal (%v)", err)
  181. }
  182. return w
  183. }
// replayWAL replays WAL entries into the raft instance.
func (rc *raftNode) replayWAL() *wal.WAL {
	w := rc.openWAL()
	_, st, ents, err := w.ReadAll()
	if err != nil {
		log.Fatalf("raftexample: failed to read WAL (%v)", err)
	}
	// append to storage so raft starts at the right place in log
	rc.raftStorage.Append(ents)
	// send nil once lastIndex is published so client knows commit channel is current
	if len(ents) > 0 {
		rc.lastIndex = ents[len(ents)-1].Index
	} else {
		// nothing to replay: signal "channel is current" immediately
		rc.commitC <- nil
	}
	rc.raftStorage.SetHardState(st)
	return w
}
// writeError surfaces a fatal transport error to the client: it stops the
// HTTP server, closes the commit channel, forwards the error on errorC
// (then closes it), and stops the raft node. The order matters: commitC is
// closed before errorC so readers drain commits before seeing the error.
func (rc *raftNode) writeError(err error) {
	rc.stopHTTP()
	close(rc.commitC)
	rc.errorC <- err
	close(rc.errorC)
	rc.node.Stop()
}
// startRaft creates the snapshot directory and snapshotter, replays any
// existing WAL, starts (or restarts) the raft node, wires the rafthttp
// transport to all peers, and launches the HTTP and channel-serving loops.
func (rc *raftNode) startRaft() {
	if !fileutil.Exist(rc.snapdir) {
		if err := os.Mkdir(rc.snapdir, 0750); err != nil {
			log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
		}
	}
	rc.snapshotter = snap.New(rc.snapdir)
	// snapshotterReady is buffered (capacity 1), so this send does not block
	rc.snapshotterReady <- rc.snapshotter

	oldwal := wal.Exist(rc.waldir)
	rc.wal = rc.replayWAL()

	rpeers := make([]raft.Peer, len(rc.peers))
	for i := range rpeers {
		// peer IDs are 1-based
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
	}
	c := &raft.Config{
		ID:              uint64(rc.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rc.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}

	if oldwal {
		// a pre-existing WAL means this node has state to recover
		rc.node = raft.RestartNode(c)
	} else {
		startPeers := rpeers
		if rc.join {
			// joining an existing cluster: do not bootstrap with a peer list
			startPeers = nil
		}
		rc.node = raft.StartNode(c, startPeers)
	}

	ss := &stats.ServerStats{}
	ss.Initialize()

	rc.transport = &rafthttp.Transport{
		ID:          types.ID(rc.id),
		ClusterID:   0x1000,
		Raft:        rc, // rc implements the transport's Raft interface (Process etc.)
		ServerStats: ss,
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
		ErrorC:      make(chan error),
	}

	rc.transport.Start()
	for i := range rc.peers {
		if i+1 != rc.id { // skip self
			rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
		}
	}

	go rc.serveRaft()
	go rc.serveChannels()
}
// stop closes http, closes all channels, and stops raft.
func (rc *raftNode) stop() {
	rc.stopHTTP()
	close(rc.commitC)
	close(rc.errorC)
	rc.node.Stop()
}
// stopHTTP stops the rafthttp transport, signals the peer HTTP server to
// shut down, and blocks until serveRaft signals completion on httpdonec.
func (rc *raftNode) stopHTTP() {
	rc.transport.Stop()
	close(rc.httpstopc)
	<-rc.httpdonec
}
  271. func (rc *raftNode) publishSnapshot(snapshotToSave raftpb.Snapshot) {
  272. if raft.IsEmptySnap(snapshotToSave) {
  273. return
  274. }
  275. log.Printf("publishing snapshot at index %d", rc.snapshotIndex)
  276. defer log.Printf("finished publishing snapshot at index %d", rc.snapshotIndex)
  277. if snapshotToSave.Metadata.Index <= rc.appliedIndex {
  278. log.Fatalf("snapshot index [%d] should > progress.appliedIndex [%d] + 1", snapshotToSave.Metadata.Index, rc.appliedIndex)
  279. }
  280. rc.commitC <- nil // trigger kvstore to load snapshot
  281. rc.confState = snapshotToSave.Metadata.ConfState
  282. rc.snapshotIndex = snapshotToSave.Metadata.Index
  283. rc.appliedIndex = snapshotToSave.Metadata.Index
  284. }
  285. var snapshotCatchUpEntriesN uint64 = 10000
  286. func (rc *raftNode) maybeTriggerSnapshot() {
  287. if rc.appliedIndex-rc.snapshotIndex <= rc.snapCount {
  288. return
  289. }
  290. log.Printf("start snapshot [applied index: %d | last snapshot index: %d]", rc.appliedIndex, rc.snapshotIndex)
  291. data, err := rc.getSnapshot()
  292. if err != nil {
  293. log.Panic(err)
  294. }
  295. snap, err := rc.raftStorage.CreateSnapshot(rc.appliedIndex, &rc.confState, data)
  296. if err != nil {
  297. panic(err)
  298. }
  299. if err := rc.saveSnap(snap); err != nil {
  300. panic(err)
  301. }
  302. compactIndex := uint64(1)
  303. if rc.appliedIndex > snapshotCatchUpEntriesN {
  304. compactIndex = rc.appliedIndex - snapshotCatchUpEntriesN
  305. }
  306. if err := rc.raftStorage.Compact(compactIndex); err != nil {
  307. panic(err)
  308. }
  309. log.Printf("compacted log at index %d", compactIndex)
  310. rc.snapshotIndex = rc.appliedIndex
  311. }
// serveChannels is the main raft driver: a helper goroutine forwards client
// proposals and config changes into raft, while this goroutine ticks the
// node, persists Ready state to the WAL and storage, sends messages to
// peers, and publishes committed entries to commitC.
func (rc *raftNode) serveChannels() {
	snap, err := rc.raftStorage.Snapshot()
	if err != nil {
		panic(err)
	}
	// resume from whatever the storage snapshot says was last applied
	rc.confState = snap.Metadata.ConfState
	rc.snapshotIndex = snap.Metadata.Index
	rc.appliedIndex = snap.Metadata.Index

	defer rc.wal.Close()

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	// send proposals over raft
	go func() {
		var confChangeCount uint64 = 0

		for rc.proposeC != nil && rc.confChangeC != nil {
			select {
			case prop, ok := <-rc.proposeC:
				if !ok {
					// channel closed: nil it so this select arm blocks forever
					rc.proposeC = nil
				} else {
					// blocks until accepted by raft state machine
					rc.node.Propose(context.TODO(), []byte(prop))
				}

			case cc, ok := <-rc.confChangeC:
				if !ok {
					rc.confChangeC = nil
				} else {
					// tag each conf change with a monotonically increasing ID
					confChangeCount += 1
					cc.ID = confChangeCount
					rc.node.ProposeConfChange(context.TODO(), cc)
				}
			}
		}
		// client closed channel; shutdown raft if not already
		close(rc.stopc)
	}()

	// event loop on raft state machine updates
	for {
		select {
		case <-ticker.C:
			rc.node.Tick()

		// store raft entries to wal, then publish over commit channel
		case rd := <-rc.node.Ready():
			rc.wal.Save(rd.HardState, rd.Entries)
			if !raft.IsEmptySnap(rd.Snapshot) {
				rc.saveSnap(rd.Snapshot)
				rc.raftStorage.ApplySnapshot(rd.Snapshot)
				rc.publishSnapshot(rd.Snapshot)
			}
			rc.raftStorage.Append(rd.Entries)
			rc.transport.Send(rd.Messages)
			if ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries)); !ok {
				rc.stop()
				return
			}
			rc.maybeTriggerSnapshot()
			rc.node.Advance()

		case err := <-rc.transport.ErrorC:
			rc.writeError(err)
			return

		case <-rc.stopc:
			rc.stop()
			return
		}
	}
}
  378. func (rc *raftNode) serveRaft() {
  379. url, err := url.Parse(rc.peers[rc.id-1])
  380. if err != nil {
  381. log.Fatalf("raftexample: Failed parsing URL (%v)", err)
  382. }
  383. ln, err := newStoppableListener(url.Host, rc.httpstopc)
  384. if err != nil {
  385. log.Fatalf("raftexample: Failed to listen rafthttp (%v)", err)
  386. }
  387. err = (&http.Server{Handler: rc.transport.Handler()}).Serve(ln)
  388. select {
  389. case <-rc.httpstopc:
  390. default:
  391. log.Fatalf("raftexample: Failed to serve rafthttp (%v)", err)
  392. }
  393. close(rc.httpdonec)
  394. }
// Process steps a raft message received from a peer into the local raft
// state machine. rc is handed to rafthttp.Transport as its Raft field, which
// delivers incoming peer messages through this method.
func (rc *raftNode) Process(ctx context.Context, m raftpb.Message) error {
	return rc.node.Step(ctx, m)
}
// IsIDRemoved always reports false: this example never marks peer IDs as
// permanently removed at the transport level.
func (rc *raftNode) IsIDRemoved(id uint64) bool { return false }

// ReportUnreachable is a no-op in this example.
func (rc *raftNode) ReportUnreachable(id uint64) {}

// ReportSnapshot is a no-op in this example.
func (rc *raftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) {}