rawnode_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package raft

import (
	"bytes"
	"context"
	"fmt"
	"reflect"
	"testing"

	"go.etcd.io/etcd/raft/raftpb"
)

// rawNodeAdapter is essentially a lint that makes sure that RawNode implements
// "most of" Node. The exceptions (some of which are easy to fix) are listed
// below.
type rawNodeAdapter struct {
	*RawNode
}

var _ Node = (*rawNodeAdapter)(nil)

// Node specifies lead, which is pointless, can just be filled in.
func (a *rawNodeAdapter) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	a.RawNode.TransferLeader(transferee)
}

// Node has a goroutine, RawNode doesn't need this.
func (a *rawNodeAdapter) Stop() {}

// RawNode returns a *Status.
func (a *rawNodeAdapter) Status() Status { return *a.RawNode.Status() }

// RawNode takes a Ready. It doesn't really have to do that I think? It can hold on
// to it internally. But maybe that approach is frail.
func (a *rawNodeAdapter) Advance() { a.RawNode.Advance(Ready{}) }

// RawNode returns a Ready, not a chan of one.
func (a *rawNodeAdapter) Ready() <-chan Ready { return nil }

// Node takes more contexts. Easy enough to fix.

func (a *rawNodeAdapter) Campaign(context.Context) error { return a.RawNode.Campaign() }
func (a *rawNodeAdapter) ReadIndex(_ context.Context, rctx []byte) error {
	a.RawNode.ReadIndex(rctx)
	// RawNode swallowed the error in ReadIndex, it probably should not do that.
	return nil
}
func (a *rawNodeAdapter) Step(_ context.Context, m raftpb.Message) error { return a.RawNode.Step(m) }
func (a *rawNodeAdapter) Propose(_ context.Context, data []byte) error   { return a.RawNode.Propose(data) }
func (a *rawNodeAdapter) ProposeConfChange(_ context.Context, cc raftpb.ConfChange) error {
	return a.RawNode.ProposeConfChange(cc)
}
// TestRawNodeStep ensures that RawNode.Step ignores local messages.
func TestRawNodeStep(t *testing.T) {
	for i, msgn := range raftpb.MessageType_name {
		s := NewMemoryStorage()
		rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})
		if err != nil {
			t.Fatal(err)
		}
		msgt := raftpb.MessageType(i)
		err = rawNode.Step(raftpb.Message{Type: msgt})
		// LocalMsg should be ignored.
		if IsLocalMsg(msgt) {
			if err != ErrStepLocalMsg {
				t.Errorf("%d: step should ignore %s", msgt, msgn)
			}
		}
	}
}

// TestNodeStepUnblock from node_test.go has no equivalent in rawNode because there is
// no goroutine in RawNode.

// TestRawNodeProposeAndConfChange ensures that RawNode.Propose and RawNode.ProposeConfChange
// send the given proposal and ConfChange to the underlying raft.
func TestRawNodeProposeAndConfChange(t *testing.T) {
	s := NewMemoryStorage()
	var err error
	rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
	rd := rawNode.Ready()
	s.Append(rd.Entries)
	rawNode.Advance(rd)
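	// The bootstrap entries have been persisted and acknowledged above, so the
	// follow-up Ready should be empty: no HardState change, no entries, no sync.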
	if d := rawNode.Ready(); d.MustSync || !IsEmptyHardState(d.HardState) || len(d.Entries) > 0 {
		t.Fatalf("expected empty hard state with must-sync=false: %#v", d)
	}

	rawNode.Campaign()
	proposed := false
	var (
		lastIndex uint64
		ccdata    []byte
	)
	for {
		rd = rawNode.Ready()
		s.Append(rd.Entries)

		// Once we are the leader, propose a command and a ConfChange.
		if !proposed && rd.SoftState.Lead == rawNode.raft.id {
			rawNode.Propose([]byte("somedata"))

			cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
			ccdata, err = cc.Marshal()
			if err != nil {
				t.Fatal(err)
			}
			rawNode.ProposeConfChange(cc)

			proposed = true
		}
		rawNode.Advance(rd)

		// Exit when we have four entries: one ConfChange, one no-op for the election,
		// our proposed command and proposed ConfChange.
		lastIndex, err = s.LastIndex()
		if err != nil {
			t.Fatal(err)
		}
		if lastIndex >= 4 {
			break
		}
	}

	entries, err := s.Entries(lastIndex-1, lastIndex+1, noLimit)
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 2 {
		t.Fatalf("len(entries) = %d, want %d", len(entries), 2)
	}
	if !bytes.Equal(entries[0].Data, []byte("somedata")) {
		t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, []byte("somedata"))
	}
	if entries[1].Type != raftpb.EntryConfChange {
		t.Fatalf("type = %v, want %v", entries[1].Type, raftpb.EntryConfChange)
	}
	if !bytes.Equal(entries[1].Data, ccdata) {
		t.Errorf("data = %v, want %v", entries[1].Data, ccdata)
	}
}
// TestRawNodeProposeAddDuplicateNode ensures that proposing the same node twice does
// not affect a later proposal to add a new node.
func TestRawNodeProposeAddDuplicateNode(t *testing.T) {
	s := NewMemoryStorage()
	rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
	rd := rawNode.Ready()
	s.Append(rd.Entries)
	rawNode.Advance(rd)

	rawNode.Campaign()
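	// Drive the Ready/Advance loop until this node observes itself as leader.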
	for {
		rd = rawNode.Ready()
		s.Append(rd.Entries)
		if rd.SoftState.Lead == rawNode.raft.id {
			rawNode.Advance(rd)
			break
		}
		rawNode.Advance(rd)
	}
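
	// proposeConfChangeAndApply proposes cc, persists the resulting entries,
	// applies any committed conf-change entries, and advances the RawNode.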
	proposeConfChangeAndApply := func(cc raftpb.ConfChange) {
		rawNode.ProposeConfChange(cc)
		rd = rawNode.Ready()
		s.Append(rd.Entries)
		for _, entry := range rd.CommittedEntries {
			if entry.Type == raftpb.EntryConfChange {
				var cc raftpb.ConfChange
				cc.Unmarshal(entry.Data)
				rawNode.ApplyConfChange(cc)
			}
		}
		rawNode.Advance(rd)
	}

	cc1 := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
	ccdata1, err := cc1.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	proposeConfChangeAndApply(cc1)

	// try to add the same node again
	proposeConfChangeAndApply(cc1)

	// the new node join should be ok
	cc2 := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2}
	ccdata2, err := cc2.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	proposeConfChangeAndApply(cc2)

	lastIndex, err := s.LastIndex()
	if err != nil {
		t.Fatal(err)
	}

	// the last three entries should be: ConfChange cc1, cc1, cc2
	entries, err := s.Entries(lastIndex-2, lastIndex+1, noLimit)
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 3 {
		t.Fatalf("len(entries) = %d, want %d", len(entries), 3)
	}
	if !bytes.Equal(entries[0].Data, ccdata1) {
		t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, ccdata1)
	}
	if !bytes.Equal(entries[2].Data, ccdata2) {
		t.Errorf("entries[2].Data = %v, want %v", entries[2].Data, ccdata2)
	}
}
// TestRawNodeReadIndex ensures that RawNode.ReadIndex sends the MsgReadIndex message
// to the underlying raft. It also ensures that ReadState can be read out.
func TestRawNodeReadIndex(t *testing.T) {
	msgs := []raftpb.Message{}
	appendStep := func(r *raft, m raftpb.Message) error {
		msgs = append(msgs, m)
		return nil
	}
	wrs := []ReadState{{Index: uint64(1), RequestCtx: []byte("somedata")}}

	s := NewMemoryStorage()
	c := newTestConfig(1, nil, 10, 1, s)
	rawNode, err := NewRawNode(c, []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
	rawNode.raft.readStates = wrs
	// ensure the ReadStates can be read out
	hasReady := rawNode.HasReady()
	if !hasReady {
		t.Errorf("HasReady() returns %t, want %t", hasReady, true)
	}
	rd := rawNode.Ready()
	if !reflect.DeepEqual(rd.ReadStates, wrs) {
		t.Errorf("ReadStates = %d, want %d", rd.ReadStates, wrs)
	}
	s.Append(rd.Entries)
	rawNode.Advance(rd)
	// ensure raft.readStates is reset after advance
	if rawNode.raft.readStates != nil {
		t.Errorf("readStates = %v, want %v", rawNode.raft.readStates, nil)
	}

	wrequestCtx := []byte("somedata2")
	rawNode.Campaign()
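	// Loop until this node becomes leader; once it is, swap in the capturing
	// step function and issue the ReadIndex request.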
	for {
		rd = rawNode.Ready()
		s.Append(rd.Entries)

		if rd.SoftState.Lead == rawNode.raft.id {
			rawNode.Advance(rd)

			// Once we are the leader, issue a ReadIndex request
			rawNode.raft.step = appendStep
			rawNode.ReadIndex(wrequestCtx)
			break
		}
		rawNode.Advance(rd)
	}
	// ensure that MsgReadIndex message is sent to the underlying raft
	if len(msgs) != 1 {
		t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
	}
	if msgs[0].Type != raftpb.MsgReadIndex {
		t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgReadIndex)
	}
	if !bytes.Equal(msgs[0].Entries[0].Data, wrequestCtx) {
		t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, wrequestCtx)
	}
}

// TestBlockProposal from node_test.go has no equivalent in rawNode because there is
// no leader check in RawNode.

// TestNodeTick from node_test.go has no equivalent in rawNode because
// it reaches into the raft object which is not exposed.

// TestNodeStop from node_test.go has no equivalent in rawNode because there is
// no goroutine in RawNode.

// TestRawNodeStart ensures that a node can be started correctly. The node should
// start with correct configuration change entries, and can accept and commit
// proposals.
func TestRawNodeStart(t *testing.T) {
	cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
	ccdata, err := cc.Marshal()
	if err != nil {
		t.Fatalf("unexpected marshal error: %v", err)
	}
	wants := []Ready{
		{
			HardState: raftpb.HardState{Term: 1, Commit: 1, Vote: 0},
			Entries: []raftpb.Entry{
				{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
			},
			CommittedEntries: []raftpb.Entry{
				{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
			},
			MustSync: true,
		},
		{
			HardState:        raftpb.HardState{Term: 2, Commit: 3, Vote: 1},
			Entries:          []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
			CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
			MustSync:         true,
		},
	}

	storage := NewMemoryStorage()
	rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
	rd := rawNode.Ready()
	t.Logf("rd %v", rd)
	if !reflect.DeepEqual(rd, wants[0]) {
		t.Fatalf("#%d: g = %+v,\n w %+v", 1, rd, wants[0])
	} else {
		storage.Append(rd.Entries)
		rawNode.Advance(rd)
	}
	storage.Append(rd.Entries)
	rawNode.Advance(rd)

	rawNode.Campaign()
	rd = rawNode.Ready()
	storage.Append(rd.Entries)
	rawNode.Advance(rd)

	rawNode.Propose([]byte("foo"))
	if rd = rawNode.Ready(); !reflect.DeepEqual(rd, wants[1]) {
		t.Errorf("#%d: g = %+v,\n w %+v", 2, rd, wants[1])
	} else {
		storage.Append(rd.Entries)
		rawNode.Advance(rd)
	}

	if rawNode.HasReady() {
		t.Errorf("unexpected Ready: %+v", rawNode.Ready())
	}
}

func TestRawNodeRestart(t *testing.T) {
	entries := []raftpb.Entry{
		{Term: 1, Index: 1},
		{Term: 1, Index: 2, Data: []byte("foo")},
	}
	st := raftpb.HardState{Term: 1, Commit: 1}

	want := Ready{
		HardState: emptyState,
		// commit up to commit index in st
		CommittedEntries: entries[:st.Commit],
		MustSync:         false,
	}

	storage := NewMemoryStorage()
	storage.SetHardState(st)
	storage.Append(entries)
	rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage), nil)
	if err != nil {
		t.Fatal(err)
	}
	rd := rawNode.Ready()
	if !reflect.DeepEqual(rd, want) {
		t.Errorf("g = %+v,\n w %+v", rd, want)
	}
	rawNode.Advance(rd)
	if rawNode.HasReady() {
		t.Errorf("unexpected Ready: %+v", rawNode.Ready())
	}
}

func TestRawNodeRestartFromSnapshot(t *testing.T) {
	snap := raftpb.Snapshot{
		Metadata: raftpb.SnapshotMetadata{
			ConfState: raftpb.ConfState{Nodes: []uint64{1, 2}},
			Index:     2,
			Term:      1,
		},
	}
	entries := []raftpb.Entry{
		{Term: 1, Index: 3, Data: []byte("foo")},
	}
	st := raftpb.HardState{Term: 1, Commit: 3}

	want := Ready{
		HardState: emptyState,
		// commit up to commit index in st
		CommittedEntries: entries,
		MustSync:         false,
	}

	s := NewMemoryStorage()
	s.SetHardState(st)
	s.ApplySnapshot(snap)
	s.Append(entries)
	rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s), nil)
	if err != nil {
		t.Fatal(err)
	}
	if rd := rawNode.Ready(); !reflect.DeepEqual(rd, want) {
		t.Errorf("g = %+v,\n w %+v", rd, want)
	} else {
		rawNode.Advance(rd)
	}
	if rawNode.HasReady() {
		t.Errorf("unexpected Ready: %+v", rawNode.Ready())
	}
}

// TestNodeAdvance from node_test.go has no equivalent in rawNode because there is
// no dependency check between Ready() and Advance().

func TestRawNodeStatus(t *testing.T) {
	storage := NewMemoryStorage()
	rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
	status := rawNode.Status()
	if status == nil {
		t.Errorf("expected status struct, got nil")
	}
}

// TestRawNodeCommitPaginationAfterRestart is the RawNode version of
// TestNodeCommitPaginationAfterRestart. The anomaly here was even worse as the
// Raft group would forget to apply entries:
//
// - node learns that index 11 is committed
// - nextEnts returns index 1..10 in CommittedEntries (but index 10 already
//   exceeds maxBytes), which isn't noticed internally by Raft
// - Commit index gets bumped to 10
// - the node persists the HardState, but crashes before applying the entries
// - upon restart, the storage returns the same entries, but `slice` takes a
//   different code path and removes the last entry.
// - Raft does not emit a HardState, but when the app calls Advance(), it bumps
//   its internal applied index cursor to 10 (when it should be 9)
// - the next Ready asks the app to apply index 11 (omitting index 10), losing a
//   write.
func TestRawNodeCommitPaginationAfterRestart(t *testing.T) {
	s := &ignoreSizeHintMemStorage{
		MemoryStorage: NewMemoryStorage(),
	}
	persistedHardState := raftpb.HardState{
		Term:   1,
		Vote:   1,
		Commit: 10,
	}

	s.hardState = persistedHardState
	s.ents = make([]raftpb.Entry, 10)
	var size uint64
	for i := range s.ents {
		ent := raftpb.Entry{
			Term:  1,
			Index: uint64(i + 1),
			Type:  raftpb.EntryNormal,
			Data:  []byte("a"),
		}

		s.ents[i] = ent
		size += uint64(ent.Size())
	}

	cfg := newTestConfig(1, []uint64{1}, 10, 1, s)
	// Set a MaxSizePerMsg that would suggest to Raft that the last committed entry should
	// not be included in the initial rd.CommittedEntries. However, our storage will ignore
	// this and *will* return it (which is how the Commit index ended up being 10 initially).
	cfg.MaxSizePerMsg = size - uint64(s.ents[len(s.ents)-1].Size()) - 1

	s.ents = append(s.ents, raftpb.Entry{
		Term:  1,
		Index: uint64(11),
		Type:  raftpb.EntryNormal,
		Data:  []byte("boom"),
	})

	rawNode, err := NewRawNode(cfg, []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
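	// Apply the committed entries in (possibly paginated) batches, checking that
	// no index is skipped along the way. The self-addressed heartbeat below
	// carries Commit: 11, so the node keeps producing Readys until the final
	// entry has been applied.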
	for highestApplied := uint64(0); highestApplied != 11; {
		rd := rawNode.Ready()
		n := len(rd.CommittedEntries)
		if n == 0 {
			t.Fatalf("stopped applying entries at index %d", highestApplied)
		}
		if next := rd.CommittedEntries[0].Index; highestApplied != 0 && highestApplied+1 != next {
			t.Fatalf("attempting to apply index %d after index %d, leaving a gap", next, highestApplied)
		}
		highestApplied = rd.CommittedEntries[n-1].Index
		rawNode.Advance(rd)
		rawNode.Step(raftpb.Message{
			Type:   raftpb.MsgHeartbeat,
			To:     1,
			From:   1, // illegal, but we get away with it
			Term:   1,
			Commit: 11,
		})
	}
}

// TestRawNodeBoundedLogGrowthWithPartition tests a scenario where a leader is
// partitioned from a quorum of nodes. It verifies that the leader's log is
// protected from unbounded growth even as new entries continue to be proposed.
// This protection is provided by the MaxUncommittedEntriesSize configuration.
func TestRawNodeBoundedLogGrowthWithPartition(t *testing.T) {
	const maxEntries = 16
	data := []byte("testdata")
	testEntry := raftpb.Entry{Data: data}
	maxEntrySize := uint64(maxEntries * PayloadSize(testEntry))

	s := NewMemoryStorage()
	cfg := newTestConfig(1, []uint64{1}, 10, 1, s)
	cfg.MaxUncommittedEntriesSize = maxEntrySize
	rawNode, err := NewRawNode(cfg, []Peer{{ID: 1}})
	if err != nil {
		t.Fatal(err)
	}
	rd := rawNode.Ready()
	s.Append(rd.Entries)
	rawNode.Advance(rd)

	// Become the leader.
	rawNode.Campaign()
	for {
		rd = rawNode.Ready()
		s.Append(rd.Entries)
		if rd.SoftState.Lead == rawNode.raft.id {
			rawNode.Advance(rd)
			break
		}
		rawNode.Advance(rd)
	}

	// Simulate a network partition while we make our proposals by never
	// committing anything. These proposals should not cause the leader's
	// log to grow indefinitely.
	for i := 0; i < 1024; i++ {
		rawNode.Propose(data)
	}

	// Check the size of leader's uncommitted log tail. It should not exceed the
	// MaxUncommittedEntriesSize limit.
	checkUncommitted := func(exp uint64) {
		t.Helper()
		if a := rawNode.raft.uncommittedSize; exp != a {
			t.Fatalf("expected %d uncommitted entry bytes, found %d", exp, a)
		}
	}
	checkUncommitted(maxEntrySize)

	// Recover from the partition. The uncommitted tail of the Raft log should
	// disappear as entries are committed.
	rd = rawNode.Ready()
	if len(rd.CommittedEntries) != maxEntries {
		t.Fatalf("expected %d entries, got %d", maxEntries, len(rd.CommittedEntries))
	}
	s.Append(rd.Entries)
	rawNode.Advance(rd)
	checkUncommitted(0)
}

func BenchmarkStatusProgress(b *testing.B) {
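	// setup builds a RawNode whose raft instance leads a cluster of the given
	// size, so that Status has to materialize a Progress map with that many peers.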
	setup := func(members int) *RawNode {
		peers := make([]uint64, members)
		for i := range peers {
			peers[i] = uint64(i + 1)
		}
		cfg := newTestConfig(1, peers, 3, 1, NewMemoryStorage())
		cfg.Logger = discardLogger
		r := newRaft(cfg)
		r.becomeFollower(1, 1)
		r.becomeCandidate()
		r.becomeLeader()
		return &RawNode{raft: r}
	}

	for _, members := range []int{1, 3, 5, 100} {
		b.Run(fmt.Sprintf("members=%d", members), func(b *testing.B) {
			// NB: call getStatus through rn.Status because that incurs an additional
			// allocation.
			rn := setup(members)

			b.Run("Status", func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					_ = rn.Status()
				}
			})

			b.Run("Status-example", func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					s := rn.Status()
					var n uint64
					for _, pr := range s.Progress {
						n += pr.Match
					}
					_ = n
				}
			})

			b.Run("StatusWithoutProgress", func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					_ = rn.StatusWithoutProgress()
				}
			})

			b.Run("WithProgress", func(b *testing.B) {
				b.ReportAllocs()
				visit := func(uint64, ProgressType, Progress) {}
				for i := 0; i < b.N; i++ {
					rn.WithProgress(visit)
				}
			})

			b.Run("WithProgress-example", func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					var n uint64
					visit := func(_ uint64, _ ProgressType, pr Progress) {
						n += pr.Match
					}
					rn.WithProgress(visit)
					_ = n
				}
			})
		})
	}
}