etcd_functional_test.go

package etcd

import (
	"math/rand"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/coreos/etcd/config"
)
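
// TestKillLeader stops the current leader in clusters of various sizes and
// checks that the remaining members elect a new leader.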
func TestKillLeader(t *testing.T) {
	tests := []int{3, 5, 9, 11}

	for i, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)
		waitLeader(es)

		lead := es[0].node.Leader()
		es[lead].Stop()

		// Give the remaining members time to detect the failure and elect
		// a new leader.
		time.Sleep(es[0].tickDuration * defaultElection * 2)

		waitLeader(es)
		if es[1].node.Leader() == 0 {
			t.Errorf("#%d: lead = %d, want not 0", i, es[1].node.Leader())
		}

		// Tear down servers and their HTTP frontends in reverse start order.
		for i := range es {
			es[len(es)-i-1].Stop()
		}
		for i := range hs {
			hs[len(hs)-i-1].Close()
		}
	}
	afterTest(t)
}
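
// TestRandomKill stops a randomly chosen minority of members and checks that
// the remaining cluster still converges on a leader.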
func TestRandomKill(t *testing.T) {
	tests := []int{3, 5, 9, 11}

	for _, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)
		waitLeader(es)

		// Pick a minority of members at random to kill, so the cluster
		// keeps a quorum and can still elect a leader.
		toKill := make(map[int64]struct{})
		for len(toKill) != tt/2-1 {
			toKill[rand.Int63n(int64(tt))] = struct{}{}
		}
		for k := range toKill {
			es[k].Stop()
		}

		time.Sleep(es[0].tickDuration * defaultElection * 2)

		waitLeader(es)

		for i := range es {
			es[len(es)-i-1].Stop()
		}
		for i := range hs {
			hs[len(hs)-i-1].Close()
		}
	}
	afterTest(t)
}
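
// TestJoinThroughFollower starts servers one by one, pointing each new server
// only at the previously started one, so that later joins go through a
// follower rather than directly to the leader.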
func TestJoinThroughFollower(t *testing.T) {
	tests := []int{3, 4, 5, 6}

	for _, tt := range tests {
		es := make([]*Server, tt)
		hs := make([]*httptest.Server, tt)
		for i := 0; i < tt; i++ {
			c := config.New()
			if i > 0 {
				// Each server only knows about the previously created one.
				c.Peers = []string{hs[i-1].URL}
			}
			es[i], hs[i] = initTestServer(c, int64(i), false)
		}

		go es[0].Bootstrap()

		for i := 1; i < tt; i++ {
			go es[i].Run()
			waitLeader(es[:i])
		}
		waitCluster(t, es)

		for i := range hs {
			es[len(hs)-i-1].Stop()
		}
		for i := range hs {
			hs[len(hs)-i-1].Close()
		}
	}
	afterTest(t)
}
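
// leadterm is the leader ID and term observed by a single server.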
type leadterm struct {
	lead int64
	term int64
}
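
// waitLeader blocks until all participant-mode servers report the same
// leader and term.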
func waitLeader(es []*Server) {
	for {
		ls := make([]leadterm, 0, len(es))
		for i := range es {
			switch es[i].mode {
			case participant:
				ls = append(ls, getLead(es[i]))
			case standby:
				// TODO(xiangli): add standby support
			case stop:
			}
		}
		if isSameLead(ls) {
			return
		}
		time.Sleep(es[0].tickDuration * defaultElection)
	}
}
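
// getLead returns the leader and term currently observed by s.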
func getLead(s *Server) leadterm {
	return leadterm{s.node.Leader(), s.node.Term()}
}
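
// isSameLead reports whether every observation agrees on a single, known leader.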
func isSameLead(ls []leadterm) bool {
	m := make(map[leadterm]int)
	for i := range ls {
		m[ls[i]]++
	}
	if len(m) == 1 {
		if ls[0].lead == -1 {
			return false
		}
		return true
	}
	// TODO(xiangli): print out the current cluster status for debugging.
	return false
}