/*
Copyright 2014 CoreOS Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcd

import (
	"fmt"
	"math/rand"
	"net/http/httptest"
	"net/url"
	"reflect"
	"testing"
	"time"

	"github.com/coreos/etcd/conf"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/third_party/github.com/coreos/go-etcd/etcd"
)

func TestKillLeader(t *testing.T) {
	tests := []int{3, 5, 9}
	for i, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)

		var totalTime time.Duration
		for j := 0; j < tt; j++ {
			lead, _ := waitLeader(es)
			es[lead].Stop()
			hs[lead].Close()
			time.Sleep(es[0].tickDuration * defaultElection * 2)

			// Measure how long it takes for a new leader to emerge.
			start := time.Now()
			if g, _ := waitLeader(es); g == lead {
				t.Errorf("#%d.%d: lead = %d, want not %d", i, j, g, lead)
			}
			take := time.Since(start)

			totalTime += take
			avgTime := totalTime / time.Duration(j+1)
			fmt.Println("Total time:", totalTime, "; Avg time:", avgTime)

			// Restart the killed server in place, reusing its data dir and id.
			c := newTestConfig()
			c.DataDir = es[lead].cfg.DataDir
			c.Addr = hs[lead].Listener.Addr().String()
			id := es[lead].id
			e, h := newUnstartedTestServer(c, id, false)
			if err := startServer(t, e); err != nil {
				t.Fatalf("#%d.%d: %v", i, j, err)
			}
			es[lead] = e
			hs[lead] = h
		}
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}

func TestKillRandom(t *testing.T) {
	tests := []int{3, 5, 9}
	for _, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)

		for j := 0; j < tt; j++ {
			waitLeader(es)

			// Pick tt/2-1 distinct servers to kill, which keeps a quorum alive.
			toKill := make(map[int64]struct{})
			for len(toKill) != tt/2-1 {
				toKill[rand.Int63n(int64(tt))] = struct{}{}
			}
			for k := range toKill {
				es[k].Stop()
				hs[k].Close()
			}

			time.Sleep(es[0].tickDuration * defaultElection * 2)
			waitLeader(es)

			// Restart the killed servers in place.
			for k := range toKill {
				c := newTestConfig()
				c.DataDir = es[k].cfg.DataDir
				c.Addr = hs[k].Listener.Addr().String()
				id := es[k].id
				e, h := newUnstartedTestServer(c, id, false)
				if err := startServer(t, e); err != nil {
					t.Fatal(err)
				}
				es[k] = e
				hs[k] = h
			}
		}
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}

func TestJoinThroughFollower(t *testing.T) {
	tests := []int{3, 4, 5, 6}
	for _, tt := range tests {
		es := make([]*Server, tt)
		hs := make([]*httptest.Server, tt)
		for i := 0; i < tt; i++ {
			c := newTestConfig()
			// Point each new server at its predecessor only, so joins after
			// the first must be forwarded through a follower.
			if i > 0 {
				c.Peers = []string{hs[i-1].URL}
			}
			es[i], hs[i] = newUnstartedTestServer(c, int64(i), false)
		}

		go es[0].Run()
		for i := 1; i < tt; i++ {
			go es[i].Run()
			waitLeader(es[:i])
		}
		waitCluster(t, es)
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}

func TestClusterConfigReload(t *testing.T) {
	tests := []int{3, 4, 5, 6}
	for i, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)

		lead, _ := waitLeader(es)
		cc := conf.NewClusterConfig()
		cc.ActiveSize = 15
		cc.RemoveDelay = 60
		if err := es[lead].p.setClusterConfig(cc); err != nil {
			t.Fatalf("#%d: setClusterConfig err = %v", i, err)
		}

		// Restart the whole cluster and check that the cluster config
		// survives the reload.
		for k := range es {
			es[k].Stop()
			hs[k].Close()
		}
		for k := range es {
			c := newTestConfig()
			c.DataDir = es[k].cfg.DataDir
			c.Addr = hs[k].Listener.Addr().String()
			id := es[k].id
			e, h := newUnstartedTestServer(c, id, false)
			if err := startServer(t, e); err != nil {
				t.Fatal(err)
			}
			es[k] = e
			hs[k] = h
		}

		lead, _ = waitLeader(es)
		// Wait for msgAppResp to commit all entries.
		time.Sleep(2 * defaultHeartbeat * es[lead].tickDuration)
		if g := es[lead].p.clusterConfig(); !reflect.DeepEqual(g, cc) {
			t.Errorf("#%d: clusterConfig = %+v, want %+v", i, g, cc)
		}
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}

func TestMultiNodeKillOne(t *testing.T) {
	tests := []int{5}
	for i, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)

		// Keep firing set requests at the cluster while members are recycled.
		stop := make(chan bool)
		go keepSetting(hs[0].URL, stop)

		for j := 0; j < 10; j++ {
			idx := rand.Int() % tt
			es[idx].Stop()
			hs[idx].Close()

			c := newTestConfig()
			c.DataDir = es[idx].cfg.DataDir
			c.Addr = hs[idx].Listener.Addr().String()
			id := es[idx].id
			e, h := newUnstartedTestServer(c, id, false)
			if err := startServer(t, e); err != nil {
				t.Fatalf("#%d.%d: %v", i, j, err)
			}
			es[idx] = e
			hs[idx] = h
		}

		stop <- true
		<-stop
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}

func TestMultiNodeKillAllAndRecovery(t *testing.T) {
	tests := []int{5}
	for i, tt := range tests {
		es, hs := buildCluster(tt, false)
		waitCluster(t, es)
		waitLeader(es)

		c := etcd.NewClient([]string{hs[0].URL})
		for n := 0; n < 10; n++ {
			if _, err := c.Set("foo", "bar", 0); err != nil {
				panic(err)
			}
		}

		// Kill the whole cluster, then restart every member in place.
		for k := range es {
			es[k].Stop()
			hs[k].Close()
		}
		for k := range es {
			cfg := newTestConfig()
			cfg.DataDir = es[k].cfg.DataDir
			cfg.Addr = hs[k].Listener.Addr().String()
			id := es[k].id
			e, h := newUnstartedTestServer(cfg, id, false)
			if err := startServer(t, e); err != nil {
				t.Fatalf("#%d.%d: %v", i, k, err)
			}
			es[k] = e
			hs[k] = h
		}
		waitLeader(es)

		res, err := c.Set("foo", "bar", 0)
		if err != nil {
			t.Fatalf("#%d: set err after recovery: %v", i, err)
		}
		// The cluster bootstrap writes plus the 10 earlier sets put this set
		// at modified index 16.
		if g := res.Node.ModifiedIndex; g != 16 {
			t.Errorf("#%d: modifiedIndex = %d, want %d", i, g, 16)
		}
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}
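
// BenchmarkEndToEndSet measures the cost of a Set proposal going through a
// small cluster. It can be run on its own with the standard Go test tooling,
// e.g. `go test -run='^$' -bench=BenchmarkEndToEndSet` from this package's
// directory.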
func BenchmarkEndToEndSet(b *testing.B) {
	es, hs := buildCluster(3, false)
	waitLeader(es)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		if _, err := es[0].p.Set("foo", false, "bar", store.Permanent); err != nil {
			panic("unexpected error")
		}
	}
	b.StopTimer()
	destoryCluster(nil, es, hs)
}

// TODO(yichengq): cannot handle previous msgDenial correctly now
func TestModeSwitch(t *testing.T) {
	t.Skip("not passed")
	size := 5
	round := 3

	for i := 0; i < size; i++ {
		es, hs := buildCluster(size, false)
		waitCluster(t, es)

		cfg := conf.NewClusterConfig()
		cfg.SyncInterval = 0
		id := int64(i)
		for j := 0; j < round; j++ {
			lead, _ := waitActiveLeader(es)
			// The cluster only demotes followers.
			if lead == id {
				continue
			}

			// Shrink the active size so that server i is demoted to standby.
			cfg.ActiveSize = size - 1
			if err := es[lead].p.setClusterConfig(cfg); err != nil {
				t.Fatalf("#%d: setClusterConfig err = %v", i, err)
			}
			if err := es[lead].p.remove(id); err != nil {
				t.Fatalf("#%d: remove err = %v", i, err)
			}
			waitMode(standbyMode, es[i])

			// Give the standby a few ticks to discover the current leader.
			for k := 0; k < 4; k++ {
				if es[i].s.leader != noneId {
					break
				}
				time.Sleep(20 * time.Millisecond)
			}
			if g := es[i].s.leader; g != lead {
				t.Errorf("#%d: lead = %d, want %d", i, g, lead)
			}

			// Grow the active size again so that server i rejoins as a
			// participant.
			cfg.ActiveSize = size
			if err := es[lead].p.setClusterConfig(cfg); err != nil {
				t.Fatalf("#%d: setClusterConfig err = %v", i, err)
			}
			waitMode(participantMode, es[i])
			if err := checkParticipant(i, es); err != nil {
				t.Errorf("#%d: check alive err = %v", i, err)
			}
		}
		destoryCluster(t, es, hs)
	}
	afterTest(t)
}

// keepSetting keeps issuing PUTs on fresh keys against the server at urlStr
// until a value arrives on stop; it echoes the value back so the caller can
// synchronize on its exit.
func keepSetting(urlStr string, stop chan bool) {
	tc := NewTestClient()
	i := 0
	value := url.Values{"value": {"bar"}}
	for {
		resp, err := tc.PutForm(fmt.Sprintf("%s/v2/keys/foo_%v", urlStr, i), value)
		if err == nil {
			tc.ReadBody(resp)
		}
		select {
		case <-stop:
			stop <- true
			return
		default:
		}
		i++
	}
}

type leadterm struct {
	lead int64
	term int64
}

// waitActiveLeader waits until the agreed-upon leader is an active
// participant, and returns its id and term.
func waitActiveLeader(es []*Server) (lead, term int64) {
	for {
		if l, t := waitLeader(es); l >= 0 && es[l].mode.Get() == participantMode {
			return l, t
		}
	}
}

// waitLeader waits until all alive servers are checked to have the same
// leader. WARNING: the lead returned is not guaranteed to be the actual
// leader.
func waitLeader(es []*Server) (lead, term int64) {
	for {
		ls := make([]leadterm, 0, len(es))
		for i := range es {
			switch es[i].mode.Get() {
			case participantMode:
				ls = append(ls, getLead(es[i]))
			case standbyMode:
				// TODO(xiangli): add standby support
			case stopMode:
			}
		}
		if isSameLead(ls) {
			return ls[0].lead, ls[0].term
		}
		time.Sleep(es[0].tickDuration * defaultElection)
	}
}

func getLead(s *Server) leadterm {
	return leadterm{s.p.node.Leader(), s.p.node.Term()}
}

// isSameLead reports whether all observed (lead, term) pairs agree and a
// leader has actually been elected.
func isSameLead(ls []leadterm) bool {
	m := make(map[leadterm]int)
	for i := range ls {
		m[ls[i]]++
	}
	if len(m) == 1 {
		// A lead of -1 means that no leader has been elected yet.
		return ls[0].lead != -1
	}
	// TODO(xiangli): print out the current cluster status for debugging.
	return false
}
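
// A minimal sketch toward the TODO in isSameLead, useful only for debugging:
// dump the observed (lead, term) pairs when the servers disagree. dumpLeads
// is a hypothetical helper, not part of the original test suite.
func dumpLeads(ls []leadterm) {
	for i, lt := range ls {
		fmt.Printf("observation %d: lead=%d term=%d\n", i, lt.lead, lt.term)
	}
}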