// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"fmt"
	"log"
	"math/rand"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/coreos/etcd/client"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/pkg/testutil"

	"golang.org/x/net/context"
)

func init() {
	// enable microsecond-resolution timestamps in logs for integration test debugging
	log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
	if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
		if i, err := strconv.ParseInt(t, 10, 64); err == nil {
			electionTicks = int(i)
		}
	}
}

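// The election timeout override above can help on slow CI hosts.
// An illustrative invocation (the -run pattern is only an example):
//
//	ETCD_ELECTION_TIMEOUT_TICKS=64 go test -run TestClusterOf3
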
func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }

func testCluster(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}

func TestTLSClusterOf3(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}

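// The discovery tests below bootstrap the cluster under test through a
// separate one-member discovery cluster: the expected cluster size is seeded
// under /_config/size, and the new members then register themselves under the
// same discovery URL prefix.
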
func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1) }
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }

func testClusterUsingDiscovery(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	dc := NewCluster(t, 1)
	dc.Launch(t)
	defer dc.Terminate(t)

	// initialize the discovery token space with the expected cluster size
	dcc := MustNewHTTPClient(t, dc.URLs(), nil)
	dkapi := client.NewKeysAPI(dcc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
		t.Fatal(err)
	}
	cancel()

	c := NewClusterByConfig(
		t,
		&ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
	)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}

func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
	defer testutil.AfterTest(t)
	dc := NewCluster(t, 1)
	dc.Launch(t)
	defer dc.Terminate(t)

	// initialize the discovery token space with the expected cluster size
	dcc := MustNewHTTPClient(t, dc.URLs(), nil)
	dkapi := client.NewKeysAPI(dcc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
		t.Fatal(err)
	}
	cancel()

	c := NewClusterByConfig(t,
		&ClusterConfig{
			Size:         3,
			PeerTLS:      &testTLSInfo,
			DiscoveryURL: dc.URL(0) + "/v2/keys",
		},
	)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}

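// The tests below double the cluster size by adding `size` new members, then
// verify the grown cluster can still make progress.
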
func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }

func testDoubleClusterSize(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)

	for i := 0; i < size; i++ {
		c.AddMember(t)
	}
	clusterMustProgress(t, c.Members)
}

func TestDoubleTLSClusterSizeOf3(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
	c.Launch(t)
	defer c.Terminate(t)

	for i := 0; i < 3; i++ {
		c.AddMember(t)
	}
	clusterMustProgress(t, c.Members)
}

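// The tests below shrink the cluster to a single member, removing the last
// member each round and waiting for a new leader before the next removal.
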
func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }

func testDecreaseClusterSize(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)

	// TODO: remove the last but one member
	for i := 0; i < size-1; i++ {
		id := c.Members[len(c.Members)-1].s.ID()
		c.RemoveMember(t, uint64(id))
		c.waitLeader(t, c.Members)
	}
	clusterMustProgress(t, c.Members)
}

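// TestForceNewCluster ensures that restarting the sole surviving member with
// ForceNewCluster set discards the old membership while preserving committed
// data, leaving a functional one-member cluster.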
func TestForceNewCluster(t *testing.T) {
	c := NewCluster(t, 3)
	c.Launch(t)
	cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
	kapi := client.NewKeysAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kapi.Create(ctx, "/foo", "bar")
	if err != nil {
		t.Fatalf("unexpected create error: %v", err)
	}
	cancel()
	// ensure the create has been applied on this member
	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
		t.Fatalf("unexpected watch error: %v", err)
	}
	cancel()

	c.Members[0].Stop(t)
	c.Members[1].Terminate(t)
	c.Members[2].Terminate(t)
	c.Members[0].ForceNewCluster = true
	err = c.Members[0].Restart(t)
	if err != nil {
		t.Fatalf("unexpected ForceRestart error: %v", err)
	}
	defer c.Members[0].Terminate(t)
	c.waitLeader(t, c.Members[:1])

	// use a new http client to open a fresh connection
	cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
	kapi = client.NewKeysAPI(cc)
	// ensure the force restart keeps the old data and the new cluster can make progress
	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
		t.Fatalf("unexpected watch error: %v", err)
	}
	cancel()
	clusterMustProgress(t, c.Members[:1])
}

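// TestAddMemberAfterClusterFullRotation ensures the cluster still makes
// progress after every original member has been removed and replaced.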
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 3)
	c.Launch(t)
	defer c.Terminate(t)

	// remove all three original members and add in three new members.
	for i := 0; i < 3; i++ {
		c.RemoveMember(t, uint64(c.Members[0].s.ID()))
		c.waitLeader(t, c.Members)

		c.AddMember(t)
		c.waitLeader(t, c.Members)
	}

	c.AddMember(t)
	c.waitLeader(t, c.Members)

	clusterMustProgress(t, c.Members)
}

// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 5)
	c.Launch(t)
	defer c.Terminate(t)

	c.RemoveMember(t, uint64(c.Members[4].s.ID()))
	c.waitLeader(t, c.Members)

	c.AddMember(t)
	c.waitLeader(t, c.Members)
	clusterMustProgress(t, c.Members)
}

// Ensure we can remove a member after a snapshot then add a new one back.
func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }

// With 3 nodes, TestIssue2746 sometimes had a shutdown with an inflight snapshot.
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }

func testIssue2746(t *testing.T, members int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, members)

	for _, m := range c.Members {
		m.SnapCount = 10
	}
	c.Launch(t)
	defer c.Terminate(t)

	// force a snapshot
	for i := 0; i < 20; i++ {
		clusterMustProgress(t, c.Members)
	}

	c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
	c.waitLeader(t, c.Members)

	c.AddMember(t)
	c.waitLeader(t, c.Members)
	clusterMustProgress(t, c.Members)
}

// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
	defer testutil.AfterTest(t)
	// start a 1-member cluster to ensure member 0 is the leader of the cluster.
	c := NewCluster(t, 1)
	c.Launch(t)
	defer c.Terminate(t)

	c.AddMember(t)
	c.Members[1].Stop(t)

	// send a remove-member-1 request to the cluster.
	cc := MustNewHTTPClient(t, c.URLs(), nil)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	// the proposal is not committed because member 1 is stopped, but the
	// proposal is appended to the leader's raft log.
	ma.Remove(ctx, c.Members[1].s.ID().String())
	cancel()

	// restart member 1 and expect it to send an UpdateAttributes request.
	// the leader's log looks like this:
	// [..., remove 1, ..., update attr 1, ...]
	c.Members[1].Restart(t)
	// when the member comes back, it acknowledges the proposal to remove
	// itself and applies it.
	<-c.Members[1].s.StopNotify()

	// terminate the removed member
	c.Members[1].Terminate(t)
	c.Members = c.Members[:1]
	// wait for the member to be removed.
	c.waitMembersMatch(t, c.HTTPMembers())
}

// TestIssue3699 tests minority failure during cluster configuration; it was
// deadlocking.
func TestIssue3699(t *testing.T) {
	// start a cluster of 3 nodes a, b, c
	defer testutil.AfterTest(t)
	c := NewCluster(t, 3)
	c.Launch(t)
	defer c.Terminate(t)

	// make node a unavailable
	c.Members[0].Stop(t)

	// add node d
	c.AddMember(t)

	// electing node d as leader makes node a unable to participate
	leaderID := c.waitLeader(t, c.Members)
	for leaderID != 3 {
		c.Members[leaderID].Stop(t)
		<-c.Members[leaderID].s.StopNotify()
		// do not restart the killed member immediately.
		// the member will advance its election timeout after restart,
		// so it will have a better chance to become the leader again.
		time.Sleep(time.Duration(electionTicks * int(tickDuration)))
		c.Members[leaderID].Restart(t)
		leaderID = c.waitLeader(t, c.Members)
	}

	// bring back node a; node a will remain useless as long as d is the leader.
	if err := c.Members[0].Restart(t); err != nil {
		t.Fatal(err)
	}
	select {
	// waiting for ReadyNotify can take several seconds
	case <-time.After(10 * time.Second):
		t.Fatalf("waited too long for ready notification")
	case <-c.Members[0].s.StopNotify():
		t.Fatalf("should not be stopped")
	case <-c.Members[0].s.ReadyNotify():
	}
	// must waitLeader so goroutines don't leak on terminate
	c.waitLeader(t, c.Members)

	// try to participate in the cluster
	cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
	kapi := client.NewKeysAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
		t.Fatalf("unexpected error on Set (%v)", err)
	}
	cancel()
}

// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 3)
	for _, m := range c.Members {
		m.ServerConfig.StrictReconfigCheck = true
	}
	c.Launch(t)
	defer c.Terminate(t)

	// make the cluster unhealthy and wait for the downed peer
	c.Members[0].Stop(t)
	c.WaitLeader(t)

	// all attempts to add a member should fail
	for i := 1; i < len(c.Members); i++ {
		err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
		if err == nil {
			t.Fatalf("should have failed adding peer")
		}
		// TODO: client should return descriptive error codes for internal errors
		if !strings.Contains(err.Error(), "has no leader") {
			t.Errorf("unexpected error (%v)", err)
		}
	}

	// make the cluster healthy again
	c.Members[0].Restart(t)
	c.WaitLeader(t)
	time.Sleep(2 * etcdserver.HealthInterval)

	// adding a member should succeed now that the cluster is healthy
	var err error
	for i := 1; i < len(c.Members); i++ {
		if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
			break
		}
	}
	if err != nil {
		t.Fatalf("should have added peer to healthy cluster (%v)", err)
	}
}

// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum would be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 5)
	for _, m := range c.Members {
		m.ServerConfig.StrictReconfigCheck = true
	}
	c.Launch(t)
	defer c.Terminate(t)

	// make the cluster unhealthy and wait for the downed peers; (3 up, 2 down)
	c.Members[0].Stop(t)
	c.Members[1].Stop(t)
	c.WaitLeader(t)

	// reject removing an active member since (3,2)-(1,0) => (2,2) lacks quorum
	err := c.removeMember(t, uint64(c.Members[2].s.ID()))
	if err == nil {
		t.Fatalf("should reject quorum breaking remove")
	}
	// TODO: client should return more descriptive error codes for internal errors
	if !strings.Contains(err.Error(), "has no leader") {
		t.Errorf("unexpected error (%v)", err)
	}

	// member stopped after launch; wait for missing heartbeats
	time.Sleep(time.Duration(electionTicks * int(tickDuration)))

	// permit removing a dead member since (3,2)-(0,1) => (3,1) has quorum
	if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
		t.Fatalf("should accept removing down member")
	}

	// bring the cluster to (4 up, 1 down)
	c.Members[0].Restart(t)
	// a restarted member must be connected for a HealthInterval before a remove is accepted
	time.Sleep((3 * etcdserver.HealthInterval) / 2)

	// accept removing a member since (4,1)-(1,0) => (3,1) has quorum
	if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
		t.Fatalf("expected to remove member, got error %v", err)
	}
}

// clusterMustProgress ensures that the cluster can make progress. It first
// creates a random key, then checks that the new key can be fetched from all
// client URLs of the cluster.
func clusterMustProgress(t *testing.T, membs []*member) {
	cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
	kapi := client.NewKeysAPI(cc)
	key := fmt.Sprintf("foo%d", rand.Int())
	var (
		err  error
		resp *client.Response
	)
	// retry in case of leader loss induced by slow CI
	for i := 0; i < 3; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		resp, err = kapi.Create(ctx, "/"+key, "bar")
		cancel()
		if err == nil {
			break
		}
		t.Logf("failed to create key on %q (%v)", membs[0].URL(), err)
	}
	if err != nil {
		t.Fatalf("create on %s error: %v", membs[0].URL(), err)
	}

	for i, m := range membs {
		u := m.URL()
		mcc := MustNewHTTPClient(t, []string{u}, nil)
		mkapi := client.NewKeysAPI(mcc)
		mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
		if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
			t.Fatalf("#%d: watch on %s error: %v", i, u, err)
		}
		mcancel()
	}
}

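// TestTransferLeader verifies that a requested leadership transfer settles the
// remaining members on a single new leader distinct from the old one.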
func TestTransferLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	oldLeadIdx := clus.WaitLeader(t)
	oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())

	// ensure followers go through a leader transition during the leadership transfer
	idc := make(chan uint64)
	for i := range clus.Members {
		if oldLeadIdx != i {
			go func(m *member) {
				idc <- checkLeaderTransition(t, m, oldLeadID)
			}(clus.Members[i])
		}
	}

	err := clus.Members[oldLeadIdx].s.TransferLeadership()
	if err != nil {
		t.Fatal(err)
	}

	// wait until the leader transitions have happened
	var newLeadIDs [2]uint64
	for i := range newLeadIDs {
		select {
		case newLeadIDs[i] = <-idc:
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for leader transition")
		}
	}

	// remaining members must agree on the same leader
	if newLeadIDs[0] != newLeadIDs[1] {
		t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1])
	}

	// the new leader must be different from the old leader
	if oldLeadID == newLeadIDs[0] {
		t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0])
	}
}