// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"fmt"
	"log"
	"math/rand"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/coreos/etcd/client"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/pkg/capnslog"
	"golang.org/x/net/context"
)
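
// init enables microsecond-resolution logging for test debugging and lets the
// ETCD_ELECTION_TIMEOUT_TICKS environment variable override electionTicks.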
func init() {
	// open microsecond-level time log for integration test debugging
	log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
	if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
		if i, err := strconv.ParseInt(t, 10, 64); err == nil {
			electionTicks = int(i)
		}
	}
}
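
// TestClusterOf1 and TestClusterOf3 launch clusters of one and three members
// and verify that each can commit new writes.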
func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }

func testCluster(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}
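
// TestTLSClusterOf3 launches a three-member cluster with TLS-secured peer
// connections and verifies it can make progress.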
func TestTLSClusterOf3(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}
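
// The discovery tests bootstrap a cluster through a separate one-member
// discovery cluster: the target size is registered under /_config/size, and
// the joining members coordinate through the discovery URL.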
func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1) }
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }

func testClusterUsingDiscovery(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	dc := NewCluster(t, 1)
	dc.Launch(t)
	defer dc.Terminate(t)
	// init discovery token space
	dcc := MustNewHTTPClient(t, dc.URLs(), nil)
	dkapi := client.NewKeysAPI(dcc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
		t.Fatal(err)
	}
	cancel()

	c := NewClusterByConfig(
		t,
		&ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
	)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}
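
// TestTLSClusterOf3UsingDiscovery combines discovery-based bootstrap with
// TLS-secured peer connections.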
func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
	defer testutil.AfterTest(t)
	dc := NewCluster(t, 1)
	dc.Launch(t)
	defer dc.Terminate(t)
	// init discovery token space
	dcc := MustNewHTTPClient(t, dc.URLs(), nil)
	dkapi := client.NewKeysAPI(dcc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
		t.Fatal(err)
	}
	cancel()

	c := NewClusterByConfig(t,
		&ClusterConfig{
			Size:         3,
			PeerTLS:      &testTLSInfo,
			DiscoveryURL: dc.URL(0) + "/v2/keys"},
	)
	c.Launch(t)
	defer c.Terminate(t)
	clusterMustProgress(t, c.Members)
}
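
// The double-size tests grow a running cluster to twice its original member
// count, one member at a time, and verify it still makes progress.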
func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }

func testDoubleClusterSize(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)

	for i := 0; i < size; i++ {
		c.AddMember(t)
	}
	clusterMustProgress(t, c.Members)
}
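
// TestDoubleTLSClusterSizeOf3 doubles the size of a TLS-secured three-member
// cluster.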
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
	c.Launch(t)
	defer c.Terminate(t)

	for i := 0; i < 3; i++ {
		c.AddMember(t)
	}
	clusterMustProgress(t, c.Members)
}
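
// The decrease-size tests shrink a cluster down to a single member by
// repeatedly removing the last member, retrying when a removal races with a
// leader election.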
func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }

func testDecreaseClusterSize(t *testing.T, size int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, size)
	c.Launch(t)
	defer c.Terminate(t)

	// TODO: remove the last but one member
	for i := 0; i < size-1; i++ {
		id := c.Members[len(c.Members)-1].s.ID()

		// may hit second leader election on slow machines
		if err := c.removeMember(t, uint64(id)); err != nil {
			if strings.Contains(err.Error(), "no leader") {
				t.Logf("got leader error (%v)", err)
				i--
				continue
			}
			t.Fatal(err)
		}
		c.waitLeader(t, c.Members)
	}
	clusterMustProgress(t, c.Members)
}
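
// TestForceNewCluster restarts a stopped member with ForceNewCluster set and
// verifies that the old data survives and the resulting single-member cluster
// can make progress.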
func TestForceNewCluster(t *testing.T) {
	c := NewCluster(t, 3)
	c.Launch(t)
	cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
	kapi := client.NewKeysAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kapi.Create(ctx, "/foo", "bar")
	if err != nil {
		t.Fatalf("unexpected create error: %v", err)
	}
	cancel()
	// ensure create has been applied on this machine
	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
		t.Fatalf("unexpected watch error: %v", err)
	}
	cancel()

	c.Members[0].Stop(t)
	c.Members[1].Terminate(t)
	c.Members[2].Terminate(t)

	c.Members[0].ForceNewCluster = true
	err = c.Members[0].Restart(t)
	if err != nil {
		t.Fatalf("unexpected ForceRestart error: %v", err)
	}
	defer c.Members[0].Terminate(t)
	c.waitLeader(t, c.Members[:1])

	// use a new http client to open a new connection
	cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
	kapi = client.NewKeysAPI(cc)
	// ensure force restart keeps the old data, and the new cluster can make progress
	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
		t.Fatalf("unexpected watch error: %v", err)
	}
	cancel()
	clusterMustProgress(t, c.Members[:1])
}
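
// TestAddMemberAfterClusterFullRotation replaces every original member with a
// remove/add pair, then grows the cluster by one more member.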
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 3)
	c.Launch(t)
	defer c.Terminate(t)

	// remove all the previous three members and add in three new members.
	for i := 0; i < 3; i++ {
		c.RemoveMember(t, uint64(c.Members[0].s.ID()))
		c.waitLeader(t, c.Members)

		c.AddMember(t)
		c.waitLeader(t, c.Members)
	}

	c.AddMember(t)
	c.waitLeader(t, c.Members)

	clusterMustProgress(t, c.Members)
}

// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 5)
	c.Launch(t)
	defer c.Terminate(t)
	c.RemoveMember(t, uint64(c.Members[4].s.ID()))
	c.waitLeader(t, c.Members)

	c.AddMember(t)
	c.waitLeader(t, c.Members)
	clusterMustProgress(t, c.Members)
}

// Ensure we can remove a member after a snapshot then add a new one back.
func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }

// With 3 nodes TestIssue2746 sometimes had a shutdown with an inflight snapshot.
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }

func testIssue2746(t *testing.T, members int) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, members)

	for _, m := range c.Members {
		m.SnapCount = 10
	}

	c.Launch(t)
	defer c.Terminate(t)

	// force a snapshot
	for i := 0; i < 20; i++ {
		clusterMustProgress(t, c.Members)
	}

	c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
	c.waitLeader(t, c.Members)

	c.AddMember(t)
	c.waitLeader(t, c.Members)
	clusterMustProgress(t, c.Members)
}

// Ensure etcd will not panic when removing a just-started member.
func TestIssue2904(t *testing.T) {
	defer testutil.AfterTest(t)
	// start a 1-member cluster to ensure member 0 is the leader of the cluster.
	c := NewCluster(t, 1)
	c.Launch(t)
	defer c.Terminate(t)

	c.AddMember(t)
	c.Members[1].Stop(t)

	// send the remove-member-1 request to the cluster.
	cc := MustNewHTTPClient(t, c.URLs(), nil)
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	// the proposal is not committed because member 1 is stopped, but the
	// proposal is appended to the leader's raft log.
	ma.Remove(ctx, c.Members[1].s.ID().String())
	cancel()

	// restart the member and expect it to send an UpdateAttributes request.
	// the log in the leader is like this:
	// [..., remove 1, ..., update attr 1, ...]
	c.Members[1].Restart(t)
	// when the member comes back, it acks the proposal to remove itself,
	// and applies it.
	<-c.Members[1].s.StopNotify()

	// terminate the removed member
	c.Members[1].Terminate(t)
	c.Members = c.Members[:1]
	// wait for the member to be removed.
	c.waitMembersMatch(t, c.HTTPMembers())
}

// TestIssue3699 tests minority failure during cluster configuration; it was
// deadlocking.
func TestIssue3699(t *testing.T) {
	// start a cluster of 3 nodes a, b, c
	defer testutil.AfterTest(t)
	c := NewCluster(t, 3)
	c.Launch(t)
	defer c.Terminate(t)

	// make node a unavailable
	c.Members[0].Stop(t)

	// add node d
	c.AddMember(t)

	// electing node d as leader makes node a unable to participate
	leaderID := c.waitLeader(t, c.Members)
	for leaderID != 3 {
		c.Members[leaderID].Stop(t)
		<-c.Members[leaderID].s.StopNotify()
		// do not restart the killed member immediately.
		// the member will advance its election timeout after restart,
		// so it will have a better chance to become the leader again.
		time.Sleep(time.Duration(electionTicks * int(tickDuration)))
		c.Members[leaderID].Restart(t)
		leaderID = c.waitLeader(t, c.Members)
	}

	// bring back node a
	// node a will remain useless as long as d is the leader.
	if err := c.Members[0].Restart(t); err != nil {
		t.Fatal(err)
	}

	select {
	// waiting for ReadyNotify can take several seconds
	case <-time.After(10 * time.Second):
		t.Fatalf("waited too long for ready notification")
	case <-c.Members[0].s.StopNotify():
		t.Fatalf("should not be stopped")
	case <-c.Members[0].s.ReadyNotify():
	}
	// must waitLeader so goroutines don't leak on terminate
	c.waitLeader(t, c.Members)

	// try to participate in cluster
	cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
	kapi := client.NewKeysAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
		t.Fatalf("unexpected error on Set (%v)", err)
	}
	cancel()
}

// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 3)
	for _, m := range c.Members {
		m.ServerConfig.StrictReconfigCheck = true
	}
	c.Launch(t)
	defer c.Terminate(t)

	// make cluster unhealthy and wait for downed peer
	c.Members[0].Stop(t)
	c.WaitLeader(t)

	// all attempts to add member should fail
	for i := 1; i < len(c.Members); i++ {
		err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
		if err == nil {
			t.Fatalf("should have failed adding peer")
		}
		// TODO: client should return descriptive error codes for internal errors
		if !strings.Contains(err.Error(), "has no leader") {
			t.Errorf("unexpected error (%v)", err)
		}
	}

	// make cluster healthy
	c.Members[0].Restart(t)
	c.WaitLeader(t)
	time.Sleep(2 * etcdserver.HealthInterval)

	// add member should succeed now that it's healthy
	var err error
	for i := 1; i < len(c.Members); i++ {
		if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
			break
		}
	}
	if err != nil {
		t.Fatalf("should have added peer to healthy cluster (%v)", err)
	}
}

// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
	defer testutil.AfterTest(t)
	c := NewCluster(t, 5)
	for _, m := range c.Members {
		m.ServerConfig.StrictReconfigCheck = true
	}
	c.Launch(t)
	defer c.Terminate(t)

	// make cluster unhealthy and wait for downed peer; (3 up, 2 down)
	c.Members[0].Stop(t)
	c.Members[1].Stop(t)
	c.WaitLeader(t)

	// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
	err := c.removeMember(t, uint64(c.Members[2].s.ID()))
	if err == nil {
		t.Fatalf("should reject quorum breaking remove")
	}
	// TODO: client should return more descriptive error codes for internal errors
	if !strings.Contains(err.Error(), "has no leader") {
		t.Errorf("unexpected error (%v)", err)
	}

	// member stopped after launch; wait for missing heartbeats
	time.Sleep(time.Duration(electionTicks * int(tickDuration)))

	// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
	if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
		t.Fatalf("should accept removing down member")
	}

	// bring cluster to (4,1)
	c.Members[0].Restart(t)

	// restarted member must be connected for a HealthInterval before remove is accepted
	time.Sleep((3 * etcdserver.HealthInterval) / 2)

	// accept remove member since (4,1)-(1,0) => (3,1) has quorum
	if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
		t.Fatalf("expected to remove member, got error %v", err)
	}
}

// TestRestartRemoved ensures that a removed member that is restarted must exit
// if 'initial-cluster-state' is set to 'new' and the old data directory still
// exists (see https://github.com/coreos/etcd/issues/7512 for more).
func TestRestartRemoved(t *testing.T) {
	defer testutil.AfterTest(t)
	capnslog.SetGlobalLogLevel(capnslog.INFO)
	defer capnslog.SetGlobalLogLevel(defaultLogLevel)

	// 1. start single-member cluster
	c := NewCluster(t, 1)
	for _, m := range c.Members {
		m.ServerConfig.StrictReconfigCheck = true
	}
	c.Launch(t)
	defer c.Terminate(t)

	// 2. add a new member
	c.AddMember(t)
	c.WaitLeader(t)

	oldm := c.Members[0]
	oldm.keepDataDirTerminate = true

	// 3. remove first member, shut down without deleting data
	if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
		t.Fatalf("expected to remove member, got error %v", err)
	}
	c.WaitLeader(t)

	// 4. restart first member with 'initial-cluster-state=new';
	// wrong config, expects exit within ReqTimeout
	oldm.ServerConfig.NewCluster = false
	if err := oldm.Restart(t); err != nil {
		t.Fatalf("unexpected restart error: %v", err)
	}
	defer func() {
		oldm.Close()
		os.RemoveAll(oldm.ServerConfig.DataDir)
	}()

	select {
	case <-oldm.s.StopNotify():
	case <-time.After(time.Minute):
		t.Fatalf("removed member didn't exit within %v", time.Minute)
	}
}

// clusterMustProgress ensures that the cluster can make progress. It creates
// a random key first, and checks that the new key can be fetched from all
// client URLs of the cluster.
func clusterMustProgress(t *testing.T, membs []*member) {
	cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
	kapi := client.NewKeysAPI(cc)
	key := fmt.Sprintf("foo%d", rand.Int())
	var (
		err  error
		resp *client.Response
	)
	// retry in case of leader loss induced by slow CI
	for i := 0; i < 3; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		resp, err = kapi.Create(ctx, "/"+key, "bar")
		cancel()
		if err == nil {
			break
		}
		t.Logf("failed to create key on %q (%v)", membs[0].URL(), err)
	}
	if err != nil {
		t.Fatalf("create on %s error: %v", membs[0].URL(), err)
	}

	for i, m := range membs {
		u := m.URL()
		mcc := MustNewHTTPClient(t, []string{u}, nil)
		mkapi := client.NewKeysAPI(mcc)
		mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
		if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
			t.Fatalf("#%d: watch on %s error: %v", i, u, err)
		}
		mcancel()
	}
}
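
// TestSpeedyTerminate checks that a cluster with in-flight requests that time
// out on lost leaders still terminates promptly.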
func TestSpeedyTerminate(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	// Stop/Restart so requests will time out on lost leaders
	for i := 0; i < 3; i++ {
		clus.Members[i].Stop(t)
		clus.Members[i].Restart(t)
	}
	donec := make(chan struct{})
	go func() {
		defer close(donec)
		clus.Terminate(t)
	}()
	select {
	case <-time.After(10 * time.Second):
		t.Fatalf("cluster took too long to terminate")
	case <-donec:
	}
}