network_partition_test.go

// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !cluster_proxy

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
)

// TestBalancerUnderNetworkPartitionPut tests that when one member becomes
// isolated, the first Put request fails, and the following retry succeeds
// once the client balancer has switched to another endpoint.
func TestBalancerUnderNetworkPartitionPut(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Put(ctx, "a", "b")
		return err
	})
}

// TestBalancerUnderNetworkPartitionGet tests that when one member becomes
// isolated, the first Get request fails, and the following retry succeeds
// once the client balancer has switched to another endpoint.
func TestBalancerUnderNetworkPartitionGet(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a")
		return err
	})
}

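// The same harness can exercise other unary RPCs. The variant below is an
// illustrative addition, not part of the original file; it assumes a Delete
// against the isolated member fails over the same way a Put does.
func TestBalancerUnderNetworkPartitionDelete(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Delete(ctx, "a")
		return err
	})
}
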
func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:                 3,
		GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
		SkipCreatingClient:   true,
	})
	defer clus.Terminate(t)

	// expect pin ep[0]
	ccfg := clientv3.Config{
		Endpoints:            []string{clus.Members[0].GRPCAddr()},
		DialTimeout:          3 * time.Second,
		DialKeepAliveTime:    2 * time.Second,
		DialKeepAliveTimeout: 2 * time.Second,
	}
	cli, err := clientv3.New(ccfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// wait for ep[0] to be pinned
	waitPinReady(t, cli)

	// add other endpoints for later endpoint switch
	cli.SetEndpoints(clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr())
	clus.Members[0].InjectPartition(t, clus.Members[1:]...)

	for i := 0; i < 2; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		err = op(cli, ctx)
		cancel()
		if err == nil {
			break
		}
		// TODO: separate put and get test for error checking.
		// we do not really expect ErrTimeout on get.
		if err != context.DeadlineExceeded && err != rpctypes.ErrTimeout {
			t.Errorf("#%d: expected %v or %v, got %v", i, context.DeadlineExceeded, rpctypes.ErrTimeout, err)
		}
		// give enough time for endpoint switch
		// TODO: remove random sleep by syncing directly with balancer
		if i == 0 {
			time.Sleep(5 * time.Second)
		}
	}
	if err != nil {
		t.Errorf("balancer did not switch in time (%v)", err)
	}
}

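// waitPinReady (used above) is not defined in this file; it comes from a
// shared test helper elsewhere in this package. Roughly, it is assumed to
// issue a short Get through the client so the balancer must establish (pin)
// a connection before the test proceeds, along the lines of:
//
//	func waitPinReady(t *testing.T, cli *clientv3.Client) {
//		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//		_, err := cli.Get(ctx, "foo")
//		cancel()
//		if err != nil {
//			t.Fatal(err)
//		}
//	}
//
// This sketch is illustrative only; see the package's test utilities for the
// actual implementation.
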
func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) {
	testBalancerUnderNetworkPartitionWatch(t, true)
}

func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
	testBalancerUnderNetworkPartitionWatch(t, false)
}

// testBalancerUnderNetworkPartitionWatch ensures that a watch stream to a
// partitioned node is closed when the watch context requires a leader.
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	target := clus.WaitLeader(t)
	if !isolateLeader {
		target = (target + 1) % 3 // pick a follower instead of the leader
	}

	// pin eps[target]
	watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}})
	if err != nil {
		t.Fatal(err)
	}
	defer watchCli.Close()

	// wait for eps[target] to be pinned
	waitPinReady(t, watchCli)

	// add all eps to the list, so that when the originally pinned one fails
	// the client can switch to other available eps
	watchCli.SetEndpoints(eps...)

	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
	select {
	case <-wch:
	case <-time.After(3 * time.Second):
		t.Fatal("took too long to create watch")
	}

	// isolate eps[target]
	clus.Members[target].InjectPartition(t,
		clus.Members[(target+1)%3],
		clus.Members[(target+2)%3],
	)

	select {
	case ev := <-wch:
		if len(ev.Events) != 0 {
			t.Fatal("expected no event")
		}
		if err = ev.Err(); err != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
		}
	case <-time.After(3 * time.Second): // enough time to detect leader loss
		t.Fatal("took too long to detect lost leader")
	}
}
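
// To run only these tests, something along these lines should work from the
// repository root (the package path is assumed to be clientv3/integration):
//
//	go test -v -run 'TestBalancerUnderNetworkPartition' ./clientv3/integration/...
//
// Building with the cluster_proxy build tag skips this file entirely because
// of the "+build !cluster_proxy" constraint at the top.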