
Merge pull request #8839 from gyuho/test-balancer

clientv3/integration: test linearizable get with leader election, network partition
Gyu-Ho Lee, 8 years ago · commit dfe0f8c2bc
2 changed files with 45 additions and 26 deletions:
  1. clientv3/integration/kv_test.go (+0, -26)
  2. clientv3/integration/network_partition_test.go (+45, -0)
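
For context, the new test exercises the clientv3 balancer with a linearizable (default) Get, which must be served through the leader and can therefore time out while a leader election is in progress. Below is a minimal sketch of such a client setup; the import path and endpoint addresses are placeholders (the integration tests use clus.Members[i].GRPCAddr()), not part of this change.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Placeholder endpoints; the integration tests derive these from the test cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379", "127.0.0.1:22379", "127.0.0.1:32379"},
		DialTimeout: time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Linearizable (default) Get: goes through the leader, so it can return
	// "etcdserver: request timed out" while an election is ongoing.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	resp, err := cli.Get(ctx, "a")
	cancel()
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Kvs)
}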

clientv3/integration/kv_test.go (+0, -26)

@@ -937,29 +937,3 @@ func TestKVPutAtMostOnce(t *testing.T) {
 		t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
 	}
 }
-
-func TestKVSwitchUnavailable(t *testing.T) {
-	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
-	defer clus.Terminate(t)
-
-	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
-	// try to connect with dead node in the endpoint list
-	cfg := clientv3.Config{
-		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-		},
-		DialTimeout: 1 * time.Second}
-	cli, err := clientv3.New(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer cli.Close()
-	timeout := 3 * clus.Members[0].ServerConfig.ReqTimeout()
-	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
-	if _, err := cli.Get(ctx, "abc"); err != nil {
-		t.Fatal(err)
-	}
-	cancel()
-}
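
The Get calls in the removed test above and in the new test below are linearizable reads, the clientv3 default; a serializable read is served from the local member and is not blocked by leader loss. A hedged sketch of the distinction, assuming a client built as in the example above (the helper name readBoth is illustrative, not part of this change):

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// readBoth is an illustrative helper, not part of this change.
func readBoth(ctx context.Context, cli *clientv3.Client) error {
	// Linearizable read (default): goes through the leader's quorum read path,
	// so it can fail with "etcdserver: request timed out" during an election.
	if _, err := cli.Get(ctx, "a"); err != nil {
		return err
	}
	// Serializable read: answered from the local member's store, so it still
	// works without a leader, at the cost of possibly stale data.
	_, err := cli.Get(ctx, "a", clientv3.WithSerializable())
	return err
}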

clientv3/integration/network_partition_test.go (+45, -0)

@@ -146,6 +146,51 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
 	}
 }
 
+// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures the
+// balancer switches endpoints when the leader fails and a linearizable get
+// request returns "etcdserver: request timed out".
+func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
+	defer testutil.AfterTest(t)
+
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
+		Size:               3,
+		SkipCreatingClient: true,
+	})
+	defer clus.Terminate(t)
+	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
+
+	lead := clus.WaitLeader(t)
+
+	timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()
+
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{eps[(lead+1)%2]},
+		DialTimeout: 1 * time.Second,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cli.Close()
+
+	// wait for non-leader to be pinned
+	mustWaitPinReady(t, cli)
+
+	// add the leader endpoint to the list, so that when the originally pinned
+	// endpoint fails the client can switch to another available endpoint
+	cli.SetEndpoints(eps[lead], eps[(lead+1)%2])
+
+	// isolate leader
+	clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3])
+
+	// expect the balancer to switch endpoints while the leader election is ongoing
+	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+	_, err = cli.Get(ctx, "a")
+	cancel()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
 func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) {
 	testBalancerUnderNetworkPartitionWatch(t, true)
 }
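
mustWaitPinReady, called in the new test, is a helper defined elsewhere in the clientv3/integration package. A plausible sketch of what such a helper does, not necessarily the actual implementation, is to issue a short Get and fail the test if the balancer has not pinned a reachable endpoint in time:

import (
	"context"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// mustWaitPinReady: sketch only; the real helper lives in this package's test utilities.
func mustWaitPinReady(t *testing.T, cli *clientv3.Client) {
	// A successful request implies the balancer has pinned a reachable endpoint.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	_, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		t.Fatal(err)
	}
}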