
ordering: use default clients to populate etcd data

Switching endpoints on the same client was triggering balancer
reconnect errors, behavior that belongs in the clientv3/integration
tests rather than here.
Anthony Romano · 8 years ago
commit 10db0319d1
2 changed files with 14 additions and 29 deletions:
  1. clientv3/ordering/kv_test.go (+5, -14)
  2. clientv3/ordering/util_test.go (+9, -15)

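For context, the pattern the commit moves to looks roughly like this. It is an illustrative sketch, not code from the commit: the test name is made up, and imports of testing, context, clientv3, and the integration package are assumed.

	func TestPopulateViaDefaultClients(t *testing.T) {
		clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
		defer clus.Terminate(t)

		// The client under test stays pinned to one endpoint; it is never
		// re-pointed with SetEndpoints, so its balancer never reconnects.
		cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCAddr()}}
		cli, err := clientv3.New(cfg)
		if err != nil {
			t.Fatal(err)
		}
		defer cli.Close()

		ctx := context.TODO()

		// Setup traffic goes through the cluster's pre-configured default
		// clients, one per member, instead of through cli.
		if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
			t.Fatal(err)
		}
		// Reading through the second member's client ensures it has caught
		// up to the revision written above.
		if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
			t.Fatal(err)
		}
	}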
clientv3/ordering/kv_test.go (+5, -14)

@@ -45,15 +45,11 @@ func TestDetectKvOrderViolation(t *testing.T) {
 	cli, err := clientv3.New(cfg)
 	ctx := context.TODO()
 
-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
-	_, err = cli.Put(ctx, "foo", "bar")
-	if err != nil {
+	if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
 		t.Fatal(err)
 	}
 	// ensure that the second member has the current revision for the key foo
-	cli.SetEndpoints(clus.Members[1].GRPCAddr())
-	_, err = cli.Get(ctx, "foo")
-	if err != nil {
+	if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -107,23 +103,18 @@ func TestDetectTxnOrderViolation(t *testing.T) {
 	cli, err := clientv3.New(cfg)
 	ctx := context.TODO()
 
-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
-	_, err = cli.Put(ctx, "foo", "bar")
-	if err != nil {
+	if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
 		t.Fatal(err)
 	}
 	// ensure that the second member has the current revision for the key foo
-	cli.SetEndpoints(clus.Members[1].GRPCAddr())
-	_, err = cli.Get(ctx, "foo")
-	if err != nil {
+	if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
 		t.Fatal(err)
 	}
 
 	// stop third member in order to force the member to have an outdated revision
 	clus.Members[2].Stop(t)
 	time.Sleep(1 * time.Second) // give enough time for operation
-	_, err = cli.Put(ctx, "foo", "buzz")
-	if err != nil {
+	if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil {
 		t.Fatal(err)
 	}
 
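For orientation, kv_test.go exercises the order-violation detector: it wraps the pinned client's KV so each response's revision is checked against the highest revision seen so far. A minimal sketch of attaching it follows, assuming ordering.NewKV accepts a clientv3.KV plus a violation callback taking the op, the response, and the previously observed revision (the sentinel error name is made up; an errors import is assumed):

	// Illustrative sentinel the test could assert on.
	errOrderViolation := errors.New("ordering: revision went backwards")

	// Every response served through orderKV has its revision compared with
	// the highest revision previously observed; on a regression the callback
	// decides the outcome (here: fail the call).
	orderKV := ordering.NewKV(cli.KV,
		func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
			return errOrderViolation
		})

	// A read against an up-to-date member passes through unchanged.
	if _, err := orderKV.Get(ctx, "foo"); err != nil {
		t.Fatal(err)
	}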

clientv3/ordering/util_test.go (+9, -15)

@@ -28,26 +28,21 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
-	cfg := clientv3.Config{
-		Endpoints: []string{
-			clus.Members[0].GRPCAddr(),
-			clus.Members[1].GRPCAddr(),
-			clus.Members[2].GRPCAddr(),
-		},
+	eps := []string{
+		clus.Members[0].GRPCAddr(),
+		clus.Members[1].GRPCAddr(),
+		clus.Members[2].GRPCAddr(),
 	}
+	cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCAddr()}}
 	cli, err := clientv3.New(cfg)
-	eps := cli.Endpoints()
+
 	ctx := context.TODO()
 
-	cli.SetEndpoints(clus.Members[0].GRPCAddr())
-	_, err = cli.Put(ctx, "foo", "bar")
-	if err != nil {
+	if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil {
 		t.Fatal(err)
 	}
 	// ensure that the second member has current revision for key "foo"
-	cli.SetEndpoints(clus.Members[1].GRPCAddr())
-	_, err = cli.Get(ctx, "foo")
-	if err != nil {
+	if _, err = clus.Client(1).Get(ctx, "foo"); err != nil {
 		t.Fatal(err)
 	}
 
@@ -58,8 +53,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	time.Sleep(1 * time.Second) // give enough time for the operation
 
 	// update to "foo" will not be replicated to the third member due to the partition
-	_, err = cli.Put(ctx, "foo", "buzz")
-	if err != nil {
+	if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil {
 		t.Fatal(err)
 	}
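
util_test.go covers the complementary path: instead of failing, the violation callback can switch the client to the full endpoint list so a retry reaches an up-to-date member, which is presumably why the new code still builds the eps slice. A hedged sketch of such a callback, under the same ordering.NewKV assumption as above (the package's own NewOrderViolationSwitchEndpointClosure helper is presumably the real counterpart exercised here):

	// On a detected revision regression, re-point the client at every member
	// so the next attempt can be served by one that has the newer revision.
	resolve := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
		cli.SetEndpoints(eps...)
		return nil
	}
	orderKV := ordering.NewKV(cli.KV, resolve)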