
integration: add constant RequestWaitTimeout.
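This commit replaces the hard-coded 3 * time.Second timeout scattered across the clientv3 integration tests with a single exported constant, integration.RequestWaitTimeout, defined in integration/cluster.go.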

Sam Batschelet · 8 years ago · commit d21fef2d41

+ 1 - 1
clientv3/integration/black_hole_test.go

@@ -54,7 +54,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
 	// TODO: only send healthy endpoint to gRPC so gRPC wont waste time to
 	// dial for unhealthy endpoint.
 	// then we can reduce 3s to 1s.
-	timeout := pingInterval + 3*time.Second
+	timeout := pingInterval + integration.RequestWaitTimeout
 
 	cli, err := clientv3.New(ccfg)
 	if err != nil {

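Note that the blackhole test composes the constant with pingInterval rather than replacing the whole expression, so the keepalive wait keeps its original shape; only the 3-second literal moves into the shared constant.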
+ 1 - 1
clientv3/integration/dial_test.go

@@ -121,7 +121,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 	if !setBefore {
 		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
 	}
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout)
 	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
 		t.Fatal(err)
 	}

+ 2 - 2
clientv3/integration/kv_test.go

@@ -453,7 +453,7 @@ func TestKVGetErrConnClosed(t *testing.T) {
 	clus.TakeClient(0)
 
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("kv.Get took too long")
 		t.Fatal("kv.Get took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}
@@ -480,7 +480,7 @@ func TestKVNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("kv.Get took too long")
 		t.Fatal("kv.Get took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}

+ 3 - 3
clientv3/integration/lease_test.go

@@ -299,7 +299,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
 	}
 
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("le.Grant took too long")
 		t.Fatal("le.Grant took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}
@@ -325,7 +325,7 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("le.Grant took too long")
 		t.Fatal("le.Grant took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}
@@ -357,7 +357,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("le.Revoke took too long")
 		t.Fatal("le.Revoke took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}

+ 2 - 2
clientv3/integration/network_partition_test.go

@@ -234,7 +234,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 		t.Fatal("took too long to create watch")
 	}
 	}
 
 
@@ -252,7 +252,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 		if err = ev.Err(); err != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
 		}
-	case <-time.After(3 * time.Second): // enough time to detect leader lost
+	case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost
 		t.Fatal("took too long to detect leader lost")
 		t.Fatal("took too long to detect leader lost")
 	}
 	}
 }
 }

+ 2 - 2
clientv3/integration/server_shutdown_test.go

@@ -63,7 +63,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 		t.Fatal("took too long to create watch")
 	}
 	}
 
 
@@ -348,7 +348,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	clus.Members[target].Restart(t)
 
 	select {
-	case <-time.After(clientTimeout + 3*time.Second):
+	case <-time.After(clientTimeout + integration.RequestWaitTimeout):
 		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
 		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
 	case <-donec:
 	case <-donec:
 	}
 	}

+ 4 - 4
clientv3/integration/watch_test.go

@@ -678,7 +678,7 @@ func TestWatchErrConnClosed(t *testing.T) {
 	clus.TakeClient(0)
 
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("wc.Watch took too long")
 		t.Fatal("wc.Watch took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}
@@ -705,7 +705,7 @@ func TestWatchAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("wc.Watch took too long")
 		t.Fatal("wc.Watch took too long")
 	case <-donec:
 	case <-donec:
 	}
 	}
@@ -751,7 +751,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 		if resp.Err() != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
 		}
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("watch without leader took too long to close")
 		t.Fatal("watch without leader took too long to close")
 	}
 	}
 
 
@@ -760,7 +760,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 		if ok {
 			t.Fatalf("expected closed channel, got response %v", resp)
 		}
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("waited too long for channel to close")
 		t.Fatal("waited too long for channel to close")
 	}
 	}
 
 

+ 5 - 3
integration/cluster.go

@@ -58,10 +58,12 @@ import (
 )
 
 const (
-	tickDuration   = 10 * time.Millisecond
-	clusterName    = "etcd"
-	requestTimeout = 20 * time.Second
+	// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
+	RequestWaitTimeout = 3 * time.Second
+	tickDuration       = 10 * time.Millisecond
+	requestTimeout     = 20 * time.Second
 
+	clusterName  = "etcd"
 	basePort     = 21000
 	UrlScheme    = "unix"
 	UrlSchemeTLS = "unixs"