@@ -438,12 +438,15 @@ func TestKVGetErrConnClosed(t *testing.T) {
 
 	cli := clus.Client(0)
 
+	// TODO: Remove wait once the new grpc load balancer provides retry.
+	integration.WaitClientV3(t, cli)
+
 	donec := make(chan struct{})
 	go func() {
 		defer close(donec)
 		_, err := cli.Get(context.TODO(), "foo")
-		if err != nil && err != context.Canceled && err != grpc.ErrClientConnClosing {
-			t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
+		if err != nil && err != context.Canceled && err != grpc.ErrClientConnClosing && !isServerUnavailable(err) {
+			t.Fatalf("expected %v, %v or server unavailable, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
 		}
 	}()
 
@@ -686,6 +689,8 @@ func TestKVGetRetry(t *testing.T) {
 
 	donec := make(chan struct{})
 	go func() {
+		// TODO: Remove wait once the new grpc load balancer provides retry.
+		integration.WaitClientV3(t, kv)
 		// Get will fail, but reconnect will trigger
 		gresp, gerr := kv.Get(ctx, "foo")
 		if gerr != nil {
@@ -711,7 +716,7 @@ func TestKVGetRetry(t *testing.T) {
 	clus.Members[fIdx].WaitOK(t)
 
 	select {
-	case <-time.After(5 * time.Second):
+	case <-time.After(20 * time.Second):
 		t.Fatalf("timed out waiting for get")
 	case <-donec:
 	}
@@ -736,6 +741,8 @@ func TestKVPutFailGetRetry(t *testing.T) {
 
 	donec := make(chan struct{})
 	go func() {
+		// TODO: Remove wait once the new grpc load balancer provides retry.
+		integration.WaitClientV3(t, kv)
 		// Get will fail, but reconnect will trigger
 		gresp, gerr := kv.Get(context.TODO(), "foo")
 		if gerr != nil {
@@ -751,7 +758,7 @@ func TestKVPutFailGetRetry(t *testing.T) {
 	clus.Members[0].Restart(t)
 
 	select {
-	case <-time.After(5 * time.Second):
+	case <-time.After(20 * time.Second):
 		t.Fatalf("timed out waiting for get")
 	case <-donec:
 	}
@@ -793,7 +800,7 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {
 	// this Get fails and triggers an asynchronous connection retry
 	_, err := cli.Get(ctx, "abc")
 	cancel()
-	if err != nil && !isServerUnavailable(err) {
+	if err != nil && !(isServerUnavailable(err) || isCanceled(err) || isClientTimeout(err)) {
 		t.Fatal(err)
 	}
 }
@@ -815,15 +822,15 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
 	// grpc finds out the original connection is down due to the member shutdown.
 	_, err := cli.Get(ctx, "abc")
 	cancel()
-	if err != nil && !isServerUnavailable(err) {
+	if err != nil && !(isServerUnavailable(err) || isCanceled(err) || isClientTimeout(err)) {
 		t.Fatal(err)
 	}
 
-	ctx, cancel = context.WithTimeout(context.TODO(), time.Second) // TODO: How was this test not consistently failing with context canceled errors?
+	ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
 	// this Put fails and triggers an asynchronous connection retry
 	_, err = cli.Put(ctx, "abc", "123")
 	cancel()
-	if err != nil && !isServerUnavailable(err) {
+	if err != nil && !(isServerUnavailable(err) || isCanceled(err) || isClientTimeout(err)) {
 		t.Fatal(err)
 	}
 }
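
The hunks above lean on helpers that are defined outside this diff: integration.WaitClientV3 and the error predicates isServerUnavailable, isCanceled, and isClientTimeout. The names come from the diff itself, but their bodies are not shown here; the following is a minimal sketch of plausible implementations, assuming the predicates only inspect gRPC status codes and plain context errors, and that WaitClientV3 simply polls the cluster until a request succeeds. The real helpers in the integration test package may differ.

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isCanceled reports whether err looks like a canceled request, either as a
// plain context error or as a gRPC status. Sketch only; assumed implementation.
func isCanceled(err error) bool {
	if err == context.Canceled {
		return true
	}
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.Canceled
}

// isClientTimeout reports whether err looks like a client-side deadline expiry.
func isClientTimeout(err error) bool {
	if err == context.DeadlineExceeded {
		return true
	}
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.DeadlineExceeded
}

// isServerUnavailable reports whether err maps to the gRPC Unavailable code,
// e.g. when the member the client is pinned to has been stopped.
func isServerUnavailable(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.Unavailable
}

// WaitClientV3 is a stand-in for the helper the TODOs reference: it retries a
// Get through the client until one succeeds, so the test does not race the
// balancer while it settles on a healthy endpoint.
func WaitClientV3(t *testing.T, kv clientv3.KV) {
	deadline := time.Now().Add(30 * time.Second)
	var err error
	for time.Now().Before(deadline) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		_, err = kv.Get(ctx, "/")
		cancel()
		if err == nil {
			return
		}
		time.Sleep(time.Second)
	}
	t.Fatalf("wait failed: %v", err)
}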