Bläddra i källkod

integration: test 'inflight' range requests

- Test https://github.com/coreos/etcd/issues/7322.
- Remove test case added in https://github.com/coreos/etcd/pull/6662.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
Gyu-Ho Lee 8 år sedan
förälder
incheckning
472a536052
2 ändrade filer med 36 tillägg och 21 borttagningar
  1. 1 1
      integration/cluster.go
  2. 35 20
      integration/v3_grpc_inflight_test.go

+ 1 - 1
integration/cluster.go

@@ -719,7 +719,7 @@ func (m *member) Close() {
 		m.serverClient = nil
 	}
 	if m.grpcServer != nil {
-		m.grpcServer.Stop()
+		m.grpcServer.GracefulStop()
 		m.grpcServer = nil
 	}
 	m.s.HardStop()

+ 35 - 20
integration/v3_maintenance_test.go → integration/v3_grpc_inflight_test.go

@@ -15,63 +15,78 @@
 package integration
 
 import (
+	"sync"
 	"testing"
 	"time"
 
-	"google.golang.org/grpc"
-
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/testutil"
+
 	"golang.org/x/net/context"
+	"google.golang.org/grpc"
 )
 
-// TestV3MaintenanceHashInflight ensures inflight Hash call
-// to embedded being-stopped EtcdServer does not trigger panic.
-func TestV3MaintenanceHashInflight(t *testing.T) {
+// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
+// do not panic the mvcc backend while defragment is running.
+func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
-	mvc := toGRPC(cli).Maintenance
+	kvc := toGRPC(cli).KV
+	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
+		t.Fatal(err)
+	}
+
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 
 	donec := make(chan struct{})
 	go func() {
 		defer close(donec)
-		mvc.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
+		kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
 	}()
 
-	clus.Members[0].s.HardStop()
+	mvc := toGRPC(cli).Maintenance
+	mvc.Defragment(context.Background(), &pb.DefragmentRequest{})
 	cancel()
 
 	<-donec
 }
 
-// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests
-// does not panic the mvcc backend while defragment is running.
-func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
+// TestV3KVInflightRangeRequests ensures that inflight requests
+// (sent before server shutdown) are gracefully handled by server-side.
+// They are either finished or canceled, but never crash the backend.
+// See https://github.com/coreos/etcd/issues/7322 for more detail.
+func TestV3KVInflightRangeRequests(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.RandClient()
 	kvc := toGRPC(cli).KV
+
 	if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
 		t.Fatal(err)
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
 
-	donec := make(chan struct{})
-	go func() {
-		defer close(donec)
-		kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
-	}()
+	reqN := 10 // use 500+ for fast machines
+	var wg sync.WaitGroup
+	wg.Add(reqN)
+	for i := 0; i < reqN; i++ {
+		go func() {
+			defer wg.Done()
+			_, err := kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo"), Serializable: true}, grpc.FailFast(false))
+			if err != nil && grpc.ErrorDesc(err) != context.Canceled.Error() {
+			t.Fatalf("inflight request should be canceled with %v, got %v", context.Canceled, err)
+			}
+		}()
+	}
 
-	mvc := toGRPC(cli).Maintenance
-	mvc.Defragment(context.Background(), &pb.DefragmentRequest{})
+	clus.Members[0].Stop(t)
 	cancel()
 
-	<-donec
+	wg.Wait()
 }