
integration: test with new server errors

Gyu-Ho Lee, 9 years ago
commit ec1fdd3938

+ 22 - 10
integration/cluster.go

@@ -196,8 +196,13 @@ func (c *cluster) HTTPMembers() []client.Member {
 }
 
 func (c *cluster) mustNewMember(t *testing.T) *member {
-	name := c.name(rand.Int())
-	m := mustNewMember(t, name, c.cfg.PeerTLS, c.cfg.ClientTLS, c.cfg.QuotaBackendBytes)
+	m := mustNewMember(t,
+		memberConfig{
+			name:              c.name(rand.Int()),
+			peerTLS:           c.cfg.PeerTLS,
+			clientTLS:         c.cfg.ClientTLS,
+			quotaBackendBytes: c.cfg.QuotaBackendBytes,
+		})
 	m.DiscoveryURL = c.cfg.DiscoveryURL
 	if c.cfg.UseGRPC {
 		if err := m.listenGRPC(); err != nil {
@@ -416,17 +421,24 @@ type member struct {
 	grpcAddr   string
 }
 
+type memberConfig struct {
+	name              string
+	peerTLS           *transport.TLSInfo
+	clientTLS         *transport.TLSInfo
+	quotaBackendBytes int64
+}
+
 // mustNewMember return an inited member with the given name. If peerTLS is
 // set, it will use https scheme to communicate between peers.
-func mustNewMember(t *testing.T, name string, peerTLS *transport.TLSInfo, clientTLS *transport.TLSInfo, quotaBackendBytes int64) *member {
+func mustNewMember(t *testing.T, mcfg memberConfig) *member {
 	var err error
 	m := &member{}
 
 	peerScheme, clientScheme := "http", "http"
-	if peerTLS != nil {
+	if mcfg.peerTLS != nil {
 		peerScheme = "https"
 	}
-	if clientTLS != nil {
+	if mcfg.clientTLS != nil {
 		clientScheme = "https"
 	}
 
@@ -436,7 +448,7 @@ func mustNewMember(t *testing.T, name string, peerTLS *transport.TLSInfo, client
 	if err != nil {
 		t.Fatal(err)
 	}
-	m.PeerTLSInfo = peerTLS
+	m.PeerTLSInfo = mcfg.peerTLS
 
 	cln := newLocalListener(t)
 	m.ClientListeners = []net.Listener{cln}
@@ -444,15 +456,15 @@ func mustNewMember(t *testing.T, name string, peerTLS *transport.TLSInfo, client
 	if err != nil {
 		t.Fatal(err)
 	}
-	m.ClientTLSInfo = clientTLS
+	m.ClientTLSInfo = mcfg.clientTLS
 
-	m.Name = name
+	m.Name = mcfg.name
 
 	m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
 	if err != nil {
 		t.Fatal(err)
 	}
-	clusterStr := fmt.Sprintf("%s=%s://%s", name, peerScheme, pln.Addr().String())
+	clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String())
 	m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
 	if err != nil {
 		t.Fatal(err)
@@ -465,7 +477,7 @@ func mustNewMember(t *testing.T, name string, peerTLS *transport.TLSInfo, client
 	}
 	m.ElectionTicks = electionTicks
 	m.TickMs = uint(tickDuration / time.Millisecond)
-	m.QuotaBackendBytes = quotaBackendBytes
+	m.QuotaBackendBytes = mcfg.quotaBackendBytes
 	return m
 }
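
For context, a minimal sketch of how a test might use the new memberConfig struct to set a backend quota; the test name and quota value here are illustrative and not part of this commit:

	// Hypothetical test: only the fields of interest are set; peerTLS and
	// clientTLS stay nil, so the member uses plain HTTP, mirroring the
	// member_test.go call sites below.
	func TestMemberQuotaSketch(t *testing.T) {
		defer testutil.AfterTest(t)
		m := mustNewMember(t, memberConfig{
			name:              "quotaSketch",
			quotaBackendBytes: 64 * 1024,
		})
		m.Launch()
		defer m.Terminate(t)
	}

With the struct, call sites that only need a name can leave every other field zero-valued instead of passing a trail of nil and 0 arguments.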
 

+ 1 - 1
integration/member_test.go

@@ -84,7 +84,7 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
 
 func TestSnapshotAndRestartMember(t *testing.T) {
 	defer testutil.AfterTest(t)
-	m := mustNewMember(t, "snapAndRestartTest", nil, nil)
+	m := mustNewMember(t, memberConfig{name: "snapAndRestartTest"})
 	m.SnapCount = 100
 	m.Launch()
 	defer m.Terminate(t)

+ 1 - 1
integration/migration_test.go

@@ -23,7 +23,7 @@ import (
 
 func TestUpgradeMember(t *testing.T) {
 	defer testutil.AfterTest(t)
-	m := mustNewMember(t, "integration046", nil, nil)
+	m := mustNewMember(t, memberConfig{name: "integration046"})
 	cmd := exec.Command("cp", "-r", "testdata/integration046_data/conf", "testdata/integration046_data/log", "testdata/integration046_data/snapshot", m.DataDir)
 	err := cmd.Run()
 	if err != nil {

+ 17 - 17
integration/v3_grpc_test.go

@@ -193,8 +193,8 @@ func TestV3TxnTooManyOps(t *testing.T) {
 		}
 
 		_, err := kvc.Txn(context.Background(), txn)
-		if err != rpctypes.ErrTooManyOps {
-			t.Errorf("#%d: err = %v, want %v", i, err, rpctypes.ErrTooManyOps)
+		if err != rpctypes.ErrGRPCTooManyOps {
+			t.Errorf("#%d: err = %v, want %v", i, err, rpctypes.ErrGRPCTooManyOps)
 		}
 	}
 }
@@ -233,17 +233,17 @@ func TestV3TxnDuplicateKeys(t *testing.T) {
 		{
 			txnSuccess: []*pb.RequestUnion{putreq, putreq},
 
-			werr: rpctypes.ErrDuplicateKey,
+			werr: rpctypes.ErrGRPCDuplicateKey,
 		},
 		{
 			txnSuccess: []*pb.RequestUnion{putreq, delKeyReq},
 
-			werr: rpctypes.ErrDuplicateKey,
+			werr: rpctypes.ErrGRPCDuplicateKey,
 		},
 		{
 			txnSuccess: []*pb.RequestUnion{putreq, delInRangeReq},
 
-			werr: rpctypes.ErrDuplicateKey,
+			werr: rpctypes.ErrGRPCDuplicateKey,
 		},
 		{
 			txnSuccess: []*pb.RequestUnion{delKeyReq, delInRangeReq, delKeyReq, delInRangeReq},
@@ -502,15 +502,15 @@ func TestV3TxnInvaildRange(t *testing.T) {
 		Request: &pb.RequestUnion_RequestRange{
 			RequestRange: rreq}})
 
-	if _, err := kvc.Txn(context.TODO(), txn); err != rpctypes.ErrFutureRev {
-		t.Errorf("err = %v, want %v", err, rpctypes.ErrFutureRev)
+	if _, err := kvc.Txn(context.TODO(), txn); err != rpctypes.ErrGRPCFutureRev {
+		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCFutureRev)
 	}
 
 	// compacted rev
 	tv, _ := txn.Success[1].Request.(*pb.RequestUnion_RequestRange)
 	tv.RequestRange.Revision = 1
-	if _, err := kvc.Txn(context.TODO(), txn); err != rpctypes.ErrCompacted {
-		t.Errorf("err = %v, want %v", err, rpctypes.ErrCompacted)
+	if _, err := kvc.Txn(context.TODO(), txn); err != rpctypes.ErrGRPCCompacted {
+		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCCompacted)
 	}
 }
 
@@ -527,8 +527,8 @@ func TestV3TooLargeRequest(t *testing.T) {
 	preq := &pb.PutRequest{Key: []byte("foo"), Value: largeV}
 
 	_, err := kvc.Put(context.Background(), preq)
-	if err != rpctypes.ErrRequestTooLarge {
-		t.Errorf("err = %v, want %v", err, rpctypes.ErrRequestTooLarge)
+	if err != rpctypes.ErrGRPCRequestTooLarge {
+		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCRequestTooLarge)
 	}
 }
 
@@ -581,8 +581,8 @@ func TestV3StorageQuotaAPI(t *testing.T) {
 	// test big put
 	bigbuf := make([]byte, 64*1024)
 	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
-	if err == nil || err != rpctypes.ErrNoSpace {
-		t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrNoSpace)
+	if err == nil || err != rpctypes.ErrGRPCNoSpace {
+		t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
 	}
 
 	// test big txn
@@ -597,8 +597,8 @@ func TestV3StorageQuotaAPI(t *testing.T) {
 	txnreq := &pb.TxnRequest{}
 	txnreq.Success = append(txnreq.Success, puttxn)
 	_, txnerr := kvc.Txn(context.TODO(), txnreq)
-	if txnerr == nil || err != rpctypes.ErrNoSpace {
-		t.Fatalf("big txn got %v, expected %v", err, rpctypes.ErrNoSpace)
+	if txnerr == nil || err != rpctypes.ErrGRPCNoSpace {
+		t.Fatalf("big txn got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
 	}
 }
 
@@ -696,8 +696,8 @@ func TestV3AlarmDeactivate(t *testing.T) {
 	key := []byte("abc")
 	smallbuf := make([]byte, 512)
 	_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
-	if err == nil && err != rpctypes.ErrNoSpace {
-		t.Fatalf("put got %v, expected %v", err, rpctypes.ErrNoSpace)
+	if err == nil && err != rpctypes.ErrGRPCNoSpace {
+		t.Fatalf("put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
 	}
 
 	alarmReq.Action = pb.AlarmRequest_DEACTIVATE
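
The error assertions above all share the same shape after the rename to the ErrGRPC* variables; a minimal sketch of that pattern as a standalone helper (the helper name, key, and buffer size are assumptions, not part of this commit):

	// Hypothetical helper: issue a put that exceeds the backend quota and
	// require the gRPC-facing no-space error, as the tests above now do.
	func expectNoSpace(t *testing.T, kvc pb.KVClient) {
		bigbuf := make([]byte, 64*1024)
		_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: bigbuf})
		if err != rpctypes.ErrGRPCNoSpace {
			t.Fatalf("put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
		}
	}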

+ 4 - 4
integration/v3_lease_test.go

@@ -106,7 +106,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
 	lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
 		context.TODO(),
 		&pb.LeaseGrantRequest{ID: 1, TTL: 1})
-	if err != rpctypes.ErrLeaseExist {
+	if err != rpctypes.ErrGRPCLeaseExist {
 		t.Error(err)
 	}
 
@@ -242,8 +242,8 @@ func TestV3PutOnNonExistLease(t *testing.T) {
 	badLeaseID := int64(0x12345678)
 	putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID}
 	_, err := toGRPC(clus.RandClient()).KV.Put(ctx, putr)
-	if err != rpctypes.ErrLeaseNotFound {
-		t.Errorf("err = %v, want %v", err, rpctypes.ErrCompacted)
+	if err != rpctypes.ErrGRPCLeaseNotFound {
+		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCCompacted)
 	}
 }
 
@@ -424,7 +424,7 @@ func leaseExist(t *testing.T, clus *ClusterV3, leaseID int64) bool {
 		return false
 	}
 
-	if err == rpctypes.ErrLeaseExist {
+	if err == rpctypes.ErrGRPCLeaseExist {
 		return true
 	}
 	t.Fatalf("unexpecter error %v", err)