
functional/tester: add "Checker", remove compositeChecker

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
Gyuho Lee
commit e9c4bad2d1

+ 5 - 2
functional.yaml

@@ -157,7 +157,6 @@ tester-config:
 
   round-limit: 1
   exit-on-failure: true
-  consistency-check: true
   enable-pprof: true
 
   case-delay-ms: 7000
@@ -205,7 +204,7 @@ tester-config:
   runner-exec-path: ./bin/etcd-runner
   external-exec-path: ""
 
-  stress-types:
+  stressers:
   - KV
   - LEASE
   # - ELECTION_RUNNER
@@ -213,6 +212,10 @@ tester-config:
   # - LOCK_RACER_RUNNER
   # - LEASE_RUNNER
 
+  checkers:
+  - KV_HASH
+  - LEASE_EXPIRE
+
   stress-key-size: 100
   stress-key-size-large: 32769
   stress-key-suffix-range: 250000

+ 25 - 0
functional/tester/checker.go

@@ -0,0 +1,25 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import "github.com/coreos/etcd/functional/rpcpb"
+
+// Checker checks cluster consistency.
+type Checker interface {
+	// Type returns the checker type.
+	Type() rpcpb.Checker
+	// Check returns an error if the system fails a consistency check.
+	Check() error
+}
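
Note: with compositeChecker removed in this commit, checkers no longer compose; the tester simply iterates a []Checker (see runCheckers in cluster.go below). A minimal sketch of that consumption pattern; the helper name runAll is hypothetical and assumes "fmt" is imported:

	// runAll shows how the new interface is consumed: run each checker
	// in turn and stop at the first reported inconsistency.
	func runAll(checkers []Checker) error {
		for _, chk := range checkers {
			if err := chk.Check(); err != nil {
				return fmt.Errorf("%s failed (%v)", chk.Type().String(), err)
			}
		}
		return nil
	}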

+ 91 - 0
functional/tester/checker_kv_hash.go

@@ -0,0 +1,91 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/coreos/etcd/functional/rpcpb"
+
+	"go.uber.org/zap"
+)
+
+const retries = 7
+
+type hashRevGetter interface {
+	getRevisionHash() (revs map[string]int64, hashes map[string]int64, err error)
+}
+
+type kvHashChecker struct {
+	ctype rpcpb.Checker
+	lg    *zap.Logger
+	hrg   hashRevGetter
+}
+
+func newKVHashChecker(lg *zap.Logger, hrg hashRevGetter) Checker {
+	return &kvHashChecker{
+		ctype: rpcpb.Checker_KV_HASH,
+		lg:    lg,
+		hrg:   hrg,
+	}
+}
+
+func (hc *kvHashChecker) checkRevAndHashes() (err error) {
+	var (
+		revs   map[string]int64
+		hashes map[string]int64
+	)
+	// retry in case of transient failures or if the etcd cluster has not yet stabilized.
+	for i := 0; i < retries; i++ {
+		revs, hashes, err = hc.hrg.getRevisionHash()
+		if err != nil {
+			hc.lg.Warn(
+				"failed to get revision and hash",
+				zap.Int("retries", i),
+				zap.Error(err),
+			)
+		} else {
+			sameRev := getSameValue(revs)
+			sameHashes := getSameValue(hashes)
+			if sameRev && sameHashes {
+				return nil
+			}
+			hc.lg.Warn(
+				"retrying; etcd cluster is not stable",
+				zap.Int("retries", i),
+				zap.Bool("same-revisions", sameRev),
+				zap.Bool("same-hashes", sameHashes),
+				zap.String("revisions", fmt.Sprintf("%+v", revs)),
+				zap.String("hashes", fmt.Sprintf("%+v", hashes)),
+			)
+		}
+		time.Sleep(time.Second)
+	}
+
+	if err != nil {
+		return fmt.Errorf("failed revision and hash check (%v)", err)
+	}
+
+	return fmt.Errorf("etcd cluster is not stable: [revisions: %v] and [hashes: %v]", revs, hashes)
+}
+
+func (hc *kvHashChecker) Type() rpcpb.Checker {
+	return hc.ctype
+}
+
+func (hc *kvHashChecker) Check() error {
+	return hc.checkRevAndHashes()
+}
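
Note: getSameValue is used above but not defined in this diff; it presumably reports whether all members returned identical values. A sketch under that assumption:

	// getSameValue (assumed semantics): true when every value in the map is equal.
	func getSameValue(vals map[string]int64) bool {
		var prev int64
		first := true
		for _, v := range vals {
			if first {
				prev, first = v, false
				continue
			}
			if v != prev {
				return false
			}
		}
		return true
	}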

+ 25 - 117
functional/tester/checks.go → functional/tester/checker_lease_expire.go

@@ -27,83 +27,30 @@ import (
 	"google.golang.org/grpc"
 )
 
-const retries = 7
-
-// Checker checks cluster consistency.
-type Checker interface {
-	// Check returns an error if the system fails a consistency check.
-	Check() error
-}
-
-type hashAndRevGetter interface {
-	getRevisionHash() (revs map[string]int64, hashes map[string]int64, err error)
+type leaseExpireChecker struct {
+	ctype rpcpb.Checker
+	lg    *zap.Logger
+	m     *rpcpb.Member
+	ls    *leaseStresser
+	cli   *clientv3.Client
 }
 
-type hashChecker struct {
-	lg  *zap.Logger
-	hrg hashAndRevGetter
-}
-
-func newHashChecker(lg *zap.Logger, hrg hashAndRevGetter) Checker {
-	return &hashChecker{
-		lg:  lg,
-		hrg: hrg,
+func newLeaseExpireChecker(ls *leaseStresser) Checker {
+	return &leaseExpireChecker{
+		ctype: rpcpb.Checker_LEASE_EXPIRE,
+		lg:    ls.lg,
+		m:     ls.m,
+		ls:    ls,
 	}
 }
 
-const leaseCheckerTimeout = 10 * time.Second
-
-func (hc *hashChecker) checkRevAndHashes() (err error) {
-	var (
-		revs   map[string]int64
-		hashes map[string]int64
-	)
-	// retries in case of transient failure or etcd cluster has not stablized yet.
-	for i := 0; i < retries; i++ {
-		revs, hashes, err = hc.hrg.getRevisionHash()
-		if err != nil {
-			hc.lg.Warn(
-				"failed to get revision and hash",
-				zap.Int("retries", i),
-				zap.Error(err),
-			)
-		} else {
-			sameRev := getSameValue(revs)
-			sameHashes := getSameValue(hashes)
-			if sameRev && sameHashes {
-				return nil
-			}
-			hc.lg.Warn(
-				"retrying; etcd cluster is not stable",
-				zap.Int("retries", i),
-				zap.Bool("same-revisions", sameRev),
-				zap.Bool("same-hashes", sameHashes),
-				zap.String("revisions", fmt.Sprintf("%+v", revs)),
-				zap.String("hashes", fmt.Sprintf("%+v", hashes)),
-			)
-		}
-		time.Sleep(time.Second)
-	}
-
-	if err != nil {
-		return fmt.Errorf("failed revision and hash check (%v)", err)
-	}
-
-	return fmt.Errorf("etcd cluster is not stable: [revisions: %v] and [hashes: %v]", revs, hashes)
-}
+const leaseExpireCheckerTimeout = 10 * time.Second
 
-func (hc *hashChecker) Check() error {
-	return hc.checkRevAndHashes()
+func (lc *leaseExpireChecker) Type() rpcpb.Checker {
+	return lc.ctype
 }
 
-type leaseChecker struct {
-	lg  *zap.Logger
-	m   *rpcpb.Member
-	ls  *leaseStresser
-	cli *clientv3.Client
-}
-
-func (lc *leaseChecker) Check() error {
+func (lc *leaseExpireChecker) Check() error {
 	if lc.ls == nil {
 		return nil
 	}
@@ -135,8 +82,8 @@ func (lc *leaseChecker) Check() error {
 }
 
 // checkShortLivedLeases ensures leases expire.
-func (lc *leaseChecker) checkShortLivedLeases() error {
-	ctx, cancel := context.WithTimeout(context.Background(), leaseCheckerTimeout)
+func (lc *leaseExpireChecker) checkShortLivedLeases() error {
+	ctx, cancel := context.WithTimeout(context.Background(), leaseExpireCheckerTimeout)
 	errc := make(chan error)
 	defer cancel()
 	for leaseID := range lc.ls.shortLivedLeases.leases {
@@ -154,7 +101,7 @@ func (lc *leaseChecker) checkShortLivedLeases() error {
 	return errsToError(errs)
 }
 
-func (lc *leaseChecker) checkShortLivedLease(ctx context.Context, leaseID int64) (err error) {
+func (lc *leaseExpireChecker) checkShortLivedLease(ctx context.Context, leaseID int64) (err error) {
 	// retry in case of a transient failure, or if the lease has expired but is not yet
 	// revoked because the etcd cluster has not had enough time to delete it.
 	var resp *clientv3.LeaseTimeToLiveResponse
 	for i := 0; i < retries; i++ {
@@ -199,7 +146,7 @@ func (lc *leaseChecker) checkShortLivedLease(ctx context.Context, leaseID int64)
 	return err
 }
 
-func (lc *leaseChecker) checkLease(ctx context.Context, expired bool, leaseID int64) error {
+func (lc *leaseExpireChecker) checkLease(ctx context.Context, expired bool, leaseID int64) error {
 	keysExpired, err := lc.hasKeysAttachedToLeaseExpired(ctx, leaseID)
 	if err != nil {
 		lc.lg.Warn(
@@ -227,8 +174,8 @@ func (lc *leaseChecker) checkLease(ctx context.Context, expired bool, leaseID in
 	return nil
 }
 
-func (lc *leaseChecker) check(expired bool, leases map[int64]time.Time) error {
-	ctx, cancel := context.WithTimeout(context.Background(), leaseCheckerTimeout)
+func (lc *leaseExpireChecker) check(expired bool, leases map[int64]time.Time) error {
+	ctx, cancel := context.WithTimeout(context.Background(), leaseExpireCheckerTimeout)
 	defer cancel()
 	for leaseID := range leases {
 		if err := lc.checkLease(ctx, expired, leaseID); err != nil {
@@ -239,7 +186,7 @@ func (lc *leaseChecker) check(expired bool, leases map[int64]time.Time) error {
 }
 
 // TODO: handle failures from "grpc.FailFast(false)"
-func (lc *leaseChecker) getLeaseByID(ctx context.Context, leaseID int64) (*clientv3.LeaseTimeToLiveResponse, error) {
+func (lc *leaseExpireChecker) getLeaseByID(ctx context.Context, leaseID int64) (*clientv3.LeaseTimeToLiveResponse, error) {
 	return lc.cli.TimeToLive(
 		ctx,
 		clientv3.LeaseID(leaseID),
@@ -247,7 +194,7 @@ func (lc *leaseChecker) getLeaseByID(ctx context.Context, leaseID int64) (*clien
 	)
 }
 
-func (lc *leaseChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
+func (lc *leaseExpireChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
 	// keep retrying until lease's state is known or ctx is being canceled
 	for ctx.Err() == nil {
 		resp, err := lc.getLeaseByID(ctx, leaseID)
@@ -272,7 +219,7 @@ func (lc *leaseChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (boo
 // The keys attached to the lease have the format "<leaseID>_<idx>", where idx reflects the order of key creation.
 // Since the key format contains the leaseID, querying by the "<leaseID>" prefix
 // determines whether the keys attached to a given leaseID have been deleted.
-func (lc *leaseChecker) hasKeysAttachedToLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
+func (lc *leaseExpireChecker) hasKeysAttachedToLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
 	resp, err := lc.cli.Get(ctx, fmt.Sprintf("%d", leaseID), clientv3.WithPrefix())
 	if err != nil {
 		lc.lg.Warn(
@@ -285,42 +232,3 @@ func (lc *leaseChecker) hasKeysAttachedToLeaseExpired(ctx context.Context, lease
 	}
 	return len(resp.Kvs) == 0, nil
 }
-
-// compositeChecker implements a checker that runs a slice of Checkers concurrently.
-type compositeChecker struct{ checkers []Checker }
-
-func newCompositeChecker(checkers []Checker) Checker {
-	return &compositeChecker{checkers}
-}
-
-func (cchecker *compositeChecker) Check() error {
-	errc := make(chan error)
-	for _, c := range cchecker.checkers {
-		go func(chk Checker) { errc <- chk.Check() }(c)
-	}
-	var errs []error
-	for range cchecker.checkers {
-		if err := <-errc; err != nil {
-			errs = append(errs, err)
-		}
-	}
-	return errsToError(errs)
-}
-
-type runnerChecker struct {
-	errc chan error
-}
-
-func (rc *runnerChecker) Check() error {
-	select {
-	case err := <-rc.errc:
-		return err
-	default:
-		return nil
-	}
-}
-
-type noChecker struct{}
-
-func newNoChecker() Checker        { return &noChecker{} }
-func (nc *noChecker) Check() error { return nil }
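
Note: errsToError, called by checkShortLivedLeases above, is defined outside this diff; it presumably folds the collected errors into one (nil for an empty slice). A sketch under that assumption, using "errors" and "strings":

	// errsToError (assumed semantics): join all error messages into a single
	// error; nil when there are none.
	func errsToError(errs []error) error {
		if len(errs) == 0 {
			return nil
		}
		msgs := make([]string, 0, len(errs))
		for _, err := range errs {
			msgs = append(msgs, err.Error())
		}
		return errors.New(strings.Join(msgs, "; "))
	}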

+ 23 - 0
functional/tester/checker_no_check.go

@@ -0,0 +1,23 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import "github.com/coreos/etcd/functional/rpcpb"
+
+type noCheck struct{}
+
+func newNoChecker() Checker             { return &noCheck{} }
+func (nc *noCheck) Type() rpcpb.Checker { return rpcpb.Checker_NO_CHECK }
+func (nc *noCheck) Check() error        { return nil }

+ 42 - 0
functional/tester/checker_runner.go

@@ -0,0 +1,42 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import "github.com/coreos/etcd/functional/rpcpb"
+
+type runnerChecker struct {
+	ctype rpcpb.Checker
+	errc  chan error
+}
+
+func newRunnerChecker(errc chan error) Checker {
+	return &runnerChecker{
+		ctype: rpcpb.Checker_RUNNER,
+		errc:  errc,
+	}
+}
+
+func (rc *runnerChecker) Type() rpcpb.Checker {
+	return rc.ctype
+}
+
+func (rc *runnerChecker) Check() error {
+	select {
+	case err := <-rc.errc:
+		return err
+	default:
+		return nil
+	}
+}
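
The select with a default branch makes Check non-blocking: it reports a runner failure if one has been queued on errc, and otherwise passes. A hypothetical usage sketch, written as if inside the tester package with "errors" and "fmt" imported:

	errc := make(chan error, 1)
	chk := newRunnerChecker(errc)
	fmt.Println(chk.Check()) // <nil>: no runner failure reported yet
	errc <- errors.New("runner exited")
	fmt.Println(chk.Check()) // "runner exited"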

+ 49 - 22
functional/tester/cluster.go

@@ -56,7 +56,7 @@ type Cluster struct {
 
 	rateLimiter *rate.Limiter
 	stresser    Stresser
-	checker     Checker
+	checkers    []Checker
 
 	currentRevision int64
 	rd              int
@@ -118,7 +118,7 @@ func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
 		int(clus.Tester.StressQPS),
 	)
 
-	clus.updateStresserChecker()
+	clus.setStresserChecker()
 
 	return clus, nil
 }
@@ -274,26 +274,49 @@ func (clus *Cluster) UpdateDelayLatencyMs() {
 	}
 }
 
-func (clus *Cluster) updateStresserChecker() {
-	cs := &compositeStresser{}
+func (clus *Cluster) setStresserChecker() {
+	css := &compositeStresser{}
+	lss := []*leaseStresser{}
+	rss := []*runnerStresser{}
 	for _, m := range clus.Members {
-		cs.stressers = append(cs.stressers, newStresser(clus, m))
+		sss := newStresser(clus, m)
+		css.stressers = append(css.stressers, &compositeStresser{sss})
+		for _, s := range sss {
+			if v, ok := s.(*leaseStresser); ok {
+				lss = append(lss, v)
+				clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint))
+			}
+			if v, ok := s.(*runnerStresser); ok {
+				rss = append(rss, v)
+				clus.lg.Info("added runner stresser", zap.String("endpoint", m.EtcdClientEndpoint))
+			}
+		}
 	}
-	clus.stresser = cs
+	clus.stresser = css
+
+	for _, cs := range clus.Tester.Checkers {
+		switch cs {
+		case "KV_HASH":
+			clus.checkers = append(clus.checkers, newKVHashChecker(clus.lg, hashRevGetter(clus)))
 
-	if clus.Tester.ConsistencyCheck {
-		clus.checker = newHashChecker(clus.lg, hashAndRevGetter(clus))
-		if schk := cs.Checker(); schk != nil {
-			clus.checker = newCompositeChecker([]Checker{clus.checker, schk})
+		case "LEASE_EXPIRE":
+			for _, ls := range lss {
+				clus.checkers = append(clus.checkers, newLeaseExpireChecker(ls))
+			}
+
+		case "RUNNER":
+			for _, rs := range rss {
+				clus.checkers = append(clus.checkers, newRunnerChecker(rs.errc))
+			}
+
+		case "NO_CHECK":
+			clus.checkers = append(clus.checkers, newNoChecker())
 		}
-	} else {
-		clus.checker = newNoChecker()
 	}
-
 	clus.lg.Info("updated stressers")
 }
 
-func (clus *Cluster) checkConsistency() (err error) {
+func (clus *Cluster) runCheckers() (err error) {
 	defer func() {
 		if err != nil {
 			return
@@ -307,15 +330,19 @@ func (clus *Cluster) checkConsistency() (err error) {
 		}
 	}()
 
-	if err = clus.checker.Check(); err != nil {
-		clus.lg.Warn(
-			"consistency check FAIL",
-			zap.Int("round", clus.rd),
-			zap.Int("case", clus.cs),
-			zap.Error(err),
-		)
-		return err
+	for _, chk := range clus.checkers {
+		if err = chk.Check(); err != nil {
+			clus.lg.Warn(
+				"consistency check FAIL",
+				zap.String("checker", chk.Type().String()),
+				zap.Int("round", clus.rd),
+				zap.Int("case", clus.cs),
+				zap.Error(err),
+			)
+			return err
+		}
 	}
+
 	clus.lg.Info(
 		"consistency check ALL PASS",
 		zap.Int("round", clus.rd),

+ 8 - 3
functional/tester/cluster_read_config.go

@@ -336,9 +336,14 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
 		}
 	}
 
-	for _, v := range clus.Tester.StressTypes {
-		if _, ok := rpcpb.StressType_value[v]; !ok {
-			return nil, fmt.Errorf("StressType is unknown; got %q", v)
+	for _, v := range clus.Tester.Stressers {
+		if _, ok := rpcpb.Stresser_value[v]; !ok {
+			return nil, fmt.Errorf("Stresser is unknown; got %q", v)
+		}
+	}
+	for _, v := range clus.Tester.Checkers {
+		if _, ok := rpcpb.Checker_value[v]; !ok {
+			return nil, fmt.Errorf("Checker is unknown; got %q", v)
 		}
 	}
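
The validation above relies on the name-to-value maps that the protobuf compiler generates for each enum (a map[string]int32 keyed by enum names), so map membership doubles as "is this a known type". Illustrative shape only; the real map and its values live in rpcpb's generated code:

	var Checker_value = map[string]int32{
		"KV_HASH":      0,
		"LEASE_EXPIRE": 1,
		"RUNNER":       2,
		"NO_CHECK":     3,
	}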

+ 2 - 2
functional/tester/cluster_run.go

@@ -237,7 +237,7 @@ func (clus *Cluster) doRound() error {
 			zap.Int("case-total", len(clus.cases)),
 			zap.String("desc", fa.Desc()),
 		)
-		if err := clus.checkConsistency(); err != nil {
+		if err := clus.runCheckers(); err != nil {
 			return fmt.Errorf("consistency check error (%v)", err)
 		}
 
@@ -362,6 +362,6 @@ func (clus *Cluster) cleanup() error {
 		return err
 	}
 
-	clus.updateStresserChecker()
+	clus.setStresserChecker()
 	return nil
 }

+ 2 - 2
functional/tester/cluster_test.go

@@ -190,7 +190,6 @@ func Test_read(t *testing.T) {
 			UpdatedDelayLatencyMs: 5000,
 			RoundLimit:            1,
 			ExitOnCaseFail:        true,
-			ConsistencyCheck:      true,
 			EnablePprof:           true,
 			CaseDelayMs:           7000,
 			CaseShuffle:           true,
@@ -230,7 +229,8 @@
 			FailpointCommands:       []string{`panic("etcd-tester")`},
 			RunnerExecPath:          "./bin/etcd-runner",
 			ExternalExecPath:        "",
-			StressTypes:             []string{"KV", "LEASE"},
+			Stressers:               []string{"KV", "LEASE"},
+			Checkers:                []string{"KV_HASH", "LEASE_EXPIRE"},
 			StressKeySize:           100,
 			StressKeySizeLarge:      32769,
 			StressKeySuffixRange:    250000,

+ 10 - 12
functional/tester/stress.go → functional/tester/stresser.go

@@ -33,14 +33,12 @@ type Stresser interface {
 	Close() map[string]int
 	// ModifiedKeys reports the number of keys created and deleted by stresser
 	ModifiedKeys() int64
-	// Checker returns an invariant checker for after the stresser is canceled.
-	Checker() Checker
 }
 
 // newStresser creates stresser from a comma separated list of stresser types.
-func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
-	stressers := make([]Stresser, len(clus.Tester.StressTypes))
-	for i, stype := range clus.Tester.StressTypes {
+func newStresser(clus *Cluster, m *rpcpb.Member) (stressers []Stresser) {
+	stressers = make([]Stresser, len(clus.Tester.Stressers))
+	for i, stype := range clus.Tester.Stressers {
 		clus.lg.Info(
 			"creating stresser",
 			zap.String("type", stype),
@@ -52,7 +50,7 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 			// TODO: Too intensive stressing clients can panic etcd member with
 			// 'out of memory' error. Put rate limits in server side.
 			stressers[i] = &keyStresser{
-				stype:             rpcpb.StressType_KV,
+				stype:             rpcpb.Stresser_KV,
 				lg:                clus.lg,
 				m:                 m,
 				keySize:           int(clus.Tester.StressKeySize),
@@ -66,7 +64,7 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 
 		case "LEASE":
 			stressers[i] = &leaseStresser{
-				stype:        rpcpb.StressType_LEASE,
+				stype:        rpcpb.Stresser_LEASE,
 				lg:           clus.lg,
 				m:            m,
 				numLeases:    10, // TODO: configurable
@@ -86,7 +84,7 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 				"--req-rate", fmt.Sprintf("%v", reqRate),
 			}
 			stressers[i] = newRunnerStresser(
-				rpcpb.StressType_ELECTION_RUNNER,
+				rpcpb.Stresser_ELECTION_RUNNER,
 				clus.lg,
 				clus.Tester.RunnerExecPath,
 				args,
@@ -107,7 +105,7 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 				"--req-rate", fmt.Sprintf("%v", reqRate),
 			}
 			stressers[i] = newRunnerStresser(
-				rpcpb.StressType_WATCH_RUNNER,
+				rpcpb.Stresser_WATCH_RUNNER,
 				clus.lg,
 				clus.Tester.RunnerExecPath,
 				args,
@@ -126,7 +124,7 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 				"--req-rate", fmt.Sprintf("%v", reqRate),
 			}
 			stressers[i] = newRunnerStresser(
-				rpcpb.StressType_LOCK_RACER_RUNNER,
+				rpcpb.Stresser_LOCK_RACER_RUNNER,
 				clus.lg,
 				clus.Tester.RunnerExecPath,
 				args,
@@ -141,7 +139,7 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 				"--endpoints", m.EtcdClientEndpoint,
 			}
 			stressers[i] = newRunnerStresser(
-				rpcpb.StressType_LEASE_RUNNER,
+				rpcpb.Stresser_LEASE_RUNNER,
 				clus.lg,
 				clus.Tester.RunnerExecPath,
 				args,
@@ -150,5 +148,5 @@ func newStresser(clus *Cluster, m *rpcpb.Member) Stresser {
 			)
 		}
 	}
-	return &compositeStresser{stressers}
+	return stressers
 }
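
Note: newStresser now returns the raw []Stresser instead of wrapping it in a compositeStresser, so the caller can recover concrete stresser types; this is how setStresserChecker in cluster.go above pairs lease and runner stressers with their checkers. A sketch of that type-switch pattern:

	for _, s := range newStresser(clus, m) {
		switch v := s.(type) {
		case *leaseStresser:
			_ = v // becomes a LEASE_EXPIRE checker target
		case *runnerStresser:
			_ = v // its errc feeds a RUNNER checker
		}
	}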

+ 0 - 13
functional/tester/stress_composite.go → functional/tester/stresser_composite.go

@@ -74,16 +74,3 @@ func (cs *compositeStresser) ModifiedKeys() (modifiedKey int64) {
 	}
 	return modifiedKey
 }
-
-func (cs *compositeStresser) Checker() Checker {
-	var chks []Checker
-	for _, s := range cs.stressers {
-		if chk := s.Checker(); chk != nil {
-			chks = append(chks, chk)
-		}
-	}
-	if len(chks) == 0 {
-		return nil
-	}
-	return newCompositeChecker(chks)
-}

+ 1 - 3
functional/tester/stress_key.go → functional/tester/stresser_key.go

@@ -35,7 +35,7 @@ import (
 )
 
 type keyStresser struct {
-	stype rpcpb.StressType
+	stype rpcpb.Stresser
 	lg    *zap.Logger
 
 	m *rpcpb.Member
@@ -204,8 +204,6 @@ func (s *keyStresser) ModifiedKeys() int64 {
 	return atomic.LoadInt64(&s.atomicModifiedKeys)
 }
 
-func (s *keyStresser) Checker() Checker { return nil }
-
 type stressFunc func(ctx context.Context) (err error, modifiedKeys int64)
 
 type stressEntry struct {

+ 1 - 5
functional/tester/stress_lease.go → functional/tester/stresser_lease.go

@@ -38,7 +38,7 @@ const (
 )
 
 type leaseStresser struct {
-	stype rpcpb.StressType
+	stype rpcpb.Stresser
 	lg    *zap.Logger
 
 	m      *rpcpb.Member
@@ -485,7 +482,3 @@ func (ls *leaseStresser) Close() map[string]int {
 func (ls *leaseStresser) ModifiedKeys() int64 {
 	return atomic.LoadInt64(&ls.atomicModifiedKey)
 }
-
-func (ls *leaseStresser) Checker() Checker {
-	return &leaseChecker{lg: ls.lg, m: ls.m, ls: ls}
-}

+ 2 - 6
functional/tester/stress_runner.go → functional/tester/stresser_runner.go

@@ -27,7 +27,7 @@ import (
 )
 
 type runnerStresser struct {
-	stype rpcpb.StressType
+	stype rpcpb.Stresser
 	lg    *zap.Logger
 
 	cmd     *exec.Cmd
@@ -41,7 +41,7 @@ type runnerStresser struct {
 }
 
 func newRunnerStresser(
-	stype rpcpb.StressType,
+	stype rpcpb.Stresser,
 	lg *zap.Logger,
 	cmdStr string,
 	args []string,
@@ -115,7 +115,3 @@ func (rs *runnerStresser) Close() map[string]int {
 func (rs *runnerStresser) ModifiedKeys() int64 {
 	return 1
 }
-
-func (rs *runnerStresser) Checker() Checker {
-	return &runnerChecker{rs.errc}
-}