
Merge pull request #9513 from gyuho/functional-tester

functional-tester: refactor agent/tester
Gyuho Lee · 7 years ago · commit 93fec5ef54
100 changed files with 10545 additions and 3020 deletions
  1. Dockerfile-functional-tester (+11 -9)
  2. Gopkg.lock (+26 -1)
  3. bill-of-materials.json (+27 -0)
  4. build (+3 -3)
  5. hack/scripts-dev/Makefile (+8 -8)
  6. pkg/transport/proxy.go (+210 -63)
  7. pkg/transport/proxy_test.go (+12 -9)
  8. test (+5 -17)
  9. tools/etcd-test-proxy/main.go (+2 -2)
  10. tools/functional-tester/README.md (+1 -10)
  11. tools/functional-tester/agent/doc.go (+3 -3)
  12. tools/functional-tester/agent/handler.go (+435 -0)
  13. tools/functional-tester/agent/server.go (+166 -0)
  14. tools/functional-tester/agent/utils.go (+110 -0)
  15. tools/functional-tester/agent/utils_test.go (+36 -0)
  16. tools/functional-tester/build (+3 -4)
  17. tools/functional-tester/cmd/etcd-agent/main.go (+46 -0)
  18. tools/functional-tester/cmd/etcd-runner/main.go (+6 -0)
  19. tools/functional-tester/cmd/etcd-tester/main.go (+59 -0)
  20. tools/functional-tester/etcd-agent/agent.go (+0 -372)
  21. tools/functional-tester/etcd-agent/agent_test.go (+0 -87)
  22. tools/functional-tester/etcd-agent/client/client.go (+0 -118)
  23. tools/functional-tester/etcd-agent/main.go (+0 -47)
  24. tools/functional-tester/etcd-agent/rpc.go (+0 -131)
  25. tools/functional-tester/etcd-agent/rpc_test.go (+0 -166)
  26. tools/functional-tester/etcd-tester/cluster.go (+0 -261)
  27. tools/functional-tester/etcd-tester/doc.go (+0 -16)
  28. tools/functional-tester/etcd-tester/failure.go (+0 -205)
  29. tools/functional-tester/etcd-tester/failure_agent.go (+0 -177)
  30. tools/functional-tester/etcd-tester/http.go (+0 -44)
  31. tools/functional-tester/etcd-tester/main.go (+0 -232)
  32. tools/functional-tester/etcd-tester/member.go (+0 -190)
  33. tools/functional-tester/etcd-tester/status.go (+0 -57)
  34. tools/functional-tester/etcd-tester/stresser.go (+0 -218)
  35. tools/functional-tester/etcd-tester/tester.go (+0 -286)
  36. tools/functional-tester/etcd-tester/v2_stresser.go (+0 -117)
  37. tools/functional-tester/rpcpb/etcd_config.go (+76 -0)
  38. tools/functional-tester/rpcpb/etcd_config_test.go (+59 -0)
  39. tools/functional-tester/rpcpb/member.go (+158 -0)
  40. tools/functional-tester/rpcpb/rpc.pb.go (+2862 -0)
  41. tools/functional-tester/rpcpb/rpc.proto (+187 -0)
  42. tools/functional-tester/runner/election_command.go (+1 -1)
  43. tools/functional-tester/runner/error.go (+1 -1)
  44. tools/functional-tester/runner/global.go (+1 -1)
  45. tools/functional-tester/runner/help.go (+1 -1)
  46. tools/functional-tester/runner/lease_renewer_command.go (+1 -1)
  47. tools/functional-tester/runner/lock_racer_command.go (+1 -1)
  48. tools/functional-tester/runner/root.go (+2 -2)
  49. tools/functional-tester/runner/watch_command.go (+1 -1)
  50. tools/functional-tester/scripts/docker-local-agent.sh (+4 -20)
  51. tools/functional-tester/scripts/docker-local-tester.sh (+2 -49)
  52. tools/functional-tester/scripts/genproto.sh (+28 -0)
  53. tools/functional-tester/tester/checks.go (+72 -18)
  54. tools/functional-tester/tester/cluster.go (+728 -0)
  55. tools/functional-tester/tester/cluster_test.go (+162 -0)
  56. tools/functional-tester/tester/doc.go (+3 -3)
  57. tools/functional-tester/tester/failure.go (+30 -0)
  58. tools/functional-tester/tester/failure_case_blackhole.go (+49 -0)
  59. tools/functional-tester/tester/failure_case_delay.go (+41 -0)
  60. tools/functional-tester/tester/failure_case_external.go (+44 -0)
  61. tools/functional-tester/tester/failure_case_failpoints.go (+20 -21)
  62. tools/functional-tester/tester/failure_case_kill.go (+210 -0)
  63. tools/functional-tester/tester/failure_case_no_op.go (+10 -6)
  64. tools/functional-tester/tester/failure_case_slow_network.go (+85 -0)
  65. tools/functional-tester/tester/local-test.yaml (+126 -0)
  66. tools/functional-tester/tester/metrics.go (+3 -5)
  67. tools/functional-tester/tester/stress.go (+202 -0)
  68. tools/functional-tester/tester/stress_key.go (+20 -7)
  69. tools/functional-tester/tester/stress_lease.go (+128 -25)
  70. tools/functional-tester/tester/stress_runner.go (+2 -2)
  71. tools/functional-tester/tester/tester.go (+274 -0)
  72. tools/functional-tester/tester/utils.go (+30 -2)
  73. vendor/go.uber.org/atomic/LICENSE.txt (+19 -0)
  74. vendor/go.uber.org/atomic/atomic.go (+309 -0)
  75. vendor/go.uber.org/atomic/string.go (+49 -0)
  76. vendor/go.uber.org/multierr/LICENSE.txt (+19 -0)
  77. vendor/go.uber.org/multierr/error.go (+401 -0)
  78. vendor/go.uber.org/zap/LICENSE.txt (+19 -0)
  79. vendor/go.uber.org/zap/array.go (+320 -0)
  80. vendor/go.uber.org/zap/buffer/buffer.go (+106 -0)
  81. vendor/go.uber.org/zap/buffer/pool.go (+49 -0)
  82. vendor/go.uber.org/zap/config.go (+243 -0)
  83. vendor/go.uber.org/zap/doc.go (+113 -0)
  84. vendor/go.uber.org/zap/encoder.go (+75 -0)
  85. vendor/go.uber.org/zap/error.go (+80 -0)
  86. vendor/go.uber.org/zap/field.go (+306 -0)
  87. vendor/go.uber.org/zap/flag.go (+39 -0)
  88. vendor/go.uber.org/zap/global.go (+139 -0)
  89. vendor/go.uber.org/zap/http_handler.go (+81 -0)
  90. vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go (+31 -0)
  91. vendor/go.uber.org/zap/internal/color/color.go (+44 -0)
  92. vendor/go.uber.org/zap/internal/exit/exit.go (+64 -0)
  93. vendor/go.uber.org/zap/level.go (+132 -0)
  94. vendor/go.uber.org/zap/logger.go (+305 -0)
  95. vendor/go.uber.org/zap/options.go (+109 -0)
  96. vendor/go.uber.org/zap/stacktrace.go (+126 -0)
  97. vendor/go.uber.org/zap/sugar.go (+304 -0)
  98. vendor/go.uber.org/zap/time.go (+27 -0)
  99. vendor/go.uber.org/zap/writer.go (+96 -0)
  100. vendor/go.uber.org/zap/zapcore/console_encoder.go (+147 -0)

+ 11 - 9
Dockerfile-functional-tester

@@ -22,19 +22,21 @@ RUN rm -rf ${GOROOT} \
 
 RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
 ADD . ${GOPATH}/src/github.com/coreos/etcd
+ADD ./tools/functional-tester/tester/local-test.yaml /local-test.yaml
 
 RUN go get -v github.com/coreos/gofail \
   && pushd ${GOPATH}/src/github.com/coreos/etcd \
   && GO_BUILD_FLAGS="-v" ./build \
-  && cp ./bin/etcd /etcd \
-  && cp ./bin/etcdctl /etcdctl \
+  && mkdir -p /bin \
+  && cp ./bin/etcd /bin/etcd \
+  && cp ./bin/etcdctl /bin/etcdctl \
   && GO_BUILD_FLAGS="-v" FAILPOINTS=1 ./build \
-  && cp ./bin/etcd /etcd-failpoints \
+  && cp ./bin/etcd /bin/etcd-failpoints \
   && ./tools/functional-tester/build \
-  && cp ./bin/etcd-agent /etcd-agent \
-  && cp ./bin/etcd-tester /etcd-tester \
-  && cp ./bin/etcd-runner /etcd-runner \
-  && go build -v -o /benchmark ./tools/benchmark \
-  && go build -v -o /etcd-test-proxy ./tools/etcd-test-proxy \
+  && cp ./bin/etcd-agent /bin/etcd-agent \
+  && cp ./bin/etcd-tester /bin/etcd-tester \
+  && cp ./bin/etcd-runner /bin/etcd-runner \
+  && go build -v -o /bin/benchmark ./tools/benchmark \
+  && go build -v -o /bin/etcd-test-proxy ./tools/etcd-test-proxy \
   && popd \
-  && rm -rf ${GOPATH}/src/github.com/coreos/etcd
+  && rm -rf ${GOPATH}/src/github.com/coreos/etcd

+ 26 - 1
Gopkg.lock

@@ -239,6 +239,31 @@
   revision = "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2"
   version = "0.0.1"
 
+[[projects]]
+  name = "go.uber.org/atomic"
+  packages = ["."]
+  revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
+  version = "v1.3.1"
+
+[[projects]]
+  name = "go.uber.org/multierr"
+  packages = ["."]
+  revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "go.uber.org/zap"
+  packages = [
+    ".",
+    "buffer",
+    "internal/bufferpool",
+    "internal/color",
+    "internal/exit",
+    "zapcore"
+  ]
+  revision = "35aad584952c3e7020db7b839f6b102de6271f89"
+  version = "v1.7.1"
+
 [[projects]]
   name = "golang.org/x/crypto"
   packages = [
@@ -334,6 +359,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "54d78c9d66910169e10670b6e078ade50428e2019bd28a5987470fb2da185e26"
+  inputs-digest = "8d19d25de42fad79e4e417d847283a0057b08e2cacbf556d1da5dbcf8b81d0a9"
   solver-name = "gps-cdcl"
   solver-version = 1

+ 27 - 0
bill-of-materials.json

@@ -336,6 +336,33 @@
 			}
 		]
 	},
+	{
+		"project": "go.uber.org/atomic",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "go.uber.org/multierr",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
+	{
+		"project": "go.uber.org/zap",
+		"licenses": [
+			{
+				"type": "MIT License",
+				"confidence": 0.9891304347826086
+			}
+		]
+	},
 	{
 		"project": "golang.org/x/crypto",
 		"licenses": [

+ 3 - 3
build

@@ -70,9 +70,9 @@ tools_build() {
 	tools_path="benchmark
 	etcd-dump-db
 	etcd-dump-logs
-	functional-tester/etcd-agent
-	functional-tester/etcd-tester
-	functional-tester/etcd-runner
+	functional-tester/cmd/etcd-agent
+	functional-tester/cmd/etcd-tester
+	functional-tester/cmd/etcd-runner
 	local-tester/bridge"
 	for tool in ${tools_path}
 	do

+ 8 - 8
hack/scripts-dev/Makefile

@@ -479,14 +479,14 @@ build-docker-functional-tester:
 	docker run \
 	  --rm \
 	  gcr.io/etcd-development/etcd-functional-tester:go$(GO_VERSION) \
-	  /bin/bash -c "/etcd --version && \
-	   /etcd-failpoints --version && \
-	   ETCDCTL_API=3 /etcdctl version && \
-	   /etcd-agent -help || true && \
-	   /etcd-tester -help || true && \
-	   /etcd-runner --help || true && \
-	   /benchmark --help || true && \
-	   /etcd-test-proxy -help || true"
+	  /bin/bash -c "./bin/etcd --version && \
+	   ./bin/etcd-failpoints --version && \
+	   ETCDCTL_API=3 ./bin/etcdctl version && \
+	   ./bin/etcd-agent -help || true && \
+	   ./bin/etcd-tester -help || true && \
+	   ./bin/etcd-runner --help || true && \
+	   ./bin/benchmark --help || true && \
+	   ./bin/etcd-test-proxy -help || true"
 
 push-docker-functional-tester:
 	$(info GO_VERSION: $(GO_VERSION))

+ 210 - 63
pkg/transport/proxy.go

@@ -21,13 +21,13 @@ import (
 	"net"
 	"net/http"
 	"net/url"
-	"os"
 	"strings"
 	"sync"
 	"time"
 
+	"go.uber.org/zap"
+
 	humanize "github.com/dustin/go-humanize"
-	"google.golang.org/grpc/grpclog"
 )
 
 // Proxy defines proxy layer that simulates common network faults,
@@ -102,12 +102,13 @@ type Proxy interface {
 }
 
 type proxy struct {
+	logger *zap.Logger
+
 	from, to      url.URL
 	tlsInfo       TLSInfo
 	dialTimeout   time.Duration
 	bufferSize    int
 	retryInterval time.Duration
-	logger        grpclog.LoggerV2
 
 	readyc chan struct{}
 	donec  chan struct{}
@@ -143,33 +144,42 @@ type proxy struct {
 
 // ProxyConfig defines proxy configuration.
 type ProxyConfig struct {
+	Logger        *zap.Logger
 	From          url.URL
 	To            url.URL
 	TLSInfo       TLSInfo
 	DialTimeout   time.Duration
 	BufferSize    int
 	RetryInterval time.Duration
-	Logger        grpclog.LoggerV2
 }
 
 var (
 	defaultDialTimeout   = 3 * time.Second
 	defaultBufferSize    = 48 * 1024
 	defaultRetryInterval = 10 * time.Millisecond
-	defaultLogger        = grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 0)
+	defaultLogger        *zap.Logger
 )
 
+func init() {
+	var err error
+	defaultLogger, err = zap.NewProduction()
+	if err != nil {
+		panic(err)
+	}
+}
+
 // NewProxy returns a proxy implementation with no iptables/tc dependencies.
 // The proxy layer overhead is <1ms.
 func NewProxy(cfg ProxyConfig) Proxy {
 	p := &proxy{
+		logger: cfg.Logger,
+
 		from:          cfg.From,
 		to:            cfg.To,
 		tlsInfo:       cfg.TLSInfo,
 		dialTimeout:   cfg.DialTimeout,
 		bufferSize:    cfg.BufferSize,
 		retryInterval: cfg.RetryInterval,
-		logger:        cfg.Logger,
 
 		readyc: make(chan struct{}),
 		donec:  make(chan struct{}),
@@ -220,7 +230,8 @@ func NewProxy(cfg ProxyConfig) Proxy {
 
 	p.closeWg.Add(1)
 	go p.listenAndServe()
-	p.logger.Infof("started proxying [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info("started proxying", zap.String("from", p.From()), zap.String("to", p.To()))
 	return p
 }
 
@@ -240,7 +251,7 @@ func (p *proxy) To() string {
 func (p *proxy) listenAndServe() {
 	defer p.closeWg.Done()
 
-	p.logger.Infof("listen %q", p.From())
+	p.logger.Info("proxy is listening on", zap.String("from", p.From()))
 	close(p.readyc)
 
 	for {
@@ -280,9 +291,7 @@ func (p *proxy) listenAndServe() {
 			case <-p.donec:
 				return
 			}
-			if p.logger.V(5) {
-				p.logger.Errorf("listener accept error %q", err.Error())
-			}
+			p.logger.Debug("listener accept error", zap.Error(err))
 
 			if strings.HasSuffix(err.Error(), "use of closed network connection") {
 				select {
@@ -290,9 +299,7 @@ func (p *proxy) listenAndServe() {
 				case <-p.donec:
 					return
 				}
-				if p.logger.V(5) {
-					p.logger.Errorf("listener is closed; retry listen %q", p.From())
-				}
+				p.logger.Debug("listener is closed; retry listening on", zap.String("from", p.From()))
 
 				if err = p.ResetListener(); err != nil {
 					select {
@@ -305,7 +312,7 @@ func (p *proxy) listenAndServe() {
 					case <-p.donec:
 						return
 					}
-					p.logger.Errorf("failed to reset listener %q", err.Error())
+					p.logger.Warn("failed to reset listener", zap.Error(err))
 				}
 			}
 
@@ -344,9 +351,7 @@ func (p *proxy) listenAndServe() {
 			case <-p.donec:
 				return
 			}
-			if p.logger.V(5) {
-				p.logger.Errorf("dial error %q", err.Error())
-			}
+			p.logger.Debug("failed to dial", zap.Error(err))
 			continue
 		}
 
@@ -392,9 +397,7 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
 			case <-p.donec:
 				return
 			}
-			if p.logger.V(5) {
-				p.logger.Errorf("read error %q", err.Error())
-			}
+			p.logger.Debug("failed to read", zap.Error(err))
 			return
 		}
 		if nr == 0 {
@@ -429,12 +432,20 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
 		default:
 		}
 		if blackholed {
-			if p.logger.V(5) {
-				if proxySend {
-					p.logger.Infof("dropped %s [%s -> %s]", humanize.Bytes(uint64(nr)), p.From(), p.To())
-				} else {
-					p.logger.Infof("dropped %s [%s <- %s]", humanize.Bytes(uint64(nr)), p.From(), p.To())
-				}
+			if proxySend {
+				p.logger.Debug(
+					"dropped",
+					zap.String("data-size", humanize.Bytes(uint64(nr))),
+					zap.String("from", p.From()),
+					zap.String("to", p.To()),
+				)
+			} else {
+				p.logger.Debug(
+					"dropped",
+					zap.String("data-size", humanize.Bytes(uint64(nr))),
+					zap.String("from", p.To()),
+					zap.String("to", p.From()),
+				)
 			}
 			continue
 		}
@@ -487,12 +498,10 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
 			case <-p.donec:
 				return
 			}
-			if p.logger.V(5) {
-				if proxySend {
-					p.logger.Errorf("write error while sending (%q)", err.Error())
-				} else {
-					p.logger.Errorf("write error while receiving (%q)", err.Error())
-				}
+			if proxySend {
+				p.logger.Debug("failed to write while sending", zap.Error(err))
+			} else {
+				p.logger.Debug("failed to write while receiving", zap.Error(err))
 			}
 			return
 		}
@@ -509,20 +518,39 @@ func (p *proxy) ioCopy(dst io.Writer, src io.Reader, proxySend bool) {
 				return
 			}
 			if proxySend {
-				p.logger.Errorf("write error while sending (%q); read %d bytes != wrote %d bytes", io.ErrShortWrite.Error(), nr, nw)
+				p.logger.Debug(
+					"failed to write while sending; read/write bytes are different",
+					zap.Int("read-bytes", nr),
+					zap.Int("write-bytes", nw),
+					zap.Error(io.ErrShortWrite),
+				)
 			} else {
-				p.logger.Errorf("write error while receiving (%q); read %d bytes != wrote %d bytes", io.ErrShortWrite.Error(), nr, nw)
+				p.logger.Debug(
+					"failed to write while receiving; read/write bytes are different",
+					zap.Int("read-bytes", nr),
+					zap.Int("write-bytes", nw),
+					zap.Error(io.ErrShortWrite),
+				)
 			}
 			return
 		}
 
-		if p.logger.V(5) {
-			if proxySend {
-				p.logger.Infof("transmitted %s [%s -> %s]", humanize.Bytes(uint64(nr)), p.From(), p.To())
-			} else {
-				p.logger.Infof("received %s [%s <- %s]", humanize.Bytes(uint64(nr)), p.From(), p.To())
-			}
+		if proxySend {
+			p.logger.Debug(
+				"transmitted",
+				zap.String("data-size", humanize.Bytes(uint64(nr))),
+				zap.String("from", p.From()),
+				zap.String("to", p.To()),
+			)
+		} else {
+			p.logger.Debug(
+				"received",
+				zap.String("data-size", humanize.Bytes(uint64(nr))),
+				zap.String("from", p.To()),
+				zap.String("to", p.From()),
+			)
 		}
+
 	}
 }
 
@@ -535,8 +563,13 @@ func (p *proxy) Close() (err error) {
 		p.listenerMu.Lock()
 		if p.listener != nil {
 			err = p.listener.Close()
-			p.logger.Infof("closed proxy listener on %q", p.From())
+			p.logger.Info(
+				"closed proxy listener",
+				zap.String("from", p.From()),
+				zap.String("to", p.To()),
+			)
 		}
+		p.logger.Sync()
 		p.listenerMu.Unlock()
 	})
 	p.closeWg.Wait()
@@ -551,7 +584,15 @@ func (p *proxy) DelayAccept(latency, rv time.Duration) {
 	p.latencyAcceptMu.Lock()
 	p.latencyAccept = d
 	p.latencyAcceptMu.Unlock()
-	p.logger.Infof("set accept latency %v(%v±%v) [%s -> %s]", d, latency, rv, p.From(), p.To())
+
+	p.logger.Info(
+		"set accept latency",
+		zap.Duration("latency", d),
+		zap.Duration("given-latency", latency),
+		zap.Duration("given-latency-random-variable", rv),
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) UndelayAccept() {
@@ -559,7 +600,13 @@ func (p *proxy) UndelayAccept() {
 	d := p.latencyAccept
 	p.latencyAccept = 0
 	p.latencyAcceptMu.Unlock()
-	p.logger.Infof("removed accept latency %v [%s -> %s]", d, p.From(), p.To())
+
+	p.logger.Info(
+		"removed accept latency",
+		zap.Duration("latency", d),
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) LatencyAccept() time.Duration {
@@ -577,7 +624,15 @@ func (p *proxy) DelayTx(latency, rv time.Duration) {
 	p.latencyTxMu.Lock()
 	p.latencyTx = d
 	p.latencyTxMu.Unlock()
-	p.logger.Infof("set transmit latency %v(%v±%v) [%s -> %s]", d, latency, rv, p.From(), p.To())
+
+	p.logger.Info(
+		"set transmit latency",
+		zap.Duration("latency", d),
+		zap.Duration("given-latency", latency),
+		zap.Duration("given-latency-random-variable", rv),
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) UndelayTx() {
@@ -585,7 +640,13 @@ func (p *proxy) UndelayTx() {
 	d := p.latencyTx
 	p.latencyTx = 0
 	p.latencyTxMu.Unlock()
-	p.logger.Infof("removed transmit latency %v [%s -> %s]", d, p.From(), p.To())
+
+	p.logger.Info(
+		"removed transmit latency",
+		zap.Duration("latency", d),
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) LatencyTx() time.Duration {
@@ -603,7 +664,15 @@ func (p *proxy) DelayRx(latency, rv time.Duration) {
 	p.latencyRxMu.Lock()
 	p.latencyRx = d
 	p.latencyRxMu.Unlock()
-	p.logger.Infof("set receive latency %v(%v±%v) [%s <- %s]", d, latency, rv, p.From(), p.To())
+
+	p.logger.Info(
+		"set receive latency",
+		zap.Duration("latency", d),
+		zap.Duration("given-latency", latency),
+		zap.Duration("given-latency-random-variable", rv),
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) UndelayRx() {
@@ -611,7 +680,13 @@ func (p *proxy) UndelayRx() {
 	d := p.latencyRx
 	p.latencyRx = 0
 	p.latencyRxMu.Unlock()
-	p.logger.Infof("removed receive latency %v [%s <- %s]", d, p.From(), p.To())
+
+	p.logger.Info(
+		"removed receive latency",
+		zap.Duration("latency", d),
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) LatencyRx() time.Duration {
@@ -644,7 +719,12 @@ func (p *proxy) PauseAccept() {
 	p.acceptMu.Lock()
 	p.pauseAcceptc = make(chan struct{})
 	p.acceptMu.Unlock()
-	p.logger.Infof("paused accepting new connections [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"paused accepting new connections",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) UnpauseAccept() {
@@ -658,14 +738,24 @@ func (p *proxy) UnpauseAccept() {
 		close(p.pauseAcceptc)
 	}
 	p.acceptMu.Unlock()
-	p.logger.Infof("unpaused accepting new connections [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"unpaused accepting new connections",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) PauseTx() {
 	p.txMu.Lock()
 	p.pauseTxc = make(chan struct{})
 	p.txMu.Unlock()
-	p.logger.Infof("paused transmit listen [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"paused transmit listen",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) UnpauseTx() {
@@ -679,14 +769,24 @@ func (p *proxy) UnpauseTx() {
 		close(p.pauseTxc)
 	}
 	p.txMu.Unlock()
-	p.logger.Infof("unpaused transmit listen [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"unpaused transmit listen",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) PauseRx() {
 	p.rxMu.Lock()
 	p.pauseRxc = make(chan struct{})
 	p.rxMu.Unlock()
-	p.logger.Infof("paused receive listen [%s <- %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"paused receive listen",
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) UnpauseRx() {
@@ -700,7 +800,12 @@ func (p *proxy) UnpauseRx() {
 		close(p.pauseRxc)
 	}
 	p.rxMu.Unlock()
-	p.logger.Infof("unpaused receive listen [%s <- %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"unpaused receive listen",
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) BlackholeTx() {
@@ -714,14 +819,24 @@ func (p *proxy) BlackholeTx() {
 		close(p.blackholeTxc)
 	}
 	p.txMu.Unlock()
-	p.logger.Infof("blackholed transmit [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"blackholed transmit",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) UnblackholeTx() {
 	p.txMu.Lock()
 	p.blackholeTxc = make(chan struct{})
 	p.txMu.Unlock()
-	p.logger.Infof("unblackholed transmit [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"unblackholed transmit",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) BlackholeRx() {
@@ -735,42 +850,71 @@ func (p *proxy) BlackholeRx() {
 		close(p.blackholeRxc)
 	}
 	p.rxMu.Unlock()
-	p.logger.Infof("blackholed receive [%s <- %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"blackholed receive",
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) UnblackholeRx() {
 	p.rxMu.Lock()
 	p.blackholeRxc = make(chan struct{})
 	p.rxMu.Unlock()
-	p.logger.Infof("unblackholed receive [%s <- %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"unblackholed receive",
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) CorruptTx(f func([]byte) []byte) {
 	p.corruptTxMu.Lock()
 	p.corruptTx = f
 	p.corruptTxMu.Unlock()
-	p.logger.Infof("corrupting transmit [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"corrupting transmit",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) UncorruptTx() {
 	p.corruptTxMu.Lock()
 	p.corruptTx = nil
 	p.corruptTxMu.Unlock()
-	p.logger.Infof("stopped corrupting transmit [%s -> %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"stopped corrupting transmit",
+		zap.String("from", p.From()),
+		zap.String("to", p.To()),
+	)
 }
 
 func (p *proxy) CorruptRx(f func([]byte) []byte) {
 	p.corruptRxMu.Lock()
 	p.corruptRx = f
 	p.corruptRxMu.Unlock()
-	p.logger.Infof("corrupting receive [%s <- %s]", p.From(), p.To())
+	p.logger.Info(
+		"corrupting receive",
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) UncorruptRx() {
 	p.corruptRxMu.Lock()
 	p.corruptRx = nil
 	p.corruptRxMu.Unlock()
-	p.logger.Infof("stopped corrupting receive [%s <- %s]", p.From(), p.To())
+
+	p.logger.Info(
+		"stopped corrupting receive",
+		zap.String("from", p.To()),
+		zap.String("to", p.From()),
+	)
 }
 
 func (p *proxy) ResetListener() error {
@@ -796,6 +940,9 @@ func (p *proxy) ResetListener() error {
 	}
 	p.listener = ln
 
-	p.logger.Infof("reset listener %q", p.From())
+	p.logger.Info(
+		"reset listener on",
+		zap.String("from", p.From()),
+	)
 	return nil
 }
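
For reference, a minimal usage sketch of the refactored proxy API above, passing a zap logger and exercising a couple of the fault-injection methods; the addresses are placeholders and error handling is kept to a minimum:

package main

import (
	"net/url"
	"time"

	"github.com/coreos/etcd/pkg/transport"

	"go.uber.org/zap"
)

func main() {
	lg, err := zap.NewProduction() // use zap.NewExample() to also see Debug-level drop/transmit logs
	if err != nil {
		panic(err)
	}

	p := transport.NewProxy(transport.ProxyConfig{
		Logger: lg,
		From:   url.URL{Scheme: "tcp", Host: "127.0.0.1:23790"}, // placeholder: address the proxy listens on
		To:     url.URL{Scheme: "tcp", Host: "127.0.0.1:2379"},  // placeholder: address traffic is forwarded to
	})
	<-p.Ready()
	defer p.Close()

	// inject 500ms±100ms latency on transmits and blackhole receives for a while
	p.DelayTx(500*time.Millisecond, 100*time.Millisecond)
	p.BlackholeRx()
	time.Sleep(3 * time.Second)

	// lift both faults again
	p.UndelayTx()
	p.UnblackholeRx()
}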

+ 12 - 9
pkg/transport/proxy_test.go

@@ -28,9 +28,12 @@ import (
 	"testing"
 	"time"
 
-	"google.golang.org/grpc/grpclog"
+	"go.uber.org/zap"
 )
 
+// enable DebugLevel
+var testLogger = zap.NewExample()
+
 var testTLSInfo = TLSInfo{
 	KeyFile:        "./fixtures/server.key.insecure",
 	CertFile:       "./fixtures/server.crt",
@@ -67,9 +70,9 @@ func testProxy(t *testing.T, scheme string, secure bool, delayTx bool) {
 	defer ln.Close()
 
 	cfg := ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	}
 	if secure {
 		cfg.TLSInfo = testTLSInfo
@@ -176,9 +179,9 @@ func testProxyDelayAccept(t *testing.T, secure bool) {
 	defer ln.Close()
 
 	cfg := ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	}
 	if secure {
 		cfg.TLSInfo = testTLSInfo
@@ -229,9 +232,9 @@ func TestProxy_PauseTx(t *testing.T) {
 	defer ln.Close()
 
 	p := NewProxy(ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	})
 	<-p.Ready()
 	defer p.Close()
@@ -275,9 +278,9 @@ func TestProxy_BlackholeTx(t *testing.T) {
 	defer ln.Close()
 
 	p := NewProxy(ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	})
 	<-p.Ready()
 	defer p.Close()
@@ -325,9 +328,9 @@ func TestProxy_CorruptTx(t *testing.T) {
 	defer ln.Close()
 
 	p := NewProxy(ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	})
 	<-p.Ready()
 	defer p.Close()
@@ -360,9 +363,9 @@ func TestProxy_Shutdown(t *testing.T) {
 	defer ln.Close()
 
 	p := NewProxy(ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	})
 	<-p.Ready()
 	defer p.Close()
@@ -390,9 +393,9 @@ func TestProxy_ShutdownListener(t *testing.T) {
 	defer ln.Close()
 
 	p := NewProxy(ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	})
 	<-p.Ready()
 	defer p.Close()
@@ -462,9 +465,9 @@ func testProxyHTTP(t *testing.T, secure, delayTx bool) {
 	time.Sleep(200 * time.Millisecond)
 
 	cfg := ProxyConfig{
+		Logger: testLogger,
 		From:   url.URL{Scheme: scheme, Host: srcAddr},
 		To:     url.URL{Scheme: scheme, Host: dstAddr},
-		Logger: grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5),
 	}
 	if secure {
 		cfg.TLSInfo = testTLSInfo

+ 5 - 17
test

@@ -144,7 +144,7 @@ function integration_pass {
 	# expectation could be different
 	USERTIMEOUT=""
 	if [ -z "${TIMEOUT}" ]; then
-		USERTIMEOUT="15m"
+		USERTIMEOUT="20m"
 	else
 		USERTIMEOUT="${TIMEOUT}"
 	fi
@@ -172,7 +172,7 @@ function integration_pass {
 
 function integration_extra {
 	go test -timeout 1m -v ${RACE} -cpu 1,2,4 "$@" "${REPO_PATH}/client/integration"
-	go test -timeout 20m -v ${RACE} -cpu 1,2,4 "$@" "${REPO_PATH}/clientv3/integration"
+	go test -timeout 25m -v ${RACE} -cpu 1,2,4 "$@" "${REPO_PATH}/clientv3/integration"
 	go test -timeout 1m -v -cpu 1,2,4 "$@" "${REPO_PATH}/contrib/raftexample"
 	go test -timeout 5m -v ${RACE} -tags v2v3 "$@" "${REPO_PATH}/etcdserver/v2store"
 	go test -timeout 1m -v ${RACE} -cpu 1,2,4 -run=Example "$@" "${TEST[@]}"
@@ -180,11 +180,10 @@ function integration_extra {
 
 function functional_pass {
   	# Clean up any data and logs from previous runs
-  	rm -rf ./agent-*
+  	rm -rf /tmp/etcd-agent-data-*
 
 	for a in 1 2 3; do
-		mkdir -p ./agent-$a
-		./bin/etcd-agent -etcd-path ./bin/etcd -etcd-log-dir "./agent-$a" -port ":${a}9027" &
+		./bin/etcd-agent --network tcp --address 127.0.0.1:${a}9027 &
 		pid="$!"
 		agent_pids="${agent_pids} $pid"
 	done
@@ -197,18 +196,7 @@ function functional_pass {
 	done
 
 	echo "Starting 'etcd-tester'"
-	./bin/etcd-tester \
-		-agent-endpoints "127.0.0.1:19027,127.0.0.1:29027,127.0.0.1:39027" \
-		-client-ports 1379,2379,3379 \
-		-advertise-client-ports 13790,23790,33790 \
-		-peer-ports 1380,2380,3380 \
-		-advertise-peer-ports 13800,23800,33800 \
-		-limit 1 \
-		-schedule-cases "0 1 2 3 4 5 6 7 8 9" \
-		-stress-qps 1000 \
-		-stress-key-txn-count 100 \
-		-stress-key-txn-ops 10 \
-		-exit-on-failure && echo "'etcd-tester' succeeded"
+	./bin/etcd-tester --config ./tools/functional-tester/tester/local-test.yaml && echo "'etcd-tester' succeeded"
 	ETCD_TESTER_EXIT_CODE=$?
 	echo "ETCD_TESTER_EXIT_CODE:" ${ETCD_TESTER_EXIT_CODE}
 

+ 2 - 2
tools/etcd-test-proxy/main.go

@@ -28,7 +28,7 @@ import (
 
 	"github.com/coreos/etcd/pkg/transport"
 
-	"google.golang.org/grpc/grpclog"
+	"go.uber.org/zap"
 )
 
 var from string
@@ -74,7 +74,7 @@ $ ETCDCTL_API=3 ./bin/etcdctl --endpoints localhost:23790 put foo bar`)
 		To:   url.URL{Scheme: "tcp", Host: to},
 	}
 	if verbose {
-		cfg.Logger = grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 5)
+		cfg.Logger = zap.NewExample()
 	}
 	p := transport.NewProxy(cfg)
 	<-p.Ready()

+ 1 - 10
tools/functional-tester/README.md

@@ -44,14 +44,5 @@ And run [example scripts](./scripts).
 ./scripts/docker-local-agent.sh 3
 
 # to run only 1 tester round
-LIMIT=1 ./scripts/docker-local-tester.sh
-
-# to run long-running tests with no limit
-LIMIT=1 ./scripts/docker-local-tester.sh
-
-# to run only 1 tester round with election runner and others
-# default is STRESSER="keys,lease"
-LIMIT=1 \
-  STRESSER="keys,lease,election-runner,watch-runner,lock-racer-runner,lease-runner" \
-  ./scripts/docker-local-tester.sh
+./scripts/docker-local-tester.sh
 ```

+ 3 - 3
tools/functional-tester/etcd-agent/doc.go → tools/functional-tester/agent/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,5 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// etcd-agent is a daemon for controlling an etcd process via HTTP RPC.
-package main
+// Package agent implements functional-tester agent server.
+package agent

+ 435 - 0
tools/functional-tester/agent/handler.go

@@ -0,0 +1,435 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"os/exec"
+	"syscall"
+	"time"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
+
+	"go.uber.org/zap"
+)
+
+// return error for system errors (e.g. fail to create files)
+// return status error in response for wrong configuration/operation (e.g. start etcd twice)
+func (srv *Server) handleTesterRequest(req *rpcpb.Request) (resp *rpcpb.Response, err error) {
+	defer func() {
+		if err == nil {
+			srv.last = req.Operation
+			srv.logger.Info("handler success", zap.String("operation", req.Operation.String()))
+		}
+	}()
+
+	switch req.Operation {
+	case rpcpb.Operation_InitialStartEtcd:
+		return srv.handleInitialStartEtcd(req)
+	case rpcpb.Operation_RestartEtcd:
+		return srv.handleRestartEtcd()
+	case rpcpb.Operation_KillEtcd:
+		return srv.handleKillEtcd()
+	case rpcpb.Operation_FailArchive:
+		return srv.handleFailArchive()
+	case rpcpb.Operation_DestroyEtcdAgent:
+		return srv.handleDestroyEtcdAgent()
+
+	case rpcpb.Operation_BlackholePeerPortTxRx:
+		return srv.handleBlackholePeerPortTxRx()
+	case rpcpb.Operation_UnblackholePeerPortTxRx:
+		return srv.handleUnblackholePeerPortTxRx()
+	case rpcpb.Operation_DelayPeerPortTxRx:
+		return srv.handleDelayPeerPortTxRx()
+	case rpcpb.Operation_UndelayPeerPortTxRx:
+		return srv.handleUndelayPeerPortTxRx()
+
+	default:
+		msg := fmt.Sprintf("operation not found (%v)", req.Operation)
+		return &rpcpb.Response{Success: false, Status: msg}, errors.New(msg)
+	}
+}
+
+func (srv *Server) handleInitialStartEtcd(req *rpcpb.Request) (*rpcpb.Response, error) {
+	if srv.last != rpcpb.Operation_NotStarted {
+		return &rpcpb.Response{
+			Success: false,
+			Status:  fmt.Sprintf("%q is not valid; last server operation was %q", rpcpb.Operation_InitialStartEtcd.String(), srv.last.String()),
+		}, nil
+	}
+
+	srv.Member = req.Member
+	srv.Tester = req.Tester
+
+	srv.logger.Info("creating base directory", zap.String("path", srv.Member.BaseDir))
+	err := fileutil.TouchDirAll(srv.Member.BaseDir)
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("created base directory", zap.String("path", srv.Member.BaseDir))
+
+	if err = srv.createEtcdFile(); err != nil {
+		return nil, err
+	}
+	srv.creatEtcdCmd()
+
+	srv.logger.Info("starting etcd process")
+	err = srv.startEtcdCmd()
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("started etcd process", zap.String("command-path", srv.etcdCmd.Path))
+
+	// wait some time for etcd listener start
+	// before setting up proxy
+	time.Sleep(time.Second)
+	if err = srv.startProxy(); err != nil {
+		return nil, err
+	}
+
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully started etcd!",
+	}, nil
+}
+
+func (srv *Server) startProxy() error {
+	if srv.Member.EtcdClientProxy {
+		advertiseClientURL, advertiseClientURLPort, err := getURLAndPort(srv.Member.Etcd.AdvertiseClientURLs[0])
+		if err != nil {
+			return err
+		}
+		listenClientURL, _, err := getURLAndPort(srv.Member.Etcd.ListenClientURLs[0])
+		if err != nil {
+			return err
+		}
+
+		srv.logger.Info("starting proxy on client traffic", zap.String("url", advertiseClientURL.String()))
+		srv.advertiseClientPortToProxy[advertiseClientURLPort] = transport.NewProxy(transport.ProxyConfig{
+			Logger: srv.logger,
+			From:   *advertiseClientURL,
+			To:     *listenClientURL,
+		})
+		select {
+		case err = <-srv.advertiseClientPortToProxy[advertiseClientURLPort].Error():
+			return err
+		case <-time.After(2 * time.Second):
+			srv.logger.Info("started proxy on client traffic", zap.String("url", advertiseClientURL.String()))
+		}
+	}
+
+	if srv.Member.EtcdPeerProxy {
+		advertisePeerURL, advertisePeerURLPort, err := getURLAndPort(srv.Member.Etcd.InitialAdvertisePeerURLs[0])
+		if err != nil {
+			return err
+		}
+		listenPeerURL, _, err := getURLAndPort(srv.Member.Etcd.ListenPeerURLs[0])
+		if err != nil {
+			return err
+		}
+
+		srv.logger.Info("starting proxy on peer traffic", zap.String("url", advertisePeerURL.String()))
+		srv.advertisePeerPortToProxy[advertisePeerURLPort] = transport.NewProxy(transport.ProxyConfig{
+			Logger: srv.logger,
+			From:   *advertisePeerURL,
+			To:     *listenPeerURL,
+		})
+		select {
+		case err = <-srv.advertisePeerPortToProxy[advertisePeerURLPort].Error():
+			return err
+		case <-time.After(2 * time.Second):
+			srv.logger.Info("started proxy on peer traffic", zap.String("url", advertisePeerURL.String()))
+		}
+	}
+	return nil
+}
+
+func (srv *Server) stopProxy() {
+	if srv.Member.EtcdClientProxy && len(srv.advertiseClientPortToProxy) > 0 {
+		for port, px := range srv.advertiseClientPortToProxy {
+			srv.logger.Info("closing proxy",
+				zap.Int("port", port),
+				zap.String("from", px.From()),
+				zap.String("to", px.To()),
+			)
+			if err := px.Close(); err != nil {
+				srv.logger.Warn("failed to close proxy", zap.Int("port", port))
+				continue
+			}
+			select {
+			case <-px.Done():
+				// enough time to release port
+				time.Sleep(time.Second)
+			case <-time.After(time.Second):
+			}
+			srv.logger.Info("closed proxy",
+				zap.Int("port", port),
+				zap.String("from", px.From()),
+				zap.String("to", px.To()),
+			)
+		}
+		srv.advertiseClientPortToProxy = make(map[int]transport.Proxy)
+	}
+	if srv.Member.EtcdPeerProxy && len(srv.advertisePeerPortToProxy) > 0 {
+		for port, px := range srv.advertisePeerPortToProxy {
+			srv.logger.Info("closing proxy",
+				zap.Int("port", port),
+				zap.String("from", px.From()),
+				zap.String("to", px.To()),
+			)
+			if err := px.Close(); err != nil {
+				srv.logger.Warn("failed to close proxy", zap.Int("port", port))
+				continue
+			}
+			select {
+			case <-px.Done():
+				// enough time to release port
+				time.Sleep(time.Second)
+			case <-time.After(time.Second):
+			}
+			srv.logger.Info("closed proxy",
+				zap.Int("port", port),
+				zap.String("from", px.From()),
+				zap.String("to", px.To()),
+			)
+		}
+		srv.advertisePeerPortToProxy = make(map[int]transport.Proxy)
+	}
+}
+
+func (srv *Server) createEtcdFile() error {
+	srv.logger.Info("creating etcd log file", zap.String("path", srv.Member.EtcdLogPath))
+	var err error
+	srv.etcdLogFile, err = os.Create(srv.Member.EtcdLogPath)
+	if err != nil {
+		return err
+	}
+	srv.logger.Info("created etcd log file", zap.String("path", srv.Member.EtcdLogPath))
+	return nil
+}
+
+func (srv *Server) creatEtcdCmd() {
+	etcdPath, etcdFlags := srv.Member.EtcdExecPath, srv.Member.Etcd.Flags()
+	u, _ := url.Parse(srv.Member.FailpointHTTPAddr)
+	srv.logger.Info("creating etcd command",
+		zap.String("etcd-exec-path", etcdPath),
+		zap.Strings("etcd-flags", etcdFlags),
+		zap.String("failpoint-http-addr", srv.Member.FailpointHTTPAddr),
+		zap.String("failpoint-addr", u.Host),
+	)
+	srv.etcdCmd = exec.Command(etcdPath, etcdFlags...)
+	srv.etcdCmd.Env = []string{"GOFAIL_HTTP=" + u.Host}
+	srv.etcdCmd.Stdout = srv.etcdLogFile
+	srv.etcdCmd.Stderr = srv.etcdLogFile
+}
+
+// start but do not wait for it to complete
+func (srv *Server) startEtcdCmd() error {
+	return srv.etcdCmd.Start()
+}
+
+func (srv *Server) handleRestartEtcd() (*rpcpb.Response, error) {
+	srv.creatEtcdCmd()
+
+	srv.logger.Info("restarting etcd process")
+	err := srv.startEtcdCmd()
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("restarted etcd process", zap.String("command-path", srv.etcdCmd.Path))
+
+	// wait some time for etcd listener start
+	// before setting up proxy
+	time.Sleep(time.Second)
+	if err = srv.startProxy(); err != nil {
+		return nil, err
+	}
+
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully restarted etcd!",
+	}, nil
+}
+
+func (srv *Server) handleKillEtcd() (*rpcpb.Response, error) {
+	if srv.last != rpcpb.Operation_InitialStartEtcd && srv.last != rpcpb.Operation_RestartEtcd {
+		return &rpcpb.Response{
+			Success: false,
+			Status:  fmt.Sprintf("%q is not valid; last server operation was %q", rpcpb.Operation_KillEtcd.String(), srv.last.String()),
+		}, nil
+	}
+
+	srv.stopProxy()
+
+	srv.logger.Info("killing etcd process", zap.String("signal", syscall.SIGTERM.String()))
+	err := stopWithSig(srv.etcdCmd, syscall.SIGTERM)
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("killed etcd process", zap.String("signal", syscall.SIGTERM.String()))
+
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully killed etcd!",
+	}, nil
+}
+
+func (srv *Server) handleFailArchive() (*rpcpb.Response, error) {
+	// TODO: stop/restart proxy?
+	// for now, just keep using the old ones
+	// if len(srv.advertisePortToProxy) > 0
+
+	// exit with stackstrace
+	srv.logger.Info("killing etcd process", zap.String("signal", syscall.SIGQUIT.String()))
+	err := stopWithSig(srv.etcdCmd, syscall.SIGQUIT)
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("killed etcd process", zap.String("signal", syscall.SIGQUIT.String()))
+
+	srv.etcdLogFile.Sync()
+	srv.etcdLogFile.Close()
+
+	// TODO: support separate WAL directory
+	srv.logger.Info("archiving data", zap.String("base-dir", srv.Member.BaseDir))
+	if err = archive(
+		srv.Member.BaseDir,
+		srv.Member.EtcdLogPath,
+		srv.Member.Etcd.DataDir,
+	); err != nil {
+		return nil, err
+	}
+	srv.logger.Info("archived data", zap.String("base-dir", srv.Member.BaseDir))
+
+	if err = srv.createEtcdFile(); err != nil {
+		return nil, err
+	}
+
+	srv.logger.Info("cleaning up page cache")
+	if err := cleanPageCache(); err != nil {
+		srv.logger.Warn("failed to clean up page cache", zap.String("error", err.Error()))
+	}
+	srv.logger.Info("cleaned up page cache")
+
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully cleaned up etcd!",
+	}, nil
+}
+
+// stop proxy, etcd, delete data directory
+func (srv *Server) handleDestroyEtcdAgent() (*rpcpb.Response, error) {
+	srv.logger.Info("killing etcd process", zap.String("signal", syscall.SIGTERM.String()))
+	err := stopWithSig(srv.etcdCmd, syscall.SIGTERM)
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("killed etcd process", zap.String("signal", syscall.SIGTERM.String()))
+
+	srv.logger.Info("removing base directory", zap.String("dir", srv.Member.BaseDir))
+	err = os.RemoveAll(srv.Member.BaseDir)
+	if err != nil {
+		return nil, err
+	}
+	srv.logger.Info("removed base directory", zap.String("dir", srv.Member.BaseDir))
+
+	// stop agent server
+	srv.Stop()
+
+	for port, px := range srv.advertiseClientPortToProxy {
+		srv.logger.Info("closing proxy", zap.Int("client-port", port))
+		err := px.Close()
+		srv.logger.Info("closed proxy", zap.Int("client-port", port), zap.Error(err))
+	}
+	for port, px := range srv.advertisePeerPortToProxy {
+		srv.logger.Info("closing proxy", zap.Int("peer-port", port))
+		err := px.Close()
+		srv.logger.Info("closed proxy", zap.Int("peer-port", port), zap.Error(err))
+	}
+
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully destroyed etcd and agent!",
+	}, nil
+}
+
+func (srv *Server) handleBlackholePeerPortTxRx() (*rpcpb.Response, error) {
+	for port, px := range srv.advertisePeerPortToProxy {
+		srv.logger.Info("blackholing", zap.Int("peer-port", port))
+		px.BlackholeTx()
+		px.BlackholeRx()
+		srv.logger.Info("blackholed", zap.Int("peer-port", port))
+	}
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully blackholed peer port tx/rx!",
+	}, nil
+}
+
+func (srv *Server) handleUnblackholePeerPortTxRx() (*rpcpb.Response, error) {
+	for port, px := range srv.advertisePeerPortToProxy {
+		srv.logger.Info("unblackholing", zap.Int("peer-port", port))
+		px.UnblackholeTx()
+		px.UnblackholeRx()
+		srv.logger.Info("unblackholed", zap.Int("peer-port", port))
+	}
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully unblackholed peer port tx/rx!",
+	}, nil
+}
+
+func (srv *Server) handleDelayPeerPortTxRx() (*rpcpb.Response, error) {
+	lat := time.Duration(srv.Tester.DelayLatencyMs) * time.Millisecond
+	rv := time.Duration(srv.Tester.DelayLatencyMsRv) * time.Millisecond
+
+	for port, px := range srv.advertisePeerPortToProxy {
+		srv.logger.Info("delaying",
+			zap.Int("peer-port", port),
+			zap.Duration("latency", lat),
+			zap.Duration("random-variable", rv),
+		)
+		px.DelayTx(lat, rv)
+		px.DelayRx(lat, rv)
+		srv.logger.Info("delayed",
+			zap.Int("peer-port", port),
+			zap.Duration("latency", lat),
+			zap.Duration("random-variable", rv),
+		)
+	}
+
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully delay peer port tx/rx!",
+	}, nil
+}
+
+func (srv *Server) handleUndelayPeerPortTxRx() (*rpcpb.Response, error) {
+	for port, px := range srv.advertisePeerPortToProxy {
+		srv.logger.Info("undelaying", zap.Int("peer-port", port))
+		px.UndelayTx()
+		px.UndelayRx()
+		srv.logger.Info("undelayed", zap.Int("peer-port", port))
+	}
+	return &rpcpb.Response{
+		Success: true,
+		Status:  "successfully undelay peer port tx/rx!",
+	}, nil
+}
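
As the comment above handleTesterRequest spells out, agent-side system failures surface as stream errors while invalid operation sequences come back as a Response with Success set to false. A small illustrative sketch of how a caller might tell the two apart (the checkResponse helper is hypothetical, not part of this PR):

package main

import (
	"fmt"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)

// checkResponse is a hypothetical helper showing the two failure modes of the
// agent handlers: system errors arrive as a plain error from the stream, while
// rejected operations arrive in-band as Success=false with a Status message.
func checkResponse(resp *rpcpb.Response, err error) error {
	if err != nil {
		// system error on the agent (e.g. it failed to create files or start etcd)
		return err
	}
	if !resp.Success {
		// wrong configuration/operation (e.g. InitialStartEtcd issued twice)
		return fmt.Errorf("agent rejected operation: %s", resp.Status)
	}
	return nil
}

func main() {
	// usage sketch with a fabricated response value
	err := checkResponse(&rpcpb.Response{Success: true, Status: "successfully started etcd!"}, nil)
	fmt.Println(err) // <nil>
}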

+ 166 - 0
tools/functional-tester/agent/server.go

@@ -0,0 +1,166 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"math"
+	"net"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+)
+
+// Server implements "rpcpb.TransportServer"
+// and other etcd operations as an agent
+// no need to lock fields since request operations are
+// serialized in tester-side
+type Server struct {
+	grpcServer *grpc.Server
+	logger     *zap.Logger
+
+	network string
+	address string
+	ln      net.Listener
+
+	rpcpb.TransportServer
+	last rpcpb.Operation
+
+	*rpcpb.Member
+	*rpcpb.Tester
+
+	etcdCmd     *exec.Cmd
+	etcdLogFile *os.File
+
+	// forward incoming advertise URLs traffic to listen URLs
+	advertiseClientPortToProxy map[int]transport.Proxy
+	advertisePeerPortToProxy   map[int]transport.Proxy
+}
+
+// NewServer returns a new agent server.
+func NewServer(
+	logger *zap.Logger,
+	network string,
+	address string,
+) *Server {
+	return &Server{
+		logger:  logger,
+		network: network,
+		address: address,
+		last:    rpcpb.Operation_NotStarted,
+		advertiseClientPortToProxy: make(map[int]transport.Proxy),
+		advertisePeerPortToProxy:   make(map[int]transport.Proxy),
+	}
+}
+
+const (
+	maxRequestBytes   = 1.5 * 1024 * 1024
+	grpcOverheadBytes = 512 * 1024
+	maxStreams        = math.MaxUint32
+	maxSendBytes      = math.MaxInt32
+)
+
+// StartServe starts serving agent server.
+func (srv *Server) StartServe() error {
+	var err error
+	srv.ln, err = net.Listen(srv.network, srv.address)
+	if err != nil {
+		return err
+	}
+
+	var opts []grpc.ServerOption
+	opts = append(opts, grpc.MaxRecvMsgSize(int(maxRequestBytes+grpcOverheadBytes)))
+	opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
+	opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
+	srv.grpcServer = grpc.NewServer(opts...)
+
+	rpcpb.RegisterTransportServer(srv.grpcServer, srv)
+
+	srv.logger.Info(
+		"gRPC server started",
+		zap.String("address", srv.address),
+		zap.String("listener-address", srv.ln.Addr().String()),
+	)
+	err = srv.grpcServer.Serve(srv.ln)
+	if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+		srv.logger.Info(
+			"gRPC server is shut down",
+			zap.String("address", srv.address),
+			zap.Error(err),
+		)
+	} else {
+		srv.logger.Warn(
+			"gRPC server returned with error",
+			zap.String("address", srv.address),
+			zap.Error(err),
+		)
+	}
+
+	return err
+}
+
+// Stop stops serving gRPC server.
+func (srv *Server) Stop() {
+	srv.logger.Info("gRPC server stopping", zap.String("address", srv.address))
+	srv.grpcServer.Stop()
+	srv.logger.Info("gRPC server stopped", zap.String("address", srv.address))
+}
+
+// Transport communicates with etcd tester.
+func (srv *Server) Transport(stream rpcpb.Transport_TransportServer) (err error) {
+	errc := make(chan error)
+	go func() {
+		for {
+			req, err := stream.Recv()
+			if err != nil {
+				errc <- err
+				// TODO: handle error and retry
+				return
+			}
+			if req.Member != nil {
+				srv.Member = req.Member
+			}
+			if req.Tester != nil {
+				srv.Tester = req.Tester
+			}
+
+			var resp *rpcpb.Response
+			resp, err = srv.handleTesterRequest(req)
+			if err != nil {
+				errc <- err
+				// TODO: handle error and retry
+				return
+			}
+
+			if err = stream.Send(resp); err != nil {
+				errc <- err
+				// TODO: handle error and retry
+				return
+			}
+		}
+	}()
+
+	select {
+	case err = <-errc:
+	case <-stream.Context().Done():
+		err = stream.Context().Err()
+	}
+	return err
+}
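
For context, a rough sketch of the tester-side counterpart to this Transport handler, assuming the generated rpcpb client follows the usual protoc-gen-go pattern (NewTransportClient with a bidirectional Transport stream). The agent address matches the defaults above but is illustrative, as is the operation sequence; a real run would first send Operation_InitialStartEtcd with Member and Tester populated:

package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

	"google.golang.org/grpc"
)

func main() {
	// dial one agent server started by cmd/etcd-agent
	conn, err := grpc.Dial("127.0.0.1:9027", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// open the bidirectional request/response stream served by Server.Transport
	stream, err := rpcpb.NewTransportClient(conn).Transport(context.Background())
	if err != nil {
		panic(err)
	}

	// ask the agent to blackhole its peer-port traffic, then undo it
	for _, op := range []rpcpb.Operation{
		rpcpb.Operation_BlackholePeerPortTxRx,
		rpcpb.Operation_UnblackholePeerPortTxRx,
	} {
		if err = stream.Send(&rpcpb.Request{Operation: op}); err != nil {
			panic(err)
		}
		resp, rerr := stream.Recv()
		if rerr != nil {
			panic(rerr)
		}
		fmt.Println(op, resp.Success, resp.Status)
	}
}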

+ 110 - 0
tools/functional-tester/agent/utils.go

@@ -0,0 +1,110 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"net"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+// TODO: support separate WAL directory
+func archive(baseDir, etcdLogPath, dataDir string) error {
+	dir := filepath.Join(baseDir, "etcd-failure-archive", time.Now().Format(time.RFC3339))
+	if existDir(dir) {
+		dir = filepath.Join(baseDir, "etcd-failure-archive", time.Now().Add(time.Second).Format(time.RFC3339))
+	}
+	if err := fileutil.TouchDirAll(dir); err != nil {
+		return err
+	}
+
+	if err := os.Rename(etcdLogPath, filepath.Join(dir, "etcd.log")); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+	if err := os.Rename(dataDir, filepath.Join(dir, filepath.Base(dataDir))); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func existDir(fpath string) bool {
+	st, err := os.Stat(fpath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	} else {
+		return st.IsDir()
+	}
+	return false
+}
+
+func getURLAndPort(addr string) (urlAddr *url.URL, port int, err error) {
+	urlAddr, err = url.Parse(addr)
+	if err != nil {
+		return nil, -1, err
+	}
+	var s string
+	_, s, err = net.SplitHostPort(urlAddr.Host)
+	if err != nil {
+		return nil, -1, err
+	}
+	port, err = strconv.Atoi(s)
+	if err != nil {
+		return nil, -1, err
+	}
+	return urlAddr, port, err
+}
+
+func stopWithSig(cmd *exec.Cmd, sig os.Signal) error {
+	err := cmd.Process.Signal(sig)
+	if err != nil {
+		return err
+	}
+
+	errc := make(chan error)
+	go func() {
+		_, ew := cmd.Process.Wait()
+		errc <- ew
+		close(errc)
+	}()
+
+	select {
+	case <-time.After(5 * time.Second):
+		cmd.Process.Kill()
+	case e := <-errc:
+		return e
+	}
+	err = <-errc
+	return err
+}
+
+func cleanPageCache() error {
+	// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+	// https://github.com/torvalds/linux/blob/master/fs/drop_caches.c
+	cmd := exec.Command("/bin/sh", "-c", `echo "echo 1 > /proc/sys/vm/drop_caches" | sudo sh`)
+	return cmd.Run()
+}

+ 36 - 0
tools/functional-tester/agent/utils_test.go

@@ -0,0 +1,36 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+	"net/url"
+	"reflect"
+	"testing"
+)
+
+func TestGetURLAndPort(t *testing.T) {
+	addr := "https://127.0.0.1:2379"
+	urlAddr, port, err := getURLAndPort(addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	exp := &url.URL{Scheme: "https", Host: "127.0.0.1:2379"}
+	if !reflect.DeepEqual(urlAddr, exp) {
+		t.Fatalf("expected %+v, got %+v", exp, urlAddr)
+	}
+	if port != 2379 {
+		t.Fatalf("port expected 2379, got %d", port)
+	}
+}

+ 3 - 4
tools/functional-tester/build

@@ -5,7 +5,6 @@ if ! [[ "$0" =~ "tools/functional-tester/build" ]]; then
 	exit 255
 fi
 
-CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-agent ./tools/functional-tester/etcd-agent
-CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-tester ./tools/functional-tester/etcd-tester
-CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-runner ./tools/functional-tester/etcd-runner
-
+CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-agent ./tools/functional-tester/cmd/etcd-agent
+CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-tester ./tools/functional-tester/cmd/etcd-tester
+CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-runner ./tools/functional-tester/cmd/etcd-runner

+ 46 - 0
tools/functional-tester/cmd/etcd-agent/main.go

@@ -0,0 +1,46 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// etcd-agent is a program that runs functional-tester agent.
+package main
+
+import (
+	"flag"
+
+	"github.com/coreos/etcd/tools/functional-tester/agent"
+
+	"go.uber.org/zap"
+)
+
+var logger *zap.Logger
+
+func init() {
+	var err error
+	logger, err = zap.NewProduction()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func main() {
+	network := flag.String("network", "tcp", "network to serve agent server")
+	address := flag.String("address", "127.0.0.1:9027", "address to serve agent server")
+	flag.Parse()
+
+	defer logger.Sync()
+
+	srv := agent.NewServer(logger, *network, *address)
+	err := srv.StartServe()
+	logger.Info("agent exiting", zap.Error(err))
+}

+ 6 - 0
tools/functional-tester/etcd-runner/doc.go → tools/functional-tester/cmd/etcd-runner/main.go

@@ -14,3 +14,9 @@
 
 // etcd-runner is a program for testing etcd clientv3 features against a fault injected cluster.
 package main
+
+import "github.com/coreos/etcd/tools/functional-tester/runner"
+
+func main() {
+	runner.Start()
+}

+ 59 - 0
tools/functional-tester/cmd/etcd-tester/main.go

@@ -0,0 +1,59 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// etcd-tester is a program that runs functional-tester client.
+package main
+
+import (
+	"flag"
+
+	"github.com/coreos/etcd/tools/functional-tester/tester"
+
+	"go.uber.org/zap"
+)
+
+var logger *zap.Logger
+
+func init() {
+	var err error
+	logger, err = zap.NewProduction()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func main() {
+	config := flag.String("config", "", "path to tester configuration")
+	flag.Parse()
+
+	defer logger.Sync()
+
+	clus, err := tester.NewCluster(logger, *config)
+	if err != nil {
+		logger.Fatal("failed to create a cluster", zap.Error(err))
+	}
+
+	err = clus.Bootstrap()
+	if err != nil {
+		logger.Fatal("Bootstrap failed", zap.Error(err))
+	}
+	defer clus.DestroyEtcdAgents()
+
+	err = clus.WaitHealth()
+	if err != nil {
+		logger.Fatal("WaitHealth failed", zap.Error(err))
+	}
+
+	clus.StartTester()
+}

+ 0 - 372
tools/functional-tester/etcd-agent/agent.go

@@ -1,372 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"net"
-	"net/url"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strconv"
-	"sync"
-	"syscall"
-	"time"
-
-	"github.com/coreos/etcd/pkg/fileutil"
-	"github.com/coreos/etcd/pkg/transport"
-	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
-)
-
-const (
-	stateUninitialized = "uninitialized"
-	stateStarted       = "started"
-	stateStopped       = "stopped"
-	stateTerminated    = "terminated"
-)
-
-type Agent struct {
-	state string // the state of etcd process
-
-	cmd     *exec.Cmd
-	logfile *os.File
-
-	cfg AgentConfig
-
-	pmu                  sync.Mutex
-	advertisePortToProxy map[int]transport.Proxy
-}
-
-type AgentConfig struct {
-	EtcdPath      string
-	LogDir        string
-	FailpointAddr string
-}
-
-func newAgent(cfg AgentConfig) (*Agent, error) {
-	// check if the file exists
-	_, err := os.Stat(cfg.EtcdPath)
-	if err != nil {
-		return nil, err
-	}
-
-	c := exec.Command(cfg.EtcdPath)
-
-	err = fileutil.TouchDirAll(cfg.LogDir)
-	if err != nil {
-		return nil, err
-	}
-
-	var f *os.File
-	f, err = os.Create(filepath.Join(cfg.LogDir, "etcd.log"))
-	if err != nil {
-		return nil, err
-	}
-
-	return &Agent{
-		state:                stateUninitialized,
-		cmd:                  c,
-		logfile:              f,
-		cfg:                  cfg,
-		advertisePortToProxy: make(map[int]transport.Proxy),
-	}, nil
-}
-
-// start starts a new etcd process with the given args.
-func (a *Agent) start(args ...string) error {
-	args = append(args, "--data-dir", a.dataDir())
-	a.cmd = exec.Command(a.cmd.Path, args...)
-	a.cmd.Env = []string{"GOFAIL_HTTP=" + a.cfg.FailpointAddr}
-	a.cmd.Stdout = a.logfile
-	a.cmd.Stderr = a.logfile
-	err := a.cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	a.state = stateStarted
-
-	a.pmu.Lock()
-	defer a.pmu.Unlock()
-	if len(a.advertisePortToProxy) == 0 {
-		// give etcd enough time to start before setting up the proxy
-		time.Sleep(time.Second)
-		var (
-			err                    error
-			s                      string
-			listenClientURL        *url.URL
-			advertiseClientURL     *url.URL
-			advertiseClientURLPort int
-			listenPeerURL          *url.URL
-			advertisePeerURL       *url.URL
-			advertisePeerURLPort   int
-		)
-		for i := range args {
-			switch args[i] {
-			case "--listen-client-urls":
-				listenClientURL, err = url.Parse(args[i+1])
-				if err != nil {
-					return err
-				}
-			case "--advertise-client-urls":
-				advertiseClientURL, err = url.Parse(args[i+1])
-				if err != nil {
-					return err
-				}
-				_, s, err = net.SplitHostPort(advertiseClientURL.Host)
-				if err != nil {
-					return err
-				}
-				advertiseClientURLPort, err = strconv.Atoi(s)
-				if err != nil {
-					return err
-				}
-			case "--listen-peer-urls":
-				listenPeerURL, err = url.Parse(args[i+1])
-				if err != nil {
-					return err
-				}
-			case "--initial-advertise-peer-urls":
-				advertisePeerURL, err = url.Parse(args[i+1])
-				if err != nil {
-					return err
-				}
-				_, s, err = net.SplitHostPort(advertisePeerURL.Host)
-				if err != nil {
-					return err
-				}
-				advertisePeerURLPort, err = strconv.Atoi(s)
-				if err != nil {
-					return err
-				}
-			}
-		}
-
-		clientProxy := transport.NewProxy(transport.ProxyConfig{
-			From: *advertiseClientURL,
-			To:   *listenClientURL,
-		})
-		select {
-		case err = <-clientProxy.Error():
-			return err
-		case <-time.After(time.Second):
-		}
-		a.advertisePortToProxy[advertiseClientURLPort] = clientProxy
-
-		peerProxy := transport.NewProxy(transport.ProxyConfig{
-			From: *advertisePeerURL,
-			To:   *listenPeerURL,
-		})
-		select {
-		case err = <-peerProxy.Error():
-			return err
-		case <-time.After(time.Second):
-		}
-		a.advertisePortToProxy[advertisePeerURLPort] = peerProxy
-	}
-	return nil
-}
-
-// stopWithSig stops the existing etcd process the agent started, using the given signal.
-func (a *Agent) stopWithSig(sig os.Signal) error {
-	if a.state != stateStarted {
-		return nil
-	}
-
-	a.pmu.Lock()
-	if len(a.advertisePortToProxy) > 0 {
-		for _, p := range a.advertisePortToProxy {
-			if err := p.Close(); err != nil {
-				a.pmu.Unlock()
-				return err
-			}
-			select {
-			case <-p.Done():
-				// enough time to release port
-				time.Sleep(time.Second)
-			case <-time.After(time.Second):
-			}
-		}
-		a.advertisePortToProxy = make(map[int]transport.Proxy)
-	}
-	a.pmu.Unlock()
-
-	err := stopWithSig(a.cmd, sig)
-	if err != nil {
-		return err
-	}
-
-	a.state = stateStopped
-	return nil
-}
-
-func stopWithSig(cmd *exec.Cmd, sig os.Signal) error {
-	err := cmd.Process.Signal(sig)
-	if err != nil {
-		return err
-	}
-
-	errc := make(chan error)
-	go func() {
-		_, ew := cmd.Process.Wait()
-		errc <- ew
-		close(errc)
-	}()
-
-	select {
-	case <-time.After(5 * time.Second):
-		cmd.Process.Kill()
-	case e := <-errc:
-		return e
-	}
-	err = <-errc
-	return err
-}
-
-// restart restarts the stopped etcd process.
-func (a *Agent) restart() error {
-	return a.start(a.cmd.Args[1:]...)
-}
-
-func (a *Agent) cleanup() error {
-	// exit with a stack trace
-	if err := a.stopWithSig(syscall.SIGQUIT); err != nil {
-		return err
-	}
-	a.state = stateUninitialized
-
-	a.logfile.Close()
-	if err := archiveLogAndDataDir(a.cfg.LogDir, a.dataDir()); err != nil {
-		return err
-	}
-
-	if err := fileutil.TouchDirAll(a.cfg.LogDir); err != nil {
-		return err
-	}
-
-	f, err := os.Create(filepath.Join(a.cfg.LogDir, "etcd.log"))
-	if err != nil {
-		return err
-	}
-	a.logfile = f
-
-	// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-	// https://github.com/torvalds/linux/blob/master/fs/drop_caches.c
-	cmd := exec.Command("/bin/sh", "-c", `echo "echo 1 > /proc/sys/vm/drop_caches" | sudo sh`)
-	if err := cmd.Run(); err != nil {
-		plog.Infof("error when cleaning page cache (%v)", err)
-	}
-	return nil
-}
-
-// terminate stops the existing etcd process the agent started
-// and removes the data dir.
-func (a *Agent) terminate() error {
-	err := a.stopWithSig(syscall.SIGTERM)
-	if err != nil {
-		return err
-	}
-	err = os.RemoveAll(a.dataDir())
-	if err != nil {
-		return err
-	}
-	a.state = stateTerminated
-	return nil
-}
-
-func (a *Agent) dropPort(port int) error {
-	a.pmu.Lock()
-	defer a.pmu.Unlock()
-
-	p, ok := a.advertisePortToProxy[port]
-	if !ok {
-		return fmt.Errorf("%d does not have proxy", port)
-	}
-	p.BlackholeTx()
-	p.BlackholeRx()
-	return nil
-}
-
-func (a *Agent) recoverPort(port int) error {
-	a.pmu.Lock()
-	defer a.pmu.Unlock()
-
-	p, ok := a.advertisePortToProxy[port]
-	if !ok {
-		return fmt.Errorf("%d does not have proxy", port)
-	}
-	p.UnblackholeTx()
-	p.UnblackholeRx()
-	return nil
-}
-
-func (a *Agent) setLatency(ms, rv int) error {
-	a.pmu.Lock()
-	defer a.pmu.Unlock()
-
-	if ms == 0 {
-		for _, p := range a.advertisePortToProxy {
-			p.UndelayTx()
-			p.UndelayRx()
-		}
-	}
-	for _, p := range a.advertisePortToProxy {
-		p.DelayTx(time.Duration(ms)*time.Millisecond, time.Duration(rv)*time.Millisecond)
-		p.DelayRx(time.Duration(ms)*time.Millisecond, time.Duration(rv)*time.Millisecond)
-	}
-	return nil
-}
-
-func (a *Agent) status() client.Status {
-	return client.Status{State: a.state}
-}
-
-func (a *Agent) dataDir() string {
-	return filepath.Join(a.cfg.LogDir, "etcd.data")
-}
-
-func existDir(fpath string) bool {
-	st, err := os.Stat(fpath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return false
-		}
-	} else {
-		return st.IsDir()
-	}
-	return false
-}
-
-func archiveLogAndDataDir(logDir string, datadir string) error {
-	dir := filepath.Join(logDir, "failure_archive", time.Now().Format(time.RFC3339))
-	if existDir(dir) {
-		dir = filepath.Join(logDir, "failure_archive", time.Now().Add(time.Second).Format(time.RFC3339))
-	}
-	if err := fileutil.TouchDirAll(dir); err != nil {
-		return err
-	}
-	if err := os.Rename(filepath.Join(logDir, "etcd.log"), filepath.Join(dir, "etcd.log")); err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-	}
-	if err := os.Rename(datadir, filepath.Join(dir, filepath.Base(datadir))); err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-	}
-	return nil
-}
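The removed agent injected network faults by putting a `transport.Proxy` in front of each advertised URL and driving the blackhole/delay methods shown above. Below is a minimal sketch of that pattern, using only the proxy calls the deleted code makes (the proxy package itself is also reworked in this pull request, so treat this as the old usage); the URLs and ports are hypothetical.

```go
package main

import (
	"net/url"
	"time"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// hypothetical endpoints: "from" is the advertised URL clients dial,
	// "to" is the real listen URL the proxy forwards to
	from, _ := url.Parse("http://127.0.0.1:13790")
	to, _ := url.Parse("http://127.0.0.1:2379")

	p := transport.NewProxy(transport.ProxyConfig{From: *from, To: *to})
	defer p.Close()

	// add latency, as the agent's setLatency did
	p.DelayTx(500*time.Millisecond, 50*time.Millisecond)
	p.DelayRx(500*time.Millisecond, 50*time.Millisecond)

	// drop all traffic on this port, as dropPort did
	p.BlackholeTx()
	p.BlackholeRx()

	// recover
	p.UnblackholeTx()
	p.UnblackholeRx()
	p.UndelayTx()
	p.UndelayRx()
}
```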

+ 0 - 87
tools/functional-tester/etcd-agent/agent_test.go

@@ -1,87 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"os"
-	"path/filepath"
-	"syscall"
-	"testing"
-)
-
-var etcdPath = filepath.Join(os.Getenv("GOPATH"), "bin/etcd")
-
-func TestAgentStart(t *testing.T) {
-	defer os.Remove("etcd.log")
-
-	a := newTestAgent(t)
-	defer a.terminate()
-
-	err := a.start()
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestAgentRestart(t *testing.T) {
-	defer os.Remove("etcd.log")
-
-	a := newTestAgent(t)
-	defer a.terminate()
-
-	err := a.start()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = a.stopWithSig(syscall.SIGTERM)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = a.restart()
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestAgentTerminate(t *testing.T) {
-	defer os.Remove("etcd.log")
-
-	a := newTestAgent(t)
-
-	err := a.start()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = a.terminate()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := os.Stat(a.dataDir()); !os.IsNotExist(err) {
-		t.Fatal(err)
-	}
-}
-
-// newTestAgent creates a test agent
-func newTestAgent(t *testing.T) *Agent {
-	a, err := newAgent(AgentConfig{EtcdPath: etcdPath, LogDir: "etcd.log"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	return a
-}

+ 0 - 118
tools/functional-tester/etcd-agent/client/client.go

@@ -1,118 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import "net/rpc"
-
-type Status struct {
-	// State gives the human-readable status of an agent (e.g., "started" or "terminated")
-	State string
-
-	// TODO: gather more information
-	// TODO: memory usage, raft information, etc.
-}
-
-type Agent interface {
-	ID() uint64
-	// Start starts a new etcd with the given args on the agent machine.
-	Start(args ...string) (int, error)
-	// Stop stops the existing etcd the agent started.
-	Stop() error
-	// Restart restarts the existing etcd the agent stopped.
-	Restart() (int, error)
-	// Cleanup stops the existing etcd the agent started, then archives the log and its data dir.
-	Cleanup() error
-	// Terminate stops the existing etcd the agent started and removes its data dir.
-	Terminate() error
-	// DropPort drops all network packets at the given port.
-	DropPort(port int) error
-	// RecoverPort stops dropping all network packets at the given port.
-	RecoverPort(port int) error
-	// SetLatency slows down network by introducing latency.
-	SetLatency(ms, rv int) error
-	// RemoveLatency removes latency introduced by SetLatency.
-	RemoveLatency() error
-	// Status returns the status of etcd on the agent
-	Status() (Status, error)
-}
-
-type agent struct {
-	endpoint  string
-	rpcClient *rpc.Client
-}
-
-func NewAgent(endpoint string) (Agent, error) {
-	c, err := rpc.DialHTTP("tcp", endpoint)
-	if err != nil {
-		return nil, err
-	}
-	return &agent{endpoint, c}, nil
-}
-
-func (a *agent) Start(args ...string) (int, error) {
-	var pid int
-	err := a.rpcClient.Call("Agent.RPCStart", args, &pid)
-	if err != nil {
-		return -1, err
-	}
-	return pid, nil
-}
-
-func (a *agent) Stop() error {
-	return a.rpcClient.Call("Agent.RPCStop", struct{}{}, nil)
-}
-
-func (a *agent) Restart() (int, error) {
-	var pid int
-	err := a.rpcClient.Call("Agent.RPCRestart", struct{}{}, &pid)
-	if err != nil {
-		return -1, err
-	}
-	return pid, nil
-}
-
-func (a *agent) Cleanup() error {
-	return a.rpcClient.Call("Agent.RPCCleanup", struct{}{}, nil)
-}
-
-func (a *agent) Terminate() error {
-	return a.rpcClient.Call("Agent.RPCTerminate", struct{}{}, nil)
-}
-
-func (a *agent) DropPort(port int) error {
-	return a.rpcClient.Call("Agent.RPCDropPort", port, nil)
-}
-
-func (a *agent) RecoverPort(port int) error {
-	return a.rpcClient.Call("Agent.RPCRecoverPort", port, nil)
-}
-
-func (a *agent) SetLatency(ms, rv int) error {
-	return a.rpcClient.Call("Agent.RPCSetLatency", []int{ms, rv}, nil)
-}
-
-func (a *agent) RemoveLatency() error {
-	return a.rpcClient.Call("Agent.RPCRemoveLatency", struct{}{}, nil)
-}
-
-func (a *agent) Status() (Status, error) {
-	var s Status
-	err := a.rpcClient.Call("Agent.RPCStatus", struct{}{}, &s)
-	return s, err
-}
-
-func (a *agent) ID() uint64 {
-	panic("not implemented")
-}
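For context on what the new `agent.NewServer`-based flow replaces: this removed package exposed the agent over `net/rpc`, and the old tester drove it roughly as in the minimal sketch below. The endpoint, flags, and port are illustrative only.

```go
package main

import (
	"log"

	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
)

func main() {
	// dial an agent at a hypothetical endpoint
	a, err := client.NewAgent("127.0.0.1:9027")
	if err != nil {
		log.Fatal(err)
	}

	// start etcd on the agent machine; flags are illustrative
	pid, err := a.Start("--name", "etcd-0")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("etcd started, pid %d", pid)

	// inject and recover a network fault on the peer port
	if err = a.DropPort(2380); err != nil {
		log.Fatal(err)
	}
	if err = a.RecoverPort(2380); err != nil {
		log.Fatal(err)
	}

	// query agent state, then tear the member down
	st, err := a.Status()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("agent state: %s", st.State)

	if err = a.Terminate(); err != nil {
		log.Fatal(err)
	}
}
```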

+ 0 - 47
tools/functional-tester/etcd-agent/main.go

@@ -1,47 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"flag"
-	"os"
-	"path/filepath"
-
-	"github.com/coreos/pkg/capnslog"
-)
-
-var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcd-agent")
-
-func main() {
-	etcdPath := flag.String("etcd-path", filepath.Join(os.Getenv("GOPATH"), "bin/etcd"), "the path to etcd binary")
-	etcdLogDir := flag.String("etcd-log-dir", "etcd-log", "directory to store etcd logs, data directories, failure archive")
-	port := flag.String("port", ":9027", "port to serve agent server")
-	failpointAddr := flag.String("failpoint-addr", ":2381", "interface for gofail's HTTP server")
-	flag.Parse()
-
-	cfg := AgentConfig{
-		EtcdPath:      *etcdPath,
-		LogDir:        *etcdLogDir,
-		FailpointAddr: *failpointAddr,
-	}
-	a, err := newAgent(cfg)
-	if err != nil {
-		plog.Fatal(err)
-	}
-	a.serveRPC(*port)
-
-	var done chan struct{}
-	<-done
-}

+ 0 - 131
tools/functional-tester/etcd-agent/rpc.go

@@ -1,131 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"net"
-	"net/http"
-	"net/rpc"
-	"syscall"
-
-	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
-)
-
-func (a *Agent) serveRPC(port string) {
-	rpc.Register(a)
-	rpc.HandleHTTP()
-	l, e := net.Listen("tcp", port)
-	if e != nil {
-		plog.Fatal(e)
-	}
-	plog.Println("agent listening on", port)
-	go http.Serve(l, nil)
-}
-
-func (a *Agent) RPCStart(args []string, pid *int) error {
-	plog.Printf("start etcd with args %v", args)
-	err := a.start(args...)
-	if err != nil {
-		plog.Println("error starting etcd", err)
-		return err
-	}
-	*pid = a.cmd.Process.Pid
-	return nil
-}
-
-func (a *Agent) RPCStop(args struct{}, reply *struct{}) error {
-	plog.Printf("stop etcd")
-	err := a.stopWithSig(syscall.SIGTERM)
-	if err != nil {
-		plog.Println("error stopping etcd", err)
-		return err
-	}
-	return nil
-}
-
-func (a *Agent) RPCRestart(args struct{}, pid *int) error {
-	plog.Printf("restart etcd")
-	err := a.restart()
-	if err != nil {
-		plog.Println("error restarting etcd", err)
-		return err
-	}
-	*pid = a.cmd.Process.Pid
-	return nil
-}
-
-func (a *Agent) RPCCleanup(args struct{}, reply *struct{}) error {
-	plog.Printf("cleanup etcd")
-	err := a.cleanup()
-	if err != nil {
-		plog.Println("error cleaning up etcd", err)
-		return err
-	}
-	return nil
-}
-
-func (a *Agent) RPCTerminate(args struct{}, reply *struct{}) error {
-	plog.Printf("terminate etcd")
-	err := a.terminate()
-	if err != nil {
-		plog.Println("error terminating etcd", err)
-	}
-	return nil
-}
-
-func (a *Agent) RPCDropPort(port int, reply *struct{}) error {
-	plog.Printf("drop port %d", port)
-	err := a.dropPort(port)
-	if err != nil {
-		plog.Println("error dropping port", err)
-	}
-	return nil
-}
-
-func (a *Agent) RPCRecoverPort(port int, reply *struct{}) error {
-	plog.Printf("recover port %d", port)
-	err := a.recoverPort(port)
-	if err != nil {
-		plog.Println("error recovering port", err)
-	}
-	return nil
-}
-
-func (a *Agent) RPCSetLatency(args []int, reply *struct{}) error {
-	if len(args) != 2 {
-		return fmt.Errorf("SetLatency needs two args, got (%v)", args)
-	}
-	plog.Printf("set latency of %dms (+/- %dms)", args[0], args[1])
-	err := a.setLatency(args[0], args[1])
-	if err != nil {
-		plog.Println("error setting latency", err)
-	}
-	return nil
-}
-
-func (a *Agent) RPCRemoveLatency(args struct{}, reply *struct{}) error {
-	plog.Println("removing latency")
-	err := a.setLatency(0, 0)
-	if err != nil {
-		plog.Println("error removing latency")
-	}
-	return nil
-}
-
-func (a *Agent) RPCStatus(args struct{}, status *client.Status) error {
-	*status = a.status()
-	return nil
-}

+ 0 - 166
tools/functional-tester/etcd-agent/rpc_test.go

@@ -1,166 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"io/ioutil"
-	"log"
-	"net/rpc"
-	"os"
-	"testing"
-
-	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
-)
-
-func init() {
-	defaultAgent, err := newAgent(AgentConfig{EtcdPath: etcdPath, LogDir: "etcd.log"})
-	if err != nil {
-		log.Panic(err)
-	}
-	defaultAgent.serveRPC(":9027")
-}
-
-func TestRPCStart(t *testing.T) {
-	c, err := rpc.DialHTTP("tcp", ":9027")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	dir, err := ioutil.TempDir(os.TempDir(), "etcd-agent")
-	if err != nil {
-		t.Fatal(err)
-	}
-	var pid int
-	err = c.Call("Agent.RPCStart", []string{"--data-dir", dir}, &pid)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer c.Call("Agent.RPCTerminate", struct{}{}, nil)
-
-	_, err = os.FindProcess(pid)
-	if err != nil {
-		t.Errorf("unexpected error %v when find process %d", err, pid)
-	}
-}
-
-func TestRPCRestart(t *testing.T) {
-	c, err := rpc.DialHTTP("tcp", ":9027")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	dir, err := ioutil.TempDir(os.TempDir(), "etcd-agent")
-	if err != nil {
-		t.Fatal(err)
-	}
-	var pid int
-	err = c.Call("Agent.RPCStart", []string{"--data-dir", dir}, &pid)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer c.Call("Agent.RPCTerminate", struct{}{}, nil)
-
-	err = c.Call("Agent.RPCStop", struct{}{}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	var npid int
-	err = c.Call("Agent.RPCRestart", struct{}{}, &npid)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if npid == pid {
-		t.Errorf("pid = %v, want not equal to %d", npid, pid)
-	}
-
-	s, err := os.FindProcess(pid)
-	if err != nil {
-		t.Errorf("unexpected error %v when find process %d", err, pid)
-	}
-	_, err = s.Wait()
-	if err == nil {
-		t.Errorf("err = nil, want killed error")
-	}
-	_, err = os.FindProcess(npid)
-	if err != nil {
-		t.Errorf("unexpected error %v when find process %d", err, npid)
-	}
-}
-
-func TestRPCTerminate(t *testing.T) {
-	c, err := rpc.DialHTTP("tcp", ":9027")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	dir, err := ioutil.TempDir(os.TempDir(), "etcd-agent")
-	if err != nil {
-		t.Fatal(err)
-	}
-	var pid int
-	err = c.Call("Agent.RPCStart", []string{"--data-dir", dir}, &pid)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.Call("Agent.RPCTerminate", struct{}{}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := os.Stat(dir); !os.IsNotExist(err) {
-		t.Fatal(err)
-	}
-}
-
-func TestRPCStatus(t *testing.T) {
-	c, err := rpc.DialHTTP("tcp", ":9027")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var s client.Status
-	err = c.Call("Agent.RPCStatus", struct{}{}, &s)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if s.State != stateTerminated {
-		t.Errorf("state = %s, want %s", s.State, stateTerminated)
-	}
-
-	dir, err := ioutil.TempDir(os.TempDir(), "etcd-agent")
-	if err != nil {
-		t.Fatal(err)
-	}
-	var pid int
-	err = c.Call("Agent.RPCStart", []string{"--data-dir", dir}, &pid)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = c.Call("Agent.RPCStatus", struct{}{}, &s)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if s.State != stateStarted {
-		t.Errorf("state = %s, want %s", s.State, stateStarted)
-	}
-
-	err = c.Call("Agent.RPCTerminate", struct{}{}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-}

+ 0 - 261
tools/functional-tester/etcd-tester/cluster.go

@@ -1,261 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"context"
-	"fmt"
-	"math/rand"
-	"net"
-	"strings"
-	"time"
-
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
-
-	"google.golang.org/grpc"
-)
-
-// agentConfig holds the information needed to interact with and configure an agent and its etcd process
-type agentConfig struct {
-	endpoint            string
-	clientPort          int
-	advertiseClientPort int
-	peerPort            int
-	advertisePeerPort   int
-	failpointPort       int
-}
-
-type cluster struct {
-	agents  []agentConfig
-	Size    int
-	Members []*member
-}
-
-type ClusterStatus struct {
-	AgentStatuses map[string]client.Status
-}
-
-func (c *cluster) bootstrap() error {
-	size := len(c.agents)
-
-	members := make([]*member, size)
-	memberNameURLs := make([]string, size)
-	for i, a := range c.agents {
-		agent, err := client.NewAgent(a.endpoint)
-		if err != nil {
-			return err
-		}
-		host, _, err := net.SplitHostPort(a.endpoint)
-		if err != nil {
-			return err
-		}
-		members[i] = &member{
-			Agent:              agent,
-			Endpoint:           a.endpoint,
-			Name:               fmt.Sprintf("etcd-%d", i),
-			ClientURL:          fmt.Sprintf("http://%s:%d", host, a.clientPort),
-			AdvertiseClientURL: fmt.Sprintf("http://%s:%d", host, a.advertiseClientPort),
-			PeerURL:            fmt.Sprintf("http://%s:%d", host, a.peerPort),
-			AdvertisePeerURL:   fmt.Sprintf("http://%s:%d", host, a.advertisePeerPort),
-			FailpointURL:       fmt.Sprintf("http://%s:%d", host, a.failpointPort),
-		}
-		memberNameURLs[i] = members[i].ClusterEntry()
-	}
-	clusterStr := strings.Join(memberNameURLs, ",")
-	token := fmt.Sprint(rand.Int())
-
-	for i, m := range members {
-		flags := append(
-			m.Flags(),
-			"--initial-cluster-token", token,
-			"--initial-cluster", clusterStr,
-		)
-
-		if _, err := m.Agent.Start(flags...); err != nil {
-			// cleanup
-			for _, m := range members[:i] {
-				m.Agent.Terminate()
-			}
-			return err
-		}
-	}
-
-	c.Size = size
-	c.Members = members
-	return nil
-}
-
-func (c *cluster) Reset() error { return c.bootstrap() }
-
-func (c *cluster) WaitHealth() error {
-	var err error
-	// wait up to 60s for the cluster to become healthy.
-	// TODO: set it to a reasonable value. It is set that high because
-	// a follower may take a long time to catch up with the leader after a
-	// reboot under a reasonable workload (https://github.com/coreos/etcd/issues/2698)
-	for i := 0; i < 60; i++ {
-		for _, m := range c.Members {
-			if err = m.SetHealthKeyV3(); err != nil {
-				break
-			}
-		}
-		if err == nil {
-			return nil
-		}
-		plog.Warningf("#%d setHealthKey error (%v)", i, err)
-		time.Sleep(time.Second)
-	}
-	return err
-}
-
-// GetLeader returns the index of leader and error if any.
-func (c *cluster) GetLeader() (int, error) {
-	for i, m := range c.Members {
-		isLeader, err := m.IsLeader()
-		if isLeader || err != nil {
-			return i, err
-		}
-	}
-	return 0, fmt.Errorf("no leader found")
-}
-
-func (c *cluster) Cleanup() error {
-	var lasterr error
-	for _, m := range c.Members {
-		if err := m.Agent.Cleanup(); err != nil {
-			lasterr = err
-		}
-	}
-	return lasterr
-}
-
-func (c *cluster) Terminate() {
-	for _, m := range c.Members {
-		m.Agent.Terminate()
-	}
-}
-
-func (c *cluster) Status() ClusterStatus {
-	cs := ClusterStatus{
-		AgentStatuses: make(map[string]client.Status),
-	}
-
-	for _, m := range c.Members {
-		s, err := m.Agent.Status()
-		// TODO: add a.Desc() as a key of the map
-		desc := m.Endpoint
-		if err != nil {
-			cs.AgentStatuses[desc] = client.Status{State: "unknown"}
-			plog.Printf("failed to get the status of agent [%s]", desc)
-		}
-		cs.AgentStatuses[desc] = s
-	}
-	return cs
-}
-
-// maxRev returns the maximum revision found on the cluster.
-func (c *cluster) maxRev() (rev int64, err error) {
-	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
-	defer cancel()
-	revc, errc := make(chan int64, len(c.Members)), make(chan error, len(c.Members))
-	for i := range c.Members {
-		go func(m *member) {
-			mrev, merr := m.Rev(ctx)
-			revc <- mrev
-			errc <- merr
-		}(c.Members[i])
-	}
-	for i := 0; i < len(c.Members); i++ {
-		if merr := <-errc; merr != nil {
-			err = merr
-		}
-		if mrev := <-revc; mrev > rev {
-			rev = mrev
-		}
-	}
-	return rev, err
-}
-
-func (c *cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
-	revs := make(map[string]int64)
-	hashes := make(map[string]int64)
-	for _, m := range c.Members {
-		rev, hash, err := m.RevHash()
-		if err != nil {
-			return nil, nil, err
-		}
-		revs[m.ClientURL] = rev
-		hashes[m.ClientURL] = hash
-	}
-	return revs, hashes, nil
-}
-
-func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
-	if rev <= 0 {
-		return nil
-	}
-
-	for i, m := range c.Members {
-		u := m.ClientURL
-		conn, derr := m.dialGRPC()
-		if derr != nil {
-			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
-			err = derr
-			continue
-		}
-		kvc := pb.NewKVClient(conn)
-		ctx, cancel := context.WithTimeout(context.Background(), timeout)
-		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
-		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
-		cancel()
-		conn.Close()
-		succeed := true
-		if cerr != nil {
-			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
-				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
-			} else {
-				plog.Warningf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
-				err = cerr
-				succeed = false
-			}
-		}
-		if succeed {
-			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
-		}
-	}
-	return err
-}
-
-func (c *cluster) checkCompact(rev int64) error {
-	if rev == 0 {
-		return nil
-	}
-	for _, m := range c.Members {
-		if err := m.CheckCompact(rev); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (c *cluster) defrag() error {
-	for _, m := range c.Members {
-		if err := m.Defrag(); err != nil {
-			return err
-		}
-	}
-	return nil
-}

+ 0 - 16
tools/functional-tester/etcd-tester/doc.go

@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// etcd-tester is a single controller for all etcd-agents to manage an etcd cluster and simulate failures.
-package main

+ 0 - 205
tools/functional-tester/etcd-tester/failure.go

@@ -1,205 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"math/rand"
-	"os/exec"
-	"time"
-)
-
-type failure interface {
-	// Inject injects the failure into the testing cluster at the given
-	// round. When calling the function, the cluster should be healthy.
-	Inject(c *cluster, round int) error
-	// Recover recovers the injected failure caused by the injection of the
-	// given round and waits for the recovery of the testing cluster.
-	Recover(c *cluster, round int) error
-	// Desc returns a description of the failure
-	Desc() string
-}
-
-type description string
-
-func (d description) Desc() string { return string(d) }
-
-type injectMemberFunc func(*member) error
-type recoverMemberFunc func(*member) error
-
-type failureByFunc struct {
-	description
-	injectMember  injectMemberFunc
-	recoverMember recoverMemberFunc
-}
-
-type failureOne failureByFunc
-type failureAll failureByFunc
-type failureMajority failureByFunc
-type failureLeader struct {
-	failureByFunc
-	idx int
-}
-
-type failureDelay struct {
-	failure
-	delayDuration time.Duration
-}
-
-// failureUntilSnapshot injects a failure and waits for a snapshot event
-type failureUntilSnapshot struct{ failure }
-
-func (f *failureOne) Inject(c *cluster, round int) error {
-	return f.injectMember(c.Members[round%c.Size])
-}
-
-func (f *failureOne) Recover(c *cluster, round int) error {
-	if err := f.recoverMember(c.Members[round%c.Size]); err != nil {
-		return err
-	}
-	return c.WaitHealth()
-}
-
-func (f *failureAll) Inject(c *cluster, round int) error {
-	for _, m := range c.Members {
-		if err := f.injectMember(m); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (f *failureAll) Recover(c *cluster, round int) error {
-	for _, m := range c.Members {
-		if err := f.recoverMember(m); err != nil {
-			return err
-		}
-	}
-	return c.WaitHealth()
-}
-
-func (f *failureMajority) Inject(c *cluster, round int) error {
-	for i := range killMap(c.Size, round) {
-		if err := f.injectMember(c.Members[i]); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (f *failureMajority) Recover(c *cluster, round int) error {
-	for i := range killMap(c.Size, round) {
-		if err := f.recoverMember(c.Members[i]); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (f *failureLeader) Inject(c *cluster, round int) error {
-	idx, err := c.GetLeader()
-	if err != nil {
-		return err
-	}
-	f.idx = idx
-	return f.injectMember(c.Members[idx])
-}
-
-func (f *failureLeader) Recover(c *cluster, round int) error {
-	if err := f.recoverMember(c.Members[f.idx]); err != nil {
-		return err
-	}
-	return c.WaitHealth()
-}
-
-func (f *failureDelay) Inject(c *cluster, round int) error {
-	if err := f.failure.Inject(c, round); err != nil {
-		return err
-	}
-	if f.delayDuration > 0 {
-		plog.Infof("sleeping delay duration %v for %q", f.delayDuration, f.failure.Desc())
-		time.Sleep(f.delayDuration)
-	}
-	return nil
-}
-
-func (f *failureUntilSnapshot) Inject(c *cluster, round int) error {
-	if err := f.failure.Inject(c, round); err != nil {
-		return err
-	}
-	if c.Size < 3 {
-		return nil
-	}
-	// maxRev may fail since the failure was just injected; retry if it fails.
-	startRev, err := c.maxRev()
-	for i := 0; i < 10 && startRev == 0; i++ {
-		startRev, err = c.maxRev()
-	}
-	if startRev == 0 {
-		return err
-	}
-	lastRev := startRev
-	// A normal, healthy cluster can accept at least 1,000 req/s.
-	// Give it three times that long to create a new snapshot.
-	retry := snapshotCount / 1000 * 3
-	for j := 0; j < retry; j++ {
-		lastRev, _ = c.maxRev()
-		// If the number of proposals committed is bigger than snapshot count,
-		// a new snapshot should have been created.
-		if lastRev-startRev > snapshotCount {
-			return nil
-		}
-		time.Sleep(time.Second)
-	}
-	return fmt.Errorf("cluster too slow: only commit %d requests in %ds", lastRev-startRev, retry)
-}
-
-func (f *failureUntilSnapshot) Desc() string {
-	return f.failure.Desc() + " for a long time and expect it to recover from an incoming snapshot"
-}
-
-func killMap(size int, seed int) map[int]bool {
-	m := make(map[int]bool)
-	r := rand.New(rand.NewSource(int64(seed)))
-	majority := size/2 + 1
-	for {
-		m[r.Intn(size)] = true
-		if len(m) >= majority {
-			return m
-		}
-	}
-}
-
-type failureNop failureByFunc
-
-func (f *failureNop) Inject(c *cluster, round int) error  { return nil }
-func (f *failureNop) Recover(c *cluster, round int) error { return nil }
-
-type failureExternal struct {
-	failure
-
-	description string
-	scriptPath  string
-}
-
-func (f *failureExternal) Inject(c *cluster, round int) error {
-	return exec.Command(f.scriptPath, "enable", fmt.Sprintf("%d", round)).Run()
-}
-
-func (f *failureExternal) Recover(c *cluster, round int) error {
-	return exec.Command(f.scriptPath, "disable", fmt.Sprintf("%d", round)).Run()
-}
-
-func (f *failureExternal) Desc() string { return f.description }
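One detail in the removed `failure.go` worth calling out: `killMap` seeds `math/rand` with the round number, so each round deterministically picks the same random majority (`size/2 + 1`) of member indexes for both Inject and Recover. A self-contained worked example follows; the function body is copied verbatim from the code above.

```go
package main

import (
	"fmt"
	"math/rand"
)

// copied from the removed etcd-tester failure.go
func killMap(size int, seed int) map[int]bool {
	m := make(map[int]bool)
	r := rand.New(rand.NewSource(int64(seed)))
	majority := size/2 + 1
	for {
		m[r.Intn(size)] = true
		if len(m) >= majority {
			return m
		}
	}
}

func main() {
	// for a 5-member cluster, a majority is 3; the same round always
	// selects the same members, so Recover can undo exactly what Inject did
	fmt.Println(killMap(5, 7))
	fmt.Println(killMap(5, 7)) // identical output: selection is deterministic per round
}
```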

+ 0 - 177
tools/functional-tester/etcd-tester/failure_agent.go

@@ -1,177 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"time"
-)
-
-const (
-	snapshotCount      = 10000
-	slowNetworkLatency = 500 // 500 millisecond
-	randomVariation    = 50
-
-	// delay duration to trigger leader election (default election timeout 1s)
-	triggerElectionDur = 5 * time.Second
-
-	// Wait more when it recovers from slow network, because network layer
-	// needs extra time to propagate traffic control (tc command) change.
-	// Otherwise, we get different hash values from the previous revision.
-	// For more detail, please see https://github.com/coreos/etcd/issues/5121.
-	waitRecover = 5 * time.Second
-)
-
-func injectStop(m *member) error { return m.Agent.Stop() }
-func recoverStop(m *member) error {
-	_, err := m.Agent.Restart()
-	return err
-}
-
-func newFailureKillAll() failure {
-	return &failureAll{
-		description:   "kill all members",
-		injectMember:  injectStop,
-		recoverMember: recoverStop,
-	}
-}
-
-func newFailureKillMajority() failure {
-	return &failureMajority{
-		description:   "kill majority of the cluster",
-		injectMember:  injectStop,
-		recoverMember: recoverStop,
-	}
-}
-
-func newFailureKillOne() failure {
-	return &failureOne{
-		description:   "kill one random member",
-		injectMember:  injectStop,
-		recoverMember: recoverStop,
-	}
-}
-
-func newFailureKillLeader() failure {
-	ff := failureByFunc{
-		description:   "kill leader member",
-		injectMember:  injectStop,
-		recoverMember: recoverStop,
-	}
-	return &failureLeader{ff, 0}
-}
-
-func newFailureKillOneForLongTime() failure {
-	return &failureUntilSnapshot{newFailureKillOne()}
-}
-
-func newFailureKillLeaderForLongTime() failure {
-	return &failureUntilSnapshot{newFailureKillLeader()}
-}
-
-func injectDropPort(m *member) error  { return m.Agent.DropPort(m.peerPort()) }
-func recoverDropPort(m *member) error { return m.Agent.RecoverPort(m.peerPort()) }
-
-func newFailureIsolate() failure {
-	f := &failureOne{
-		description:   "isolate one member",
-		injectMember:  injectDropPort,
-		recoverMember: recoverDropPort,
-	}
-	return &failureDelay{
-		failure:       f,
-		delayDuration: triggerElectionDur,
-	}
-}
-
-func newFailureIsolateAll() failure {
-	f := &failureAll{
-		description:   "isolate all members",
-		injectMember:  injectDropPort,
-		recoverMember: recoverDropPort,
-	}
-	return &failureDelay{
-		failure:       f,
-		delayDuration: triggerElectionDur,
-	}
-}
-
-func injectLatency(m *member) error {
-	if err := m.Agent.SetLatency(slowNetworkLatency, randomVariation); err != nil {
-		m.Agent.RemoveLatency()
-		return err
-	}
-	return nil
-}
-
-func recoverLatency(m *member) error {
-	if err := m.Agent.RemoveLatency(); err != nil {
-		return err
-	}
-	time.Sleep(waitRecover)
-	return nil
-}
-
-func newFailureSlowNetworkOneMember() failure {
-	desc := fmt.Sprintf("slow down one member's network by adding %d ms latency", slowNetworkLatency)
-	f := &failureOne{
-		description:   description(desc),
-		injectMember:  injectLatency,
-		recoverMember: recoverLatency,
-	}
-	return &failureDelay{
-		failure:       f,
-		delayDuration: triggerElectionDur,
-	}
-}
-
-func newFailureSlowNetworkLeader() failure {
-	desc := fmt.Sprintf("slow down leader's network by adding %d ms latency", slowNetworkLatency)
-	ff := failureByFunc{
-		description:   description(desc),
-		injectMember:  injectLatency,
-		recoverMember: recoverLatency,
-	}
-	f := &failureLeader{ff, 0}
-	return &failureDelay{
-		failure:       f,
-		delayDuration: triggerElectionDur,
-	}
-}
-
-func newFailureSlowNetworkAll() failure {
-	f := &failureAll{
-		description:   "slow down all members' network",
-		injectMember:  injectLatency,
-		recoverMember: recoverLatency,
-	}
-	return &failureDelay{
-		failure:       f,
-		delayDuration: triggerElectionDur,
-	}
-}
-
-func newFailureNop() failure {
-	return &failureNop{
-		description: "no failure",
-	}
-}
-
-func newFailureExternal(scriptPath string) failure {
-	return &failureExternal{
-		description: fmt.Sprintf("external fault injector (script: %s)", scriptPath),
-		scriptPath:  scriptPath,
-	}
-}

+ 0 - 44
tools/functional-tester/etcd-tester/http.go

@@ -1,44 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"encoding/json"
-	"net/http"
-)
-
-type statusHandler struct {
-	status *Status
-}
-
-func (sh statusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json")
-	en := json.NewEncoder(w)
-
-	sh.status.mu.Lock()
-	defer sh.status.mu.Unlock()
-
-	if err := en.Encode(Status{
-		Since:      sh.status.Since,
-		Failures:   sh.status.Failures,
-		RoundLimit: sh.status.RoundLimit,
-		Cluster:    sh.status.cluster.Status(),
-		cluster:    sh.status.cluster,
-		Round:      sh.status.Round,
-		Case:       sh.status.Case,
-	}); err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}

+ 0 - 232
tools/functional-tester/etcd-tester/main.go

@@ -1,232 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"strings"
-
-	"github.com/coreos/etcd/pkg/debugutil"
-
-	"github.com/coreos/pkg/capnslog"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"golang.org/x/time/rate"
-	"google.golang.org/grpc/grpclog"
-)
-
-var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcd-tester")
-
-const (
-	defaultClientPort    = 2379
-	defaultPeerPort      = 2380
-	defaultFailpointPort = 2381
-)
-
-func main() {
-	endpointStr := flag.String("agent-endpoints", "localhost:9027", "HTTP RPC endpoints of agents. Do not specify the schema.")
-	clientPorts := flag.String("client-ports", "", "etcd client port for each agent endpoint")
-	advertiseClientPorts := flag.String("advertise-client-ports", "", "etcd advertise client port for each agent endpoint")
-	peerPorts := flag.String("peer-ports", "", "etcd peer port for each agent endpoint")
-	advertisePeerPorts := flag.String("advertise-peer-ports", "", "etcd advertise peer port for each agent endpoint")
-	failpointPorts := flag.String("failpoint-ports", "", "etcd failpoint port for each agent endpoint")
-
-	stressKeyLargeSize := flag.Uint("stress-key-large-size", 32*1024+1, "the size of each large key written into etcd.")
-	stressKeySize := flag.Uint("stress-key-size", 100, "the size of each small key written into etcd.")
-	stressKeySuffixRange := flag.Uint("stress-key-count", 250000, "the count of key range written into etcd.")
-	stressKeyTxnSuffixRange := flag.Uint("stress-key-txn-count", 100, "the count of key range written into etcd txn (max 100).")
-	stressKeyTxnOps := flag.Uint("stress-key-txn-ops", 1, "number of operations per a transaction (max 64).")
-	limit := flag.Int("limit", -1, "the limit of rounds to run failure set (-1 to run without limits).")
-	exitOnFailure := flag.Bool("exit-on-failure", false, "exit tester on first failure")
-	stressQPS := flag.Int("stress-qps", 10000, "maximum number of stresser requests per second.")
-	schedCases := flag.String("schedule-cases", "", "test case schedule")
-	consistencyCheck := flag.Bool("consistency-check", true, "true to check consistency (revision, hash)")
-	stresserType := flag.String("stresser", "keys,lease", "comma separated list of stressing clients (keys, lease, v2keys, nop, election-runner, watch-runner, lock-racer-runner, lease-runner).")
-	etcdRunnerPath := flag.String("etcd-runner", "", "specify a path of etcd runner binary")
-	failureTypes := flag.String("failures", "default,failpoints", "specify failures (concat of \"default\" and \"failpoints\").")
-	failpoints := flag.String("failpoints", `panic("etcd-tester")`, `comma separated list of failpoint terms to inject (e.g. 'panic("etcd-tester"),1*sleep(1000)')`)
-	externalFailures := flag.String("external-failures", "", "specify a path of script for enabling/disabling an external fault injector")
-	enablePprof := flag.Bool("enable-pprof", false, "true to enable pprof")
-	flag.Parse()
-
-	// to discard gRPC-side balancer logs
-	grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
-
-	eps := strings.Split(*endpointStr, ",")
-	cports := portsFromArg(*clientPorts, len(eps), defaultClientPort)
-	acports := portsFromArg(*advertiseClientPorts, len(eps), defaultClientPort)
-	pports := portsFromArg(*peerPorts, len(eps), defaultPeerPort)
-	apports := portsFromArg(*advertisePeerPorts, len(eps), defaultPeerPort)
-	fports := portsFromArg(*failpointPorts, len(eps), defaultFailpointPort)
-	agents := make([]agentConfig, len(eps))
-
-	for i := range eps {
-		agents[i].endpoint = eps[i]
-		agents[i].clientPort = cports[i]
-		agents[i].advertiseClientPort = acports[i]
-		agents[i].peerPort = pports[i]
-		agents[i].advertisePeerPort = apports[i]
-		agents[i].failpointPort = fports[i]
-	}
-
-	c := &cluster{agents: agents}
-	if err := c.bootstrap(); err != nil {
-		plog.Fatal(err)
-	}
-	defer c.Terminate()
-
-	// ensure cluster is fully booted to know failpoints are available
-	c.WaitHealth()
-
-	var failures []failure
-
-	if failureTypes != nil && *failureTypes != "" {
-		types, failpoints := strings.Split(*failureTypes, ","), strings.Split(*failpoints, ",")
-		failures = makeFailures(types, failpoints, c)
-	}
-
-	if externalFailures != nil && *externalFailures != "" {
-		if len(failures) != 0 {
-			plog.Errorf("specify only one of -failures or -external-failures")
-			os.Exit(1)
-		}
-		failures = append(failures, newFailureExternal(*externalFailures))
-	}
-
-	if len(failures) == 0 {
-		plog.Infof("no failures\n")
-		failures = append(failures, newFailureNop())
-	}
-
-	schedule := failures
-	if schedCases != nil && *schedCases != "" {
-		cases := strings.Split(*schedCases, " ")
-		schedule = make([]failure, len(cases))
-		for i := range cases {
-			caseNum := 0
-			n, err := fmt.Sscanf(cases[i], "%d", &caseNum)
-			if n == 0 || err != nil {
-				plog.Fatalf(`couldn't parse case "%s" (%v)`, cases[i], err)
-			}
-			schedule[i] = failures[caseNum]
-		}
-	}
-
-	scfg := stressConfig{
-		rateLimiter:       rate.NewLimiter(rate.Limit(*stressQPS), *stressQPS),
-		keyLargeSize:      int(*stressKeyLargeSize),
-		keySize:           int(*stressKeySize),
-		keySuffixRange:    int(*stressKeySuffixRange),
-		keyTxnSuffixRange: int(*stressKeyTxnSuffixRange),
-		keyTxnOps:         int(*stressKeyTxnOps),
-		numLeases:         10,
-		keysPerLease:      10,
-
-		etcdRunnerPath: *etcdRunnerPath,
-	}
-	if scfg.keyTxnSuffixRange > 100 {
-		plog.Fatalf("stress-key-txn-count is maximum 100, got %d", scfg.keyTxnSuffixRange)
-	}
-	if scfg.keyTxnOps > 64 {
-		plog.Fatalf("stress-key-txn-ops is maximum 64, got %d", scfg.keyTxnOps)
-	}
-
-	t := &tester{
-		failures:      schedule,
-		cluster:       c,
-		limit:         *limit,
-		exitOnFailure: *exitOnFailure,
-
-		scfg:         scfg,
-		stresserType: *stresserType,
-		doChecks:     *consistencyCheck,
-	}
-
-	sh := statusHandler{status: &t.status}
-	http.Handle("/status", sh)
-	http.Handle("/metrics", promhttp.Handler())
-
-	if *enablePprof {
-		for p, h := range debugutil.PProfHandlers() {
-			http.Handle(p, h)
-		}
-	}
-
-	go func() { plog.Fatal(http.ListenAndServe(":9028", nil)) }()
-
-	t.runLoop()
-}
-
-// portsFromArg converts a comma separated list into a slice of ints
-func portsFromArg(arg string, n, defaultPort int) []int {
-	ret := make([]int, n)
-	if len(arg) == 0 {
-		for i := range ret {
-			ret[i] = defaultPort
-		}
-		return ret
-	}
-	s := strings.Split(arg, ",")
-	if len(s) != n {
-		fmt.Printf("expected %d ports, got %d (%s)\n", n, len(s), arg)
-		os.Exit(1)
-	}
-	for i := range s {
-		if _, err := fmt.Sscanf(s[i], "%d", &ret[i]); err != nil {
-			fmt.Println(err)
-			os.Exit(1)
-		}
-	}
-	return ret
-}
-
-func makeFailures(types, failpoints []string, c *cluster) []failure {
-	var failures []failure
-	for i := range types {
-		switch types[i] {
-		case "default":
-			defaultFailures := []failure{
-				newFailureKillAll(),
-				newFailureKillMajority(),
-				newFailureKillOne(),
-				newFailureKillLeader(),
-				newFailureKillOneForLongTime(),
-				newFailureKillLeaderForLongTime(),
-				newFailureIsolate(),
-				newFailureIsolateAll(),
-				newFailureSlowNetworkOneMember(),
-				newFailureSlowNetworkLeader(),
-				newFailureSlowNetworkAll(),
-			}
-			failures = append(failures, defaultFailures...)
-
-		case "failpoints":
-			fpFailures, fperr := failpointFailures(c, failpoints)
-			if len(fpFailures) == 0 {
-				plog.Infof("no failpoints found (%v)", fperr)
-			}
-			failures = append(failures, fpFailures...)
-
-		default:
-			plog.Errorf("unknown failure: %s\n", types[i])
-			os.Exit(1)
-		}
-	}
-
-	return failures
-}
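A removed helper worth a worked example: `portsFromArg` either repeats the default port for every agent endpoint or expects exactly one port per endpoint. The sketch below is simplified and self-contained; the original exits the process on malformed input rather than silently skipping it.

```go
package main

import (
	"fmt"
	"strings"
)

// simplified copy of the removed portsFromArg (strict length/parse checks elided)
func portsFromArg(arg string, n, defaultPort int) []int {
	ret := make([]int, n)
	if len(arg) == 0 {
		for i := range ret {
			ret[i] = defaultPort
		}
		return ret
	}
	parts := strings.Split(arg, ",")
	for i := 0; i < n && i < len(parts); i++ {
		fmt.Sscanf(parts[i], "%d", &ret[i])
	}
	return ret
}

func main() {
	fmt.Println(portsFromArg("", 3, 2379))                 // [2379 2379 2379]
	fmt.Println(portsFromArg("2379,12379,22379", 3, 2379)) // [2379 12379 22379]
}
```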

+ 0 - 190
tools/functional-tester/etcd-tester/member.go

@@ -1,190 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"net/url"
-	"time"
-
-	"github.com/coreos/etcd/clientv3"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
-
-	"google.golang.org/grpc"
-)
-
-type member struct {
-	Agent              client.Agent
-	Endpoint           string
-	Name               string
-	ClientURL          string
-	AdvertiseClientURL string
-	PeerURL            string
-	AdvertisePeerURL   string
-	FailpointURL       string
-}
-
-func (m *member) ClusterEntry() string { return m.Name + "=" + m.AdvertisePeerURL }
-
-func (m *member) Flags() []string {
-	return []string{
-		"--name", m.Name,
-		"--listen-client-urls", m.ClientURL,
-		"--advertise-client-urls", m.AdvertiseClientURL,
-		"--listen-peer-urls", m.PeerURL,
-		"--initial-advertise-peer-urls", m.AdvertisePeerURL,
-		"--initial-cluster-state", "new",
-		"--snapshot-count", "10000",
-		"--pre-vote",
-		"--experimental-initial-corrupt-check",
-	}
-}
-
-func (m *member) CheckCompact(rev int64) error {
-	cli, err := m.newClientV3()
-	if err != nil {
-		return fmt.Errorf("%v (endpoint %s)", err, m.AdvertiseClientURL)
-	}
-	defer cli.Close()
-
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1))
-	wr, ok := <-wch
-	cancel()
-
-	if !ok {
-		return fmt.Errorf("watch channel terminated (endpoint %s)", m.AdvertiseClientURL)
-	}
-	if wr.CompactRevision != rev {
-		return fmt.Errorf("got compact revision %v, wanted %v (endpoint %s)", wr.CompactRevision, rev, m.AdvertiseClientURL)
-	}
-
-	return nil
-}
-
-func (m *member) Defrag() error {
-	plog.Printf("defragmenting %s", m.AdvertiseClientURL)
-	cli, err := m.newClientV3()
-	if err != nil {
-		return err
-	}
-	defer cli.Close()
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
-	_, err = cli.Defragment(ctx, m.AdvertiseClientURL)
-	cancel()
-	if err != nil {
-		return err
-	}
-	plog.Printf("defragmented %s", m.AdvertiseClientURL)
-	return nil
-}
-
-func (m *member) RevHash() (int64, int64, error) {
-	conn, err := m.dialGRPC()
-	if err != nil {
-		return 0, 0, err
-	}
-	mt := pb.NewMaintenanceClient(conn)
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	resp, err := mt.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
-	cancel()
-	conn.Close()
-
-	if err != nil {
-		return 0, 0, err
-	}
-
-	return resp.Header.Revision, int64(resp.Hash), nil
-}
-
-func (m *member) Rev(ctx context.Context) (int64, error) {
-	cli, err := m.newClientV3()
-	if err != nil {
-		return 0, err
-	}
-	defer cli.Close()
-	resp, err := cli.Status(ctx, m.AdvertiseClientURL)
-	if err != nil {
-		return 0, err
-	}
-	return resp.Header.Revision, nil
-}
-
-func (m *member) IsLeader() (bool, error) {
-	cli, err := m.newClientV3()
-	if err != nil {
-		return false, err
-	}
-	defer cli.Close()
-	resp, err := cli.Status(context.Background(), m.AdvertiseClientURL)
-	if err != nil {
-		return false, err
-	}
-	return resp.Header.MemberId == resp.Leader, nil
-}
-
-func (m *member) SetHealthKeyV3() error {
-	cli, err := m.newClientV3()
-	if err != nil {
-		return fmt.Errorf("%v (%s)", err, m.AdvertiseClientURL)
-	}
-	defer cli.Close()
-	// give a generous timeout in case expensive requests (range/delete) are pending
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	_, err = cli.Put(ctx, "health", "good")
-	cancel()
-	if err != nil {
-		return fmt.Errorf("%v (%s)", err, m.AdvertiseClientURL)
-	}
-	return nil
-}
-
-func (m *member) newClientV3() (*clientv3.Client, error) {
-	return clientv3.New(clientv3.Config{
-		Endpoints:   []string{m.AdvertiseClientURL},
-		DialTimeout: 5 * time.Second,
-	})
-}
-
-func (m *member) dialGRPC() (*grpc.ClientConn, error) {
-	return grpc.Dial(m.grpcAddr(), grpc.WithInsecure(), grpc.WithTimeout(5*time.Second), grpc.WithBlock())
-}
-
-// grpcAddr gets the host from clientURL so it works with grpc.Dial()
-func (m *member) grpcAddr() string {
-	u, err := url.Parse(m.AdvertiseClientURL)
-	if err != nil {
-		panic(err)
-	}
-	return u.Host
-}
-
-func (m *member) peerPort() (port int) {
-	u, err := url.Parse(m.AdvertisePeerURL)
-	if err != nil {
-		panic(err)
-	}
-	_, portStr, err := net.SplitHostPort(u.Host)
-	if err != nil {
-		panic(err)
-	}
-	if _, err = fmt.Sscanf(portStr, "%d", &port); err != nil {
-		panic(err)
-	}
-	return port
-}

+ 0 - 57
tools/functional-tester/etcd-tester/status.go

@@ -1,57 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"sync"
-	"time"
-)
-
-type Status struct {
-	Since      time.Time
-	Failures   []string
-	RoundLimit int
-
-	Cluster ClusterStatus
-	cluster *cluster
-
-	mu    sync.Mutex // guards Round and Case
-	Round int
-	Case  int
-}
-
-func (s *Status) setRound(r int) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.Round = r
-}
-
-func (s *Status) getRound() int {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.Round
-}
-
-func (s *Status) setCase(c int) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.Case = c
-}
-
-func (s *Status) getCase() int {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.Case
-}

+ 0 - 218
tools/functional-tester/etcd-tester/stresser.go

@@ -1,218 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/time/rate"
-)
-
-type Stresser interface {
-	// Stress starts to stress the etcd cluster
-	Stress() error
-	// Pause stops the stresser from sending requests to etcd. Resume by calling Stress.
-	Pause()
-	// Close releases all of the Stresser's resources.
-	Close()
-	// ModifiedKeys reports the number of keys created and deleted by the stresser.
-	ModifiedKeys() int64
-	// Checker returns an invariant checker to run after the stresser is canceled.
-	Checker() Checker
-}
-
-// nopStresser implements Stresser that does nothing
-type nopStresser struct {
-	start time.Time
-	qps   int
-}
-
-func (s *nopStresser) Stress() error { return nil }
-func (s *nopStresser) Pause()        {}
-func (s *nopStresser) Close()        {}
-func (s *nopStresser) ModifiedKeys() int64 {
-	return 0
-}
-func (s *nopStresser) Checker() Checker { return nil }
-
-// compositeStresser implements a Stresser that runs a slice of
-// stressing clients concurrently.
-type compositeStresser struct {
-	stressers []Stresser
-}
-
-func (cs *compositeStresser) Stress() error {
-	for i, s := range cs.stressers {
-		if err := s.Stress(); err != nil {
-			for j := 0; j < i; j++ {
-				cs.stressers[j].Close()
-			}
-			return err
-		}
-	}
-	return nil
-}
-
-func (cs *compositeStresser) Pause() {
-	var wg sync.WaitGroup
-	wg.Add(len(cs.stressers))
-	for i := range cs.stressers {
-		go func(s Stresser) {
-			defer wg.Done()
-			s.Pause()
-		}(cs.stressers[i])
-	}
-	wg.Wait()
-}
-
-func (cs *compositeStresser) Close() {
-	var wg sync.WaitGroup
-	wg.Add(len(cs.stressers))
-	for i := range cs.stressers {
-		go func(s Stresser) {
-			defer wg.Done()
-			s.Close()
-		}(cs.stressers[i])
-	}
-	wg.Wait()
-}
-
-func (cs *compositeStresser) ModifiedKeys() (modifiedKey int64) {
-	for _, stress := range cs.stressers {
-		modifiedKey += stress.ModifiedKeys()
-	}
-	return modifiedKey
-}
-
-func (cs *compositeStresser) Checker() Checker {
-	var chks []Checker
-	for _, s := range cs.stressers {
-		if chk := s.Checker(); chk != nil {
-			chks = append(chks, chk)
-		}
-	}
-	if len(chks) == 0 {
-		return nil
-	}
-	return newCompositeChecker(chks)
-}
-
-type stressConfig struct {
-	keyLargeSize      int
-	keySize           int
-	keySuffixRange    int
-	keyTxnSuffixRange int
-	keyTxnOps         int
-
-	numLeases    int
-	keysPerLease int
-
-	rateLimiter *rate.Limiter
-
-	etcdRunnerPath string
-}
-
-// NewStresser creates stresser from a comma separated list of stresser types.
-func NewStresser(s string, sc *stressConfig, m *member) Stresser {
-	types := strings.Split(s, ",")
-	if len(types) > 1 {
-		stressers := make([]Stresser, len(types))
-		for i, stype := range types {
-			stressers[i] = NewStresser(stype, sc, m)
-		}
-		return &compositeStresser{stressers}
-	}
-	switch s {
-	case "nop":
-		return &nopStresser{start: time.Now(), qps: int(sc.rateLimiter.Limit())}
-	case "keys":
-		// TODO: Overly intensive stressing clients can make an etcd member panic
-		// with an 'out of memory' error. Add rate limits on the server side.
-		return &keyStresser{
-			Endpoint:          m.grpcAddr(),
-			keyLargeSize:      sc.keyLargeSize,
-			keySize:           sc.keySize,
-			keySuffixRange:    sc.keySuffixRange,
-			keyTxnSuffixRange: sc.keyTxnSuffixRange,
-			keyTxnOps:         sc.keyTxnOps,
-			N:                 100,
-			rateLimiter:       sc.rateLimiter,
-		}
-	case "v2keys":
-		return &v2Stresser{
-			Endpoint:       m.ClientURL,
-			keySize:        sc.keySize,
-			keySuffixRange: sc.keySuffixRange,
-			N:              100,
-			rateLimiter:    sc.rateLimiter,
-		}
-	case "lease":
-		return &leaseStresser{
-			endpoint:     m.grpcAddr(),
-			numLeases:    sc.numLeases,
-			keysPerLease: sc.keysPerLease,
-			rateLimiter:  sc.rateLimiter,
-		}
-	case "election-runner":
-		reqRate := 100
-		args := []string{
-			"election",
-			fmt.Sprintf("%v", time.Now().UnixNano()), // election name as current nano time
-			"--dial-timeout=10s",
-			"--endpoints", m.grpcAddr(),
-			"--total-client-connections=10",
-			"--rounds=0", // runs forever
-			"--req-rate", fmt.Sprintf("%v", reqRate),
-		}
-		return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, reqRate)
-	case "watch-runner":
-		reqRate := 100
-		args := []string{
-			"watcher",
-			"--prefix", fmt.Sprintf("%v", time.Now().UnixNano()), // prefix all keys with nano time
-			"--total-keys=1",
-			"--total-prefixes=1",
-			"--watch-per-prefix=1",
-			"--endpoints", m.grpcAddr(),
-			"--rounds=0", // runs forever
-			"--req-rate", fmt.Sprintf("%v", reqRate),
-		}
-		return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, reqRate)
-	case "lock-racer-runner":
-		reqRate := 100
-		args := []string{
-			"lock-racer",
-			fmt.Sprintf("%v", time.Now().UnixNano()), // locker name as current nano time
-			"--endpoints", m.grpcAddr(),
-			"--total-client-connections=10",
-			"--rounds=0", // runs forever
-			"--req-rate", fmt.Sprintf("%v", reqRate),
-		}
-		return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, reqRate)
-	case "lease-runner":
-		args := []string{
-			"lease-renewer",
-			"--ttl=30",
-			"--endpoints", m.grpcAddr(),
-		}
-		return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, 0)
-	default:
-		plog.Panicf("unknown stresser type: %s\n", s)
-	}
-	return nil // never reach here
-}

+ 0 - 286
tools/functional-tester/etcd-tester/tester.go

@@ -1,286 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"os"
-	"time"
-)
-
-type tester struct {
-	cluster       *cluster
-	limit         int
-	exitOnFailure bool
-
-	failures        []failure
-	status          Status
-	currentRevision int64
-
-	stresserType string
-	scfg         stressConfig
-	doChecks     bool
-
-	stresser Stresser
-	checker  Checker
-}
-
-// compactQPS is a rough number of entries compacted per second.
-// Previous tests showed etcd can compact about 60,000 entries per second.
-const compactQPS = 50000
-
-func (tt *tester) runLoop() {
-	tt.status.Since = time.Now()
-	tt.status.RoundLimit = tt.limit
-	tt.status.cluster = tt.cluster
-	for _, f := range tt.failures {
-		tt.status.Failures = append(tt.status.Failures, f.Desc())
-	}
-
-	if err := tt.resetStressCheck(); err != nil {
-		plog.Errorf("%s failed to start stresser (%v)", tt.logPrefix(), err)
-		tt.failed()
-		return
-	}
-
-	var preModifiedKey int64
-	for round := 0; round < tt.limit || tt.limit == -1; round++ {
-		tt.status.setRound(round)
-		roundTotalCounter.Inc()
-
-		if err := tt.doRound(round); err != nil {
-			plog.Warningf("%s functional-tester returning with error (%v)", tt.logPrefix(), err)
-			if tt.cleanup() != nil {
-				return
-			}
-			// reset preModifiedKey after clean up
-			preModifiedKey = 0
-			continue
-		}
-		// -1 so that logPrefix doesn't print out 'case'
-		tt.status.setCase(-1)
-
-		revToCompact := max(0, tt.currentRevision-10000)
-		currentModifiedKey := tt.stresser.ModifiedKeys()
-		modifiedKey := currentModifiedKey - preModifiedKey
-		preModifiedKey = currentModifiedKey
-		timeout := 10 * time.Second
-		timeout += time.Duration(modifiedKey/compactQPS) * time.Second
-		plog.Infof("%s compacting %d modifications (timeout %v)", tt.logPrefix(), modifiedKey, timeout)
-		if err := tt.compact(revToCompact, timeout); err != nil {
-			plog.Warningf("%s functional-tester compact got error (%v)", tt.logPrefix(), err)
-			if tt.cleanup() != nil {
-				return
-			}
-			// reset preModifiedKey after clean up
-			preModifiedKey = 0
-		}
-		if round > 0 && round%500 == 0 { // every 500 rounds
-			if err := tt.defrag(); err != nil {
-				plog.Warningf("%s functional-tester returning with error (%v)", tt.logPrefix(), err)
-				tt.failed()
-				return
-			}
-		}
-	}
-
-	plog.Infof("%s functional-tester is finished", tt.logPrefix())
-}
-
-func (tt *tester) doRound(round int) error {
-	for j, f := range tt.failures {
-		caseTotalCounter.WithLabelValues(f.Desc()).Inc()
-		tt.status.setCase(j)
-
-		if err := tt.cluster.WaitHealth(); err != nil {
-			return fmt.Errorf("wait full health error: %v", err)
-		}
-		plog.Infof("%s injecting failure %q", tt.logPrefix(), f.Desc())
-		if err := f.Inject(tt.cluster, round); err != nil {
-			return fmt.Errorf("injection error: %v", err)
-		}
-		plog.Infof("%s injected failure", tt.logPrefix())
-
-		plog.Infof("%s recovering failure %q", tt.logPrefix(), f.Desc())
-		if err := f.Recover(tt.cluster, round); err != nil {
-			return fmt.Errorf("recovery error: %v", err)
-		}
-		plog.Infof("%s recovered failure", tt.logPrefix())
-		tt.pauseStresser()
-		plog.Infof("%s wait until cluster is healthy", tt.logPrefix())
-		if err := tt.cluster.WaitHealth(); err != nil {
-			return fmt.Errorf("wait full health error: %v", err)
-		}
-		plog.Infof("%s cluster is healthy", tt.logPrefix())
-
-		plog.Infof("%s checking consistency and invariant of cluster", tt.logPrefix())
-		if err := tt.checkConsistency(); err != nil {
-			return fmt.Errorf("tt.checkConsistency error (%v)", err)
-		}
-		plog.Infof("%s checking consistency and invariant of cluster done", tt.logPrefix())
-
-		plog.Infof("%s succeed!", tt.logPrefix())
-	}
-	return nil
-}
-
-func (tt *tester) updateRevision() error {
-	revs, _, err := tt.cluster.getRevisionHash()
-	for _, rev := range revs {
-		tt.currentRevision = rev
-		break // just need to get one of the current revisions
-	}
-
-	plog.Infof("%s updated current revision to %d", tt.logPrefix(), tt.currentRevision)
-	return err
-}
-
-func (tt *tester) checkConsistency() (err error) {
-	defer func() {
-		if err != nil {
-			return
-		}
-		if err = tt.updateRevision(); err != nil {
-			plog.Warningf("%s functional-tester returning with tt.updateRevision error (%v)", tt.logPrefix(), err)
-			return
-		}
-		err = tt.startStresser()
-	}()
-	if err = tt.checker.Check(); err != nil {
-		plog.Infof("%s %v", tt.logPrefix(), err)
-	}
-	return err
-}
-
-func (tt *tester) compact(rev int64, timeout time.Duration) (err error) {
-	tt.pauseStresser()
-	defer func() {
-		if err == nil {
-			err = tt.startStresser()
-		}
-	}()
-
-	plog.Infof("%s compacting storage (current revision %d, compact revision %d)", tt.logPrefix(), tt.currentRevision, rev)
-	if err = tt.cluster.compactKV(rev, timeout); err != nil {
-		return err
-	}
-	plog.Infof("%s compacted storage (compact revision %d)", tt.logPrefix(), rev)
-
-	plog.Infof("%s checking compaction (compact revision %d)", tt.logPrefix(), rev)
-	if err = tt.cluster.checkCompact(rev); err != nil {
-		plog.Warningf("%s checkCompact error (%v)", tt.logPrefix(), err)
-		return err
-	}
-
-	plog.Infof("%s confirmed compaction (compact revision %d)", tt.logPrefix(), rev)
-	return nil
-}
-
-func (tt *tester) defrag() error {
-	plog.Infof("%s defragmenting...", tt.logPrefix())
-	if err := tt.cluster.defrag(); err != nil {
-		plog.Warningf("%s defrag error (%v)", tt.logPrefix(), err)
-		if cerr := tt.cleanup(); cerr != nil {
-			return fmt.Errorf("%s, %s", err, cerr)
-		}
-		return err
-	}
-	plog.Infof("%s defragmented...", tt.logPrefix())
-	return nil
-}
-
-func (tt *tester) logPrefix() string {
-	var (
-		rd     = tt.status.getRound()
-		cs     = tt.status.getCase()
-		prefix = fmt.Sprintf("[round#%d case#%d]", rd, cs)
-	)
-	if cs == -1 {
-		prefix = fmt.Sprintf("[round#%d]", rd)
-	}
-	return prefix
-}
-
-func (tt *tester) failed() {
-	if !tt.exitOnFailure {
-		return
-	}
-	plog.Warningf("%s exiting on failure", tt.logPrefix())
-	tt.cluster.Terminate()
-	os.Exit(2)
-}
-
-func (tt *tester) cleanup() error {
-	defer tt.failed()
-
-	roundFailedTotalCounter.Inc()
-	desc := "compact/defrag"
-	if tt.status.Case != -1 {
-		desc = tt.failures[tt.status.Case].Desc()
-	}
-	caseFailedTotalCounter.WithLabelValues(desc).Inc()
-
-	tt.closeStresser()
-	if err := tt.cluster.Cleanup(); err != nil {
-		plog.Warningf("%s cleanup error: %v", tt.logPrefix(), err)
-		return err
-	}
-	if err := tt.cluster.Reset(); err != nil {
-		plog.Warningf("%s cleanup Bootstrap error: %v", tt.logPrefix(), err)
-		return err
-	}
-	return tt.resetStressCheck()
-}
-
-func (tt *tester) pauseStresser() {
-	plog.Infof("%s pausing the stressing clients...", tt.logPrefix())
-	tt.stresser.Pause()
-	plog.Infof("%s paused stressing clients", tt.logPrefix())
-}
-
-func (tt *tester) startStresser() (err error) {
-	plog.Infof("%s starting the stressing clients...", tt.logPrefix())
-	err = tt.stresser.Stress()
-	plog.Infof("%s started stressing clients", tt.logPrefix())
-	return err
-}
-
-func (tt *tester) closeStresser() {
-	plog.Infof("%s closing the stressing clients...", tt.logPrefix())
-	tt.stresser.Close()
-	plog.Infof("%s closed stressing clients", tt.logPrefix())
-}
-
-func (tt *tester) resetStressCheck() error {
-	plog.Infof("%s resetting stressing clients and checkers...", tt.logPrefix())
-	cs := &compositeStresser{}
-	for _, m := range tt.cluster.Members {
-		s := NewStresser(tt.stresserType, &tt.scfg, m)
-		cs.stressers = append(cs.stressers, s)
-	}
-	tt.stresser = cs
-	if !tt.doChecks {
-		tt.checker = newNoChecker()
-		return tt.startStresser()
-	}
-	chk := newHashChecker(hashAndRevGetter(tt.cluster))
-	if schk := cs.Checker(); schk != nil {
-		chk = newCompositeChecker([]Checker{chk, schk})
-	}
-	tt.checker = chk
-	return tt.startStresser()
-}
-
-func (tt *tester) Report() int64 { return tt.stresser.ModifiedKeys() }
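
A note on the compaction timeout in the removed runLoop above: with compactQPS = 50000, a round that produced 1,000,000 modified keys gets a timeout of 10s + 1,000,000/50,000 s = 30s for the compact request.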

+ 0 - 117
tools/functional-tester/etcd-tester/v2_stresser.go

@@ -1,117 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"context"
-	"fmt"
-	"math/rand"
-	"net"
-	"net/http"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"golang.org/x/time/rate"
-
-	clientV2 "github.com/coreos/etcd/client"
-)
-
-type v2Stresser struct {
-	Endpoint string
-
-	keySize        int
-	keySuffixRange int
-
-	N int
-
-	rateLimiter *rate.Limiter
-
-	wg sync.WaitGroup
-
-	atomicModifiedKey int64
-
-	cancel func()
-}
-
-func (s *v2Stresser) Stress() error {
-	cfg := clientV2.Config{
-		Endpoints: []string{s.Endpoint},
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-			MaxIdleConnsPerHost: s.N,
-		},
-	}
-	c, err := clientV2.New(cfg)
-	if err != nil {
-		return err
-	}
-
-	kv := clientV2.NewKeysAPI(c)
-	ctx, cancel := context.WithCancel(context.Background())
-	s.cancel = cancel
-	s.wg.Add(s.N)
-	for i := 0; i < s.N; i++ {
-		go func() {
-			defer s.wg.Done()
-			s.run(ctx, kv)
-		}()
-	}
-	return nil
-}
-
-func (s *v2Stresser) run(ctx context.Context, kv clientV2.KeysAPI) {
-	for {
-		if err := s.rateLimiter.Wait(ctx); err == context.Canceled {
-			return
-		}
-		setctx, setcancel := context.WithTimeout(ctx, clientV2.DefaultRequestTimeout)
-		key := fmt.Sprintf("foo%016x", rand.Intn(s.keySuffixRange))
-		_, err := kv.Set(setctx, key, string(randBytes(s.keySize)), nil)
-		if err == nil {
-			atomic.AddInt64(&s.atomicModifiedKey, 1)
-		}
-		setcancel()
-		if err == context.Canceled {
-			return
-		}
-	}
-}
-
-func (s *v2Stresser) Pause() {
-	s.cancel()
-	s.wg.Wait()
-}
-
-func (s *v2Stresser) Close() {
-	s.Pause()
-}
-
-func (s *v2Stresser) ModifiedKeys() int64 {
-	return atomic.LoadInt64(&s.atomicModifiedKey)
-}
-
-func (s *v2Stresser) Checker() Checker { return nil }
-
-func randBytes(size int) []byte {
-	data := make([]byte, size)
-	for i := 0; i < size; i++ {
-		data[i] = byte(int('a') + rand.Intn(26))
-	}
-	return data
-}

+ 76 - 0
tools/functional-tester/rpcpb/etcd_config.go

@@ -0,0 +1,76 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcpb
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+var etcdFields = []string{
+	"Name",
+	"DataDir",
+	"WALDir",
+	"ListenClientURLs",
+	"AdvertiseClientURLs",
+	"ListenPeerURLs",
+	"InitialAdvertisePeerURLs",
+	"InitialCluster",
+	"InitialClusterState",
+	"InitialClusterToken",
+	"SnapshotCount",
+	"QuotaBackendBytes",
+	"PreVote",
+	"InitialCorruptCheck",
+}
+
+// Flags returns etcd flags in string slice.
+func (cfg *Etcd) Flags() (fs []string) {
+	tp := reflect.TypeOf(*cfg)
+	vo := reflect.ValueOf(*cfg)
+	for _, name := range etcdFields {
+		field, ok := tp.FieldByName(name)
+		if !ok {
+			panic(fmt.Errorf("field %q not found", name))
+		}
+		fv := reflect.Indirect(vo).FieldByName(name)
+		var sv string
+		switch fv.Type().Kind() {
+		case reflect.String:
+			sv = fv.String()
+		case reflect.Slice:
+			n := fv.Len()
+			sl := make([]string, n)
+			for i := 0; i < n; i++ {
+				sl[i] = fv.Index(i).String()
+			}
+			sv = strings.Join(sl, ",")
+		case reflect.Int64:
+			sv = fmt.Sprintf("%d", fv.Int())
+		case reflect.Bool:
+			sv = fmt.Sprintf("%v", fv.Bool())
+		default:
+			panic(fmt.Errorf("field %q (%v) cannot be parsed", name, fv.Type().Kind()))
+		}
+		fname := field.Tag.Get("yaml")
+		// TODO: remove this
+		if fname == "initial-corrupt-check" {
+			fname = "experimental-" + fname
+		}
+		fs = append(fs, fmt.Sprintf("--%s=%s", fname, sv))
+	}
+	return fs
+}
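
A usage note on the Flags helper above: it walks the Etcd message's fields in etcdFields order and emits one --<yaml-tag>=<value> flag per field, so an agent can pass the result straight to the etcd binary. The following is a minimal sketch under that assumption; startEtcd, its error handling, and the placeholder paths are hypothetical, while EtcdExecPath, EtcdClientEndpoint, and Etcd are fields of the Member message added below.

package main

import (
	"fmt"
	"os"
	"os/exec"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)

// startEtcd launches the etcd binary described by a Member, passing the
// flags produced by (*rpcpb.Etcd).Flags. Hypothetical helper, for illustration only.
func startEtcd(m *rpcpb.Member) (*exec.Cmd, error) {
	if m.Etcd == nil {
		return nil, fmt.Errorf("member %q has no etcd configuration", m.EtcdClientEndpoint)
	}
	// e.g. ["--name=s1", "--data-dir=/tmp/etcd-agent-data-1/etcd.data", ...]
	flags := m.Etcd.Flags()
	cmd := exec.Command(m.EtcdExecPath, flags...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return cmd, nil
}

func main() {
	m := &rpcpb.Member{
		EtcdExecPath:       "/usr/local/bin/etcd", // placeholder binary path
		EtcdClientEndpoint: "127.0.0.1:2379",      // placeholder endpoint
		Etcd: &rpcpb.Etcd{
			Name:    "s1",
			DataDir: "/tmp/etcd-agent-data-1/etcd.data",
		},
	}
	cmd, err := startEtcd(m)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("started etcd, pid", cmd.Process.Pid)
}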

+ 59 - 0
tools/functional-tester/rpcpb/etcd_config_test.go

@@ -0,0 +1,59 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcpb
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestEtcdFlags(t *testing.T) {
+	cfg := &Etcd{
+		Name:                     "s1",
+		DataDir:                  "/tmp/etcd-agent-data-1/etcd.data",
+		WALDir:                   "/tmp/etcd-agent-data-1/etcd.data/member/wal",
+		ListenClientURLs:         []string{"127.0.0.1:1379"},
+		AdvertiseClientURLs:      []string{"127.0.0.1:13790"},
+		ListenPeerURLs:           []string{"127.0.0.1:1380"},
+		InitialAdvertisePeerURLs: []string{"127.0.0.1:13800"},
+		InitialCluster:           "s1=127.0.0.1:13800,s2=127.0.0.1:23800,s3=127.0.0.1:33800",
+		InitialClusterState:      "new",
+		InitialClusterToken:      "tkn",
+		SnapshotCount:            10000,
+		QuotaBackendBytes:        10740000000,
+		PreVote:                  true,
+		InitialCorruptCheck:      true,
+	}
+	exp := []string{
+		"--name=s1",
+		"--data-dir=/tmp/etcd-agent-data-1/etcd.data",
+		"--wal-dir=/tmp/etcd-agent-data-1/etcd.data/member/wal",
+		"--listen-client-urls=127.0.0.1:1379",
+		"--advertise-client-urls=127.0.0.1:13790",
+		"--listen-peer-urls=127.0.0.1:1380",
+		"--initial-advertise-peer-urls=127.0.0.1:13800",
+		"--initial-cluster=s1=127.0.0.1:13800,s2=127.0.0.1:23800,s3=127.0.0.1:33800",
+		"--initial-cluster-state=new",
+		"--initial-cluster-token=tkn",
+		"--snapshot-count=10000",
+		"--quota-backend-bytes=10740000000",
+		"--pre-vote=true",
+		"--experimental-initial-corrupt-check=true",
+	}
+	fs := cfg.Flags()
+	if !reflect.DeepEqual(exp, fs) {
+		t.Fatalf("expected %q, got %q", exp, fs)
+	}
+}

+ 158 - 0
tools/functional-tester/rpcpb/member.go

@@ -0,0 +1,158 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcpb
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	grpc "google.golang.org/grpc"
+)
+
+var dialOpts = []grpc.DialOption{
+	grpc.WithInsecure(),
+	grpc.WithTimeout(5 * time.Second),
+	grpc.WithBlock(),
+}
+
+// DialEtcdGRPCServer creates a raw gRPC connection to an etcd member.
+func (m *Member) DialEtcdGRPCServer() (*grpc.ClientConn, error) {
+	if m.EtcdClientTLS {
+		// TODO: support TLS
+		panic("client TLS not supported yet")
+	}
+	return grpc.Dial(m.EtcdClientEndpoint, dialOpts...)
+}
+
+// CreateEtcdClient creates a client from member.
+func (m *Member) CreateEtcdClient() (*clientv3.Client, error) {
+	if m.EtcdClientTLS {
+		// TODO: support TLS
+		panic("client TLS not supported yet")
+	}
+	return clientv3.New(clientv3.Config{
+		Endpoints:   []string{m.EtcdClientEndpoint},
+		DialTimeout: 5 * time.Second,
+	})
+}
+
+// CheckCompact ensures that historical data before the given revision has been compacted.
+func (m *Member) CheckCompact(rev int64) error {
+	cli, err := m.CreateEtcdClient()
+	if err != nil {
+		return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	defer cli.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1))
+	wr, ok := <-wch
+	cancel()
+
+	if !ok {
+		return fmt.Errorf("watch channel terminated (endpoint %q)", m.EtcdClientEndpoint)
+	}
+	if wr.CompactRevision != rev {
+		return fmt.Errorf("got compact revision %v, wanted %v (endpoint %q)", wr.CompactRevision, rev, m.EtcdClientEndpoint)
+	}
+
+	return nil
+}
+
+// Defrag runs defragmentation on this member.
+func (m *Member) Defrag() error {
+	cli, err := m.CreateEtcdClient()
+	if err != nil {
+		return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	defer cli.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	_, err = cli.Defragment(ctx, m.EtcdClientEndpoint)
+	cancel()
+	return err
+}
+
+// RevHash fetches current revision and hash on this member.
+func (m *Member) RevHash() (int64, int64, error) {
+	conn, err := m.DialEtcdGRPCServer()
+	if err != nil {
+		return 0, 0, err
+	}
+	defer conn.Close()
+
+	mt := pb.NewMaintenanceClient(conn)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	resp, err := mt.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
+	cancel()
+
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return resp.Header.Revision, int64(resp.Hash), nil
+}
+
+// Rev fetches current revision on this member.
+func (m *Member) Rev(ctx context.Context) (int64, error) {
+	cli, err := m.CreateEtcdClient()
+	if err != nil {
+		return 0, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	defer cli.Close()
+
+	resp, err := cli.Status(ctx, m.EtcdClientEndpoint)
+	if err != nil {
+		return 0, err
+	}
+	return resp.Header.Revision, nil
+}
+
+// IsLeader returns true if this member is the current cluster leader.
+func (m *Member) IsLeader() (bool, error) {
+	cli, err := m.CreateEtcdClient()
+	if err != nil {
+		return false, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	defer cli.Close()
+
+	resp, err := cli.Status(context.Background(), m.EtcdClientEndpoint)
+	if err != nil {
+		return false, err
+	}
+	return resp.Header.MemberId == resp.Leader, nil
+}
+
+// WriteHealthKey writes a health key to this member.
+func (m *Member) WriteHealthKey() error {
+	cli, err := m.CreateEtcdClient()
+	if err != nil {
+		return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	defer cli.Close()
+
+	// give enough time-out in case expensive requests (range/delete) are pending
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	_, err = cli.Put(ctx, "health", "good")
+	cancel()
+	if err != nil {
+		return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint)
+	}
+	return nil
+}
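
For a quick sense of how the Member helpers above are meant to be used, here is a sketch (not code from this change; the endpoint is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)

func main() {
	// Placeholder endpoint; client TLS is not yet supported by these helpers.
	m := &rpcpb.Member{EtcdClientEndpoint: "127.0.0.1:2379"}

	// Writes the "health" key with a 5-second timeout (see WriteHealthKey above).
	if err := m.WriteHealthKey(); err != nil {
		log.Fatalf("health key write failed: %v", err)
	}

	// Status-based leader check against the same endpoint.
	leader, err := m.IsLeader()
	if err != nil {
		log.Fatalf("leader check failed: %v", err)
	}
	fmt.Printf("member %s leader=%v\n", m.EtcdClientEndpoint, leader)
}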

+ 2862 - 0
tools/functional-tester/rpcpb/rpc.pb.go

@@ -0,0 +1,2862 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpcpb/rpc.proto
+
+/*
+	Package rpcpb is a generated protocol buffer package.
+
+	It is generated from these files:
+		rpcpb/rpc.proto
+
+	It has these top-level messages:
+		Etcd
+		Member
+		Tester
+		Request
+		Response
+*/
+package rpcpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Operation int32
+
+const (
+	Operation_NotStarted Operation = 0
+	// InitialStartEtcd is only called to start etcd for the very first time.
+	Operation_InitialStartEtcd Operation = 1
+	// RestartEtcd is sent to restart killed etcd.
+	Operation_RestartEtcd Operation = 2
+	// KillEtcd pauses the etcd process while keeping data directories
+	// and previous etcd configurations.
+	Operation_KillEtcd Operation = 3
+	// FailArchive is sent when a consistency check has failed,
+	// so that etcd data directories need to be archived.
+	Operation_FailArchive Operation = 4
+	// DestroyEtcdAgent destroys etcd process, etcd data, and agent server.
+	Operation_DestroyEtcdAgent        Operation = 5
+	Operation_BlackholePeerPortTxRx   Operation = 100
+	Operation_UnblackholePeerPortTxRx Operation = 101
+	Operation_DelayPeerPortTxRx       Operation = 102
+	Operation_UndelayPeerPortTxRx     Operation = 103
+)
+
+var Operation_name = map[int32]string{
+	0:   "NotStarted",
+	1:   "InitialStartEtcd",
+	2:   "RestartEtcd",
+	3:   "KillEtcd",
+	4:   "FailArchive",
+	5:   "DestroyEtcdAgent",
+	100: "BlackholePeerPortTxRx",
+	101: "UnblackholePeerPortTxRx",
+	102: "DelayPeerPortTxRx",
+	103: "UndelayPeerPortTxRx",
+}
+var Operation_value = map[string]int32{
+	"NotStarted":              0,
+	"InitialStartEtcd":        1,
+	"RestartEtcd":             2,
+	"KillEtcd":                3,
+	"FailArchive":             4,
+	"DestroyEtcdAgent":        5,
+	"BlackholePeerPortTxRx":   100,
+	"UnblackholePeerPortTxRx": 101,
+	"DelayPeerPortTxRx":       102,
+	"UndelayPeerPortTxRx":     103,
+}
+
+func (x Operation) String() string {
+	return proto.EnumName(Operation_name, int32(x))
+}
+func (Operation) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
+
+type FailureCase int32
+
+const (
+	FailureCase_KILL_ONE_FOLLOWER                      FailureCase = 0
+	FailureCase_KILL_LEADER                            FailureCase = 1
+	FailureCase_KILL_ONE_FOLLOWER_FOR_LONG             FailureCase = 2
+	FailureCase_KILL_LEADER_FOR_LONG                   FailureCase = 3
+	FailureCase_KILL_QUORUM                            FailureCase = 4
+	FailureCase_KILL_ALL                               FailureCase = 5
+	FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER FailureCase = 6
+	FailureCase_BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE   FailureCase = 7
+	FailureCase_BLACKHOLE_PEER_PORT_TX_RX_ALL          FailureCase = 8
+	FailureCase_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER     FailureCase = 9
+	FailureCase_DELAY_PEER_PORT_TX_RX_LEADER           FailureCase = 10
+	FailureCase_DELAY_PEER_PORT_TX_RX_ALL              FailureCase = 11
+	FailureCase_FAILPOINTS                             FailureCase = 100
+	FailureCase_NO_FAIL                                FailureCase = 200
+	FailureCase_EXTERNAL                               FailureCase = 300
+)
+
+var FailureCase_name = map[int32]string{
+	0:   "KILL_ONE_FOLLOWER",
+	1:   "KILL_LEADER",
+	2:   "KILL_ONE_FOLLOWER_FOR_LONG",
+	3:   "KILL_LEADER_FOR_LONG",
+	4:   "KILL_QUORUM",
+	5:   "KILL_ALL",
+	6:   "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
+	7:   "BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE",
+	8:   "BLACKHOLE_PEER_PORT_TX_RX_ALL",
+	9:   "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
+	10:  "DELAY_PEER_PORT_TX_RX_LEADER",
+	11:  "DELAY_PEER_PORT_TX_RX_ALL",
+	100: "FAILPOINTS",
+	200: "NO_FAIL",
+	300: "EXTERNAL",
+}
+var FailureCase_value = map[string]int32{
+	"KILL_ONE_FOLLOWER":          0,
+	"KILL_LEADER":                1,
+	"KILL_ONE_FOLLOWER_FOR_LONG": 2,
+	"KILL_LEADER_FOR_LONG":       3,
+	"KILL_QUORUM":                4,
+	"KILL_ALL":                   5,
+	"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 6,
+	"BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE":   7,
+	"BLACKHOLE_PEER_PORT_TX_RX_ALL":          8,
+	"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":     9,
+	"DELAY_PEER_PORT_TX_RX_LEADER":           10,
+	"DELAY_PEER_PORT_TX_RX_ALL":              11,
+	"FAILPOINTS":                             100,
+	"NO_FAIL":                                200,
+	"EXTERNAL":                               300,
+}
+
+func (x FailureCase) String() string {
+	return proto.EnumName(FailureCase_name, int32(x))
+}
+func (FailureCase) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
+
+type StressType int32
+
+const (
+	StressType_KV                StressType = 0
+	StressType_LEASE             StressType = 1
+	StressType_NO_STRESS         StressType = 2
+	StressType_ELECTION_RUNNER   StressType = 3
+	StressType_WATCH_RUNNER      StressType = 4
+	StressType_LOCK_RACER_RUNNER StressType = 5
+	StressType_LEASE_RUNNER      StressType = 6
+)
+
+var StressType_name = map[int32]string{
+	0: "KV",
+	1: "LEASE",
+	2: "NO_STRESS",
+	3: "ELECTION_RUNNER",
+	4: "WATCH_RUNNER",
+	5: "LOCK_RACER_RUNNER",
+	6: "LEASE_RUNNER",
+}
+var StressType_value = map[string]int32{
+	"KV":                0,
+	"LEASE":             1,
+	"NO_STRESS":         2,
+	"ELECTION_RUNNER":   3,
+	"WATCH_RUNNER":      4,
+	"LOCK_RACER_RUNNER": 5,
+	"LEASE_RUNNER":      6,
+}
+
+func (x StressType) String() string {
+	return proto.EnumName(StressType_name, int32(x))
+}
+func (StressType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
+
+type Etcd struct {
+	Name                     string   `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
+	DataDir                  string   `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
+	WALDir                   string   `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`
+	ListenClientURLs         []string `protobuf:"bytes,4,rep,name=ListenClientURLs" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
+	AdvertiseClientURLs      []string `protobuf:"bytes,5,rep,name=AdvertiseClientURLs" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
+	ListenPeerURLs           []string `protobuf:"bytes,6,rep,name=ListenPeerURLs" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
+	InitialAdvertisePeerURLs []string `protobuf:"bytes,7,rep,name=InitialAdvertisePeerURLs" json:"InitialAdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
+	InitialCluster           string   `protobuf:"bytes,8,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
+	InitialClusterState      string   `protobuf:"bytes,9,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
+	InitialClusterToken      string   `protobuf:"bytes,10,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`
+	SnapshotCount            int64    `protobuf:"varint,11,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
+	QuotaBackendBytes        int64    `protobuf:"varint,12,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
+	PreVote                  bool     `protobuf:"varint,13,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
+	InitialCorruptCheck      bool     `protobuf:"varint,14,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
+}
+
+func (m *Etcd) Reset()                    { *m = Etcd{} }
+func (m *Etcd) String() string            { return proto.CompactTextString(m) }
+func (*Etcd) ProtoMessage()               {}
+func (*Etcd) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
+
+type Member struct {
+	// EtcdExecPath is the executable etcd binary path on the agent server.
+	EtcdExecPath string `protobuf:"bytes,1,opt,name=EtcdExecPath,proto3" json:"EtcdExecPath,omitempty" yaml:"etcd-exec-path"`
+	// AgentAddr is the agent HTTP server address.
+	AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"`
+	// FailpointHTTPAddr is the agent's failpoints HTTP server address.
+	FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"`
+	// BaseDir is the base directory where all logs and etcd data are stored.
+	BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"`
+	// EtcdLogPath is the log file to store current etcd server logs.
+	EtcdLogPath string `protobuf:"bytes,102,opt,name=EtcdLogPath,proto3" json:"EtcdLogPath,omitempty" yaml:"etcd-log-path"`
+	// EtcdClientTLS is true when client traffic needs to be encrypted.
+	EtcdClientTLS bool `protobuf:"varint,201,opt,name=EtcdClientTLS,proto3" json:"EtcdClientTLS,omitempty" yaml:"etcd-client-tls"`
+	// EtcdClientProxy is true when client traffic needs to be proxied.
+	// If true, listen client URL port must be different than advertise client URL port.
+	EtcdClientProxy bool `protobuf:"varint,202,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"`
+	// EtcdPeerProxy is true when peer traffic needs to be proxied.
+	// If true, listen peer URL port must be different than advertise peer URL port.
+	EtcdPeerProxy bool `protobuf:"varint,203,opt,name=EtcdPeerProxy,proto3" json:"EtcdPeerProxy,omitempty" yaml:"etcd-peer-proxy"`
+	// EtcdClientEndpoint is the etcd client endpoint.
+	EtcdClientEndpoint string `protobuf:"bytes,204,opt,name=EtcdClientEndpoint,proto3" json:"EtcdClientEndpoint,omitempty" yaml:"etcd-client-endpoint"`
+	// Etcd defines etcd binary configuration flags.
+	Etcd *Etcd `protobuf:"bytes,301,opt,name=Etcd" json:"Etcd,omitempty" yaml:"etcd-config"`
+}
+
+func (m *Member) Reset()                    { *m = Member{} }
+func (m *Member) String() string            { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage()               {}
+func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
+
+type Tester struct {
+	TesterNetwork string `protobuf:"bytes,1,opt,name=TesterNetwork,proto3" json:"TesterNetwork,omitempty" yaml:"tester-network"`
+	TesterAddr    string `protobuf:"bytes,2,opt,name=TesterAddr,proto3" json:"TesterAddr,omitempty" yaml:"tester-addr"`
+	// DelayLatencyMs is the delay latency in milliseconds,
+	// injected to simulate a slow network.
+	DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"`
+	// DelayLatencyMsRv is the delay latency random variable in milliseconds.
+	DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"`
+	// RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
+	RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
+	// ExitOnFailure is true to exit the tester on the first failure.
+	ExitOnFailure bool `protobuf:"varint,22,opt,name=ExitOnFailure,proto3" json:"ExitOnFailure,omitempty" yaml:"exit-on-failure"`
+	// ConsistencyCheck is true to check consistency (revision, hash).
+	ConsistencyCheck bool `protobuf:"varint,23,opt,name=ConsistencyCheck,proto3" json:"ConsistencyCheck,omitempty" yaml:"consistency-check"`
+	// EnablePprof is true to enable profiler.
+	EnablePprof bool `protobuf:"varint,24,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"`
+	// FailureCases lists the selected test cases to schedule.
+	// If empty, run all failure cases.
+	// TODO: support no-op
+	FailureCases []string `protobuf:"bytes,31,rep,name=FailureCases" json:"FailureCases,omitempty" yaml:"failure-cases"`
+	// FailureShuffle is true to randomize failure injecting order.
+	// TODO: support shuffle
+	// bool FailureShuffle = 32 [(gogoproto.moretags) = "yaml:\"failure-shuffle\""];
+	// FailpointCommands is the list of "gofail" commands (e.g. panic("etcd-tester"),1*sleep(1000)).
+	FailpointCommands []string `protobuf:"bytes,33,rep,name=FailpointCommands" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"`
+	// RunnerExecPath is the path of the etcd-runner binary.
+	RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"`
+	// ExternalExecPath is the path of a script for enabling/disabling an external fault injector.
+	ExternalExecPath string `protobuf:"bytes,42,opt,name=ExternalExecPath,proto3" json:"ExternalExecPath,omitempty" yaml:"external-exec-path"`
+	// StressTypes is the list of stresser names:
+	// keys, lease, nop, election-runner, watch-runner, lock-racer-runner, lease-runner.
+	StressTypes []string `protobuf:"bytes,101,rep,name=StressTypes" json:"StressTypes,omitempty" yaml:"stress-types"`
+	// StressKeySize is the size of each small key written into etcd.
+	StressKeySize int32 `protobuf:"varint,102,opt,name=StressKeySize,proto3" json:"StressKeySize,omitempty" yaml:"stress-key-size"`
+	// StressKeySizeLarge is the size of each large key written into etcd.
+	StressKeySizeLarge int32 `protobuf:"varint,103,opt,name=StressKeySizeLarge,proto3" json:"StressKeySizeLarge,omitempty" yaml:"stress-key-size-large"`
+	// StressKeySuffixRange is the count of key range written into etcd.
+	// Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))".
+	StressKeySuffixRange int32 `protobuf:"varint,104,opt,name=StressKeySuffixRange,proto3" json:"StressKeySuffixRange,omitempty" yaml:"stress-key-suffix-range"`
+	// StressKeySuffixRangeTxn is the count of key range written into etcd txn (max 100).
+	// Stress keys are created with "fmt.Sprintf("/k%03d", i)".
+	StressKeySuffixRangeTxn int32 `protobuf:"varint,105,opt,name=StressKeySuffixRangeTxn,proto3" json:"StressKeySuffixRangeTxn,omitempty" yaml:"stress-key-suffix-range-txn"`
+	// StressKeyTxnOps is the number of operations per a transaction (max 64).
+	StressKeyTxnOps int32 `protobuf:"varint,106,opt,name=StressKeyTxnOps,proto3" json:"StressKeyTxnOps,omitempty" yaml:"stress-key-txn-ops"`
+	// StressQPS is the maximum number of stresser requests per second.
+	StressQPS int32 `protobuf:"varint,107,opt,name=StressQPS,proto3" json:"StressQPS,omitempty" yaml:"stress-qps"`
+}
+
+func (m *Tester) Reset()                    { *m = Tester{} }
+func (m *Tester) String() string            { return proto.CompactTextString(m) }
+func (*Tester) ProtoMessage()               {}
+func (*Tester) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
+
+type Request struct {
+	Operation Operation `protobuf:"varint,1,opt,name=Operation,proto3,enum=rpcpb.Operation" json:"Operation,omitempty"`
+	Member    *Member   `protobuf:"bytes,2,opt,name=Member" json:"Member,omitempty"`
+	Tester    *Tester   `protobuf:"bytes,3,opt,name=Tester" json:"Tester,omitempty"`
+}
+
+func (m *Request) Reset()                    { *m = Request{} }
+func (m *Request) String() string            { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()               {}
+func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} }
+
+type Response struct {
+	Success bool   `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
+	Status  string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"`
+}
+
+func (m *Response) Reset()                    { *m = Response{} }
+func (m *Response) String() string            { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage()               {}
+func (*Response) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} }
+
+func init() {
+	proto.RegisterType((*Etcd)(nil), "rpcpb.Etcd")
+	proto.RegisterType((*Member)(nil), "rpcpb.Member")
+	proto.RegisterType((*Tester)(nil), "rpcpb.Tester")
+	proto.RegisterType((*Request)(nil), "rpcpb.Request")
+	proto.RegisterType((*Response)(nil), "rpcpb.Response")
+	proto.RegisterEnum("rpcpb.Operation", Operation_name, Operation_value)
+	proto.RegisterEnum("rpcpb.FailureCase", FailureCase_name, FailureCase_value)
+	proto.RegisterEnum("rpcpb.StressType", StressType_name, StressType_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Transport service
+
+type TransportClient interface {
+	Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error)
+}
+
+type transportClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewTransportClient(cc *grpc.ClientConn) TransportClient {
+	return &transportClient{cc}
+}
+
+func (c *transportClient) Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Transport_serviceDesc.Streams[0], c.cc, "/rpcpb.Transport/Transport", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &transportTransportClient{stream}
+	return x, nil
+}
+
+type Transport_TransportClient interface {
+	Send(*Request) error
+	Recv() (*Response, error)
+	grpc.ClientStream
+}
+
+type transportTransportClient struct {
+	grpc.ClientStream
+}
+
+func (x *transportTransportClient) Send(m *Request) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *transportTransportClient) Recv() (*Response, error) {
+	m := new(Response)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for Transport service
+
+type TransportServer interface {
+	Transport(Transport_TransportServer) error
+}
+
+func RegisterTransportServer(s *grpc.Server, srv TransportServer) {
+	s.RegisterService(&_Transport_serviceDesc, srv)
+}
+
+func _Transport_Transport_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(TransportServer).Transport(&transportTransportServer{stream})
+}
+
+type Transport_TransportServer interface {
+	Send(*Response) error
+	Recv() (*Request, error)
+	grpc.ServerStream
+}
+
+type transportTransportServer struct {
+	grpc.ServerStream
+}
+
+func (x *transportTransportServer) Send(m *Response) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *transportTransportServer) Recv() (*Request, error) {
+	m := new(Request)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _Transport_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "rpcpb.Transport",
+	HandlerType: (*TransportServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Transport",
+			Handler:       _Transport_Transport_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpcpb/rpc.proto",
+}
+
+func (m *Etcd) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.DataDir) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.DataDir)))
+		i += copy(dAtA[i:], m.DataDir)
+	}
+	if len(m.WALDir) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.WALDir)))
+		i += copy(dAtA[i:], m.WALDir)
+	}
+	if len(m.ListenClientURLs) > 0 {
+		for _, s := range m.ListenClientURLs {
+			dAtA[i] = 0x22
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.AdvertiseClientURLs) > 0 {
+		for _, s := range m.AdvertiseClientURLs {
+			dAtA[i] = 0x2a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.ListenPeerURLs) > 0 {
+		for _, s := range m.ListenPeerURLs {
+			dAtA[i] = 0x32
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.InitialAdvertisePeerURLs) > 0 {
+		for _, s := range m.InitialAdvertisePeerURLs {
+			dAtA[i] = 0x3a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.InitialCluster) > 0 {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialCluster)))
+		i += copy(dAtA[i:], m.InitialCluster)
+	}
+	if len(m.InitialClusterState) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterState)))
+		i += copy(dAtA[i:], m.InitialClusterState)
+	}
+	if len(m.InitialClusterToken) > 0 {
+		dAtA[i] = 0x52
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterToken)))
+		i += copy(dAtA[i:], m.InitialClusterToken)
+	}
+	if m.SnapshotCount != 0 {
+		dAtA[i] = 0x58
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotCount))
+	}
+	if m.QuotaBackendBytes != 0 {
+		dAtA[i] = 0x60
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.QuotaBackendBytes))
+	}
+	if m.PreVote {
+		dAtA[i] = 0x68
+		i++
+		if m.PreVote {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.InitialCorruptCheck {
+		dAtA[i] = 0x70
+		i++
+		if m.InitialCorruptCheck {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.EtcdExecPath) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdExecPath)))
+		i += copy(dAtA[i:], m.EtcdExecPath)
+	}
+	if len(m.AgentAddr) > 0 {
+		dAtA[i] = 0x5a
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.AgentAddr)))
+		i += copy(dAtA[i:], m.AgentAddr)
+	}
+	if len(m.FailpointHTTPAddr) > 0 {
+		dAtA[i] = 0x62
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.FailpointHTTPAddr)))
+		i += copy(dAtA[i:], m.FailpointHTTPAddr)
+	}
+	if len(m.BaseDir) > 0 {
+		dAtA[i] = 0xaa
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.BaseDir)))
+		i += copy(dAtA[i:], m.BaseDir)
+	}
+	if len(m.EtcdLogPath) > 0 {
+		dAtA[i] = 0xb2
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdLogPath)))
+		i += copy(dAtA[i:], m.EtcdLogPath)
+	}
+	if m.EtcdClientTLS {
+		dAtA[i] = 0xc8
+		i++
+		dAtA[i] = 0xc
+		i++
+		if m.EtcdClientTLS {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.EtcdClientProxy {
+		dAtA[i] = 0xd0
+		i++
+		dAtA[i] = 0xc
+		i++
+		if m.EtcdClientProxy {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.EtcdPeerProxy {
+		dAtA[i] = 0xd8
+		i++
+		dAtA[i] = 0xc
+		i++
+		if m.EtcdPeerProxy {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.EtcdClientEndpoint) > 0 {
+		dAtA[i] = 0xe2
+		i++
+		dAtA[i] = 0xc
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdClientEndpoint)))
+		i += copy(dAtA[i:], m.EtcdClientEndpoint)
+	}
+	if m.Etcd != nil {
+		dAtA[i] = 0xea
+		i++
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.Etcd.Size()))
+		n1, err := m.Etcd.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	return i, nil
+}
+
+func (m *Tester) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Tester) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.TesterNetwork) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.TesterNetwork)))
+		i += copy(dAtA[i:], m.TesterNetwork)
+	}
+	if len(m.TesterAddr) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.TesterAddr)))
+		i += copy(dAtA[i:], m.TesterAddr)
+	}
+	if m.DelayLatencyMs != 0 {
+		dAtA[i] = 0x58
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMs))
+	}
+	if m.DelayLatencyMsRv != 0 {
+		dAtA[i] = 0x60
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMsRv))
+	}
+	if m.RoundLimit != 0 {
+		dAtA[i] = 0xa8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.RoundLimit))
+	}
+	if m.ExitOnFailure {
+		dAtA[i] = 0xb0
+		i++
+		dAtA[i] = 0x1
+		i++
+		if m.ExitOnFailure {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ConsistencyCheck {
+		dAtA[i] = 0xb8
+		i++
+		dAtA[i] = 0x1
+		i++
+		if m.ConsistencyCheck {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.EnablePprof {
+		dAtA[i] = 0xc0
+		i++
+		dAtA[i] = 0x1
+		i++
+		if m.EnablePprof {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.FailureCases) > 0 {
+		for _, s := range m.FailureCases {
+			dAtA[i] = 0xfa
+			i++
+			dAtA[i] = 0x1
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.FailpointCommands) > 0 {
+		for _, s := range m.FailpointCommands {
+			dAtA[i] = 0x8a
+			i++
+			dAtA[i] = 0x2
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.RunnerExecPath) > 0 {
+		dAtA[i] = 0xca
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RunnerExecPath)))
+		i += copy(dAtA[i:], m.RunnerExecPath)
+	}
+	if len(m.ExternalExecPath) > 0 {
+		dAtA[i] = 0xd2
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.ExternalExecPath)))
+		i += copy(dAtA[i:], m.ExternalExecPath)
+	}
+	if len(m.StressTypes) > 0 {
+		for _, s := range m.StressTypes {
+			dAtA[i] = 0xaa
+			i++
+			dAtA[i] = 0x6
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if m.StressKeySize != 0 {
+		dAtA[i] = 0xb0
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySize))
+	}
+	if m.StressKeySizeLarge != 0 {
+		dAtA[i] = 0xb8
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySizeLarge))
+	}
+	if m.StressKeySuffixRange != 0 {
+		dAtA[i] = 0xc0
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySuffixRange))
+	}
+	if m.StressKeySuffixRangeTxn != 0 {
+		dAtA[i] = 0xc8
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySuffixRangeTxn))
+	}
+	if m.StressKeyTxnOps != 0 {
+		dAtA[i] = 0xd0
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.StressKeyTxnOps))
+	}
+	if m.StressQPS != 0 {
+		dAtA[i] = 0xd8
+		i++
+		dAtA[i] = 0x6
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.StressQPS))
+	}
+	return i, nil
+}
+
+func (m *Request) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Operation != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.Operation))
+	}
+	if m.Member != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size()))
+		n2, err := m.Member.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	if m.Tester != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(m.Tester.Size()))
+		n3, err := m.Tester.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n3
+	}
+	return i, nil
+}
+
+func (m *Response) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Response) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Success {
+		dAtA[i] = 0x8
+		i++
+		if m.Success {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.Status) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Status)))
+		i += copy(dAtA[i:], m.Status)
+	}
+	return i, nil
+}
+
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Etcd) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.DataDir)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.WALDir)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.ListenClientURLs) > 0 {
+		for _, s := range m.ListenClientURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.AdvertiseClientURLs) > 0 {
+		for _, s := range m.AdvertiseClientURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.ListenPeerURLs) > 0 {
+		for _, s := range m.ListenPeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.InitialAdvertisePeerURLs) > 0 {
+		for _, s := range m.InitialAdvertisePeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	l = len(m.InitialCluster)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.InitialClusterState)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.InitialClusterToken)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.SnapshotCount != 0 {
+		n += 1 + sovRpc(uint64(m.SnapshotCount))
+	}
+	if m.QuotaBackendBytes != 0 {
+		n += 1 + sovRpc(uint64(m.QuotaBackendBytes))
+	}
+	if m.PreVote {
+		n += 2
+	}
+	if m.InitialCorruptCheck {
+		n += 2
+	}
+	return n
+}
+
+func (m *Member) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.EtcdExecPath)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.AgentAddr)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.FailpointHTTPAddr)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.BaseDir)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	l = len(m.EtcdLogPath)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	if m.EtcdClientTLS {
+		n += 3
+	}
+	if m.EtcdClientProxy {
+		n += 3
+	}
+	if m.EtcdPeerProxy {
+		n += 3
+	}
+	l = len(m.EtcdClientEndpoint)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	if m.Etcd != nil {
+		l = m.Etcd.Size()
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+
+func (m *Tester) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.TesterNetwork)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.TesterAddr)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.DelayLatencyMs != 0 {
+		n += 1 + sovRpc(uint64(m.DelayLatencyMs))
+	}
+	if m.DelayLatencyMsRv != 0 {
+		n += 1 + sovRpc(uint64(m.DelayLatencyMsRv))
+	}
+	if m.RoundLimit != 0 {
+		n += 2 + sovRpc(uint64(m.RoundLimit))
+	}
+	if m.ExitOnFailure {
+		n += 3
+	}
+	if m.ConsistencyCheck {
+		n += 3
+	}
+	if m.EnablePprof {
+		n += 3
+	}
+	if len(m.FailureCases) > 0 {
+		for _, s := range m.FailureCases {
+			l = len(s)
+			n += 2 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.FailpointCommands) > 0 {
+		for _, s := range m.FailpointCommands {
+			l = len(s)
+			n += 2 + l + sovRpc(uint64(l))
+		}
+	}
+	l = len(m.RunnerExecPath)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	l = len(m.ExternalExecPath)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	if len(m.StressTypes) > 0 {
+		for _, s := range m.StressTypes {
+			l = len(s)
+			n += 2 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.StressKeySize != 0 {
+		n += 2 + sovRpc(uint64(m.StressKeySize))
+	}
+	if m.StressKeySizeLarge != 0 {
+		n += 2 + sovRpc(uint64(m.StressKeySizeLarge))
+	}
+	if m.StressKeySuffixRange != 0 {
+		n += 2 + sovRpc(uint64(m.StressKeySuffixRange))
+	}
+	if m.StressKeySuffixRangeTxn != 0 {
+		n += 2 + sovRpc(uint64(m.StressKeySuffixRangeTxn))
+	}
+	if m.StressKeyTxnOps != 0 {
+		n += 2 + sovRpc(uint64(m.StressKeyTxnOps))
+	}
+	if m.StressQPS != 0 {
+		n += 2 + sovRpc(uint64(m.StressQPS))
+	}
+	return n
+}
+
+func (m *Request) Size() (n int) {
+	var l int
+	_ = l
+	if m.Operation != 0 {
+		n += 1 + sovRpc(uint64(m.Operation))
+	}
+	if m.Member != nil {
+		l = m.Member.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Tester != nil {
+		l = m.Tester.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+
+func (m *Response) Size() (n int) {
+	var l int
+	_ = l
+	if m.Success {
+		n += 2
+	}
+	l = len(m.Status)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+
+func sovRpc(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozRpc(x uint64) (n int) {
+	return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
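
For orientation, the two helpers above implement protobuf's base-128 varint length math: sovRpc counts seven payload bits per byte, and sozRpc first zigzag-encodes the value so small negative numbers stay short. A minimal, self-contained sketch of the same encode/decode scheme (the names below are illustrative and are not part of the generated file):

package main

import "fmt"

// putUvarint appends v to buf in base-128 varint form:
// low seven bits first, high bit set on every byte but the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint decodes one varint from buf, returning the value and bytes consumed.
func uvarint(buf []byte) (uint64, int) {
	var x uint64
	var shift uint
	for i, b := range buf {
		x |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return x, i + 1
		}
		shift += 7
	}
	return 0, 0 // truncated input
}

// zigzag maps signed values to unsigned ones the way sozRpc expects.
func zigzag(x int64) uint64 { return uint64((x << 1) ^ (x >> 63)) }

func main() {
	buf := putUvarint(nil, 300) // encodes as 0xac 0x02
	v, n := uvarint(buf)
	fmt.Printf("% x -> %d (%d bytes)\n", buf, v, n)
	fmt.Println(zigzag(-1)) // prints 1: small negatives stay one byte long
}
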
+func (m *Etcd) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Etcd: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Etcd: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WALDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WALDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListenClientURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ListenClientURLs = append(m.ListenClientURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AdvertiseClientURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AdvertiseClientURLs = append(m.AdvertiseClientURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListenPeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ListenPeerURLs = append(m.ListenPeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitialAdvertisePeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InitialAdvertisePeerURLs = append(m.InitialAdvertisePeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitialCluster", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InitialCluster = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterState", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InitialClusterState = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterToken", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InitialClusterToken = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
+			}
+			m.SnapshotCount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SnapshotCount |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field QuotaBackendBytes", wireType)
+			}
+			m.QuotaBackendBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.QuotaBackendBytes |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreVote", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PreVote = bool(v != 0)
+		case 14:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitialCorruptCheck", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.InitialCorruptCheck = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Member: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EtcdExecPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EtcdExecPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AgentAddr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AgentAddr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailpointHTTPAddr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FailpointHTTPAddr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 101:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BaseDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BaseDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 102:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EtcdLogPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EtcdLogPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 201:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientTLS", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.EtcdClientTLS = bool(v != 0)
+		case 202:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientProxy", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.EtcdClientProxy = bool(v != 0)
+		case 203:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EtcdPeerProxy", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.EtcdPeerProxy = bool(v != 0)
+		case 204:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientEndpoint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EtcdClientEndpoint = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 301:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Etcd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Etcd == nil {
+				m.Etcd = &Etcd{}
+			}
+			if err := m.Etcd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Tester) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Tester: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Tester: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TesterNetwork", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TesterNetwork = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TesterAddr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TesterAddr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DelayLatencyMs", wireType)
+			}
+			m.DelayLatencyMs = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DelayLatencyMs |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DelayLatencyMsRv", wireType)
+			}
+			m.DelayLatencyMsRv = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DelayLatencyMsRv |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 21:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RoundLimit", wireType)
+			}
+			m.RoundLimit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RoundLimit |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 22:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitOnFailure", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExitOnFailure = bool(v != 0)
+		case 23:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsistencyCheck", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ConsistencyCheck = bool(v != 0)
+		case 24:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EnablePprof", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.EnablePprof = bool(v != 0)
+		case 31:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailureCases", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FailureCases = append(m.FailureCases, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 33:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailpointCommands", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FailpointCommands = append(m.FailpointCommands, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 41:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunnerExecPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RunnerExecPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 42:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalExecPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExternalExecPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 101:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressTypes", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StressTypes = append(m.StressTypes, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 102:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressKeySize", wireType)
+			}
+			m.StressKeySize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StressKeySize |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 103:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressKeySizeLarge", wireType)
+			}
+			m.StressKeySizeLarge = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StressKeySizeLarge |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 104:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressKeySuffixRange", wireType)
+			}
+			m.StressKeySuffixRange = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StressKeySuffixRange |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 105:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressKeySuffixRangeTxn", wireType)
+			}
+			m.StressKeySuffixRangeTxn = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StressKeySuffixRangeTxn |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 106:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressKeyTxnOps", wireType)
+			}
+			m.StressKeyTxnOps = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StressKeyTxnOps |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 107:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StressQPS", wireType)
+			}
+			m.StressQPS = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StressQPS |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Request: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
+			}
+			m.Operation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Operation |= (Operation(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Member == nil {
+				m.Member = &Member{}
+			}
+			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tester", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Tester == nil {
+				m.Tester = &Tester{}
+			}
+			if err := m.Tester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Response) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Response: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Success = bool(v != 0)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRpc(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("rpcpb/rpc.proto", fileDescriptorRpc) }
+
+var fileDescriptorRpc = []byte{
+	// 1800 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x57, 0xcd, 0x72, 0xdb, 0xba,
+	0x15, 0xb6, 0x6c, 0x4b, 0xb6, 0x60, 0xcb, 0x66, 0x60, 0x3b, 0x66, 0x9c, 0xc4, 0xf4, 0x65, 0x7b,
+	0x33, 0xae, 0x67, 0xe8, 0x74, 0x72, 0x67, 0x3a, 0xed, 0x9d, 0xdb, 0x49, 0x25, 0x99, 0xb9, 0x76,
+	0xcd, 0x48, 0x0a, 0x24, 0x27, 0xe9, 0x4a, 0xa5, 0x48, 0x48, 0x62, 0x4d, 0x91, 0x0c, 0x08, 0xf9,
+	0x4a, 0x77, 0xd1, 0x6d, 0xb7, 0x5d, 0x76, 0xd3, 0x37, 0x68, 0x57, 0x7d, 0x89, 0xb4, 0xcd, 0xa2,
+	0x4f, 0xa0, 0xb6, 0xe9, 0x1b, 0xf0, 0x09, 0x3a, 0x00, 0x28, 0x09, 0xfa, 0x71, 0xbb, 0x33, 0xce,
+	0xf9, 0xbe, 0x8f, 0xc0, 0xd1, 0x39, 0x1f, 0x60, 0xb0, 0x4b, 0x22, 0x27, 0x6a, 0x3d, 0x27, 0x91,
+	0x73, 0x1e, 0x91, 0x90, 0x86, 0x30, 0xcb, 0x03, 0x47, 0x46, 0xc7, 0xa3, 0xdd, 0x7e, 0xeb, 0xdc,
+	0x09, 0x7b, 0xcf, 0x3b, 0x61, 0x27, 0x7c, 0xce, 0xb3, 0xad, 0x7e, 0x9b, 0xaf, 0xf8, 0x82, 0xff,
+	0x25, 0x58, 0xfa, 0xa7, 0x0d, 0xb0, 0x6e, 0x52, 0xc7, 0x85, 0x3f, 0x00, 0xeb, 0x15, 0xbb, 0x87,
+	0xd5, 0xcc, 0x49, 0xe6, 0x34, 0x5f, 0xda, 0x4d, 0x46, 0xda, 0xd6, 0xd0, 0xee, 0xf9, 0x5f, 0xeb,
+	0x81, 0xdd, 0xc3, 0x3a, 0xe2, 0x49, 0x68, 0x80, 0x8d, 0x0b, 0x9b, 0xda, 0x17, 0x1e, 0x51, 0x57,
+	0x39, 0x6e, 0x2f, 0x19, 0x69, 0xbb, 0x02, 0xe7, 0xda, 0xd4, 0x36, 0x5c, 0x8f, 0xe8, 0x68, 0x8c,
+	0x81, 0x67, 0x20, 0xf7, 0xae, 0x68, 0x31, 0xf4, 0x1a, 0x47, 0xc3, 0x64, 0xa4, 0xed, 0x08, 0xf4,
+	0x77, 0xb6, 0x2f, 0xc0, 0x29, 0x02, 0x5e, 0x01, 0xc5, 0xf2, 0x62, 0x8a, 0x83, 0xb2, 0xef, 0xe1,
+	0x80, 0xde, 0x20, 0x2b, 0x56, 0xd7, 0x4f, 0xd6, 0x4e, 0xf3, 0xa5, 0xa7, 0xc9, 0x48, 0x7b, 0x24,
+	0x58, 0x3e, 0x47, 0x18, 0x0e, 0x87, 0x18, 0x7d, 0xe2, 0xc7, 0x3a, 0x5a, 0xa0, 0x41, 0x04, 0xf6,
+	0x8a, 0xee, 0x1d, 0x26, 0xd4, 0x8b, 0xb1, 0xa4, 0x96, 0xe5, 0x6a, 0x27, 0xc9, 0x48, 0x7b, 0x22,
+	0xd4, 0xec, 0x31, 0x68, 0x56, 0x70, 0x19, 0x19, 0x96, 0xc1, 0x8e, 0xf8, 0x4e, 0x0d, 0x63, 0xc2,
+	0xe5, 0x72, 0x5c, 0xee, 0x71, 0x32, 0xd2, 0x0e, 0x67, 0x36, 0x17, 0x61, 0x4c, 0x52, 0xa5, 0x39,
+	0x0a, 0x6c, 0x01, 0xf5, 0x2a, 0xf0, 0xa8, 0x67, 0xfb, 0x93, 0x4f, 0x4c, 0xe4, 0x36, 0xb8, 0xdc,
+	0xb3, 0x64, 0xa4, 0xe9, 0x42, 0xce, 0x13, 0x48, 0x63, 0xba, 0x4b, 0x49, 0xf9, 0x5e, 0x1d, 0x58,
+	0x02, 0x3b, 0x69, 0xae, 0xec, 0xf7, 0x63, 0x8a, 0x89, 0xba, 0xc9, 0x6b, 0x7f, 0x94, 0x8c, 0xb4,
+	0x87, 0xb3, 0xca, 0x8e, 0x00, 0xe8, 0x68, 0x8e, 0xc1, 0x0a, 0x38, 0x1b, 0xa9, 0x53, 0x9b, 0x62,
+	0x35, 0xcf, 0x85, 0xa4, 0x02, 0xce, 0x09, 0x19, 0x31, 0x83, 0xe9, 0x68, 0x19, 0x79, 0x51, 0xb3,
+	0x11, 0xde, 0xe2, 0x40, 0x05, 0xff, 0x4f, 0x93, 0x32, 0xd8, 0x82, 0x26, 0x27, 0xc3, 0x97, 0xa0,
+	0x50, 0x0f, 0xec, 0x28, 0xee, 0x86, 0xb4, 0x1c, 0xf6, 0x03, 0xaa, 0x6e, 0x9d, 0x64, 0x4e, 0xd7,
+	0x4a, 0x8f, 0x92, 0x91, 0x76, 0x20, 0xd4, 0xe2, 0x34, 0x6d, 0x38, 0x2c, 0xaf, 0xa3, 0x59, 0x3c,
+	0xb4, 0xc0, 0x83, 0x37, 0xfd, 0x90, 0xda, 0x25, 0xdb, 0xb9, 0xc5, 0x81, 0x5b, 0x1a, 0x52, 0x1c,
+	0xab, 0xdb, 0x5c, 0xe4, 0x38, 0x19, 0x69, 0x47, 0x42, 0xe4, 0x03, 0x83, 0x18, 0x2d, 0x81, 0x31,
+	0x5a, 0x0c, 0xa4, 0xa3, 0x45, 0x22, 0x9b, 0x8e, 0x1a, 0xc1, 0x6f, 0x43, 0x8a, 0xd5, 0xc2, 0x49,
+	0xe6, 0x74, 0x53, 0x9e, 0x8e, 0x88, 0x60, 0xe3, 0x2e, 0x64, 0xd5, 0x19, 0x63, 0xe4, 0x8a, 0x84,
+	0x84, 0xf4, 0x23, 0x5a, 0xee, 0x62, 0xe7, 0x56, 0xdd, 0xe1, 0xd4, 0x65, 0x15, 0x11, 0x28, 0xc3,
+	0x61, 0x30, 0xa9, 0x22, 0x12, 0x59, 0xff, 0x7d, 0x16, 0xe4, 0x5e, 0xe3, 0x5e, 0x0b, 0x13, 0xf8,
+	0x73, 0xb0, 0xcd, 0x06, 0xdb, 0x1c, 0x60, 0xa7, 0x66, 0xd3, 0x6e, 0x3a, 0xd8, 0x52, 0x6d, 0x30,
+	0x75, 0x5c, 0x03, 0x0f, 0xb0, 0x63, 0x44, 0x36, 0xed, 0xea, 0x68, 0x06, 0x0e, 0xbf, 0x02, 0xf9,
+	0x62, 0x07, 0x07, 0xb4, 0xe8, 0xba, 0x84, 0xd7, 0x35, 0x5f, 0x3a, 0x48, 0x46, 0xda, 0x83, 0x74,
+	0x74, 0x58, 0xca, 0xb0, 0x5d, 0x97, 0xe8, 0x68, 0x8a, 0x63, 0xf5, 0x7c, 0x65, 0x7b, 0x7e, 0x14,
+	0x7a, 0x01, 0xbd, 0x6c, 0x34, 0x6a, 0x9c, 0xbc, 0xcd, 0xc9, 0x52, 0x3d, 0xdb, 0x63, 0x88, 0xd1,
+	0xa5, 0x34, 0x4a, 0x55, 0x16, 0x89, 0xac, 0x9e, 0x25, 0x3b, 0xc6, 0xcc, 0x3f, 0xf0, 0xbc, 0xdb,
+	0xb4, 0xec, 0x18, 0xa7, 0x6e, 0x93, 0x62, 0xe0, 0xd7, 0x60, 0x8b, 0x9d, 0xc0, 0x0a, 0x3b, 0xfc,
+	0xbc, 0x6d, 0x4e, 0x51, 0x93, 0x91, 0xb6, 0x2f, 0x9d, 0xd7, 0x0f, 0x3b, 0xe9, 0x71, 0x65, 0x30,
+	0x2c, 0x82, 0x02, 0x5b, 0x8a, 0x81, 0x6f, 0x58, 0x75, 0xf5, 0xaf, 0x19, 0xfe, 0x33, 0x48, 0x53,
+	0xc3, 0xe9, 0xa9, 0x51, 0x50, 0x36, 0x83, 0xb3, 0x0c, 0xf8, 0x2d, 0xd8, 0x9d, 0x06, 0x6a, 0x24,
+	0x1c, 0x0c, 0xd5, 0xbf, 0x09, 0x91, 0x27, 0xc9, 0x48, 0x53, 0x17, 0x45, 0x22, 0x86, 0xd1, 0xd1,
+	0x3c, 0x6b, 0xbc, 0x17, 0x36, 0xd1, 0x42, 0xe6, 0xef, 0xcb, 0xf7, 0xc2, 0xed, 0x20, 0x15, 0x99,
+	0x65, 0xc0, 0x1a, 0x80, 0x53, 0x55, 0x33, 0x70, 0x79, 0x5d, 0xd5, 0x4f, 0xa2, 0x05, 0xb4, 0x64,
+	0xa4, 0x3d, 0x5e, 0xdc, 0x0e, 0x4e, 0x61, 0x3a, 0x5a, 0xc2, 0x85, 0x3f, 0x15, 0xd7, 0x84, 0xfa,
+	0x67, 0xe6, 0xfb, 0x5b, 0x2f, 0xb6, 0xce, 0xf9, 0x6d, 0x73, 0xce, 0x62, 0xa5, 0x87, 0xc9, 0x48,
+	0x83, 0xb2, 0x60, 0x18, 0xb4, 0xbd, 0x8e, 0x8e, 0x38, 0x43, 0xff, 0x0b, 0x00, 0xb9, 0x06, 0xe6,
+	0xbe, 0xf2, 0x12, 0x14, 0xc4, 0x5f, 0x15, 0x4c, 0xbf, 0x0b, 0xc9, 0xed, 0x62, 0x4f, 0x52, 0x9e,
+	0x36, 0x02, 0x91, 0xd7, 0xd1, 0x2c, 0x1e, 0xfe, 0x04, 0x00, 0x11, 0xe0, 0x8d, 0x25, 0xae, 0x20,
+	0xe9, 0xeb, 0x29, 0x5b, 0x34, 0x94, 0x84, 0x64, 0xee, 0x7d, 0x81, 0x7d, 0x7b, 0x68, 0xd9, 0x14,
+	0x07, 0xce, 0xf0, 0x75, 0xcc, 0x3b, 0xba, 0x20, 0xbb, 0xb7, 0xcb, 0xf2, 0x86, 0x2f, 0x00, 0x46,
+	0x8f, 0xb9, 0xf7, 0x2c, 0x05, 0xfe, 0x12, 0x28, 0xb3, 0x11, 0x74, 0xc7, 0x7b, 0xbb, 0x20, 0xf7,
+	0xf6, 0xbc, 0x8c, 0x41, 0xee, 0x74, 0xb4, 0xc0, 0x63, 0x07, 0x41, 0x61, 0x3f, 0x70, 0x2d, 0xaf,
+	0xe7, 0x51, 0xf5, 0xe0, 0x24, 0x73, 0x9a, 0x95, 0x0f, 0x42, 0x58, 0xce, 0xf0, 0x59, 0x52, 0x47,
+	0x12, 0x12, 0xfe, 0x02, 0x14, 0xcc, 0x81, 0x47, 0xab, 0x01, 0x9b, 0x96, 0x3e, 0xc1, 0xea, 0xc3,
+	0x85, 0xd6, 0x18, 0x78, 0xd4, 0x08, 0x03, 0xa3, 0x2d, 0x00, 0xac, 0x35, 0x64, 0x02, 0xbc, 0x04,
+	0x4a, 0x39, 0x0c, 0x62, 0x7e, 0x31, 0x39, 0x43, 0x61, 0x39, 0x87, 0xf3, 0x6d, 0xea, 0x4c, 0x11,
+	0x63, 0xbb, 0x59, 0x60, 0xc1, 0x9f, 0x81, 0x2d, 0x33, 0xb0, 0x5b, 0x3e, 0xae, 0x45, 0x24, 0x6c,
+	0xab, 0x2a, 0x17, 0x39, 0x4c, 0x46, 0xda, 0x5e, 0xba, 0x13, 0x9e, 0x34, 0x22, 0x96, 0x65, 0xe3,
+	0x36, 0xc5, 0xc2, 0x6f, 0xc0, 0x76, 0xba, 0x9f, 0xb2, 0x1d, 0xe3, 0x58, 0xd5, 0xf8, 0xe5, 0x27,
+	0xcd, 0x6a, 0xba, 0x7b, 0xc3, 0x61, 0x69, 0x1d, 0xcd, 0xa0, 0xe1, 0xb5, 0xe4, 0x32, 0xe5, 0xb0,
+	0xd7, 0xb3, 0x03, 0x37, 0x56, 0xbf, 0x98, 0x7f, 0x2b, 0x4c, 0x5d, 0xc6, 0x49, 0x31, 0xb2, 0xc9,
+	0x8c, 0x79, 0xac, 0x35, 0x50, 0x3f, 0x08, 0x30, 0x99, 0x18, 0xe5, 0x8f, 0x78, 0x5b, 0x49, 0xad,
+	0x41, 0x78, 0x5e, 0xb6, 0xca, 0x39, 0x0a, 0x7b, 0xbc, 0x98, 0x03, 0x8a, 0x49, 0x60, 0xfb, 0x13,
+	0x99, 0x33, 0x2e, 0x23, 0x6d, 0x08, 0xa7, 0x08, 0x59, 0x68, 0x81, 0xc6, 0xaa, 0x5a, 0xa7, 0x04,
+	0xc7, 0x71, 0x63, 0x18, 0xe1, 0x58, 0xc5, 0xfc, 0x58, 0x52, 0x55, 0x63, 0x9e, 0x34, 0x28, 0xcb,
+	0xea, 0x48, 0xc6, 0xb2, 0xe6, 0x10, 0xcb, 0x6b, 0x3c, 0xac, 0x7b, 0xdf, 0x63, 0x6e, 0x81, 0x59,
+	0xb9, 0x39, 0x52, 0xf2, 0x2d, 0x1e, 0x1a, 0xb1, 0xf7, 0x3d, 0x6b, 0x8e, 0x19, 0x02, 0xf3, 0x8d,
+	0x99, 0x80, 0x65, 0x93, 0x0e, 0x56, 0x3b, 0x5c, 0x46, 0xba, 0x91, 0xe6, 0x64, 0x0c, 0x9f, 0xc1,
+	0x74, 0xb4, 0x84, 0x0b, 0xdf, 0x82, 0xfd, 0x69, 0xb4, 0xdf, 0x6e, 0x7b, 0x03, 0x64, 0x07, 0x1d,
+	0xac, 0x76, 0xb9, 0xa6, 0x9e, 0x8c, 0xb4, 0xe3, 0x45, 0x4d, 0x8e, 0x33, 0x08, 0x03, 0xea, 0x68,
+	0x29, 0x1f, 0xfe, 0x1a, 0x1c, 0x2e, 0x8b, 0x37, 0x06, 0x81, 0xea, 0x71, 0x69, 0xe9, 0x25, 0x75,
+	0x8f, 0xb4, 0x41, 0x07, 0x81, 0x8e, 0xee, 0x93, 0x61, 0x7e, 0x3e, 0x49, 0x35, 0x06, 0x41, 0x35,
+	0x8a, 0xd5, 0xdf, 0x70, 0x65, 0xe9, 0x27, 0x95, 0x94, 0xe9, 0x20, 0x30, 0xc2, 0x28, 0xd6, 0xd1,
+	0x3c, 0x8b, 0xdd, 0xa4, 0x22, 0xf4, 0xa6, 0x56, 0x57, 0x6f, 0xb9, 0x84, 0x74, 0x93, 0xa6, 0x12,
+	0x1f, 0x18, 0x75, 0x8a, 0xd3, 0x7f, 0x97, 0x01, 0x1b, 0x08, 0x7f, 0xe8, 0xe3, 0x98, 0xc2, 0x73,
+	0x90, 0xaf, 0x46, 0x98, 0xd8, 0xd4, 0x0b, 0x03, 0x6e, 0x99, 0x3b, 0x2f, 0x94, 0xd4, 0x7f, 0x27,
+	0x71, 0x34, 0x85, 0xc0, 0x2f, 0xc7, 0x6f, 0x00, 0x55, 0x98, 0x75, 0x21, 0x05, 0x8b, 0x20, 0x1a,
+	0x3f, 0x10, 0xbe, 0x1c, 0xfb, 0x32, 0x7f, 0x9d, 0x4f, 0x61, 0x22, 0x88, 0xd2, 0xa4, 0xfe, 0x0d,
+	0xd8, 0x44, 0x38, 0x8e, 0xc2, 0x20, 0xc6, 0x50, 0x05, 0x1b, 0xf5, 0xbe, 0xe3, 0xe0, 0x38, 0xe6,
+	0xfb, 0xd8, 0x44, 0xe3, 0x25, 0x7c, 0x08, 0x72, 0xec, 0x9d, 0xd7, 0x8f, 0x85, 0x2b, 0xa3, 0x74,
+	0x75, 0xf6, 0xcf, 0x8c, 0xb4, 0x79, 0xb8, 0x03, 0x40, 0x25, 0xa4, 0x75, 0x6a, 0x13, 0x8a, 0x5d,
+	0x65, 0x05, 0xee, 0x03, 0x25, 0x7d, 0xc5, 0xf0, 0x18, 0xbb, 0x2f, 0x94, 0x0c, 0xdc, 0x05, 0x5b,
+	0x08, 0xc7, 0x93, 0xc0, 0x2a, 0xdc, 0x06, 0x9b, 0xd7, 0x9e, 0xef, 0xf3, 0xd5, 0x1a, 0x4b, 0xb3,
+	0x31, 0x2e, 0x12, 0xa7, 0xeb, 0xdd, 0x61, 0x65, 0x9d, 0xa9, 0x5c, 0xe0, 0x98, 0x92, 0x70, 0xc8,
+	0x10, 0xfc, 0x35, 0xa2, 0x64, 0xe1, 0x23, 0x70, 0x50, 0xf2, 0x6d, 0xe7, 0xb6, 0x1b, 0xfa, 0xfc,
+	0x75, 0x5c, 0x0b, 0x09, 0x6d, 0x0c, 0xd0, 0x40, 0x71, 0xe1, 0x63, 0x70, 0x78, 0x13, 0xb4, 0x96,
+	0x26, 0x31, 0x3c, 0x00, 0x0f, 0xb8, 0x5d, 0xcf, 0x84, 0xdb, 0xf0, 0x10, 0xec, 0xdd, 0x04, 0xee,
+	0x42, 0xa2, 0x73, 0xf6, 0xc7, 0x35, 0xb1, 0x9f, 0xd4, 0x9e, 0x18, 0xff, 0xfa, 0xca, 0xb2, 0x9a,
+	0xd5, 0x8a, 0xd9, 0x7c, 0x55, 0xb5, 0xac, 0xea, 0x3b, 0x13, 0x29, 0x2b, 0x6c, 0xd7, 0x3c, 0x6c,
+	0x99, 0xc5, 0x0b, 0x13, 0x29, 0x19, 0x78, 0x0c, 0x8e, 0x16, 0x70, 0xcd, 0x57, 0x55, 0xd4, 0xb4,
+	0xaa, 0x95, 0x6f, 0x95, 0x55, 0xa8, 0x82, 0x7d, 0x89, 0x30, 0xcd, 0xac, 0x4d, 0xa4, 0xde, 0xdc,
+	0x54, 0xd1, 0xcd, 0x6b, 0x65, 0x9d, 0xd7, 0x87, 0x05, 0x8a, 0x96, 0xa5, 0x64, 0xe1, 0x19, 0x78,
+	0x56, 0xb2, 0x8a, 0xe5, 0xeb, 0xcb, 0xaa, 0x65, 0x36, 0x6b, 0xa6, 0x89, 0x9a, 0xb5, 0x2a, 0x6a,
+	0x34, 0x1b, 0xef, 0x9b, 0xe8, 0xfd, 0xec, 0xae, 0x72, 0xf0, 0x14, 0xfc, 0xf0, 0x7e, 0x6c, 0xfa,
+	0xe5, 0x6a, 0xc5, 0x54, 0x36, 0xe0, 0x17, 0xe0, 0xe9, 0xfd, 0x48, 0xf6, 0xe1, 0x4d, 0xf8, 0x0c,
+	0xe8, 0x17, 0xa6, 0x55, 0xfc, 0xd5, 0xff, 0xfe, 0x68, 0x1e, 0x9e, 0x80, 0x27, 0xcb, 0x71, 0x69,
+	0x6d, 0x00, 0x7c, 0x0a, 0x1e, 0x2d, 0x47, 0xb0, 0x0f, 0x6d, 0xb1, 0x36, 0x7a, 0x55, 0xbc, 0xb2,
+	0x6a, 0xd5, 0xab, 0x4a, 0xa3, 0xae, 0xb8, 0x70, 0x1b, 0x6c, 0x54, 0xaa, 0x4d, 0x16, 0x52, 0x3e,
+	0x66, 0x60, 0x01, 0x6c, 0x9a, 0xef, 0x1b, 0x26, 0xaa, 0x14, 0x2d, 0xe5, 0x4f, 0xab, 0x67, 0xbf,
+	0x05, 0x60, 0x6a, 0x92, 0x30, 0x07, 0x56, 0xaf, 0xdf, 0x2a, 0x2b, 0x30, 0x0f, 0xb2, 0x96, 0x59,
+	0xac, 0x9b, 0x0a, 0xc3, 0xe7, 0x2b, 0xd5, 0x66, 0xbd, 0x81, 0xcc, 0x7a, 0x5d, 0x59, 0x85, 0x7b,
+	0x60, 0xd7, 0xb4, 0xcc, 0x72, 0xe3, 0xaa, 0x5a, 0x69, 0xa2, 0x9b, 0x4a, 0xc5, 0x44, 0xca, 0x1a,
+	0x54, 0xc0, 0xf6, 0xbb, 0x62, 0xa3, 0x7c, 0x39, 0x8e, 0xac, 0xb3, 0x9f, 0xd9, 0xaa, 0x96, 0xaf,
+	0x9b, 0xa8, 0x58, 0x36, 0xd1, 0x38, 0x9c, 0x65, 0x40, 0xae, 0x3b, 0x8e, 0xe4, 0x5e, 0xbc, 0x04,
+	0xf9, 0x06, 0xb1, 0x83, 0x38, 0x0a, 0x09, 0x85, 0x2f, 0xe4, 0xc5, 0x4e, 0x3a, 0x70, 0xe9, 0x9c,
+	0x1f, 0xed, 0x4e, 0xd6, 0x62, 0xdc, 0xf4, 0x95, 0xd3, 0xcc, 0x8f, 0x33, 0xa5, 0xfd, 0x8f, 0xff,
+	0x3e, 0x5e, 0xf9, 0xf8, 0xf9, 0x38, 0xf3, 0x8f, 0xcf, 0xc7, 0x99, 0x7f, 0x7d, 0x3e, 0xce, 0xfc,
+	0xe1, 0x3f, 0xc7, 0x2b, 0xad, 0x1c, 0xff, 0xff, 0xfd, 0xab, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff,
+	0x53, 0xa4, 0xc8, 0x19, 0x08, 0x10, 0x00, 0x00,
+}
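
The generated Marshal, MarshalTo, Size, and Unmarshal methods above give every rpcpb message a symmetric wire round-trip. A minimal usage sketch, assuming the import path follows the repository layout and the standard gogo enum constant naming (Operation_InitialStartEtcd); all field values are made up for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb" // assumed import path
)

func main() {
	req := &rpcpb.Request{
		Operation: rpcpb.Operation_InitialStartEtcd, // assumed generated constant name
		Member: &rpcpb.Member{
			EtcdExecPath:       "/bin/etcd", // illustrative values only
			AgentAddr:          "127.0.0.1:19027",
			EtcdClientEndpoint: "127.0.0.1:1379",
		},
	}

	// Size pre-computes the buffer length that Marshal allocates.
	data, err := req.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d bytes (Size reported %d)\n", len(data), req.Size())

	var decoded rpcpb.Request
	if err := decoded.Unmarshal(data); err != nil {
		log.Fatal(err)
	}
	fmt.Println("round-tripped operation:", decoded.Operation)
}
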

+ 187 - 0
tools/functional-tester/rpcpb/rpc.proto

@@ -0,0 +1,187 @@
+syntax = "proto3";
+package rpcpb;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+service Transport {
+  rpc Transport(stream Request) returns (stream Response) {}
+}
+
+enum Operation {
+  NotStarted = 0;
+
+  // InitialStartEtcd is only called to start etcd for the very first time.
+  InitialStartEtcd = 1;
+  // RestartEtcd is sent to restart a killed etcd process.
+  RestartEtcd = 2;
+  // KillEtcd pauses the etcd process while keeping its data directories
+  // and previous etcd configurations.
+  KillEtcd = 3;
+  // FailArchive is sent when the consistency check fails,
+  // so the etcd data directories need to be archived.
+  FailArchive = 4;
+  // DestroyEtcdAgent destroys the etcd process, etcd data, and the agent server.
+  DestroyEtcdAgent = 5;
+
+  BlackholePeerPortTxRx = 100;
+  UnblackholePeerPortTxRx = 101;
+  DelayPeerPortTxRx = 102;
+  UndelayPeerPortTxRx = 103;
+}
+
+message Etcd {
+  string Name = 1 [(gogoproto.moretags) = "yaml:\"name\""];
+  string DataDir = 2 [(gogoproto.moretags) = "yaml:\"data-dir\""];
+  string WALDir = 3 [(gogoproto.moretags) = "yaml:\"wal-dir\""];
+  repeated string ListenClientURLs = 4 [(gogoproto.moretags) = "yaml:\"listen-client-urls\""];
+  repeated string AdvertiseClientURLs = 5 [(gogoproto.moretags) = "yaml:\"advertise-client-urls\""];
+  repeated string ListenPeerURLs = 6 [(gogoproto.moretags) = "yaml:\"listen-peer-urls\""];
+  repeated string InitialAdvertisePeerURLs = 7 [(gogoproto.moretags) = "yaml:\"initial-advertise-peer-urls\""];
+  string InitialCluster = 8 [(gogoproto.moretags) = "yaml:\"initial-cluster\""];
+  string InitialClusterState = 9 [(gogoproto.moretags) = "yaml:\"initial-cluster-state\""];
+  string InitialClusterToken = 10 [(gogoproto.moretags) = "yaml:\"initial-cluster-token\""];
+  int64 SnapshotCount = 11 [(gogoproto.moretags) = "yaml:\"snapshot-count\""];
+  int64 QuotaBackendBytes = 12 [(gogoproto.moretags) = "yaml:\"quota-backend-bytes\""];
+  bool PreVote = 13 [(gogoproto.moretags) = "yaml:\"pre-vote\""];
+  bool InitialCorruptCheck = 14 [(gogoproto.moretags) = "yaml:\"initial-corrupt-check\""];
+
+  // TODO: support TLS
+}
+
+message Member {
+  // EtcdExecPath is the path to the etcd binary on the agent server.
+  string EtcdExecPath = 1 [(gogoproto.moretags) = "yaml:\"etcd-exec-path\""];
+
+  // TODO: support embedded etcd
+
+  // AgentAddr is the agent HTTP server address.
+  string AgentAddr = 11 [(gogoproto.moretags) = "yaml:\"agent-addr\""];
+  // FailpointHTTPAddr is the agent's failpoints HTTP server address.
+  string FailpointHTTPAddr = 12 [(gogoproto.moretags) = "yaml:\"failpoint-http-addr\""];
+
+  // BaseDir is the base directory where all logs and etcd data are stored.
+  string BaseDir = 101 [(gogoproto.moretags) = "yaml:\"base-dir\""];
+  // EtcdLogPath is the path of the log file that stores the current etcd server logs.
+  string EtcdLogPath = 102 [(gogoproto.moretags) = "yaml:\"etcd-log-path\""];
+
+  // EtcdClientTLS is true when client traffic needs to be encrypted.
+  bool EtcdClientTLS = 201 [(gogoproto.moretags) = "yaml:\"etcd-client-tls\""];
+  // EtcdClientProxy is true when client traffic needs to be proxied.
+  // If true, the listen client URL port must be different from the advertise client URL port.
+  bool EtcdClientProxy = 202 [(gogoproto.moretags) = "yaml:\"etcd-client-proxy\""];
+  // EtcdPeerProxy is true when peer traffic needs to be proxied.
+  // If true, the listen peer URL port must be different from the advertise peer URL port.
+  bool EtcdPeerProxy = 203 [(gogoproto.moretags) = "yaml:\"etcd-peer-proxy\""];
+  // EtcdClientEndpoint is the etcd client endpoint.
+  string EtcdClientEndpoint = 204 [(gogoproto.moretags) = "yaml:\"etcd-client-endpoint\""];
+
+  // Etcd defines etcd binary configuration flags.
+  Etcd Etcd = 301 [(gogoproto.moretags) = "yaml:\"etcd-config\""];
+}
+
+enum FailureCase {
+  KILL_ONE_FOLLOWER = 0;
+  KILL_LEADER = 1;
+  KILL_ONE_FOLLOWER_FOR_LONG = 2;
+  KILL_LEADER_FOR_LONG = 3;
+  KILL_QUORUM = 4;
+  KILL_ALL = 5;
+
+  BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER = 6;
+  BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE = 7;
+  BLACKHOLE_PEER_PORT_TX_RX_ALL = 8;
+
+  DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 9;
+  DELAY_PEER_PORT_TX_RX_LEADER = 10;
+  DELAY_PEER_PORT_TX_RX_ALL = 11;
+
+  FAILPOINTS = 100;
+
+  NO_FAIL = 200;
+  // TODO: support no-op of liveness duration
+  // NO_FAIL_LIVENESS = 201;
+
+  EXTERNAL = 300;
+}
+
+enum StressType {
+  KV = 0;
+  LEASE = 1;
+  NO_STRESS = 2;
+  ELECTION_RUNNER = 3;
+  WATCH_RUNNER = 4;
+  LOCK_RACER_RUNNER = 5;
+  LEASE_RUNNER = 6;
+}
+
+message Tester {
+  string TesterNetwork = 1 [(gogoproto.moretags) = "yaml:\"tester-network\""];
+  string TesterAddr = 2 [(gogoproto.moretags) = "yaml:\"tester-addr\""];
+
+  // DelayLatencyMs is the delay latency in milliseconds
+  // to inject into the network to simulate a slow network.
+  uint32 DelayLatencyMs = 11 [(gogoproto.moretags) = "yaml:\"delay-latency-ms\""];
+  // DelayLatencyMsRv is the delay latency random variable in milliseconds.
+  uint32 DelayLatencyMsRv = 12 [(gogoproto.moretags) = "yaml:\"delay-latency-ms-rv\""];
+
+  // RoundLimit is the limit on the number of rounds to run the failure set (-1 to run without limit).
+  int32 RoundLimit = 21 [(gogoproto.moretags) = "yaml:\"round-limit\""];
+  // ExitOnFailure, if true, exits the tester on the first failure.
+  bool ExitOnFailure = 22 [(gogoproto.moretags) = "yaml:\"exit-on-failure\""];
+  // ConsistencyCheck is true to check consistency (revision, hash).
+  bool ConsistencyCheck = 23 [(gogoproto.moretags) = "yaml:\"consistency-check\""];
+  // EnablePprof is true to enable profiler.
+  bool EnablePprof = 24 [(gogoproto.moretags) = "yaml:\"enable-pprof\""];
+
+  // FailureCases is the list of failure cases to schedule.
+  // If empty, run all failure cases.
+  // TODO: support no-op
+  repeated string FailureCases = 31 [(gogoproto.moretags) = "yaml:\"failure-cases\""];
+  // FailureShuffle is true to randomize failure injecting order.
+  // TODO: support shuffle
+  // bool FailureShuffle = 32 [(gogoproto.moretags) = "yaml:\"failure-shuffle\""];
+  // FailpointCommands is the list of "gofail" commands (e.g. panic("etcd-tester"),1*sleep(1000)).
+  repeated string FailpointCommands = 33 [(gogoproto.moretags) = "yaml:\"failpoint-commands\""];
+
+  // RunnerExecPath is a path of etcd-runner binary.
+  string RunnerExecPath = 41 [(gogoproto.moretags) = "yaml:\"runner-exec-path\""];
+  // ExternalExecPath is a path of script for enabling/disabling an external fault injector.
+  string ExternalExecPath = 42 [(gogoproto.moretags) = "yaml:\"external-exec-path\""];
+
+  // StressTypes is the list of stresser types:
+  // KV, LEASE, NO_STRESS, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER.
+  repeated string StressTypes = 101 [(gogoproto.moretags) = "yaml:\"stress-types\""];
+  // StressKeySize is the size of each small key written into etcd.
+  int32 StressKeySize = 102 [(gogoproto.moretags) = "yaml:\"stress-key-size\""];
+  // StressKeySizeLarge is the size of each large key written into etcd.
+  int32 StressKeySizeLarge = 103 [(gogoproto.moretags) = "yaml:\"stress-key-size-large\""];
+  // StressKeySuffixRange is the count of key suffixes written into etcd.
+  // Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))".
+  int32 StressKeySuffixRange = 104 [(gogoproto.moretags) = "yaml:\"stress-key-suffix-range\""];
+  // StressKeySuffixRangeTxn is the count of key suffixes written into etcd via txn (max 100).
+  // Stress keys are created with "fmt.Sprintf("/k%03d", i)".
+  int32 StressKeySuffixRangeTxn = 105 [(gogoproto.moretags) = "yaml:\"stress-key-suffix-range-txn\""];
+  // StressKeyTxnOps is the number of operations per transaction (max 64).
+  int32 StressKeyTxnOps = 106 [(gogoproto.moretags) = "yaml:\"stress-key-txn-ops\""];
+  // StressQPS is the maximum number of stresser requests per second.
+  int32 StressQPS = 107 [(gogoproto.moretags) = "yaml:\"stress-qps\""];
+}
+
+message Request {
+  Operation Operation = 1;
+
+  Member Member = 2;
+  Tester Tester = 3;
+}
+
+message Response {
+  bool Success = 1;
+  string Status = 2;
+
+  // TODO: support TLS
+}
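
Note: a minimal Go sketch (not part of this change set) of how the generated rpcpb types above are typically used: failure-case names from the YAML config are looked up in the generated FailureCase_value map, and a Request bundles the operation with the per-member and tester configuration. The concrete field values here are placeholders.

package main

import (
	"fmt"

	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
)

func main() {
	// Failure cases are configured as strings and validated against
	// the enum map generated from rpc.proto.
	name := "KILL_LEADER"
	if _, ok := rpcpb.FailureCase_value[name]; !ok {
		panic(fmt.Sprintf("%q is not a defined rpcpb.FailureCase", name))
	}

	// A Request carries the operation plus the full member and tester
	// configuration, so the agent can start etcd with the expected flags.
	req := &rpcpb.Request{
		Operation: rpcpb.Operation_InitialStartEtcd,
		Member:    &rpcpb.Member{EtcdExecPath: "./bin/etcd"},
		Tester:    &rpcpb.Tester{RoundLimit: 1},
	}
	fmt.Println(req.Operation, req.Member.EtcdExecPath, req.Tester.RoundLimit)
}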

+ 1 - 1
tools/functional-tester/etcd-runner/command/election_command.go → tools/functional-tester/runner/election_command.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package command
+package runner
 
 import (
 	"context"

+ 1 - 1
tools/functional-tester/etcd-runner/command/error.go → tools/functional-tester/runner/error.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package command
+package runner
 
 import (
 	"fmt"

+ 1 - 1
tools/functional-tester/etcd-runner/command/global.go → tools/functional-tester/runner/global.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package command
+package runner
 
 import (
 	"context"

+ 1 - 1
tools/functional-tester/etcd-runner/command/help.go → tools/functional-tester/runner/help.go

@@ -14,7 +14,7 @@
 
 // copied from https://github.com/rkt/rkt/blob/master/rkt/help.go
 
-package command
+package runner
 
 import (
 	"bytes"

+ 1 - 1
tools/functional-tester/etcd-runner/command/lease_renewer_command.go → tools/functional-tester/runner/lease_renewer_command.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package command
+package runner
 
 import (
 	"context"

+ 1 - 1
tools/functional-tester/etcd-runner/command/lock_racer_command.go → tools/functional-tester/runner/lock_racer_command.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package command
+package runner
 
 import (
 	"context"

+ 2 - 2
tools/functional-tester/etcd-runner/command/root.go → tools/functional-tester/runner/root.go

@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package command implements individual etcd-runner commands for the etcd-runner utility.
-package command
+// Package runner implements individual etcd-runner commands for the etcd-runner utility.
+package runner
 
 import (
 	"log"

+ 1 - 1
tools/functional-tester/etcd-runner/command/watch_command.go → tools/functional-tester/runner/watch_command.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package command
+package runner
 
 import (
 	"context"

+ 4 - 20
tools/functional-tester/scripts/docker-local-agent.sh

@@ -13,7 +13,7 @@ if ! [[ "${0}" =~ "scripts/docker-local-agent.sh" ]]; then
 fi
 
 if [[ -z "${GO_VERSION}" ]]; then
-  GO_VERSION=1.10
+  GO_VERSION=1.10.1
 fi
 echo "Running with GO_VERSION:" ${GO_VERSION}
 
@@ -29,30 +29,14 @@ else
        exit 255 ;;
   esac
   AGENT_NAME="agent-${1}"
-  AGENT_PORT_FLAG="--port :${1}9027"
-  FAILPOINT_ADDR_FLAG="--failpoint-addr :738${1}"
+  AGENT_ADDR_FLAG="--network tcp --address 127.0.0.1:${1}9027"
 fi
 echo "AGENT_NAME:" ${AGENT_NAME}
-echo "AGENT_PORT_FLAG:" ${AGENT_PORT_FLAG}
-echo "FAILPOINT_ADDR_FLAG:" ${FAILPOINT_ADDR_FLAG}
+echo "AGENT_ADDR_FLAG:" ${AGENT_ADDR_FLAG}
 
-if [[ -z "${ETCD_EXEC_PATH}" ]]; then
-  ETCD_EXEC_PATH=/etcd
-elif [[ "${ETCD_EXEC_PATH}" != "/etcd-failpoints" ]]; then
-  echo "Cannot find etcd executable:" ${ETCD_EXEC_PATH}
-  exit 255
-fi
-echo "ETCD_EXEC_PATH:" ${ETCD_EXEC_PATH}
-
-rm -rf `pwd`/${AGENT_NAME} && mkdir -p `pwd`/${AGENT_NAME}
 docker run \
   --rm \
   --net=host \
   --name ${AGENT_NAME} \
-  --mount type=bind,source=`pwd`/${AGENT_NAME},destination=/${AGENT_NAME} \
   gcr.io/etcd-development/etcd-functional-tester:go${GO_VERSION} \
-  /bin/bash -c "/etcd-agent \
-    --etcd-path ${ETCD_EXEC_PATH} \
-    --etcd-log-dir /${AGENT_NAME} \
-    ${AGENT_PORT_FLAG} \
-    ${FAILPOINT_ADDR_FLAG}"
+  /bin/bash -c "./bin/etcd-agent ${AGENT_ADDR_FLAG}"

+ 2 - 49
tools/functional-tester/scripts/docker-local-tester.sh

@@ -1,65 +1,18 @@
 #!/usr/bin/env bash
 
-<<COMMENT
-# to run with different Go version
-# requires prebuilt Docker image
-#   GO_VERSION=1.10 make build-docker-functional-tester -f ./hack/scripts-dev/Makefile
-GO_VERSION=1.10 ./scripts/docker-local-tester.sh
-
-# to run only 1 tester round
-LIMIT=1 ./scripts/docker-local-tester.sh
-
-# to run long-running tests with no limit
-LIMIT=1 ./scripts/docker-local-tester.sh
-
-# to run only 1 tester round with election runner and others
-# default is STRESSER="keys,lease"
-LIMIT=1 \
-  STRESSER="keys,lease,election-runner,watch-runner,lock-racer-runner,lease-runner" \
-  ./scripts/docker-local-tester.sh
-
-# TODO: make stresser QPS configurable
-COMMENT
-
 if ! [[ "${0}" =~ "scripts/docker-local-tester.sh" ]]; then
   echo "must be run from tools/functional-tester"
   exit 255
 fi
 
 if [[ -z "${GO_VERSION}" ]]; then
-  GO_VERSION=1.10
+  GO_VERSION=1.10.1
 fi
 echo "Running with GO_VERSION:" ${GO_VERSION}
 
-if [[ "${LIMIT}" ]]; then
-  LIMIT_FLAG="--limit ${LIMIT}"
-  echo "Running with:" ${LIMIT_FLAG}
-else
-  echo "Running with no limit"
-fi
-
-if [[ "${STRESSER}" ]]; then
-  STRESSER_FLAG="--stresser ${STRESSER}"
-else
-  STRESSER_FLAG="--stresser keys,lease"
-fi
-echo "Running with:" ${STRESSER_FLAG}
-
 docker run \
   --rm \
   --net=host \
   --name tester \
   gcr.io/etcd-development/etcd-functional-tester:go${GO_VERSION} \
-  /bin/bash -c "/etcd-tester \
-    --agent-endpoints '127.0.0.1:19027,127.0.0.1:29027,127.0.0.1:39027' \
-    --client-ports 1379,2379,3379 \
-    --advertise-client-ports 13790,23790,33790 \
-    --peer-ports 1380,2380,3380 \
-    --advertise-peer-ports 13800,23800,33800 \
-    ${LIMIT_FLAG} \
-    --etcd-runner /etcd-runner \
-    --stress-qps=2500 \
-    --stress-key-txn-count 100 \
-    --stress-key-txn-ops 10 \
-    ${STRESSER_FLAG} \
-    --exit-on-failure"
+  /bin/bash -c "./bin/etcd-tester --config ./local-test.yaml"

+ 28 - 0
tools/functional-tester/scripts/genproto.sh

@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -e
+
+if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
+  echo "must be run from repository root"
+  exit 255
+fi
+
+# for now, be conservative about what version of protoc we expect
+if ! [[ $(protoc --version) =~ "3.5.1" ]]; then
+  echo "could not find protoc 3.5.1, is it installed + in PATH?"
+  exit 255
+fi
+
+echo "Installing gogo/protobuf..."
+GOGOPROTO_ROOT="$GOPATH/src/github.com/gogo/protobuf"
+rm -rf $GOGOPROTO_ROOT
+go get -v github.com/gogo/protobuf/{proto,protoc-gen-gogo,gogoproto,protoc-gen-gofast}
+go get -v golang.org/x/tools/cmd/goimports
+pushd "${GOGOPROTO_ROOT}"
+  git reset --hard HEAD
+  make install
+popd
+
+printf "Generating agent\n"
+protoc --gofast_out=plugins=grpc:. \
+  --proto_path=$GOPATH/src:$GOPATH/src/github.com/gogo/protobuf/protobuf:. \
+  rpcpb/*.proto;

+ 72 - 18
tools/functional-tester/etcd-tester/checks.go → tools/functional-tester/tester/checks.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
 import (
 	"context"
@@ -23,12 +23,11 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 )
 
-const (
-	retries = 7
-)
+const retries = 7
 
 type Checker interface {
 	// Check returns an error if the system fails a consistency check.
@@ -40,10 +39,16 @@ type hashAndRevGetter interface {
 }
 
 type hashChecker struct {
-	hrg hashAndRevGetter
+	logger *zap.Logger
+	hrg    hashAndRevGetter
 }
 
-func newHashChecker(hrg hashAndRevGetter) Checker { return &hashChecker{hrg} }
+func newHashChecker(logger *zap.Logger, hrg hashAndRevGetter) Checker {
+	return &hashChecker{
+		logger: logger,
+		hrg:    hrg,
+	}
+}
 
 const leaseCheckerTimeout = 10 * time.Second
 
@@ -57,14 +62,25 @@ func (hc *hashChecker) checkRevAndHashes() (err error) {
 	for i := 0; i < retries; i++ {
 		revs, hashes, err = hc.hrg.getRevisionHash()
 		if err != nil {
-			plog.Warningf("retry %d. failed to retrieve revison and hash (%v)", i, err)
+			hc.logger.Warn(
+				"failed to get revision and hash",
+				zap.Int("retries", i),
+				zap.Error(err),
+			)
 		} else {
 			sameRev := getSameValue(revs)
 			sameHashes := getSameValue(hashes)
 			if sameRev && sameHashes {
 				return nil
 			}
-			plog.Warningf("retry %d. etcd cluster is not stable: [revisions: %v] and [hashes: %v]", i, revs, hashes)
+			hc.logger.Warn(
+				"retrying; etcd cluster is not stable",
+				zap.Int("retries", i),
+				zap.Bool("same-revisions", sameRev),
+				zap.Bool("same-hashes", sameHashes),
+				zap.String("revisions", fmt.Sprintf("%+v", revs)),
+				zap.String("hashes", fmt.Sprintf("%+v", hashes)),
+			)
 		}
 		time.Sleep(time.Second)
 	}
@@ -81,7 +97,10 @@ func (hc *hashChecker) Check() error {
 }
 
 type leaseChecker struct {
-	endpoint    string
+	logger *zap.Logger
+
+	endpoint string // TODO: use Member
+
 	ls          *leaseStresser
 	leaseClient pb.LeaseClient
 	kvc         pb.KVClient
@@ -138,14 +157,31 @@ func (lc *leaseChecker) checkShortLivedLease(ctx context.Context, leaseID int64)
 			return nil
 		}
 		if err != nil {
-			plog.Debugf("retry %d. failed to retrieve lease %v error (%v)", i, leaseID, err)
+			lc.logger.Debug(
+				"retrying; Lease TimeToLive failed",
+				zap.Int("retries", i),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Error(err),
+			)
 			continue
 		}
 		if resp.TTL > 0 {
-			plog.Debugf("lease %v is not expired. sleep for %d until it expires.", leaseID, resp.TTL)
-			time.Sleep(time.Duration(resp.TTL) * time.Second)
+			dur := time.Duration(resp.TTL) * time.Second
+			lc.logger.Debug(
+				"lease has not been expired, wait until expire",
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Int64("ttl", resp.TTL),
+				zap.Duration("wait-duration", dur),
+			)
+			time.Sleep(dur)
 		} else {
-			plog.Debugf("retry %d. lease %v is expired but not yet revoked", i, leaseID)
+			lc.logger.Debug(
+				"lease expired but not yet revoked",
+				zap.Int("retries", i),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Int64("ttl", resp.TTL),
+				zap.Duration("wait-duration", time.Second),
+			)
 			time.Sleep(time.Second)
 		}
 		if err = lc.checkLease(ctx, false, leaseID); err != nil {
@@ -159,12 +195,20 @@ func (lc *leaseChecker) checkShortLivedLease(ctx context.Context, leaseID int64)
 func (lc *leaseChecker) checkLease(ctx context.Context, expired bool, leaseID int64) error {
 	keysExpired, err := lc.hasKeysAttachedToLeaseExpired(ctx, leaseID)
 	if err != nil {
-		plog.Errorf("hasKeysAttachedToLeaseExpired error %v (endpoint %q)", err, lc.endpoint)
+		lc.logger.Warn(
+			"hasKeysAttachedToLeaseExpired failed",
+			zap.String("endpoint", lc.endpoint),
+			zap.Error(err),
+		)
 		return err
 	}
 	leaseExpired, err := lc.hasLeaseExpired(ctx, leaseID)
 	if err != nil {
-		plog.Errorf("hasLeaseExpired error %v (endpoint %q)", err, lc.endpoint)
+		lc.logger.Warn(
+			"hasLeaseExpired failed",
+			zap.String("endpoint", lc.endpoint),
+			zap.Error(err),
+		)
 		return err
 	}
 	if leaseExpired != keysExpired {
@@ -204,7 +248,12 @@ func (lc *leaseChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (boo
 		} else {
 			return resp.TTL == -1, nil
 		}
-		plog.Warningf("hasLeaseExpired %v resp %v error %v (endpoint %q)", leaseID, resp, err, lc.endpoint)
+		lc.logger.Warn(
+			"hasLeaseExpired getLeaseByID failed",
+			zap.String("endpoint", lc.endpoint),
+			zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+			zap.Error(err),
+		)
 	}
 	return false, ctx.Err()
 }
@@ -218,7 +267,12 @@ func (lc *leaseChecker) hasKeysAttachedToLeaseExpired(ctx context.Context, lease
 		RangeEnd: []byte(clientv3.GetPrefixRangeEnd(fmt.Sprintf("%d", leaseID))),
 	}, grpc.FailFast(false))
 	if err != nil {
-		plog.Errorf("retrieving keys attached to lease %v error %v (endpoint %q)", leaseID, err, lc.endpoint)
+		lc.logger.Warn(
+			"hasKeysAttachedToLeaseExpired failed",
+			zap.String("endpoint", lc.endpoint),
+			zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+			zap.Error(err),
+		)
 		return false, err
 	}
 	return len(resp.Kvs) == 0, nil
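
Note: the Checker interface above is the tester's consistency-check extension point. Below is a hedged sketch (not part of this change set) of a hypothetical implementation that only verifies that all members report the same revision, reusing the hashAndRevGetter and getSameValue helpers from this package.

package tester

import "fmt"

// revOnlyChecker checks that every member reports the same revision,
// ignoring KV hashes. Purely illustrative.
type revOnlyChecker struct {
	hrg hashAndRevGetter
}

func (c *revOnlyChecker) Check() error {
	revs, _, err := c.hrg.getRevisionHash()
	if err != nil {
		return err
	}
	if !getSameValue(revs) {
		return fmt.Errorf("inconsistent revisions across members: %+v", revs)
	}
	return nil
}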

+ 728 - 0
tools/functional-tester/tester/cluster.go

@@ -0,0 +1,728 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"path/filepath"
+	"strings"
+	"time"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/debugutil"
+	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
+	"golang.org/x/time/rate"
+
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// Cluster defines tester cluster.
+type Cluster struct {
+	logger *zap.Logger
+
+	agentConns    []*grpc.ClientConn
+	agentClients  []rpcpb.TransportClient
+	agentStreams  []rpcpb.Transport_TransportClient
+	agentRequests []*rpcpb.Request
+
+	testerHTTPServer *http.Server
+
+	Members []*rpcpb.Member `yaml:"agent-configs"`
+	Tester  *rpcpb.Tester   `yaml:"tester-config"`
+
+	failures []Failure
+
+	rateLimiter *rate.Limiter
+	stresser    Stresser
+	checker     Checker
+
+	currentRevision int64
+	rd              int
+	cs              int
+}
+
+func newCluster(logger *zap.Logger, fpath string) (*Cluster, error) {
+	logger.Info("reading configuration file", zap.String("path", fpath))
+	bts, err := ioutil.ReadFile(fpath)
+	if err != nil {
+		return nil, err
+	}
+	logger.Info("opened configuration file", zap.String("path", fpath))
+
+	clus := &Cluster{logger: logger}
+	if err = yaml.Unmarshal(bts, clus); err != nil {
+		return nil, err
+	}
+
+	for i := range clus.Members {
+		if clus.Members[i].BaseDir == "" {
+			return nil, fmt.Errorf("Members[i].BaseDir cannot be empty (got %q)", clus.Members[i].BaseDir)
+		}
+		if clus.Members[i].EtcdLogPath == "" {
+			return nil, fmt.Errorf("Members[i].EtcdLogPath cannot be empty (got %q)", clus.Members[i].EtcdLogPath)
+		}
+
+		if clus.Members[i].Etcd.Name == "" {
+			return nil, fmt.Errorf("'--name' cannot be empty (got %+v)", clus.Members[i])
+		}
+		if clus.Members[i].Etcd.DataDir == "" {
+			return nil, fmt.Errorf("'--data-dir' cannot be empty (got %+v)", clus.Members[i])
+		}
+		if clus.Members[i].Etcd.SnapshotCount == 0 {
+			return nil, fmt.Errorf("'--snapshot-count' cannot be 0 (got %+v)", clus.Members[i].Etcd.SnapshotCount)
+		}
+		if clus.Members[i].Etcd.DataDir == "" {
+			return nil, fmt.Errorf("'--data-dir' cannot be empty (got %q)", clus.Members[i].Etcd.DataDir)
+		}
+		if clus.Members[i].Etcd.WALDir == "" {
+			clus.Members[i].Etcd.WALDir = filepath.Join(clus.Members[i].Etcd.DataDir, "member", "wal")
+		}
+
+		port := ""
+		listenClientPorts := make([]string, len(clus.Members))
+		for i, u := range clus.Members[i].Etcd.ListenClientURLs {
+			if !isValidURL(u) {
+				return nil, fmt.Errorf("'--listen-client-urls' has valid URL %q", u)
+			}
+			listenClientPorts[i], err = getPort(u)
+			if err != nil {
+				return nil, fmt.Errorf("'--listen-client-urls' has no port %q", u)
+			}
+		}
+		for i, u := range clus.Members[i].Etcd.AdvertiseClientURLs {
+			if !isValidURL(u) {
+				return nil, fmt.Errorf("'--advertise-client-urls' has valid URL %q", u)
+			}
+			port, err = getPort(u)
+			if err != nil {
+				return nil, fmt.Errorf("'--advertise-client-urls' has no port %q", u)
+			}
+			if clus.Members[i].EtcdClientProxy && listenClientPorts[i] == port {
+				return nil, fmt.Errorf("clus.Members[%d] requires client port proxy, but advertise port %q conflicts with listener port %q", i, port, listenClientPorts[i])
+			}
+		}
+
+		listenPeerPorts := make([]string, len(clus.Members))
+		for i, u := range clus.Members[i].Etcd.ListenPeerURLs {
+			if !isValidURL(u) {
+				return nil, fmt.Errorf("'--listen-peer-urls' has valid URL %q", u)
+			}
+			listenPeerPorts[i], err = getPort(u)
+			if err != nil {
+				return nil, fmt.Errorf("'--listen-peer-urls' has no port %q", u)
+			}
+		}
+		for i, u := range clus.Members[i].Etcd.InitialAdvertisePeerURLs {
+			if !isValidURL(u) {
+				return nil, fmt.Errorf("'--initial-advertise-peer-urls' has valid URL %q", u)
+			}
+			port, err = getPort(u)
+			if err != nil {
+				return nil, fmt.Errorf("'--initial-advertise-peer-urls' has no port %q", u)
+			}
+			if clus.Members[i].EtcdPeerProxy && listenPeerPorts[i] == port {
+				return nil, fmt.Errorf("clus.Members[%d] requires peer port proxy, but advertise port %q conflicts with listener port %q", i, port, listenPeerPorts[i])
+			}
+		}
+
+		if !strings.HasPrefix(clus.Members[i].EtcdLogPath, clus.Members[i].BaseDir) {
+			return nil, fmt.Errorf("EtcdLogPath must be prefixed with BaseDir (got %q)", clus.Members[i].EtcdLogPath)
+		}
+		if !strings.HasPrefix(clus.Members[i].Etcd.DataDir, clus.Members[i].BaseDir) {
+			return nil, fmt.Errorf("Etcd.DataDir must be prefixed with BaseDir (got %q)", clus.Members[i].Etcd.DataDir)
+		}
+
+		// TODO: support separate WALDir that can be handled via failure-archive
+		if !strings.HasPrefix(clus.Members[i].Etcd.WALDir, clus.Members[i].BaseDir) {
+			return nil, fmt.Errorf("Etcd.WALDir must be prefixed with BaseDir (got %q)", clus.Members[i].Etcd.WALDir)
+		}
+
+		if len(clus.Tester.FailureCases) == 0 {
+			return nil, errors.New("FailureCases not found")
+		}
+	}
+
+	for _, v := range clus.Tester.FailureCases {
+		if _, ok := rpcpb.FailureCase_value[v]; !ok {
+			return nil, fmt.Errorf("%q is not defined in 'rpcpb.FailureCase_value'", v)
+		}
+	}
+
+	for _, v := range clus.Tester.StressTypes {
+		if _, ok := rpcpb.StressType_value[v]; !ok {
+			return nil, fmt.Errorf("StressType is unknown; got %q", v)
+		}
+	}
+	if clus.Tester.StressKeySuffixRangeTxn > 100 {
+		return nil, fmt.Errorf("StressKeySuffixRangeTxn maximum value is 100, got %v", clus.Tester.StressKeySuffixRangeTxn)
+	}
+	if clus.Tester.StressKeyTxnOps > 64 {
+		return nil, fmt.Errorf("StressKeyTxnOps maximum value is 64, got %v", clus.Tester.StressKeyTxnOps)
+	}
+
+	return clus, err
+}
+
+// TODO: status handler
+
+var dialOpts = []grpc.DialOption{
+	grpc.WithInsecure(),
+	grpc.WithTimeout(5 * time.Second),
+	grpc.WithBlock(),
+}
+
+// NewCluster creates a tester cluster from a tester configuration file and connects to all agents.
+func NewCluster(logger *zap.Logger, fpath string) (*Cluster, error) {
+	clus, err := newCluster(logger, fpath)
+	if err != nil {
+		return nil, err
+	}
+
+	clus.agentConns = make([]*grpc.ClientConn, len(clus.Members))
+	clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
+	clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
+	clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
+	clus.failures = make([]Failure, 0)
+
+	for i, ap := range clus.Members {
+		logger.Info("connecting", zap.String("agent-address", ap.AgentAddr))
+		var err error
+		clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
+		if err != nil {
+			return nil, err
+		}
+		clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
+		logger.Info("connected", zap.String("agent-address", ap.AgentAddr))
+
+		logger.Info("creating stream", zap.String("agent-address", ap.AgentAddr))
+		clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
+		if err != nil {
+			return nil, err
+		}
+		logger.Info("created stream", zap.String("agent-address", ap.AgentAddr))
+	}
+
+	mux := http.NewServeMux()
+	mux.Handle("/metrics", promhttp.Handler())
+	if clus.Tester.EnablePprof {
+		for p, h := range debugutil.PProfHandlers() {
+			mux.Handle(p, h)
+		}
+	}
+	clus.testerHTTPServer = &http.Server{
+		Addr:    clus.Tester.TesterAddr,
+		Handler: mux,
+	}
+	go clus.serveTesterServer()
+
+	for _, cs := range clus.Tester.FailureCases {
+		switch cs {
+		case "KILL_ONE_FOLLOWER":
+			clus.failures = append(clus.failures, newFailureKillOne()) // TODO
+		case "KILL_LEADER":
+			clus.failures = append(clus.failures, newFailureKillLeader())
+		case "KILL_ONE_FOLLOWER_FOR_LONG":
+			clus.failures = append(clus.failures, newFailureKillOneForLongTime()) // TODO
+		case "KILL_LEADER_FOR_LONG":
+			clus.failures = append(clus.failures, newFailureKillLeaderForLongTime())
+		case "KILL_QUORUM":
+			clus.failures = append(clus.failures, newFailureKillQuorum())
+		case "KILL_ALL":
+			clus.failures = append(clus.failures, newFailureKillAll())
+		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
+			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOne()) // TODO
+		case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE":
+			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOne()) // TODO
+		case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
+			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxAll())
+		case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
+			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneMember()) // TODO
+		case "DELAY_PEER_PORT_TX_RX_LEADER":
+			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeader()) // TODO
+		case "DELAY_PEER_PORT_TX_RX_ALL":
+			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxAll()) // TODO
+		case "FAILPOINTS":
+			fpFailures, fperr := failpointFailures(clus)
+			if len(fpFailures) == 0 {
+				clus.logger.Info("no failpoints found!", zap.Error(fperr))
+			}
+			clus.failures = append(clus.failures, fpFailures...)
+		case "NO_FAIL":
+			clus.failures = append(clus.failures, newFailureNoOp())
+		case "EXTERNAL":
+			clus.failures = append(clus.failures, newFailureExternal(clus.Tester.ExternalExecPath))
+		default:
+			return nil, fmt.Errorf("unknown failure %q", cs)
+		}
+	}
+
+	clus.rateLimiter = rate.NewLimiter(
+		rate.Limit(int(clus.Tester.StressQPS)),
+		int(clus.Tester.StressQPS),
+	)
+	clus.updateStresserChecker()
+	return clus, nil
+}
+
+func (clus *Cluster) serveTesterServer() {
+	clus.logger.Info(
+		"started tester HTTP server",
+		zap.String("tester-address", clus.Tester.TesterAddr),
+	)
+	err := clus.testerHTTPServer.ListenAndServe()
+	clus.logger.Info(
+		"tester HTTP server returned",
+		zap.String("tester-address", clus.Tester.TesterAddr),
+		zap.Error(err),
+	)
+	if err != nil && err != http.ErrServerClosed {
+		clus.logger.Fatal("tester HTTP errored", zap.Error(err))
+	}
+}
+
+func (clus *Cluster) updateStresserChecker() {
+	clus.logger.Info(
+		"updating stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+
+	cs := &compositeStresser{}
+	for idx := range clus.Members {
+		cs.stressers = append(cs.stressers, newStresser(clus, idx))
+	}
+	clus.stresser = cs
+
+	if clus.Tester.ConsistencyCheck {
+		clus.checker = newHashChecker(clus.logger, hashAndRevGetter(clus))
+		if schk := cs.Checker(); schk != nil {
+			clus.checker = newCompositeChecker([]Checker{clus.checker, schk})
+		}
+	} else {
+		clus.checker = newNoChecker()
+	}
+
+	clus.logger.Info(
+		"updated stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+}
+
+func (clus *Cluster) startStresser() (err error) {
+	clus.logger.Info(
+		"starting stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	err = clus.stresser.Stress()
+	clus.logger.Info(
+		"started stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	return err
+}
+
+func (clus *Cluster) closeStresser() {
+	clus.logger.Info(
+		"closing stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	clus.stresser.Close()
+	clus.logger.Info(
+		"closed stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+}
+
+func (clus *Cluster) pauseStresser() {
+	clus.logger.Info(
+		"pausing stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	clus.stresser.Pause()
+	clus.logger.Info(
+		"paused stressers",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+}
+
+func (clus *Cluster) checkConsistency() (err error) {
+	defer func() {
+		if err != nil {
+			return
+		}
+		if err = clus.updateRevision(); err != nil {
+			clus.logger.Warn(
+				"updateRevision failed",
+				zap.Error(err),
+			)
+			return
+		}
+		err = clus.startStresser()
+	}()
+
+	clus.logger.Info(
+		"checking consistency and invariant of cluster",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+		zap.String("desc", clus.failures[clus.cs].Desc()),
+	)
+	if err = clus.checker.Check(); err != nil {
+		clus.logger.Warn(
+			"checker.Check failed",
+			zap.Error(err),
+		)
+		return err
+	}
+	clus.logger.Info(
+		"checked consistency and invariant of cluster",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+		zap.String("desc", clus.failures[clus.cs].Desc()),
+	)
+
+	return err
+}
+
+// Bootstrap bootstraps the etcd cluster for the very first time.
+// After this, just continue to call kill/restart.
+func (clus *Cluster) Bootstrap() error {
+	// this is the only time that creates request from scratch
+	return clus.broadcastOperation(rpcpb.Operation_InitialStartEtcd)
+}
+
+// FailArchive sends "FailArchive" operation.
+func (clus *Cluster) FailArchive() error {
+	return clus.broadcastOperation(rpcpb.Operation_FailArchive)
+}
+
+// Restart sends "Restart" operation.
+func (clus *Cluster) Restart() error {
+	return clus.broadcastOperation(rpcpb.Operation_RestartEtcd)
+}
+
+func (clus *Cluster) broadcastOperation(op rpcpb.Operation) error {
+	for i := range clus.agentStreams {
+		err := clus.sendOperation(i, op)
+		if err != nil {
+			if op == rpcpb.Operation_DestroyEtcdAgent &&
+				strings.Contains(err.Error(), "rpc error: code = Unavailable desc = transport is closing") {
+				// agent server has already closed;
+				// so this error is expected
+				clus.logger.Info(
+					"successfully destroyed",
+					zap.String("member", clus.Members[i].EtcdClientEndpoint),
+				)
+				continue
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+func (clus *Cluster) sendOperation(idx int, op rpcpb.Operation) error {
+	if op == rpcpb.Operation_InitialStartEtcd {
+		clus.agentRequests[idx] = &rpcpb.Request{
+			Operation: op,
+			Member:    clus.Members[idx],
+			Tester:    clus.Tester,
+		}
+	} else {
+		clus.agentRequests[idx].Operation = op
+	}
+
+	clus.logger.Info(
+		"sending request",
+		zap.String("operation", op.String()),
+		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
+	)
+	err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
+	clus.logger.Info(
+		"sent request",
+		zap.String("operation", op.String()),
+		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
+		zap.Error(err),
+	)
+	if err != nil {
+		return err
+	}
+
+	clus.logger.Info(
+		"receiving response",
+		zap.String("operation", op.String()),
+		zap.String("from", clus.Members[idx].EtcdClientEndpoint),
+	)
+	resp, err := clus.agentStreams[idx].Recv()
+	if resp != nil {
+		clus.logger.Info(
+			"received response",
+			zap.String("operation", op.String()),
+			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
+			zap.Bool("success", resp.Success),
+			zap.String("status", resp.Status),
+			zap.Error(err),
+		)
+	} else {
+		clus.logger.Info(
+			"received empty response",
+			zap.String("operation", op.String()),
+			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
+			zap.Error(err),
+		)
+	}
+	if err != nil {
+		return err
+	}
+
+	if !resp.Success {
+		err = errors.New(resp.Status)
+	}
+	return err
+}
+
+// DestroyEtcdAgents terminates all tester connections to agents and etcd servers.
+func (clus *Cluster) DestroyEtcdAgents() {
+	clus.logger.Info("destroying etcd servers and agents")
+	err := clus.broadcastOperation(rpcpb.Operation_DestroyEtcdAgent)
+	if err != nil {
+		clus.logger.Warn("failed to destroy etcd servers and agents", zap.Error(err))
+	} else {
+		clus.logger.Info("destroyed etcd servers and agents")
+	}
+
+	for i, conn := range clus.agentConns {
+		clus.logger.Info("closing connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr))
+		err := conn.Close()
+		clus.logger.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
+	}
+
+	// TODO: closing stresser connections to etcd
+
+	if clus.testerHTTPServer != nil {
+		clus.logger.Info("closing tester HTTP server", zap.String("tester-address", clus.Tester.TesterAddr))
+		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+		err := clus.testerHTTPServer.Shutdown(ctx)
+		cancel()
+		clus.logger.Info("closed tester HTTP server", zap.String("tester-address", clus.Tester.TesterAddr), zap.Error(err))
+	}
+}
+
+// WaitHealth ensures all members are healthy
+// by writing a test key to etcd cluster.
+func (clus *Cluster) WaitHealth() error {
+	var err error
+	// wait up to 60s for the cluster to become healthy.
+	// TODO: set it to a reasonable value. It is set that high because a
+	// follower may take a long time to catch up with the leader after a
+	// reboot under reasonable workload (https://github.com/coreos/etcd/issues/2698)
+	for i := 0; i < 60; i++ {
+		for _, m := range clus.Members {
+			clus.logger.Info(
+				"writing health key",
+				zap.Int("retries", i),
+				zap.String("endpoint", m.EtcdClientEndpoint),
+			)
+			if err = m.WriteHealthKey(); err != nil {
+				clus.logger.Warn(
+					"writing health key failed",
+					zap.Int("retries", i),
+					zap.String("endpoint", m.EtcdClientEndpoint),
+					zap.Error(err),
+				)
+				break
+			}
+			clus.logger.Info(
+				"successfully wrote health key",
+				zap.Int("retries", i),
+				zap.String("endpoint", m.EtcdClientEndpoint),
+			)
+		}
+		if err == nil {
+			clus.logger.Info(
+				"writing health key success on all members",
+				zap.Int("retries", i),
+			)
+			return nil
+		}
+		time.Sleep(time.Second)
+	}
+	return err
+}
+
+// GetLeader returns the index of leader and error if any.
+func (clus *Cluster) GetLeader() (int, error) {
+	for i, m := range clus.Members {
+		isLeader, err := m.IsLeader()
+		if isLeader || err != nil {
+			return i, err
+		}
+	}
+	return 0, fmt.Errorf("no leader found")
+}
+
+// maxRev returns the maximum revision found on the cluster.
+func (clus *Cluster) maxRev() (rev int64, err error) {
+	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+	defer cancel()
+	revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members))
+	for i := range clus.Members {
+		go func(m *rpcpb.Member) {
+			mrev, merr := m.Rev(ctx)
+			revc <- mrev
+			errc <- merr
+		}(clus.Members[i])
+	}
+	for i := 0; i < len(clus.Members); i++ {
+		if merr := <-errc; merr != nil {
+			err = merr
+		}
+		if mrev := <-revc; mrev > rev {
+			rev = mrev
+		}
+	}
+	return rev, err
+}
+
+func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
+	revs := make(map[string]int64)
+	hashes := make(map[string]int64)
+	for _, m := range clus.Members {
+		rev, hash, err := m.RevHash()
+		if err != nil {
+			return nil, nil, err
+		}
+		revs[m.EtcdClientEndpoint] = rev
+		hashes[m.EtcdClientEndpoint] = hash
+	}
+	return revs, hashes, nil
+}
+
+func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
+	if rev <= 0 {
+		return nil
+	}
+
+	for i, m := range clus.Members {
+		conn, derr := m.DialEtcdGRPCServer()
+		if derr != nil {
+			clus.logger.Warn(
+				"compactKV dial failed",
+				zap.String("endpoint", m.EtcdClientEndpoint),
+				zap.Error(derr),
+			)
+			err = derr
+			continue
+		}
+		kvc := pb.NewKVClient(conn)
+
+		clus.logger.Info(
+			"starting compaction",
+			zap.String("endpoint", m.EtcdClientEndpoint),
+			zap.Int64("revision", rev),
+			zap.Duration("timeout", timeout),
+		)
+
+		now := time.Now()
+		ctx, cancel := context.WithTimeout(context.Background(), timeout)
+		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
+		cancel()
+
+		conn.Close()
+		succeed := true
+		if cerr != nil {
+			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
+				clus.logger.Info(
+					"compact error is ignored",
+					zap.String("endpoint", m.EtcdClientEndpoint),
+					zap.Int64("revision", rev),
+					zap.Error(cerr),
+				)
+			} else {
+				clus.logger.Warn(
+					"compact failed",
+					zap.String("endpoint", m.EtcdClientEndpoint),
+					zap.Int64("revision", rev),
+					zap.Error(cerr),
+				)
+				err = cerr
+				succeed = false
+			}
+		}
+
+		if succeed {
+			clus.logger.Info(
+				"finished compaction",
+				zap.String("endpoint", m.EtcdClientEndpoint),
+				zap.Int64("revision", rev),
+				zap.Duration("timeout", timeout),
+				zap.Duration("took", time.Since(now)),
+			)
+		}
+	}
+	return err
+}
+
+func (clus *Cluster) checkCompact(rev int64) error {
+	if rev == 0 {
+		return nil
+	}
+	for _, m := range clus.Members {
+		if err := m.CheckCompact(rev); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (clus *Cluster) defrag() error {
+	clus.logger.Info(
+		"defragmenting",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	for _, m := range clus.Members {
+		if err := m.Defrag(); err != nil {
+			clus.logger.Warn(
+				"defrag failed",
+				zap.Int("round", clus.rd),
+				zap.Int("case", clus.cs),
+				zap.Error(err),
+			)
+			return err
+		}
+	}
+	clus.logger.Info(
+		"defragmented",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	return nil
+}
+
+func (clus *Cluster) Report() int64 { return clus.stresser.ModifiedKeys() }
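
Note: a hedged usage sketch of the Cluster API above. The config path mirrors docker-local-tester.sh; the actual cmd/etcd-tester entrypoint may differ.

package main

import (
	"go.uber.org/zap"

	"github.com/coreos/etcd/tools/functional-tester/tester"
)

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// NewCluster reads the YAML config and opens a gRPC stream to every agent.
	clus, err := tester.NewCluster(logger, "./local-test.yaml")
	if err != nil {
		logger.Fatal("failed to create cluster", zap.Error(err))
	}

	// Bootstrap starts etcd on each agent; WaitHealth blocks until a health
	// key can be written to every member.
	if err = clus.Bootstrap(); err == nil {
		err = clus.WaitHealth()
	}
	if err != nil {
		logger.Warn("bootstrap failed", zap.Error(err))
	} else {
		logger.Info("keys modified by stressers", zap.Int64("count", clus.Report()))
	}

	// DestroyEtcdAgents tears down etcd processes and closes agent connections.
	clus.DestroyEtcdAgents()
}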

+ 162 - 0
tools/functional-tester/tester/cluster_test.go

@@ -0,0 +1,162 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
+
+	"go.uber.org/zap"
+)
+
+func Test_newCluster(t *testing.T) {
+	exp := &Cluster{
+		Members: []*rpcpb.Member{
+			{
+				EtcdExecPath:       "./bin/etcd",
+				AgentAddr:          "127.0.0.1:19027",
+				FailpointHTTPAddr:  "http://127.0.0.1:7381",
+				BaseDir:            "/tmp/etcd-agent-data-1",
+				EtcdLogPath:        "/tmp/etcd-agent-data-1/current-etcd.log",
+				EtcdClientTLS:      false,
+				EtcdClientProxy:    false,
+				EtcdPeerProxy:      true,
+				EtcdClientEndpoint: "127.0.0.1:1379",
+				Etcd: &rpcpb.Etcd{
+					Name:                     "s1",
+					DataDir:                  "/tmp/etcd-agent-data-1/etcd.data",
+					WALDir:                   "/tmp/etcd-agent-data-1/etcd.data/member/wal",
+					ListenClientURLs:         []string{"http://127.0.0.1:1379"},
+					AdvertiseClientURLs:      []string{"http://127.0.0.1:1379"},
+					ListenPeerURLs:           []string{"http://127.0.0.1:1380"},
+					InitialAdvertisePeerURLs: []string{"http://127.0.0.1:13800"},
+					InitialCluster:           "s1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800",
+					InitialClusterState:      "new",
+					InitialClusterToken:      "tkn",
+					SnapshotCount:            10000,
+					QuotaBackendBytes:        10740000000,
+					PreVote:                  true,
+					InitialCorruptCheck:      true,
+				},
+			},
+			{
+				EtcdExecPath:       "./bin/etcd",
+				AgentAddr:          "127.0.0.1:29027",
+				FailpointHTTPAddr:  "http://127.0.0.1:7382",
+				BaseDir:            "/tmp/etcd-agent-data-2",
+				EtcdLogPath:        "/tmp/etcd-agent-data-2/current-etcd.log",
+				EtcdClientTLS:      false,
+				EtcdClientProxy:    false,
+				EtcdPeerProxy:      true,
+				EtcdClientEndpoint: "127.0.0.1:2379",
+				Etcd: &rpcpb.Etcd{
+					Name:                     "s2",
+					DataDir:                  "/tmp/etcd-agent-data-2/etcd.data",
+					WALDir:                   "/tmp/etcd-agent-data-2/etcd.data/member/wal",
+					ListenClientURLs:         []string{"http://127.0.0.1:2379"},
+					AdvertiseClientURLs:      []string{"http://127.0.0.1:2379"},
+					ListenPeerURLs:           []string{"http://127.0.0.1:2380"},
+					InitialAdvertisePeerURLs: []string{"http://127.0.0.1:23800"},
+					InitialCluster:           "s1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800",
+					InitialClusterState:      "new",
+					InitialClusterToken:      "tkn",
+					SnapshotCount:            10000,
+					QuotaBackendBytes:        10740000000,
+					PreVote:                  true,
+					InitialCorruptCheck:      true,
+				},
+			},
+			{
+				EtcdExecPath:       "./bin/etcd",
+				AgentAddr:          "127.0.0.1:39027",
+				FailpointHTTPAddr:  "http://127.0.0.1:7383",
+				BaseDir:            "/tmp/etcd-agent-data-3",
+				EtcdLogPath:        "/tmp/etcd-agent-data-3/current-etcd.log",
+				EtcdClientTLS:      false,
+				EtcdClientProxy:    false,
+				EtcdPeerProxy:      true,
+				EtcdClientEndpoint: "127.0.0.1:3379",
+				Etcd: &rpcpb.Etcd{
+					Name:                     "s3",
+					DataDir:                  "/tmp/etcd-agent-data-3/etcd.data",
+					WALDir:                   "/tmp/etcd-agent-data-3/etcd.data/member/wal",
+					ListenClientURLs:         []string{"http://127.0.0.1:3379"},
+					AdvertiseClientURLs:      []string{"http://127.0.0.1:3379"},
+					ListenPeerURLs:           []string{"http://127.0.0.1:3380"},
+					InitialAdvertisePeerURLs: []string{"http://127.0.0.1:33800"},
+					InitialCluster:           "s1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800",
+					InitialClusterState:      "new",
+					InitialClusterToken:      "tkn",
+					SnapshotCount:            10000,
+					QuotaBackendBytes:        10740000000,
+					PreVote:                  true,
+					InitialCorruptCheck:      true,
+				},
+			},
+		},
+		Tester: &rpcpb.Tester{
+			TesterNetwork:    "tcp",
+			TesterAddr:       "127.0.0.1:9028",
+			DelayLatencyMs:   500,
+			DelayLatencyMsRv: 50,
+			RoundLimit:       1,
+			ExitOnFailure:    true,
+			ConsistencyCheck: true,
+			EnablePprof:      true,
+			FailureCases: []string{
+				"KILL_ONE_FOLLOWER",
+				"KILL_LEADER",
+				"KILL_ONE_FOLLOWER_FOR_LONG",
+				"KILL_LEADER_FOR_LONG",
+				"KILL_QUORUM",
+				"KILL_ALL",
+				"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
+				"BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE",
+				"BLACKHOLE_PEER_PORT_TX_RX_ALL",
+				"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
+				"DELAY_PEER_PORT_TX_RX_LEADER",
+				"DELAY_PEER_PORT_TX_RX_ALL",
+			},
+			FailpointCommands:       []string{`panic("etcd-tester")`},
+			RunnerExecPath:          "/etcd-runner",
+			ExternalExecPath:        "",
+			StressTypes:             []string{"KV", "LEASE"},
+			StressKeySize:           100,
+			StressKeySizeLarge:      32769,
+			StressKeySuffixRange:    250000,
+			StressKeySuffixRangeTxn: 100,
+			StressKeyTxnOps:         10,
+			StressQPS:               1000,
+		},
+	}
+
+	logger, err := zap.NewProduction()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer logger.Sync()
+
+	cfg, err := newCluster(logger, "./local-test.yaml")
+	if err != nil {
+		t.Fatal(err)
+	}
+	cfg.logger = nil
+
+	if !reflect.DeepEqual(exp, cfg) {
+		t.Fatalf("expected %+v, got %+v", exp, cfg)
+	}
+}

+ 3 - 3
tools/functional-tester/etcd-agent/client/doc.go → tools/functional-tester/tester/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,5 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package client provides a client implementation to control an etcd-agent.
-package client
+// Package tester implements functional-tester tester server.
+package tester

+ 30 - 0
tools/functional-tester/tester/failure.go

@@ -0,0 +1,30 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+// Failure defines failure injection interface.
+// To add a fail case:
+//  1. implement "Failure" interface
+//  2. define fail case name in "rpcpb.FailureCase"
+type Failure interface {
+	// Inject injects the failure into the testing cluster at the given
+	// round. When calling the function, the cluster should be healthy.
+	Inject(clus *Cluster, round int) error
+	// Recover recovers the injected failure caused by the injection of the
+	// given round and waits for the recovery of the testing cluster.
+	Recover(clus *Cluster, round int) error
+	// Desc returns a description of the failure
+	Desc() string
+}
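
Note: a hedged sketch of step 1 above, using a hypothetical failure that forces a compaction as its injected fault (not part of this change set). Step 2 would additionally require a matching name in rpcpb.FailureCase and a case branch in NewCluster.

package tester

import "time"

// failureCompactExample is purely illustrative; the real failure cases
// live in the failure_case_*.go files below.
type failureCompactExample struct{}

func (f *failureCompactExample) Inject(clus *Cluster, round int) error {
	rev, err := clus.maxRev()
	if err != nil {
		return err
	}
	// Force a physical compaction on every member as the "fault".
	return clus.compactKV(rev, 30*time.Second)
}

func (f *failureCompactExample) Recover(clus *Cluster, round int) error {
	// Nothing to undo; just wait until every member accepts writes again.
	return clus.WaitHealth()
}

func (f *failureCompactExample) Desc() string {
	return "compact all members (example failure)"
}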

+ 49 - 0
tools/functional-tester/tester/failure_case_blackhole.go

@@ -0,0 +1,49 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import "github.com/coreos/etcd/tools/functional-tester/rpcpb"
+
+func injectBlackholePeerPortTxRx(clus *Cluster, idx int) error {
+	return clus.sendOperation(idx, rpcpb.Operation_BlackholePeerPortTxRx)
+}
+
+func recoverBlackholePeerPortTxRx(clus *Cluster, idx int) error {
+	return clus.sendOperation(idx, rpcpb.Operation_UnblackholePeerPortTxRx)
+}
+
+func newFailureBlackholePeerPortTxRxOne() Failure {
+	f := &failureOne{
+		description:   "blackhole peer port on one member",
+		injectMember:  injectBlackholePeerPortTxRx,
+		recoverMember: recoverBlackholePeerPortTxRx,
+	}
+	return &failureDelay{
+		Failure:       f,
+		delayDuration: triggerElectionDur,
+	}
+}
+
+func newFailureBlackholePeerPortTxRxAll() Failure {
+	f := &failureAll{
+		description:   "blackhole peer port on all members",
+		injectMember:  injectBlackholePeerPortTxRx,
+		recoverMember: recoverBlackholePeerPortTxRx,
+	}
+	return &failureDelay{
+		Failure:       f,
+		delayDuration: triggerElectionDur,
+	}
+}

+ 41 - 0
tools/functional-tester/tester/failure_case_delay.go

@@ -0,0 +1,41 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"time"
+
+	"go.uber.org/zap"
+)
+
+type failureDelay struct {
+	Failure
+	delayDuration time.Duration
+}
+
+func (f *failureDelay) Inject(clus *Cluster, round int) error {
+	if err := f.Failure.Inject(clus, round); err != nil {
+		return err
+	}
+	if f.delayDuration > 0 {
+		clus.logger.Info(
+			"sleeping in failureDelay",
+			zap.Duration("delay", f.delayDuration),
+			zap.String("case", f.Failure.Desc()),
+		)
+		time.Sleep(f.delayDuration)
+	}
+	return nil
+}

+ 44 - 0
tools/functional-tester/tester/failure_case_external.go

@@ -0,0 +1,44 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"fmt"
+	"os/exec"
+)
+
+type failureExternal struct {
+	Failure
+
+	description string
+	scriptPath  string
+}
+
+func (f *failureExternal) Inject(clus *Cluster, round int) error {
+	return exec.Command(f.scriptPath, "enable", fmt.Sprintf("%d", round)).Run()
+}
+
+func (f *failureExternal) Recover(clus *Cluster, round int) error {
+	return exec.Command(f.scriptPath, "disable", fmt.Sprintf("%d", round)).Run()
+}
+
+func (f *failureExternal) Desc() string { return f.description }
+
+func newFailureExternal(scriptPath string) Failure {
+	return &failureExternal{
+		description: fmt.Sprintf("external fault injector (script: %q)", scriptPath),
+		scriptPath:  scriptPath,
+	}
+}
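
Note: failureExternal shells out to Tester.ExternalExecPath with arguments "enable <round>" and "disable <round>". Below is a hedged Go sketch of what such an external injector could look like; it is entirely hypothetical and not shipped with this change.

package main

import (
	"fmt"
	"os"
)

func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: external-injector <enable|disable> <round>")
		os.Exit(1)
	}
	action, round := os.Args[1], os.Args[2]
	switch action {
	case "enable":
		// Inject the external fault here (e.g. trigger a VM-level network partition).
		fmt.Printf("enabling external fault for round %s\n", round)
	case "disable":
		// Recover from the external fault here.
		fmt.Printf("disabling external fault for round %s\n", round)
	default:
		fmt.Fprintln(os.Stderr, "unknown action:", action)
		os.Exit(1)
	}
}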

+ 20 - 21
tools/functional-tester/etcd-tester/failpoint.go → tools/functional-tester/tester/failure_case_failpoints.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
 import (
 	"fmt"
@@ -24,17 +24,16 @@ import (
 )
 
 type failpointStats struct {
+	mu sync.Mutex
 	// crashes counts the number of crashes for a failpoint
 	crashes map[string]int
-	// mu protects crashes
-	mu sync.Mutex
 }
 
 var fpStats failpointStats
 
-func failpointFailures(c *cluster, failpoints []string) (ret []failure, err error) {
+func failpointFailures(clus *Cluster) (ret []Failure, err error) {
 	var fps []string
-	fps, err = failpointPaths(c.Members[0].FailpointURL)
+	fps, err = failpointPaths(clus.Members[0].FailpointHTTPAddr)
 	if err != nil {
 		return nil, err
 	}
@@ -43,7 +42,7 @@ func failpointFailures(c *cluster, failpoints []string) (ret []failure, err erro
 		if len(fp) == 0 {
 			continue
 		}
-		fpFails := failuresFromFailpoint(fp, failpoints)
+		fpFails := failuresFromFailpoint(fp, clus.Tester.FailpointCommands)
 		// wrap in delays so failpoint has time to trigger
 		for i, fpf := range fpFails {
 			if strings.Contains(fp, "Snap") {
@@ -79,29 +78,29 @@ func failpointPaths(endpoint string) ([]string, error) {
 
 // failpoints follows FreeBSD KFAIL_POINT syntax.
 // e.g. panic("etcd-tester"),1*sleep(1000)->panic("etcd-tester")
-func failuresFromFailpoint(fp string, failpoints []string) (fs []failure) {
+func failuresFromFailpoint(fp string, failpointCommands []string) (fs []Failure) {
 	recov := makeRecoverFailpoint(fp)
-	for _, failpoint := range failpoints {
-		inject := makeInjectFailpoint(fp, failpoint)
-		fs = append(fs, []failure{
+	for _, fcmd := range failpointCommands {
+		inject := makeInjectFailpoint(fp, fcmd)
+		fs = append(fs, []Failure{
 			&failureOne{
-				description:   description(fmt.Sprintf("failpoint %s (one: %s)", fp, failpoint)),
+				description:   description(fmt.Sprintf("failpoint %s (one: %s)", fp, fcmd)),
 				injectMember:  inject,
 				recoverMember: recov,
 			},
 			&failureAll{
-				description:   description(fmt.Sprintf("failpoint %s (all: %s)", fp, failpoint)),
+				description:   description(fmt.Sprintf("failpoint %s (all: %s)", fp, fcmd)),
 				injectMember:  inject,
 				recoverMember: recov,
 			},
-			&failureMajority{
-				description:   description(fmt.Sprintf("failpoint %s (majority: %s)", fp, failpoint)),
+			&failureQuorum{
+				description:   description(fmt.Sprintf("failpoint %s (majority: %s)", fp, fcmd)),
 				injectMember:  inject,
 				recoverMember: recov,
 			},
 			&failureLeader{
 				failureByFunc{
-					description:   description(fmt.Sprintf("failpoint %s (leader: %s)", fp, failpoint)),
+					description:   description(fmt.Sprintf("failpoint %s (leader: %s)", fp, fcmd)),
 					injectMember:  inject,
 					recoverMember: recov,
 				},
@@ -113,21 +112,21 @@ func failuresFromFailpoint(fp string, failpoints []string) (fs []failure) {
 }
 
 func makeInjectFailpoint(fp, val string) injectMemberFunc {
-	return func(m *member) (err error) {
-		return putFailpoint(m.FailpointURL, fp, val)
+	return func(clus *Cluster, idx int) (err error) {
+		return putFailpoint(clus.Members[idx].FailpointHTTPAddr, fp, val)
 	}
 }
 
 func makeRecoverFailpoint(fp string) recoverMemberFunc {
-	return func(m *member) error {
-		if err := delFailpoint(m.FailpointURL, fp); err == nil {
+	return func(clus *Cluster, idx int) error {
+		if err := delFailpoint(clus.Members[idx].FailpointHTTPAddr, fp); err == nil {
 			return nil
 		}
 		// node not responding, likely dead from fp panic; restart
 		fpStats.mu.Lock()
 		fpStats.crashes[fp]++
 		fpStats.mu.Unlock()
-		return recoverStop(m)
+		return recoverKill(clus, idx)
 	}
 }
 

+ 210 - 0
tools/functional-tester/tester/failure_case_kill.go

@@ -0,0 +1,210 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
+)
+
+const snapshotCount = 10000
+
+func injectKill(clus *Cluster, idx int) error {
+	return clus.sendOperation(idx, rpcpb.Operation_KillEtcd)
+}
+
+func recoverKill(clus *Cluster, idx int) error {
+	return clus.sendOperation(idx, rpcpb.Operation_RestartEtcd)
+}
+
+func newFailureKillAll() Failure {
+	return &failureAll{
+		description:   "kill all members",
+		injectMember:  injectKill,
+		recoverMember: recoverKill,
+	}
+}
+
+func newFailureKillQuorum() Failure {
+	return &failureQuorum{
+		description:   "kill quorum of the cluster",
+		injectMember:  injectKill,
+		recoverMember: recoverKill,
+	}
+}
+
+func newFailureKillOne() Failure {
+	return &failureOne{
+		description:   "kill one random member",
+		injectMember:  injectKill,
+		recoverMember: recoverKill,
+	}
+}
+
+func newFailureKillLeader() Failure {
+	ff := failureByFunc{
+		description:   "kill leader member",
+		injectMember:  injectKill,
+		recoverMember: recoverKill,
+	}
+	return &failureLeader{ff, 0}
+}
+
+func newFailureKillOneForLongTime() Failure {
+	return &failureUntilSnapshot{newFailureKillOne()}
+}
+
+func newFailureKillLeaderForLongTime() Failure {
+	return &failureUntilSnapshot{newFailureKillLeader()}
+}
+
+type description string
+
+func (d description) Desc() string { return string(d) }
+
+type injectMemberFunc func(*Cluster, int) error
+type recoverMemberFunc func(*Cluster, int) error
+
+type failureByFunc struct {
+	description
+	injectMember  injectMemberFunc
+	recoverMember recoverMemberFunc
+}
+
+// TODO: support kill follower
+type failureOne failureByFunc
+type failureAll failureByFunc
+type failureQuorum failureByFunc
+
+type failureLeader struct {
+	failureByFunc
+	idx int
+}
+
+// failureUntilSnapshot injects a failure and waits for a snapshot event
+type failureUntilSnapshot struct{ Failure }
+
+func (f *failureOne) Inject(clus *Cluster, round int) error {
+	return f.injectMember(clus, round%len(clus.Members))
+}
+
+func (f *failureOne) Recover(clus *Cluster, round int) error {
+	if err := f.recoverMember(clus, round%len(clus.Members)); err != nil {
+		return err
+	}
+	return clus.WaitHealth()
+}
+
+func (f *failureAll) Inject(clus *Cluster, round int) error {
+	for i := range clus.Members {
+		if err := f.injectMember(clus, i); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (f *failureAll) Recover(clus *Cluster, round int) error {
+	for i := range clus.Members {
+		if err := f.recoverMember(clus, i); err != nil {
+			return err
+		}
+	}
+	return clus.WaitHealth()
+}
+
+func (f *failureQuorum) Inject(clus *Cluster, round int) error {
+	for i := range killMap(len(clus.Members), round) {
+		if err := f.injectMember(clus, i); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (f *failureQuorum) Recover(clus *Cluster, round int) error {
+	for i := range killMap(len(clus.Members), round) {
+		if err := f.recoverMember(clus, i); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (f *failureLeader) Inject(clus *Cluster, round int) error {
+	idx, err := clus.GetLeader()
+	if err != nil {
+		return err
+	}
+	f.idx = idx
+	return f.injectMember(clus, idx)
+}
+
+func (f *failureLeader) Recover(clus *Cluster, round int) error {
+	if err := f.recoverMember(clus, f.idx); err != nil {
+		return err
+	}
+	return clus.WaitHealth()
+}
+
+func (f *failureUntilSnapshot) Inject(clus *Cluster, round int) error {
+	if err := f.Failure.Inject(clus, round); err != nil {
+		return err
+	}
+	if len(clus.Members) < 3 {
+		return nil
+	}
+	// maxRev may fail right after the failure is injected; retry if it fails.
+	startRev, err := clus.maxRev()
+	for i := 0; i < 10 && startRev == 0; i++ {
+		startRev, err = clus.maxRev()
+	}
+	if startRev == 0 {
+		return err
+	}
+	lastRev := startRev
+	// A normal, healthy cluster can accept at least 1000 req/s.
+	// Give it three times that long to create a new snapshot.
+	retry := snapshotCount / 1000 * 3
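+	// e.g. with snapshotCount=10000 this allows up to 30 one-second polls below.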
+	for j := 0; j < retry; j++ {
+		lastRev, _ = clus.maxRev()
+		// If the number of proposals committed is bigger than snapshot count,
+		// a new snapshot should have been created.
+		if lastRev-startRev > snapshotCount {
+			return nil
+		}
+		time.Sleep(time.Second)
+	}
+	return fmt.Errorf("cluster too slow: only committed %d requests in %ds", lastRev-startRev, retry)
+}
+
+func (f *failureUntilSnapshot) Desc() string {
+	return f.Failure.Desc() + " for a long time and expect it to recover from an incoming snapshot"
+}
+
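+// killMap returns a pseudo-random, seed-deterministic set of member indexes
+// whose size is a quorum (size/2 + 1) of the cluster.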
+func killMap(size int, seed int) map[int]bool {
+	m := make(map[int]bool)
+	r := rand.New(rand.NewSource(int64(seed)))
+	majority := size/2 + 1
+	for {
+		m[r.Intn(size)] = true
+		if len(m) >= majority {
+			return m
+		}
+	}
+}
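
The TODO above notes that a follower-targeted kill is not supported yet. As a hedged sketch only (failureFollower and its index selection are hypothetical, not part of this change), such a case could reuse injectKill/recoverKill and the leader lookup already shown; the only new piece is picking an index other than the current leader, everything else mirrors failureOne:

	// Hypothetical follower-kill case; not part of this commit.
	// Assumes a multi-member cluster (len(clus.Members) > 1).
	type failureFollower struct {
		failureByFunc
		last int // member index killed by the latest Inject
	}

	func (f *failureFollower) Inject(clus *Cluster, round int) error {
		lead, err := clus.GetLeader()
		if err != nil {
			return err
		}
		// pick any member other than the current leader
		f.last = (lead + 1 + round%(len(clus.Members)-1)) % len(clus.Members)
		return f.injectMember(clus, f.last)
	}

	func (f *failureFollower) Recover(clus *Cluster, round int) error {
		if err := f.recoverMember(clus, f.last); err != nil {
			return err
		}
		return clus.WaitHealth()
	}

	func newFailureKillOneFollower() Failure {
		return &failureFollower{
			failureByFunc: failureByFunc{
				description:   "kill one random follower",
				injectMember:  injectKill,
				recoverMember: recoverKill,
			},
		}
	}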

+ 10 - 6
tools/functional-tester/etcd-runner/main.go → tools/functional-tester/tester/failure_case_no_op.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,11 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// etcd-runner is a command line application that performs tests on etcd.
-package main
+package tester
 
-import "github.com/coreos/etcd/tools/functional-tester/etcd-runner/command"
+type failureNoOp failureByFunc
 
-func main() {
-	command.Start()
+func (f *failureNoOp) Inject(clus *Cluster, round int) error  { return nil }
+func (f *failureNoOp) Recover(clus *Cluster, round int) error { return nil }
+
+func newFailureNoOp() Failure {
+	return &failureNoOp{
+		description: "no failure",
+	}
 }

+ 85 - 0
tools/functional-tester/tester/failure_case_slow_network.go

@@ -0,0 +1,85 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/coreos/etcd/tools/functional-tester/rpcpb"
+)
+
+const (
+	// TODO
+	slowNetworkLatency = 500 // 500 milliseconds
+
+	// delay duration to trigger leader election (default election timeout 1s)
+	triggerElectionDur = 5 * time.Second
+
+	// Wait longer after recovering from a slow network, because the network
+	// layer needs extra time to propagate the traffic control (tc command)
+	// change. Otherwise, we may get hash values that differ from the
+	// previous revision. For more detail, see https://github.com/coreos/etcd/issues/5121.
+	waitRecover = 5 * time.Second
+)
+
+func injectDelayPeerPortTxRx(clus *Cluster, idx int) error {
+	return clus.sendOperation(idx, rpcpb.Operation_DelayPeerPortTxRx)
+}
+
+func recoverDelayPeerPortTxRx(clus *Cluster, idx int) error {
+	err := clus.sendOperation(idx, rpcpb.Operation_UndelayPeerPortTxRx)
+	time.Sleep(waitRecover)
+	return err
+}
+
+func newFailureDelayPeerPortTxRxOneMember() Failure {
+	desc := fmt.Sprintf("delay one member's network by adding %d ms latency", slowNetworkLatency)
+	f := &failureOne{
+		description:   description(desc),
+		injectMember:  injectDelayPeerPortTxRx,
+		recoverMember: recoverDelayPeerPortTxRx,
+	}
+	return &failureDelay{
+		Failure:       f,
+		delayDuration: triggerElectionDur,
+	}
+}
+
+func newFailureDelayPeerPortTxRxLeader() Failure {
+	desc := fmt.Sprintf("delay leader's network by adding %d ms latency", slowNetworkLatency)
+	ff := failureByFunc{
+		description:   description(desc),
+		injectMember:  injectDelayPeerPortTxRx,
+		recoverMember: recoverDelayPeerPortTxRx,
+	}
+	f := &failureLeader{ff, 0}
+	return &failureDelay{
+		Failure:       f,
+		delayDuration: triggerElectionDur,
+	}
+}
+
+func newFailureDelayPeerPortTxRxAll() Failure {
+	f := &failureAll{
+		description:   "delay all members' network",
+		injectMember:  injectDelayPeerPortTxRx,
+		recoverMember: recoverDelayPeerPortTxRx,
+	}
+	return &failureDelay{
+		Failure:       f,
+		delayDuration: triggerElectionDur,
+	}
+}
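
failureDelay is referenced here but defined elsewhere in this package, outside this hunk. Presumably it wraps a Failure and keeps the injected delay in place for delayDuration before the tester proceeds; a rough sketch of that assumed shape (failureDelaySketch is illustrative only, not the committed type):

	// Assumed shape only; the real failureDelay is defined elsewhere in this package.
	type failureDelaySketch struct {
		Failure
		delayDuration time.Duration
	}

	func (f *failureDelaySketch) Inject(clus *Cluster, round int) error {
		if err := f.Failure.Inject(clus, round); err != nil {
			return err
		}
		// keep the slow network in place long enough (triggerElectionDur above)
		// to actually force a leader election before recovery starts
		time.Sleep(f.delayDuration)
		return nil
	}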

+ 126 - 0
tools/functional-tester/tester/local-test.yaml

@@ -0,0 +1,126 @@
+agent-configs:
+- etcd-exec-path: ./bin/etcd
+  agent-addr: 127.0.0.1:19027
+  failpoint-http-addr: http://127.0.0.1:7381
+  base-dir: /tmp/etcd-agent-data-1
+  etcd-log-path: /tmp/etcd-agent-data-1/current-etcd.log
+  etcd-client-tls: false
+  etcd-client-proxy: false
+  etcd-peer-proxy: true
+  etcd-client-endpoint: 127.0.0.1:1379
+  etcd-config:
+    name: s1
+    data-dir: /tmp/etcd-agent-data-1/etcd.data
+    wal-dir: /tmp/etcd-agent-data-1/etcd.data/member/wal
+    listen-client-urls: ["http://127.0.0.1:1379"]
+    advertise-client-urls: ["http://127.0.0.1:1379"]
+    listen-peer-urls: ["http://127.0.0.1:1380"]
+    initial-advertise-peer-urls: ["http://127.0.0.1:13800"]
+    initial-cluster: s1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800
+    initial-cluster-state: new
+    initial-cluster-token: tkn
+    snapshot-count: 10000
+    quota-backend-bytes: 10740000000 # 10 GiB
+    pre-vote: true
+    initial-corrupt-check: true
+- etcd-exec-path: ./bin/etcd
+  agent-addr: 127.0.0.1:29027
+  failpoint-http-addr: http://127.0.0.1:7382
+  base-dir: /tmp/etcd-agent-data-2
+  etcd-log-path: /tmp/etcd-agent-data-2/current-etcd.log
+  etcd-client-tls: false
+  etcd-client-proxy: false
+  etcd-peer-proxy: true
+  etcd-client-endpoint: 127.0.0.1:2379
+  etcd-config:
+    name: s2
+    data-dir: /tmp/etcd-agent-data-2/etcd.data
+    wal-dir: /tmp/etcd-agent-data-2/etcd.data/member/wal
+    listen-client-urls: ["http://127.0.0.1:2379"]
+    advertise-client-urls: ["http://127.0.0.1:2379"]
+    listen-peer-urls: ["http://127.0.0.1:2380"]
+    initial-advertise-peer-urls: ["http://127.0.0.1:23800"]
+    initial-cluster: s1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800
+    initial-cluster-state: new
+    initial-cluster-token: tkn
+    snapshot-count: 10000
+    quota-backend-bytes: 10740000000 # 10 GiB
+    pre-vote: true
+    initial-corrupt-check: true
+- etcd-exec-path: ./bin/etcd
+  agent-addr: 127.0.0.1:39027
+  failpoint-http-addr: http://127.0.0.1:7383
+  base-dir: /tmp/etcd-agent-data-3
+  etcd-log-path: /tmp/etcd-agent-data-3/current-etcd.log
+  etcd-client-tls: false
+  etcd-client-proxy: false
+  etcd-peer-proxy: true
+  etcd-client-endpoint: 127.0.0.1:3379
+  etcd-config:
+    name: s3
+    data-dir: /tmp/etcd-agent-data-3/etcd.data
+    wal-dir: /tmp/etcd-agent-data-3/etcd.data/member/wal
+    listen-client-urls: ["http://127.0.0.1:3379"]
+    advertise-client-urls: ["http://127.0.0.1:3379"]
+    listen-peer-urls: ["http://127.0.0.1:3380"]
+    initial-advertise-peer-urls: ["http://127.0.0.1:33800"]
+    initial-cluster: s1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800
+    initial-cluster-state: new
+    initial-cluster-token: tkn
+    snapshot-count: 10000
+    quota-backend-bytes: 10740000000 # 10 GiB
+    pre-vote: true
+    initial-corrupt-check: true
+
+tester-config:
+  tester-network: tcp
+  tester-addr: 127.0.0.1:9028
+
+  delay-latency-ms: 500
+  delay-latency-ms-rv: 50
+
+  round-limit: 1
+  exit-on-failure: true
+  consistency-check: true
+  enable-pprof: true
+
+  failure-cases:
+  - KILL_ONE_FOLLOWER
+  - KILL_LEADER
+  - KILL_ONE_FOLLOWER_FOR_LONG
+  - KILL_LEADER_FOR_LONG
+  - KILL_QUORUM
+  - KILL_ALL
+  - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER
+  - BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE
+  - BLACKHOLE_PEER_PORT_TX_RX_ALL
+  - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
+  - DELAY_PEER_PORT_TX_RX_LEADER
+  - DELAY_PEER_PORT_TX_RX_ALL
+
+  # TODO: shuffle
+  # fail-shuffle: true
+
+  failpoint-commands:
+  - panic("etcd-tester")
+  # failpoint-commands:
+  # - panic("etcd-tester"),1*sleep(1000)
+
+  runner-exec-path: /etcd-runner
+  external-exec-path: ""
+
+  stress-types:
+  - KV
+  - LEASE
+  # - NO_STRESS
+  # - ELECTION_RUNNER
+  # - WATCH_RUNNER
+  # - LOCK_RACER_RUNNER
+  # - LEASE_RUNNER
+
+  stress-key-size: 100
+  stress-key-size-large: 32769
+  stress-key-suffix-range: 250000
+  stress-key-suffix-range-txn: 100
+  stress-key-txn-ops: 10
+  stress-qps: 1000
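
The tester loads this file at startup; the real reader lives in tester/cluster.go and unmarshals into the generated rpcpb types. Purely as a hedged illustration of the mechanics, using gopkg.in/yaml.v2 and made-up struct names rather than the actual ones:

	package main

	import (
		"fmt"
		"io/ioutil"
		"log"

		yaml "gopkg.in/yaml.v2"
	)

	// localConfig mirrors only a few fields of the file above; the names here
	// are illustrative, not the generated rpcpb types the tester actually uses.
	type localConfig struct {
		AgentConfigs []struct {
			AgentAddr          string `yaml:"agent-addr"`
			EtcdClientEndpoint string `yaml:"etcd-client-endpoint"`
		} `yaml:"agent-configs"`
		TesterConfig struct {
			TesterAddr   string   `yaml:"tester-addr"`
			FailureCases []string `yaml:"failure-cases"`
		} `yaml:"tester-config"`
	}

	func main() {
		d, err := ioutil.ReadFile("local-test.yaml")
		if err != nil {
			log.Fatal(err)
		}
		var cfg localConfig
		if err = yaml.Unmarshal(d, &cfg); err != nil {
			log.Fatal(err)
		}
		for _, a := range cfg.AgentConfigs {
			fmt.Println("agent:", a.AgentAddr, "client endpoint:", a.EtcdClientEndpoint)
		}
		fmt.Println("tester:", cfg.TesterConfig.TesterAddr, "cases:", len(cfg.TesterConfig.FailureCases))
	}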

+ 3 - 5
tools/functional-tester/etcd-tester/metrics.go → tools/functional-tester/tester/metrics.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,11 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
-import (
-	"github.com/prometheus/client_golang/prometheus"
-)
+import "github.com/prometheus/client_golang/prometheus"
 
 var (
 	caseTotalCounter = prometheus.NewCounterVec(

+ 202 - 0
tools/functional-tester/tester/stress.go

@@ -0,0 +1,202 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+type Stresser interface {
+	// Stress starts stressing the etcd cluster.
+	Stress() error
+	// Pause stops the stresser from sending requests to etcd. Resume by calling Stress.
+	Pause()
+	// Close releases all of the Stresser's resources.
+	Close()
+	// ModifiedKeys reports the number of keys created and deleted by the stresser.
+	ModifiedKeys() int64
+	// Checker returns an invariant checker to run after the stresser is canceled.
+	Checker() Checker
+}
+
+// nopStresser implements a Stresser that does nothing.
+type nopStresser struct {
+	start time.Time
+	qps   int
+}
+
+func (s *nopStresser) Stress() error { return nil }
+func (s *nopStresser) Pause()        {}
+func (s *nopStresser) Close()        {}
+func (s *nopStresser) ModifiedKeys() int64 {
+	return 0
+}
+func (s *nopStresser) Checker() Checker { return nil }
+
+// compositeStresser implements a Stresser that runs a slice of
+// stressing clients concurrently.
+type compositeStresser struct {
+	stressers []Stresser
+}
+
+func (cs *compositeStresser) Stress() error {
+	for i, s := range cs.stressers {
+		if err := s.Stress(); err != nil {
+			for j := 0; j < i; j++ {
+				cs.stressers[j].Close() // close only the stressers already started
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+func (cs *compositeStresser) Pause() {
+	var wg sync.WaitGroup
+	wg.Add(len(cs.stressers))
+	for i := range cs.stressers {
+		go func(s Stresser) {
+			defer wg.Done()
+			s.Pause()
+		}(cs.stressers[i])
+	}
+	wg.Wait()
+}
+
+func (cs *compositeStresser) Close() {
+	var wg sync.WaitGroup
+	wg.Add(len(cs.stressers))
+	for i := range cs.stressers {
+		go func(s Stresser) {
+			defer wg.Done()
+			s.Close()
+		}(cs.stressers[i])
+	}
+	wg.Wait()
+}
+
+func (cs *compositeStresser) ModifiedKeys() (modifiedKey int64) {
+	for _, stress := range cs.stressers {
+		modifiedKey += stress.ModifiedKeys()
+	}
+	return modifiedKey
+}
+
+func (cs *compositeStresser) Checker() Checker {
+	var chks []Checker
+	for _, s := range cs.stressers {
+		if chk := s.Checker(); chk != nil {
+			chks = append(chks, chk)
+		}
+	}
+	if len(chks) == 0 {
+		return nil
+	}
+	return newCompositeChecker(chks)
+}
+
+// newStresser creates a composite stresser from the configured stress types.
+func newStresser(clus *Cluster, idx int) Stresser {
+	stressers := make([]Stresser, len(clus.Tester.StressTypes))
+	for i, stype := range clus.Tester.StressTypes {
+		clus.logger.Info("creating stresser", zap.String("type", stype))
+
+		switch stype {
+		case "NO_STRESS":
+			stressers[i] = &nopStresser{start: time.Now(), qps: int(clus.rateLimiter.Limit())}
+
+		case "KV":
+			// TODO: Overly intensive stress clients can make an etcd member panic with
+			// an 'out of memory' error. Add rate limits on the server side.
+			stressers[i] = &keyStresser{
+				logger:            clus.logger,
+				Endpoint:          clus.Members[idx].EtcdClientEndpoint,
+				keySize:           int(clus.Tester.StressKeySize),
+				keyLargeSize:      int(clus.Tester.StressKeySizeLarge),
+				keySuffixRange:    int(clus.Tester.StressKeySuffixRange),
+				keyTxnSuffixRange: int(clus.Tester.StressKeySuffixRangeTxn),
+				keyTxnOps:         int(clus.Tester.StressKeyTxnOps),
+				N:                 100,
+				rateLimiter:       clus.rateLimiter,
+			}
+
+		case "LEASE":
+			stressers[i] = &leaseStresser{
+				logger:       clus.logger,
+				endpoint:     clus.Members[idx].EtcdClientEndpoint,
+				numLeases:    10, // TODO: configurable
+				keysPerLease: 10, // TODO: configurable
+				rateLimiter:  clus.rateLimiter,
+			}
+
+		case "ELECTION_RUNNER":
+			reqRate := 100
+			args := []string{
+				"election",
+				fmt.Sprintf("%v", time.Now().UnixNano()), // election name as current nano time
+				"--dial-timeout=10s",
+				"--endpoints", clus.Members[idx].EtcdClientEndpoint,
+				"--total-client-connections=10",
+				"--rounds=0", // runs forever
+				"--req-rate", fmt.Sprintf("%v", reqRate),
+			}
+			stressers[i] = newRunnerStresser(
+				clus.Tester.RunnerExecPath,
+				args,
+				clus.rateLimiter,
+				reqRate,
+			)
+
+		case "WATCH_RUNNER":
+			reqRate := 100
+			args := []string{
+				"watcher",
+				"--prefix", fmt.Sprintf("%v", time.Now().UnixNano()), // prefix all keys with nano time
+				"--total-keys=1",
+				"--total-prefixes=1",
+				"--watch-per-prefix=1",
+				"--endpoints", clus.Members[idx].EtcdClientEndpoint,
+				"--rounds=0", // runs forever
+				"--req-rate", fmt.Sprintf("%v", reqRate),
+			}
+			stressers[i] = newRunnerStresser(clus.Tester.RunnerExecPath, args, clus.rateLimiter, reqRate)
+
+		case "LOCK_RACER_RUNNER":
+			reqRate := 100
+			args := []string{
+				"lock-racer",
+				fmt.Sprintf("%v", time.Now().UnixNano()), // locker name as current nano time
+				"--endpoints", clus.Members[idx].EtcdClientEndpoint,
+				"--total-client-connections=10",
+				"--rounds=0", // runs forever
+				"--req-rate", fmt.Sprintf("%v", reqRate),
+			}
+			stressers[i] = newRunnerStresser(clus.Tester.RunnerExecPath, args, clus.rateLimiter, reqRate)
+
+		case "LEASE_RUNNER":
+			args := []string{
+				"lease-renewer",
+				"--ttl=30",
+				"--endpoints", clus.Members[idx].EtcdClientEndpoint,
+			}
+			stressers[i] = newRunnerStresser(clus.Tester.RunnerExecPath, args, clus.rateLimiter, 0)
+		}
+	}
+	return &compositeStresser{stressers}
+}
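
The Stresser interface above is the only contract the tester depends on, so new load generators slot into newStresser by implementing it. A minimal in-package sketch, with a hypothetical countingStresser that exists only to show the contract (assumes the standard sync/atomic import):

	// countingStresser is a hypothetical Stresser that generates no load;
	// it only records how many times Stress was called.
	type countingStresser struct{ calls int64 }

	func (s *countingStresser) Stress() error { atomic.AddInt64(&s.calls, 1); return nil }
	func (s *countingStresser) Pause()        {}
	func (s *countingStresser) Close()        {}

	// ModifiedKeys is zero because this stresser never writes keys.
	func (s *countingStresser) ModifiedKeys() int64 { return 0 }

	// Checker is nil because there is no invariant to verify afterwards.
	func (s *countingStresser) Checker() Checker { return nil }

	// compile-time check that the contract is satisfied
	var _ Stresser = (*countingStresser)(nil)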

+ 20 - 7
tools/functional-tester/etcd-tester/key_stresser.go → tools/functional-tester/tester/stress_key.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
 import (
 	"context"
@@ -26,16 +26,19 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
+	"go.uber.org/zap"
 	"golang.org/x/time/rate"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/transport"
 )
 
 type keyStresser struct {
-	Endpoint string
+	logger *zap.Logger
+
+	Endpoint string // TODO: use Member
 
-	keyLargeSize      int
 	keySize           int
+	keyLargeSize      int
 	keySuffixRange    int
 	keyTxnSuffixRange int
 	keyTxnOps         int
@@ -93,7 +96,10 @@ func (s *keyStresser) Stress() error {
 		go s.run(ctx)
 	}
 
-	plog.Infof("keyStresser %q is started", s.Endpoint)
+	s.logger.Info(
+		"key stresser started in background",
+		zap.String("endpoint", s.Endpoint),
+	)
 	return nil
 }
 
@@ -144,7 +150,11 @@ func (s *keyStresser) run(ctx context.Context) {
 			// from stresser.Cancel method:
 			return
 		default:
-			plog.Errorf("keyStresser %v exited with error (%v)", s.Endpoint, err)
+			s.logger.Warn(
+				"key stresser exited with error",
+				zap.String("endpoint", s.Endpoint),
+				zap.Error(err),
+			)
 			return
 		}
 	}
@@ -158,8 +168,11 @@ func (s *keyStresser) Close() {
 	s.cancel()
 	s.conn.Close()
 	s.wg.Wait()
-	plog.Infof("keyStresser %q is closed", s.Endpoint)
 
+	s.logger.Info(
+		"key stresser is closed",
+		zap.String("endpoint", s.Endpoint),
+	)
 }
 
 func (s *keyStresser) ModifiedKeys() int64 {

+ 128 - 25
tools/functional-tester/etcd-tester/lease_stresser.go → tools/functional-tester/tester/stress_lease.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
 import (
 	"context"
@@ -25,6 +25,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
+	"go.uber.org/zap"
 	"golang.org/x/time/rate"
 	"google.golang.org/grpc"
 )
@@ -36,6 +37,8 @@ const (
 )
 
 type leaseStresser struct {
+	logger *zap.Logger
+
 	endpoint string
 	cancel   func()
 	conn     *grpc.ClientConn
@@ -119,7 +122,11 @@ func (ls *leaseStresser) setupOnce() error {
 }
 
 func (ls *leaseStresser) Stress() error {
-	plog.Infof("lease Stresser %v starting ...", ls.endpoint)
+	ls.logger.Info(
+		"lease stresser is started",
+		zap.String("endpoint", ls.endpoint),
+	)
+
 	if err := ls.setupOnce(); err != nil {
 		return err
 	}
@@ -153,12 +160,26 @@ func (ls *leaseStresser) run() {
 		if err == context.Canceled {
 			return
 		}
-		plog.Debugf("creating lease on %v", ls.endpoint)
+
+		ls.logger.Debug(
+			"lease stresser is creating leases",
+			zap.String("endpoint", ls.endpoint),
+		)
 		ls.createLeases()
-		plog.Debugf("done creating lease on %v", ls.endpoint)
-		plog.Debugf("dropping lease on %v", ls.endpoint)
+		ls.logger.Debug(
+			"lease stresser created leases",
+			zap.String("endpoint", ls.endpoint),
+		)
+
+		ls.logger.Debug(
+			"lease stresser is dropping leases",
+			zap.String("endpoint", ls.endpoint),
+		)
 		ls.randomlyDropLeases()
-		plog.Debugf("done dropping lease on %v", ls.endpoint)
+		ls.logger.Debug(
+			"lease stresser dropped leases",
+			zap.String("endpoint", ls.endpoint),
+		)
 	}
 }
 
@@ -185,7 +206,11 @@ func (ls *leaseStresser) createAliveLeases() {
 			defer wg.Done()
 			leaseID, err := ls.createLeaseWithKeys(TTL)
 			if err != nil {
-				plog.Debugf("lease creation error: (%v)", err)
+				ls.logger.Debug(
+					"createLeaseWithKeys failed",
+					zap.String("endpoint", ls.endpoint),
+					zap.Error(err),
+				)
 				return
 			}
 			ls.aliveLeases.add(leaseID, time.Now())
@@ -219,10 +244,19 @@ func (ls *leaseStresser) createShortLivedLeases() {
 func (ls *leaseStresser) createLeaseWithKeys(ttl int64) (int64, error) {
 	leaseID, err := ls.createLease(ttl)
 	if err != nil {
-		plog.Debugf("lease creation error: (%v)", err)
+		ls.logger.Debug(
+			"createLease failed",
+			zap.String("endpoint", ls.endpoint),
+			zap.Error(err),
+		)
 		return -1, err
 	}
-	plog.Debugf("lease %v created ", leaseID)
+
+	ls.logger.Debug(
+		"createLease created lease",
+		zap.String("endpoint", ls.endpoint),
+		zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+	)
 	if err := ls.attachKeysWithLease(leaseID); err != nil {
 		return -1, err
 	}
@@ -239,14 +273,23 @@ func (ls *leaseStresser) randomlyDropLeases() {
 			// if randomlyDropLease encountered an error such as context is cancelled, remove the lease from aliveLeases
 			// because we can't tell whether the lease is dropped or not.
 			if err != nil {
-				plog.Debugf("drop lease %v has failed error (%v)", leaseID, err)
+				ls.logger.Debug(
+					"randomlyDropLease failed",
+					zap.String("endpoint", ls.endpoint),
+					zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+					zap.Error(err),
+				)
 				ls.aliveLeases.remove(leaseID)
 				return
 			}
 			if !dropped {
 				return
 			}
-			plog.Debugf("lease %v dropped", leaseID)
+			ls.logger.Debug(
+				"randomlyDropLease dropped a lease",
+				zap.String("endpoint", ls.endpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+			)
 			ls.revokedLeases.add(leaseID, time.Now())
 			ls.aliveLeases.remove(leaseID)
 		}(l)
@@ -271,7 +314,12 @@ func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
 		select {
 		case <-time.After(500 * time.Millisecond):
 		case <-ls.ctx.Done():
-			plog.Debugf("keepLeaseAlive lease %v context canceled ", leaseID)
+			ls.logger.Debug(
+				"keepLeaseAlive context canceled",
+				zap.String("endpoint", ls.endpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Error(ls.ctx.Err()),
+			)
 			// it is possible that a lease expires at the invariant checking phase but not at the keepLeaseAlive() phase.
 			// this scenario can happen when an alive lease is just about to expire when keepLeaseAlive() exits, and it then expires at the invariant checking phase.
 			// to circumvent that scenario, we check each lease before the keepalive loop exits to see if it has been renewed in the last TTL/2 duration.
@@ -280,41 +328,78 @@ func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
 			renewTime, ok := ls.aliveLeases.read(leaseID)
 			if ok && renewTime.Add(TTL/2*time.Second).Before(time.Now()) {
 				ls.aliveLeases.remove(leaseID)
-				plog.Debugf("keepLeaseAlive lease %v has not been renewed. drop it.", leaseID)
+				ls.logger.Debug(
+					"keepLeaseAlive lease has not been renewed; dropped it",
+					zap.String("endpoint", ls.endpoint),
+					zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				)
 			}
 			return
 		}
 
 		if err != nil {
-			plog.Debugf("keepLeaseAlive lease %v creates stream error: (%v)", leaseID, err)
+			ls.logger.Debug(
+				"keepLeaseAlive failed to create lease keepalive stream",
+				zap.String("endpoint", ls.endpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Error(err),
+			)
 			cancel()
 			ctx, cancel = context.WithCancel(ls.ctx)
 			stream, err = ls.lc.LeaseKeepAlive(ctx)
 			cancel()
 			continue
 		}
+
+		ls.logger.Debug(
+			"keepLeaseAlive stream sends lease keepalive request",
+			zap.String("endpoint", ls.endpoint),
+			zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+		)
 		err = stream.Send(&pb.LeaseKeepAliveRequest{ID: leaseID})
-		plog.Debugf("keepLeaseAlive stream sends lease %v keepalive request", leaseID)
 		if err != nil {
-			plog.Debugf("keepLeaseAlive stream sends lease %v error (%v)", leaseID, err)
+			ls.logger.Debug(
+				"keepLeaseAlive stream failed to send lease keepalive request",
+				zap.String("endpoint", ls.endpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Error(err),
+			)
 			continue
 		}
 		leaseRenewTime := time.Now()
-		plog.Debugf("keepLeaseAlive stream sends lease %v keepalive request succeed", leaseID)
+		ls.logger.Debug(
+			"keepLeaseAlive stream sent lease keepalive request",
+			zap.String("endpoint", ls.endpoint),
+			zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+		)
 		respRC, err := stream.Recv()
 		if err != nil {
-			plog.Debugf("keepLeaseAlive stream receives lease %v stream error (%v)", leaseID, err)
+			ls.logger.Debug(
+				"keepLeaseAlive stream failed to receive lease keepalive response",
+				zap.String("endpoint", ls.endpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Error(err),
+			)
 			continue
 		}
 		// lease expires after TTL become 0
 		// don't send keepalive if the lease has expired
 		if respRC.TTL <= 0 {
-			plog.Debugf("keepLeaseAlive stream receives lease %v has TTL <= 0", leaseID)
+			ls.logger.Debug(
+				"keepLeaseAlive stream received lease keepalive response TTL <= 0",
+				zap.String("endpoint", ls.endpoint),
+				zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+				zap.Int64("ttl", respRC.TTL),
+			)
 			ls.aliveLeases.remove(leaseID)
 			return
 		}
 		// renew lease timestamp only if lease is present
-		plog.Debugf("keepLeaseAlive renew lease %v", leaseID)
+		ls.logger.Debug(
+			"keepLeaseAlive renewed a lease",
+			zap.String("endpoint", ls.endpoint),
+			zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+		)
 		ls.aliveLeases.update(leaseID, leaseRenewTime)
 	}
 }
@@ -358,7 +443,13 @@ func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) {
 			return true, nil
 		}
 	}
-	plog.Debugf("randomlyDropLease error: (%v)", ls.ctx.Err())
+
+	ls.logger.Debug(
+		"randomlyDropLease error",
+		zap.String("endpoint", ls.endpoint),
+		zap.String("lease-id", fmt.Sprintf("%016x", leaseID)),
+		zap.Error(ls.ctx.Err()),
+	)
 	return false, ls.ctx.Err()
 }
 
@@ -367,16 +458,28 @@ func (ls *leaseStresser) Pause() {
 }
 
 func (ls *leaseStresser) Close() {
-	plog.Debugf("lease stresser %q is closing...", ls.endpoint)
+	ls.logger.Info(
+		"lease stresser is closing",
+		zap.String("endpoint", ls.endpoint),
+	)
 	ls.cancel()
 	ls.runWg.Wait()
 	ls.aliveWg.Wait()
 	ls.conn.Close()
-	plog.Infof("lease stresser %q is closed", ls.endpoint)
+	ls.logger.Info(
+		"lease stresser is closed",
+		zap.String("endpoint", ls.endpoint),
+	)
 }
 
 func (ls *leaseStresser) ModifiedKeys() int64 {
 	return atomic.LoadInt64(&ls.atomicModifiedKey)
 }
 
-func (ls *leaseStresser) Checker() Checker { return &leaseChecker{endpoint: ls.endpoint, ls: ls} }
+func (ls *leaseStresser) Checker() Checker {
+	return &leaseChecker{
+		logger:   ls.logger,
+		endpoint: ls.endpoint,
+		ls:       ls,
+	}
+}

+ 2 - 2
tools/functional-tester/etcd-tester/etcd_runner_stresser.go → tools/functional-tester/tester/stress_runner.go

@@ -1,4 +1,4 @@
-// Copyright 2017 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
 import (
 	"fmt"

+ 274 - 0
tools/functional-tester/tester/tester.go

@@ -0,0 +1,274 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+// compactQPS is a rough lower bound on compaction throughput, in entries per second.
+// Previous tests showed etcd can compact about 60,000 entries per second.
+const compactQPS = 50000
+
+// StartTester starts tester.
+func (clus *Cluster) StartTester() {
+	// TODO: update status
+	clus.startStresser()
+
+	var preModifiedKey int64
+	for round := 0; round < int(clus.Tester.RoundLimit) || clus.Tester.RoundLimit == -1; round++ {
+		roundTotalCounter.Inc()
+
+		clus.rd = round
+		if err := clus.doRound(round); err != nil {
+			clus.logger.Warn(
+				"doRound failed; returning",
+				zap.Int("round", clus.rd),
+				zap.Int("case", clus.cs),
+				zap.Error(err),
+			)
+			if clus.cleanup() != nil {
+				return
+			}
+			// reset preModifiedKey after clean up
+			preModifiedKey = 0
+			continue
+		}
+		// -1 so that logPrefix doesn't print out 'case'
+		clus.cs = -1
+
+		revToCompact := max(0, clus.currentRevision-10000)
+		currentModifiedKey := clus.stresser.ModifiedKeys()
+		modifiedKey := currentModifiedKey - preModifiedKey
+		preModifiedKey = currentModifiedKey
+		timeout := 10 * time.Second
+		timeout += time.Duration(modifiedKey/compactQPS) * time.Second
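+		// e.g. 5,000,000 modified keys since the last compaction gives 10s + 5,000,000/50,000 s = 110s.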
+		clus.logger.Info(
+			"compacting",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.Duration("timeout", timeout),
+		)
+		if err := clus.compact(revToCompact, timeout); err != nil {
+			clus.logger.Warn(
+				"compact failed",
+				zap.Int("round", clus.rd),
+				zap.Int("case", clus.cs),
+				zap.Error(err),
+			)
+			if err = clus.cleanup(); err != nil {
+				clus.logger.Warn(
+					"cleanup failed",
+					zap.Int("round", clus.rd),
+					zap.Int("case", clus.cs),
+					zap.Error(err),
+				)
+				return
+			}
+			// reset preModifiedKey after clean up
+			preModifiedKey = 0
+		}
+		if round > 0 && round%500 == 0 { // every 500 rounds
+			if err := clus.defrag(); err != nil {
+				clus.logger.Warn(
+					"defrag failed; returning",
+					zap.Int("round", clus.rd),
+					zap.Int("case", clus.cs),
+					zap.Error(err),
+				)
+				clus.failed()
+				return
+			}
+		}
+	}
+
+	clus.logger.Info(
+		"functional-tester is finished",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+}
+
+func (clus *Cluster) doRound(round int) error {
+	for i, f := range clus.failures {
+		clus.cs = i
+
+		caseTotalCounter.WithLabelValues(f.Desc()).Inc()
+
+		if err := clus.WaitHealth(); err != nil {
+			return fmt.Errorf("wait full health error: %v", err)
+		}
+
+		clus.logger.Info(
+			"injecting failure",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.String("desc", f.Desc()),
+		)
+		if err := f.Inject(clus, round); err != nil {
+			return fmt.Errorf("injection error: %v", err)
+		}
+		clus.logger.Info(
+			"injected failure",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.String("desc", f.Desc()),
+		)
+
+		clus.logger.Info(
+			"recovering failure",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.String("desc", f.Desc()),
+		)
+		if err := f.Recover(clus, round); err != nil {
+			return fmt.Errorf("recovery error: %v", err)
+		}
+		clus.logger.Info(
+			"recovered failure",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.String("desc", f.Desc()),
+		)
+
+		clus.pauseStresser()
+
+		if err := clus.WaitHealth(); err != nil {
+			return fmt.Errorf("wait full health error: %v", err)
+		}
+		if err := clus.checkConsistency(); err != nil {
+			return fmt.Errorf("checkConsistency error (%v)", err)
+		}
+
+		clus.logger.Info(
+			"success",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.String("desc", f.Desc()),
+		)
+	}
+	return nil
+}
+
+func (clus *Cluster) updateRevision() error {
+	revs, _, err := clus.getRevisionHash()
+	for _, rev := range revs {
+		clus.currentRevision = rev
+		break // just need to get one of the current revisions
+	}
+
+	clus.logger.Info(
+		"updated current revision",
+		zap.Int64("current-revision", clus.currentRevision),
+	)
+	return err
+}
+
+func (clus *Cluster) compact(rev int64, timeout time.Duration) (err error) {
+	clus.pauseStresser()
+	defer func() {
+		if err == nil {
+			err = clus.startStresser()
+		}
+	}()
+
+	clus.logger.Info(
+		"compacting storage",
+		zap.Int64("current-revision", clus.currentRevision),
+		zap.Int64("compact-revision", rev),
+	)
+	if err = clus.compactKV(rev, timeout); err != nil {
+		return err
+	}
+	clus.logger.Info(
+		"compacted storage",
+		zap.Int64("current-revision", clus.currentRevision),
+		zap.Int64("compact-revision", rev),
+	)
+
+	clus.logger.Info(
+		"checking compaction",
+		zap.Int64("current-revision", clus.currentRevision),
+		zap.Int64("compact-revision", rev),
+	)
+	if err = clus.checkCompact(rev); err != nil {
+		clus.logger.Warn(
+			"checkCompact failed",
+			zap.Int64("current-revision", clus.currentRevision),
+			zap.Int64("compact-revision", rev),
+			zap.Error(err),
+		)
+		return err
+	}
+	clus.logger.Info(
+		"confirmed compaction",
+		zap.Int64("current-revision", clus.currentRevision),
+		zap.Int64("compact-revision", rev),
+	)
+
+	return nil
+}
+
+func (clus *Cluster) failed() {
+	if !clus.Tester.ExitOnFailure {
+		return
+	}
+
+	clus.logger.Info(
+		"exiting on failure",
+		zap.Int("round", clus.rd),
+		zap.Int("case", clus.cs),
+	)
+	clus.DestroyEtcdAgents()
+	os.Exit(2)
+}
+
+func (clus *Cluster) cleanup() error {
+	defer clus.failed()
+
+	roundFailedTotalCounter.Inc()
+	desc := "compact/defrag"
+	if clus.cs != -1 {
+		desc = clus.failures[clus.cs].Desc()
+	}
+	caseFailedTotalCounter.WithLabelValues(desc).Inc()
+
+	clus.closeStresser()
+	if err := clus.FailArchive(); err != nil {
+		clus.logger.Warn(
+			"FailArchive failed",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.Error(err),
+		)
+		return err
+	}
+	if err := clus.Restart(); err != nil {
+		clus.logger.Warn(
+			"Restart failed",
+			zap.Int("round", clus.rd),
+			zap.Int("case", clus.cs),
+			zap.Error(err),
+		)
+		return err
+	}
+
+	clus.updateStresserChecker()
+	return nil
+}
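
StartTester is the whole control loop; the binary that drives it sits outside this hunk. Purely for orientation, and with a made-up constructor name rather than the PR's actual API, the wiring reduces to:

	// Hypothetical wiring; newClusterFromConfig is an assumed name, not this PR's API.
	func runTester(configPath string) error {
		clus, err := newClusterFromConfig(configPath) // assumed: reads the YAML config, dials agents
		if err != nil {
			return err
		}
		defer clus.DestroyEtcdAgents() // failed() above also destroys agents before exiting
		clus.StartTester()
		return nil
	}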

+ 30 - 2
tools/functional-tester/etcd-tester/util.go → tools/functional-tester/tester/utils.go

@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2018 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,13 +12,33 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package tester
 
 import (
 	"fmt"
+	"math/rand"
+	"net"
+	"net/url"
 	"strings"
 )
 
+func isValidURL(u string) bool {
+	_, err := url.Parse(u)
+	return err == nil
+}
+
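+// getPort returns the port of a URL such as "http://127.0.0.1:2379" ("2379").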
+func getPort(addr string) (port string, err error) {
+	urlAddr, err := url.Parse(addr)
+	if err != nil {
+		return "", err
+	}
+	_, port, err = net.SplitHostPort(urlAddr.Host)
+	if err != nil {
+		return "", err
+	}
+	return port, nil
+}
+
 func getSameValue(vals map[string]int64) bool {
 	var rv int64
 	for _, v := range vals {
@@ -49,3 +69,11 @@ func errsToError(errs []error) error {
 	}
 	return fmt.Errorf(strings.Join(stringArr, ", "))
 }
+
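+// randBytes returns a slice of size pseudo-random lowercase ASCII letters ('a'-'z').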
+func randBytes(size int) []byte {
+	data := make([]byte, size)
+	for i := 0; i < size; i++ {
+		data[i] = byte(int('a') + rand.Intn(26))
+	}
+	return data
+}

+ 19 - 0
vendor/go.uber.org/atomic/LICENSE.txt

@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 309 - 0
vendor/go.uber.org/atomic/atomic.go

@@ -0,0 +1,309 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
+
+import (
+	"math"
+	"sync/atomic"
+)
+
+// Int32 is an atomic wrapper around an int32.
+type Int32 struct{ v int32 }
+
+// NewInt32 creates an Int32.
+func NewInt32(i int32) *Int32 {
+	return &Int32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+	return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+	return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+	return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+	return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+	atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+	return atomic.SwapInt32(&i.v, n)
+}
+
+// Int64 is an atomic wrapper around an int64.
+type Int64 struct{ v int64 }
+
+// NewInt64 creates an Int64.
+func NewInt64(i int64) *Int64 {
+	return &Int64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+	return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+	return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+	return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+	return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+	atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+	return atomic.SwapInt64(&i.v, n)
+}
+
+// Uint32 is an atomic wrapper around an uint32.
+type Uint32 struct{ v uint32 }
+
+// NewUint32 creates a Uint32.
+func NewUint32(i uint32) *Uint32 {
+	return &Uint32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+	return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+	return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
+	return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+	return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+	atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+	return atomic.SwapUint32(&i.v, n)
+}
+
+// Uint64 is an atomic wrapper around a uint64.
+type Uint64 struct{ v uint64 }
+
+// NewUint64 creates a Uint64.
+func NewUint64(i uint64) *Uint64 {
+	return &Uint64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+	return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+	return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+	return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+	return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+	atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+	return atomic.SwapUint64(&i.v, n)
+}
+
+// Bool is an atomic Boolean.
+type Bool struct{ v uint32 }
+
+// NewBool creates a Bool.
+func NewBool(initial bool) *Bool {
+	return &Bool{boolToInt(initial)}
+}
+
+// Load atomically loads the Boolean.
+func (b *Bool) Load() bool {
+	return truthy(atomic.LoadUint32(&b.v))
+}
+
+// CAS is an atomic compare-and-swap.
+func (b *Bool) CAS(old, new bool) bool {
+	return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
+}
+
+// Store atomically stores the passed value.
+func (b *Bool) Store(new bool) {
+	atomic.StoreUint32(&b.v, boolToInt(new))
+}
+
+// Swap sets the given value and returns the previous value.
+func (b *Bool) Swap(new bool) bool {
+	return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
+	return truthy(atomic.AddUint32(&b.v, 1) - 1)
+}
+
+func truthy(n uint32) bool {
+	return n&1 == 1
+}
+
+func boolToInt(b bool) uint32 {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// Float64 is an atomic wrapper around float64.
+type Float64 struct {
+	v uint64
+}
+
+// NewFloat64 creates a Float64.
+func NewFloat64(f float64) *Float64 {
+	return &Float64{math.Float64bits(f)}
+}
+
+// Load atomically loads the wrapped value.
+func (f *Float64) Load() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&f.v))
+}
+
+// Store atomically stores the passed value.
+func (f *Float64) Store(s float64) {
+	atomic.StoreUint64(&f.v, math.Float64bits(s))
+}
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
+	for {
+		old := f.Load()
+		new := old + s
+		if f.CAS(old, new) {
+			return new
+		}
+	}
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+	return f.Add(-s)
+}
+
+// CAS is an atomic compare-and-swap.
+func (f *Float64) CAS(old, new float64) bool {
+	return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new))
+}
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct{ atomic.Value }

+ 49 - 0
vendor/go.uber.org/atomic/string.go

@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper around Value for strings.
+type String struct{ v Value }
+
+// NewString creates a String.
+func NewString(str string) *String {
+	s := &String{}
+	if str != "" {
+		s.Store(str)
+	}
+	return s
+}
+
+// Load atomically loads the wrapped string.
+func (s *String) Load() string {
+	v := s.v.Load()
+	if v == nil {
+		return ""
+	}
+	return v.(string)
+}
+
+// Store atomically stores the passed string.
+// Note: Converting the string to an interface{} to store in the Value
+// requires an allocation.
+func (s *String) Store(str string) {
+	s.v.Store(str)
+}

+ 19 - 0
vendor/go.uber.org/multierr/LICENSE.txt

@@ -0,0 +1,19 @@
+Copyright (c) 2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 401 - 0
vendor/go.uber.org/multierr/error.go

@@ -0,0 +1,401 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package multierr allows combining one or more errors together.
+//
+// Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// 	multierr.Combine(
+// 		reader.Close(),
+// 		writer.Close(),
+// 		conn.Close(),
+// 	)
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// 	err = multierr.Combine(reader.Close(), writer.Close())
+//
+// This makes it possible to record resource cleanup failures from deferred
+// blocks with the help of named return values.
+//
+// 	func sendRequest(req Request) (err error) {
+// 		conn, err := openConnection()
+// 		if err != nil {
+// 			return err
+// 		}
+// 		defer func() {
+// 			err = multierr.Append(err, conn.Close())
+// 		}()
+// 		// ...
+// 	}
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// 	errors := multierr.Errors(err)
+// 	if len(errors) > 0 {
+// 		fmt.Println("The following errors occurred:")
+// 	}
+//
+// Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// 	type errorGroup interface {
+// 		// Returns a slice containing the underlying list of errors.
+// 		//
+// 		// This slice MUST NOT be modified by the caller.
+// 		Errors() []error
+// 	}
+//
+// Note that if you need access to list of errors behind a multierr error, you
+// should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// 	var errors []error
+// 	group, ok := err.(errorGroup)
+// 	if ok {
+// 		errors = group.Errors()
+// 	} else {
+// 		errors = []error{err}
+// 	}
+package multierr // import "go.uber.org/multierr"
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+
+	"go.uber.org/atomic"
+)
+
+var (
+	// Separator for single-line error messages.
+	_singlelineSeparator = []byte("; ")
+
+	_newline = []byte("\n")
+
+	// Prefix for multi-line messages
+	_multilinePrefix = []byte("the following errors occurred:")
+
+	// Prefix for the first and following lines of an item in a list of
+	// multi-line error messages.
+	//
+	// For example, if a single item is:
+	//
+	// 	foo
+	// 	bar
+	//
+	// It will become,
+	//
+	// 	 -  foo
+	// 	    bar
+	_multilineSeparator = []byte("\n -  ")
+	_multilineIndent    = []byte("    ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+	New: func() interface{} {
+		return &bytes.Buffer{}
+	},
+}
+
+type errorGroup interface {
+	Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, the returned slice is empty.
+//
+// 	err := multierr.Append(r.Close(), w.Close())
+// 	errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+	if err == nil {
+		return nil
+	}
+
+	// Note that we're casting to multiError, not errorGroup. Our contract is
+	// that returned errors MAY implement errorGroup. Errors, however, only
+	// has special behavior for multierr-specific error objects.
+	//
+	// This behavior can be expanded in the future but I think it's prudent to
+	// start with as little as possible in terms of contract and possibility
+	// of misuse.
+	eg, ok := err.(*multiError)
+	if !ok {
+		return []error{err}
+	}
+
+	errors := eg.Errors()
+	result := make([]error, len(errors))
+	copy(result, errors)
+	return result
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened. That is,
+// none of the errors inside multiError are other multiErrors.
+//
+// multiError formats to a semi-colon delimited list of error messages with
+// %v and with a more readable multi-line format with %+v.
+type multiError struct {
+	copyNeeded atomic.Bool
+	errors     []error
+}
+
+var _ errorGroup = (*multiError)(nil)
+
+// Errors returns the list of underlying errors.
+//
+// This slice MUST NOT be modified.
+func (merr *multiError) Errors() []error {
+	if merr == nil {
+		return nil
+	}
+	return merr.errors
+}
+
+func (merr *multiError) Error() string {
+	if merr == nil {
+		return ""
+	}
+
+	buff := _bufferPool.Get().(*bytes.Buffer)
+	buff.Reset()
+
+	merr.writeSingleline(buff)
+
+	result := buff.String()
+	_bufferPool.Put(buff)
+	return result
+}
+
+func (merr *multiError) Format(f fmt.State, c rune) {
+	if c == 'v' && f.Flag('+') {
+		merr.writeMultiline(f)
+	} else {
+		merr.writeSingleline(f)
+	}
+}
+
+func (merr *multiError) writeSingleline(w io.Writer) {
+	first := true
+	for _, item := range merr.errors {
+		if first {
+			first = false
+		} else {
+			w.Write(_singlelineSeparator)
+		}
+		io.WriteString(w, item.Error())
+	}
+}
+
+func (merr *multiError) writeMultiline(w io.Writer) {
+	w.Write(_multilinePrefix)
+	for _, item := range merr.errors {
+		w.Write(_multilineSeparator)
+		writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
+	}
+}
+
+// Writes s to the writer with the given prefix added before each line after
+// the first.
+func writePrefixLine(w io.Writer, prefix []byte, s string) {
+	first := true
+	for len(s) > 0 {
+		if first {
+			first = false
+		} else {
+			w.Write(prefix)
+		}
+
+		idx := strings.IndexByte(s, '\n')
+		if idx < 0 {
+			idx = len(s) - 1
+		}
+
+		io.WriteString(w, s[:idx+1])
+		s = s[idx+1:]
+	}
+}
+
+type inspectResult struct {
+	// Number of top-level non-nil errors
+	Count int
+
+	// Total number of errors including multiErrors
+	Capacity int
+
+	// Index of the first non-nil error in the list. Value is meaningless if
+	// Count is zero.
+	FirstErrorIdx int
+
+	// Whether the list contains at least one multiError
+	ContainsMultiError bool
+}
+
+// Inspects the given slice of errors so that we can efficiently allocate
+// space for it.
+func inspect(errors []error) (res inspectResult) {
+	first := true
+	for i, err := range errors {
+		if err == nil {
+			continue
+		}
+
+		res.Count++
+		if first {
+			first = false
+			res.FirstErrorIdx = i
+		}
+
+		if merr, ok := err.(*multiError); ok {
+			res.Capacity += len(merr.errors)
+			res.ContainsMultiError = true
+		} else {
+			res.Capacity++
+		}
+	}
+	return
+}
+
+// fromSlice converts the given list of errors into a single error.
+func fromSlice(errors []error) error {
+	res := inspect(errors)
+	switch res.Count {
+	case 0:
+		return nil
+	case 1:
+		// only one non-nil entry
+		return errors[res.FirstErrorIdx]
+	case len(errors):
+		if !res.ContainsMultiError {
+			// already flat
+			return &multiError{errors: errors}
+		}
+	}
+
+	nonNilErrs := make([]error, 0, res.Capacity)
+	for _, err := range errors[res.FirstErrorIdx:] {
+		if err == nil {
+			continue
+		}
+
+		if nested, ok := err.(*multiError); ok {
+			nonNilErrs = append(nonNilErrs, nested.errors...)
+		} else {
+			nonNilErrs = append(nonNilErrs, err)
+		}
+	}
+
+	return &multiError{errors: nonNilErrs}
+}
+
+// Combine combines the passed errors into a single error.
+//
+// If zero arguments were passed or if all items are nil, a nil error is
+// returned.
+//
+// 	Combine(nil, nil)  // == nil
+//
+// If only a single error was passed, it is returned as-is.
+//
+// 	Combine(err)  // == err
+//
+// Combine skips over nil arguments so this function may be used to combine
+// together errors from operations that fail independently of each other.
+//
+// 	multierr.Combine(
+// 		reader.Close(),
+// 		writer.Close(),
+// 		pipe.Close(),
+// 	)
+//
+// If any of the passed errors is a multierr error, it will be flattened along
+// with the other errors.
+//
+// 	multierr.Combine(multierr.Combine(err1, err2), err3)
+// 	// is the same as
+// 	multierr.Combine(err1, err2, err3)
+//
+// The returned error formats into a readable multi-line error message if
+// formatted with %+v.
+//
+// 	fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+func Combine(errors ...error) error {
+	return fromSlice(errors)
+}
+
+// Append appends the given errors together. Either value may be nil.
+//
+// This function is a specialization of Combine for the common case where
+// there are only two errors.
+//
+// 	err = multierr.Append(reader.Close(), writer.Close())
+//
+// The following pattern may also be used to record failure of deferred
+// operations without losing information about the original error.
+//
+// 	func doSomething(..) (err error) {
+// 		f := acquireResource()
+// 		defer func() {
+// 			err = multierr.Append(err, f.Close())
+// 		}()
+func Append(left error, right error) error {
+	switch {
+	case left == nil:
+		return right
+	case right == nil:
+		return left
+	}
+
+	if _, ok := right.(*multiError); !ok {
+		if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
+			// Common case where the error on the left is constantly being
+			// appended to.
+			errs := append(l.errors, right)
+			return &multiError{errors: errs}
+		} else if !ok {
+			// Both errors are single errors.
+			return &multiError{errors: []error{left, right}}
+		}
+	}
+
+	// Either right or both, left and right, are multiErrors. Rely on usual
+	// expensive logic.
+	errors := [2]error{left, right}
+	return fromSlice(errors[0:])
+}
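
A minimal sketch of how the Combine, Append, and Errors helpers above fit together; the error messages and the standalone main function are illustrative only, not part of this change:

    package main

    import (
        "errors"
        "fmt"

        "go.uber.org/multierr"
    )

    func main() {
        // Combine skips nil values and flattens nested multierr errors.
        err := multierr.Combine(
            errors.New("close reader"),
            nil,
            errors.New("close writer"),
        )
        fmt.Printf("%v\n", err)  // semicolon-delimited single line
        fmt.Printf("%+v\n", err) // readable multi-line form

        // Append is the two-error special case, convenient in defers.
        err = multierr.Append(err, errors.New("close pipe"))
        fmt.Println(len(multierr.Errors(err))) // 3 underlying errors
    }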

+ 19 - 0
vendor/go.uber.org/zap/LICENSE.txt

@@ -0,0 +1,19 @@
+Copyright (c) 2016-2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 320 - 0
vendor/go.uber.org/zap/array.go

@@ -0,0 +1,320 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"time"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// Array constructs a field with the given key and ArrayMarshaler. It provides
+// a flexible, but still type-safe and efficient, way to add array-like types
+// to the logging context. The struct's MarshalLogArray method is called lazily.
+func Array(key string, val zapcore.ArrayMarshaler) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
+}
+
+// Bools constructs a field that carries a slice of bools.
+func Bools(key string, bs []bool) zapcore.Field {
+	return Array(key, bools(bs))
+}
+
+// ByteStrings constructs a field that carries a slice of []byte, each of which
+// must be UTF-8 encoded text.
+func ByteStrings(key string, bss [][]byte) zapcore.Field {
+	return Array(key, byteStringsArray(bss))
+}
+
+// Complex128s constructs a field that carries a slice of complex numbers.
+func Complex128s(key string, nums []complex128) zapcore.Field {
+	return Array(key, complex128s(nums))
+}
+
+// Complex64s constructs a field that carries a slice of complex numbers.
+func Complex64s(key string, nums []complex64) zapcore.Field {
+	return Array(key, complex64s(nums))
+}
+
+// Durations constructs a field that carries a slice of time.Durations.
+func Durations(key string, ds []time.Duration) zapcore.Field {
+	return Array(key, durations(ds))
+}
+
+// Float64s constructs a field that carries a slice of floats.
+func Float64s(key string, nums []float64) zapcore.Field {
+	return Array(key, float64s(nums))
+}
+
+// Float32s constructs a field that carries a slice of floats.
+func Float32s(key string, nums []float32) zapcore.Field {
+	return Array(key, float32s(nums))
+}
+
+// Ints constructs a field that carries a slice of integers.
+func Ints(key string, nums []int) zapcore.Field {
+	return Array(key, ints(nums))
+}
+
+// Int64s constructs a field that carries a slice of integers.
+func Int64s(key string, nums []int64) zapcore.Field {
+	return Array(key, int64s(nums))
+}
+
+// Int32s constructs a field that carries a slice of integers.
+func Int32s(key string, nums []int32) zapcore.Field {
+	return Array(key, int32s(nums))
+}
+
+// Int16s constructs a field that carries a slice of integers.
+func Int16s(key string, nums []int16) zapcore.Field {
+	return Array(key, int16s(nums))
+}
+
+// Int8s constructs a field that carries a slice of integers.
+func Int8s(key string, nums []int8) zapcore.Field {
+	return Array(key, int8s(nums))
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) zapcore.Field {
+	return Array(key, stringArray(ss))
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) zapcore.Field {
+	return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) zapcore.Field {
+	return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) zapcore.Field {
+	return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) zapcore.Field {
+	return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) zapcore.Field {
+	return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) zapcore.Field {
+	return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) zapcore.Field {
+	return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) zapcore.Field {
+	return Array(key, errArray(errs))
+}
+
+type bools []bool
+
+func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range bs {
+		arr.AppendBool(bs[i])
+	}
+	return nil
+}
+
+type byteStringsArray [][]byte
+
+func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range bss {
+		arr.AppendByteString(bss[i])
+	}
+	return nil
+}
+
+type complex128s []complex128
+
+func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendComplex128(nums[i])
+	}
+	return nil
+}
+
+type complex64s []complex64
+
+func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendComplex64(nums[i])
+	}
+	return nil
+}
+
+type durations []time.Duration
+
+func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range ds {
+		arr.AppendDuration(ds[i])
+	}
+	return nil
+}
+
+type float64s []float64
+
+func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendFloat64(nums[i])
+	}
+	return nil
+}
+
+type float32s []float32
+
+func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendFloat32(nums[i])
+	}
+	return nil
+}
+
+type ints []int
+
+func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendInt(nums[i])
+	}
+	return nil
+}
+
+type int64s []int64
+
+func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendInt64(nums[i])
+	}
+	return nil
+}
+
+type int32s []int32
+
+func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendInt32(nums[i])
+	}
+	return nil
+}
+
+type int16s []int16
+
+func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendInt16(nums[i])
+	}
+	return nil
+}
+
+type int8s []int8
+
+func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendInt8(nums[i])
+	}
+	return nil
+}
+
+type stringArray []string
+
+func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range ss {
+		arr.AppendString(ss[i])
+	}
+	return nil
+}
+
+type times []time.Time
+
+func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range ts {
+		arr.AppendTime(ts[i])
+	}
+	return nil
+}
+
+type uints []uint
+
+func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendUint(nums[i])
+	}
+	return nil
+}
+
+type uint64s []uint64
+
+func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendUint64(nums[i])
+	}
+	return nil
+}
+
+type uint32s []uint32
+
+func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendUint32(nums[i])
+	}
+	return nil
+}
+
+type uint16s []uint16
+
+func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendUint16(nums[i])
+	}
+	return nil
+}
+
+type uint8s []uint8
+
+func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendUint8(nums[i])
+	}
+	return nil
+}
+
+type uintptrs []uintptr
+
+func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range nums {
+		arr.AppendUintptr(nums[i])
+	}
+	return nil
+}
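
A rough sketch of the array field constructors in use; the logger preset, keys, and values below are arbitrary examples:

    package main

    import (
        "time"

        "go.uber.org/zap"
    )

    func main() {
        logger, _ := zap.NewProduction()
        defer logger.Sync()

        // Each slice is wrapped in a lazy ArrayMarshaler; encoding happens at log time.
        logger.Info("cluster status",
            zap.Strings("members", []string{"etcd-1", "etcd-2", "etcd-3"}),
            zap.Ints("client-ports", []int{2379, 2379, 2379}),
            zap.Durations("probe-latencies", []time.Duration{5 * time.Millisecond, 7 * time.Millisecond}),
        )
    }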

+ 106 - 0
vendor/go.uber.org/zap/buffer/buffer.go

@@ -0,0 +1,106 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer
+
+import "strconv"
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+	bs   []byte
+	pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+	b.bs = append(b.bs, v)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+	b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+	b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+	b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+	b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+	b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+	return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+	return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+	return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string {
+	return string(b.bs)
+}
+
+// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
+// backing array.
+func (b *Buffer) Reset() {
+	b.bs = b.bs[:0]
+}
+
+// Write implements io.Writer.
+func (b *Buffer) Write(bs []byte) (int, error) {
+	b.bs = append(b.bs, bs...)
+	return len(bs), nil
+}
+
+// Free returns the Buffer to its Pool.
+//
+// Callers must not retain references to the Buffer after calling Free.
+func (b *Buffer) Free() {
+	b.pool.put(b)
+}

+ 49 - 0
vendor/go.uber.org/zap/buffer/pool.go

@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package buffer
+
+import "sync"
+
+// A Pool is a type-safe wrapper around a sync.Pool.
+type Pool struct {
+	p *sync.Pool
+}
+
+// NewPool constructs a new Pool.
+func NewPool() Pool {
+	return Pool{p: &sync.Pool{
+		New: func() interface{} {
+			return &Buffer{bs: make([]byte, 0, _size)}
+		},
+	}}
+}
+
+// Get retrieves a Buffer from the pool, creating one if necessary.
+func (p Pool) Get() *Buffer {
+	buf := p.p.Get().(*Buffer)
+	buf.Reset()
+	buf.pool = p
+	return buf
+}
+
+func (p Pool) put(buf *Buffer) {
+	p.p.Put(buf)
+}
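
A small sketch of the pool-and-buffer workflow defined by these two files, assuming nothing beyond what they export:

    package main

    import (
        "fmt"

        "go.uber.org/zap/buffer"
    )

    func main() {
        pool := buffer.NewPool()

        buf := pool.Get() // reset 1 KiB buffer taken from the pool
        buf.AppendString("latency=")
        buf.AppendInt(42) // strconv-based append, no intermediate string
        buf.AppendString("ms")

        fmt.Println(buf.String()) // "latency=42ms"
        buf.Free()                // hand the buffer back to its pool
    }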

+ 243 - 0
vendor/go.uber.org/zap/config.go

@@ -0,0 +1,243 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"sort"
+	"time"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
+// global CPU and I/O load that logging puts on your process while attempting
+// to preserve a representative subset of your logs.
+//
+// Values configured here are per-second. See zapcore.NewSampler for details.
+type SamplingConfig struct {
+	Initial    int `json:"initial" yaml:"initial"`
+	Thereafter int `json:"thereafter" yaml:"thereafter"`
+}
+
+// Config offers a declarative way to construct a logger. It doesn't do
+// anything that can't be done with New, Options, and the various
+// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
+// toggle common options.
+//
+// Note that Config intentionally supports only the most common options. More
+// unusual logging setups (logging to network connections or message queues,
+// splitting output between multiple files, etc.) are possible, but require
+// direct use of the zapcore package. For sample code, see the package-level
+// BasicConfiguration and AdvancedConfiguration examples.
+//
+// For an example showing runtime log level changes, see the documentation for
+// AtomicLevel.
+type Config struct {
+	// Level is the minimum enabled logging level. Note that this is a dynamic
+	// level, so calling Config.Level.SetLevel will atomically change the log
+	// level of all loggers descended from this config.
+	Level AtomicLevel `json:"level" yaml:"level"`
+	// Development puts the logger in development mode, which changes the
+	// behavior of DPanicLevel and takes stacktraces more liberally.
+	Development bool `json:"development" yaml:"development"`
+	// DisableCaller stops annotating logs with the calling function's file
+	// name and line number. By default, all logs are annotated.
+	DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
+	// DisableStacktrace completely disables automatic stacktrace capturing. By
+	// default, stacktraces are captured for WarnLevel and above logs in
+	// development and ErrorLevel and above in production.
+	DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
+	// Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
+	Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
+	// Encoding sets the logger's encoding. Valid values are "json" and
+	// "console", as well as any third-party encodings registered via
+	// RegisterEncoder.
+	Encoding string `json:"encoding" yaml:"encoding"`
+	// EncoderConfig sets options for the chosen encoder. See
+	// zapcore.EncoderConfig for details.
+	EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
+	// OutputPaths is a list of paths to write logging output to. See Open for
+	// details.
+	OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
+	// ErrorOutputPaths is a list of paths to write internal logger errors to.
+	// The default is standard error.
+	//
+	// Note that this setting only affects internal errors; for sample code that
+	// sends error-level logs to a different location from info- and debug-level
+	// logs, see the package-level AdvancedConfiguration example.
+	ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
+	// InitialFields is a collection of fields to add to the root logger.
+	InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
+}
+
+// NewProductionEncoderConfig returns an opinionated EncoderConfig for
+// production environments.
+func NewProductionEncoderConfig() zapcore.EncoderConfig {
+	return zapcore.EncoderConfig{
+		TimeKey:        "ts",
+		LevelKey:       "level",
+		NameKey:        "logger",
+		CallerKey:      "caller",
+		MessageKey:     "msg",
+		StacktraceKey:  "stacktrace",
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.EpochTimeEncoder,
+		EncodeDuration: zapcore.SecondsDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	}
+}
+
+// NewProductionConfig is a reasonable production logging configuration.
+// Logging is enabled at InfoLevel and above.
+//
+// It uses a JSON encoder, writes to standard error, and enables sampling.
+// Stacktraces are automatically included on logs of ErrorLevel and above.
+func NewProductionConfig() Config {
+	return Config{
+		Level:       NewAtomicLevelAt(InfoLevel),
+		Development: false,
+		Sampling: &SamplingConfig{
+			Initial:    100,
+			Thereafter: 100,
+		},
+		Encoding:         "json",
+		EncoderConfig:    NewProductionEncoderConfig(),
+		OutputPaths:      []string{"stderr"},
+		ErrorOutputPaths: []string{"stderr"},
+	}
+}
+
+// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
+// development environments.
+func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
+	return zapcore.EncoderConfig{
+		// Keys can be anything except the empty string.
+		TimeKey:        "T",
+		LevelKey:       "L",
+		NameKey:        "N",
+		CallerKey:      "C",
+		MessageKey:     "M",
+		StacktraceKey:  "S",
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.CapitalLevelEncoder,
+		EncodeTime:     zapcore.ISO8601TimeEncoder,
+		EncodeDuration: zapcore.StringDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	}
+}
+
+// NewDevelopmentConfig is a reasonable development logging configuration.
+// Logging is enabled at DebugLevel and above.
+//
+// It enables development mode (which makes DPanicLevel logs panic), uses a
+// console encoder, writes to standard error, and disables sampling.
+// Stacktraces are automatically included on logs of WarnLevel and above.
+func NewDevelopmentConfig() Config {
+	return Config{
+		Level:            NewAtomicLevelAt(DebugLevel),
+		Development:      true,
+		Encoding:         "console",
+		EncoderConfig:    NewDevelopmentEncoderConfig(),
+		OutputPaths:      []string{"stderr"},
+		ErrorOutputPaths: []string{"stderr"},
+	}
+}
+
+// Build constructs a logger from the Config and Options.
+func (cfg Config) Build(opts ...Option) (*Logger, error) {
+	enc, err := cfg.buildEncoder()
+	if err != nil {
+		return nil, err
+	}
+
+	sink, errSink, err := cfg.openSinks()
+	if err != nil {
+		return nil, err
+	}
+
+	log := New(
+		zapcore.NewCore(enc, sink, cfg.Level),
+		cfg.buildOptions(errSink)...,
+	)
+	if len(opts) > 0 {
+		log = log.WithOptions(opts...)
+	}
+	return log, nil
+}
+
+func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
+	opts := []Option{ErrorOutput(errSink)}
+
+	if cfg.Development {
+		opts = append(opts, Development())
+	}
+
+	if !cfg.DisableCaller {
+		opts = append(opts, AddCaller())
+	}
+
+	stackLevel := ErrorLevel
+	if cfg.Development {
+		stackLevel = WarnLevel
+	}
+	if !cfg.DisableStacktrace {
+		opts = append(opts, AddStacktrace(stackLevel))
+	}
+
+	if cfg.Sampling != nil {
+		opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
+			return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter))
+		}))
+	}
+
+	if len(cfg.InitialFields) > 0 {
+		fs := make([]zapcore.Field, 0, len(cfg.InitialFields))
+		keys := make([]string, 0, len(cfg.InitialFields))
+		for k := range cfg.InitialFields {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			fs = append(fs, Any(k, cfg.InitialFields[k]))
+		}
+		opts = append(opts, Fields(fs...))
+	}
+
+	return opts
+}
+
+func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+	sink, closeOut, err := Open(cfg.OutputPaths...)
+	if err != nil {
+		return nil, nil, err
+	}
+	errSink, _, err := Open(cfg.ErrorOutputPaths...)
+	if err != nil {
+		closeOut()
+		return nil, nil, err
+	}
+	return sink, errSink, nil
+}
+
+func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
+	return newEncoder(cfg.Encoding, cfg.EncoderConfig)
+}
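
A hedged sketch of driving a logger from this Config type; the chosen level and initial fields are arbitrary:

    package main

    import "go.uber.org/zap"

    func main() {
        cfg := zap.NewProductionConfig()
        cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
        cfg.InitialFields = map[string]interface{}{"component": "tester"}

        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()

        logger.Debug("config-driven logger ready")
    }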

+ 113 - 0
vendor/go.uber.org/zap/doc.go

@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zap provides fast, structured, leveled logging.
+//
+// For applications that log in the hot path, reflection-based serialization
+// and string formatting are prohibitively expensive - they're CPU-intensive
+// and make many small allocations. Put differently, using json.Marshal and
+// fmt.Fprintf to log tons of interface{} makes your application slow.
+//
+// Zap takes a different approach. It includes a reflection-free,
+// zero-allocation JSON encoder, and the base Logger strives to avoid
+// serialization overhead and allocations wherever possible. By building the
+// high-level SugaredLogger on that foundation, zap lets users choose when
+// they need to count every allocation and when they'd prefer a more familiar,
+// loosely typed API.
+//
+// Choosing a Logger
+//
+// In contexts where performance is nice, but not critical, use the
+// SugaredLogger. It's 4-10x faster than other structured logging packages and
+// supports both structured and printf-style logging. Like log15 and go-kit,
+// the SugaredLogger's structured logging APIs are loosely typed and accept a
+// variadic number of key-value pairs. (For more advanced use cases, they also
+// accept strongly typed fields - see the SugaredLogger.With documentation for
+// details.)
+//  sugar := zap.NewExample().Sugar()
+//  defer sugar.Sync()
+//  sugar.Infow("failed to fetch URL",
+//    "url", "http://example.com",
+//    "attempt", 3,
+//    "backoff", time.Second,
+//  )
+//  sugar.Printf("failed to fetch URL: %s", "http://example.com")
+//
+// By default, loggers are unbuffered. However, since zap's low-level APIs
+// allow buffering, calling Sync before letting your process exit is a good
+// habit.
+//
+// In the rare contexts where every microsecond and every allocation matter,
+// use the Logger. It's even faster than the SugaredLogger and allocates far
+// less, but it only supports strongly-typed, structured logging.
+//  logger := zap.NewExample()
+//  defer logger.Sync()
+//  logger.Info("failed to fetch URL",
+//    zap.String("url", "http://example.com"),
+//    zap.Int("attempt", 3),
+//    zap.Duration("backoff", time.Second),
+//  )
+//
+// Choosing between the Logger and SugaredLogger doesn't need to be an
+// application-wide decision: converting between the two is simple and
+// inexpensive.
+//   logger := zap.NewExample()
+//   defer logger.Sync()
+//   sugar := logger.Sugar()
+//   plain := sugar.Desugar()
+//
+// Configuring Zap
+//
+// The simplest way to build a Logger is to use zap's opinionated presets:
+// NewExample, NewProduction, and NewDevelopment. These presets build a logger
+// with a single function call:
+//  logger, err := zap.NewProduction()
+//  if err != nil {
+//    log.Fatalf("can't initialize zap logger: %v", err)
+//  }
+//  defer logger.Sync()
+//
+// Presets are fine for small projects, but larger projects and organizations
+// naturally require a bit more customization. For most users, zap's Config
+// struct strikes the right balance between flexibility and convenience. See
+// the package-level BasicConfiguration example for sample code.
+//
+// More unusual configurations (splitting output between files, sending logs
+// to a message queue, etc.) are possible, but require direct use of
+// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
+// example for sample code.
+//
+// Extending Zap
+//
+// The zap package itself is a relatively thin wrapper around the interfaces
+// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
+// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
+// exception aggregation service, like Sentry or Rollbar) typically requires
+// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
+// interfaces. See the zapcore documentation for details.
+//
+// Similarly, package authors can use the high-performance Encoder and Core
+// implementations in the zapcore package to build their own loggers.
+//
+// Frequently Asked Questions
+//
+// An FAQ covering everything from installation errors to design decisions is
+// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
+package zap // import "go.uber.org/zap"

+ 75 - 0
vendor/go.uber.org/zap/encoder.go

@@ -0,0 +1,75 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+var (
+	errNoEncoderNameSpecified = errors.New("no encoder name specified")
+
+	_encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
+		"console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+			return zapcore.NewConsoleEncoder(encoderConfig), nil
+		},
+		"json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+			return zapcore.NewJSONEncoder(encoderConfig), nil
+		},
+	}
+	_encoderMutex sync.RWMutex
+)
+
+// RegisterEncoder registers an encoder constructor, which the Config struct
+// can then reference. By default, the "json" and "console" encoders are
+// registered.
+//
+// Attempting to register an encoder whose name is already taken returns an
+// error.
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
+	_encoderMutex.Lock()
+	defer _encoderMutex.Unlock()
+	if name == "" {
+		return errNoEncoderNameSpecified
+	}
+	if _, ok := _encoderNameToConstructor[name]; ok {
+		return fmt.Errorf("encoder already registered for name %q", name)
+	}
+	_encoderNameToConstructor[name] = constructor
+	return nil
+}
+
+func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+	_encoderMutex.RLock()
+	defer _encoderMutex.RUnlock()
+	if name == "" {
+		return nil, errNoEncoderNameSpecified
+	}
+	constructor, ok := _encoderNameToConstructor[name]
+	if !ok {
+		return nil, fmt.Errorf("no encoder registered for name %q", name)
+	}
+	return constructor(encoderConfig)
+}
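
A sketch of registering a custom encoder as described above; the "tsv" name is made up and simply reuses the console encoder:

    package main

    import (
        "log"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        // Register a hypothetical "tsv" encoding; real code would plug in
        // its own zapcore.Encoder implementation.
        if err := zap.RegisterEncoder("tsv", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
            return zapcore.NewConsoleEncoder(cfg), nil
        }); err != nil {
            log.Fatal(err)
        }

        cfg := zap.NewDevelopmentConfig()
        cfg.Encoding = "tsv" // now resolvable by Config.Build
        logger, err := cfg.Build()
        if err != nil {
            log.Fatal(err)
        }
        defer logger.Sync()
        logger.Info("custom encoding registered")
    }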

+ 80 - 0
vendor/go.uber.org/zap/error.go

@@ -0,0 +1,80 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+	return &errArrayElem{}
+}}
+
+// Error is shorthand for the common idiom NamedError("error", err).
+func Error(err error) zapcore.Field {
+	return NamedError("error", err)
+}
+
+// NamedError constructs a field that lazily stores err.Error() under the
+// provided key. Errors which also implement fmt.Formatter (like those produced
+// by github.com/pkg/errors) will also have their verbose representation stored
+// under key+"Verbose". If passed a nil error, the field is a no-op.
+//
+// For the common case in which the key is simply "error", the Error function
+// is shorter and less repetitive.
+func NamedError(key string, err error) zapcore.Field {
+	if err == nil {
+		return Skip()
+	}
+	return zapcore.Field{Key: key, Type: zapcore.ErrorType, Interface: err}
+}
+
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range errs {
+		if errs[i] == nil {
+			continue
+		}
+		// To represent each error as an object with an "error" attribute and
+		// potentially an "errorVerbose" attribute, we need to wrap it in a
+		// type that implements LogObjectMarshaler. To prevent this from
+		// allocating, pool the wrapper type.
+		elem := _errArrayElemPool.Get().(*errArrayElem)
+		elem.error = errs[i]
+		arr.AppendObject(elem)
+		elem.error = nil
+		_errArrayElemPool.Put(elem)
+	}
+	return nil
+}
+
+type errArrayElem struct {
+	error
+}
+
+func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	// Re-use the error field's logic, which supports non-standard error types.
+	Error(e.error).AddTo(enc)
+	return nil
+}
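
Roughly how these error fields are used in practice; the error value and keys are invented for illustration:

    package main

    import (
        "errors"

        "go.uber.org/zap"
    )

    func main() {
        logger, _ := zap.NewDevelopment()
        defer logger.Sync()

        err := errors.New("connection refused")

        logger.Warn("dial failed", zap.Error(err))                 // stored under key "error"
        logger.Warn("dial failed", zap.NamedError("dialErr", err)) // custom key
        logger.Warn("all dials failed", zap.Errors("errs", []error{err, err}))
    }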

+ 306 - 0
vendor/go.uber.org/zap/field.go

@@ -0,0 +1,306 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() zapcore.Field {
+	return zapcore.Field{Type: zapcore.SkipType}
+}
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) zapcore.Field {
+	var ival int64
+	if val {
+		ival = 1
+	}
+	return zapcore.Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
+}
+
+// Int constructs a field with the given key and value.
+func Int(key string, val int) zapcore.Field {
+	return Int64(key, int64(val))
+}
+
+// Int64 constructs a field with the given key and value.
+func Int64(key string, val int64) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Int64Type, Integer: val}
+}
+
+// Int32 constructs a field with the given key and value.
+func Int32(key string, val int32) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
+}
+
+// Int16 constructs a field with the given key and value.
+func Int16(key string, val int16) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
+}
+
+// Int8 constructs a field with the given key and value.
+func Int8(key string, val int8) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
+}
+
+// String constructs a field with the given key and value.
+func String(key string, val string) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.StringType, String: val}
+}
+
+// Uint constructs a field with the given key and value.
+func Uint(key string, val uint) zapcore.Field {
+	return Uint64(key, uint64(val))
+}
+
+// Uint64 constructs a field with the given key and value.
+func Uint64(key string, val uint64) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
+}
+
+// Uint32 constructs a field with the given key and value.
+func Uint32(key string, val uint32) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
+}
+
+// Uint16 constructs a field with the given key and value.
+func Uint16(key string, val uint16) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
+}
+
+// Uint8 constructs a field with the given key and value.
+func Uint8(key string, val uint8) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
+}
+
+// Uintptr constructs a field with the given key and value.
+func Uintptr(key string, val uintptr) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
+}
+
+// Reflect constructs a field with the given key and an arbitrary object. It uses
+// an encoding-appropriate, reflection-based function to lazily serialize nearly
+// any object into the logging context, but it's relatively slow and
+// allocation-heavy. Outside tests, Any is always a better choice.
+//
+// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
+// includes the error message in the final log output.
+func Reflect(key string, val interface{}) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.ReflectType, Interface: val}
+}
+
+// Namespace creates a named, isolated scope within the logger's context. All
+// subsequent fields will be added to the new namespace.
+//
+// This helps prevent key collisions when injecting loggers into sub-components
+// or third-party libraries.
+func Namespace(key string) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.NamespaceType}
+}
+
+// Stringer constructs a field with the given key and the output of the value's
+// String method. The Stringer's String method is called lazily.
+func Stringer(key string, val fmt.Stringer) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.StringerType, Interface: val}
+}
+
+// Time constructs a zapcore.Field with the given key and value. The encoder
+// controls how the time is serialized.
+func Time(key string, val time.Time) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
+}
+
+// Stack constructs a field that stores a stacktrace of the current goroutine
+// under provided key. Keep in mind that taking a stacktrace is eager and
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
+func Stack(key string) zapcore.Field {
+	// Returning the stacktrace as a string costs an allocation, but saves us
+	// from expanding the zapcore.Field union struct to include a byte slice. Since
+	// taking a stacktrace is already so expensive (~10us), the extra allocation
+	// is okay.
+	return String(key, takeStacktrace())
+}
+
+// Duration constructs a field with the given key and value. The encoder
+// controls how the duration is serialized.
+func Duration(key string, val time.Duration) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
+}
+
+// Object constructs a field with the given key and ObjectMarshaler. It
+// provides a flexible, but still type-safe and efficient, way to add map- or
+// struct-like user-defined types to the logging context. The struct's
+// MarshalLogObject method is called lazily.
+func Object(key string, val zapcore.ObjectMarshaler) zapcore.Field {
+	return zapcore.Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
+}
+
+// Any takes a key and an arbitrary value and chooses the best way to represent
+// them as a field, falling back to a reflection-based approach only if
+// necessary.
+//
+// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
+// them. To minimize surprise, []byte values are treated as binary blobs, byte
+// values are treated as uint8, and runes are always treated as integers.
+func Any(key string, value interface{}) zapcore.Field {
+	switch val := value.(type) {
+	case zapcore.ObjectMarshaler:
+		return Object(key, val)
+	case zapcore.ArrayMarshaler:
+		return Array(key, val)
+	case bool:
+		return Bool(key, val)
+	case []bool:
+		return Bools(key, val)
+	case complex128:
+		return Complex128(key, val)
+	case []complex128:
+		return Complex128s(key, val)
+	case complex64:
+		return Complex64(key, val)
+	case []complex64:
+		return Complex64s(key, val)
+	case float64:
+		return Float64(key, val)
+	case []float64:
+		return Float64s(key, val)
+	case float32:
+		return Float32(key, val)
+	case []float32:
+		return Float32s(key, val)
+	case int:
+		return Int(key, val)
+	case []int:
+		return Ints(key, val)
+	case int64:
+		return Int64(key, val)
+	case []int64:
+		return Int64s(key, val)
+	case int32:
+		return Int32(key, val)
+	case []int32:
+		return Int32s(key, val)
+	case int16:
+		return Int16(key, val)
+	case []int16:
+		return Int16s(key, val)
+	case int8:
+		return Int8(key, val)
+	case []int8:
+		return Int8s(key, val)
+	case string:
+		return String(key, val)
+	case []string:
+		return Strings(key, val)
+	case uint:
+		return Uint(key, val)
+	case []uint:
+		return Uints(key, val)
+	case uint64:
+		return Uint64(key, val)
+	case []uint64:
+		return Uint64s(key, val)
+	case uint32:
+		return Uint32(key, val)
+	case []uint32:
+		return Uint32s(key, val)
+	case uint16:
+		return Uint16(key, val)
+	case []uint16:
+		return Uint16s(key, val)
+	case uint8:
+		return Uint8(key, val)
+	case []byte:
+		return Binary(key, val)
+	case uintptr:
+		return Uintptr(key, val)
+	case []uintptr:
+		return Uintptrs(key, val)
+	case time.Time:
+		return Time(key, val)
+	case []time.Time:
+		return Times(key, val)
+	case time.Duration:
+		return Duration(key, val)
+	case []time.Duration:
+		return Durations(key, val)
+	case error:
+		return NamedError(key, val)
+	case []error:
+		return Errors(key, val)
+	case fmt.Stringer:
+		return Stringer(key, val)
+	default:
+		return Reflect(key, val)
+	}
+}
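
A short sketch contrasting the strongly typed constructors with the Any fallback; keys and values are arbitrary:

    package main

    import (
        "time"

        "go.uber.org/zap"
    )

    func main() {
        logger, _ := zap.NewProduction()
        defer logger.Sync()

        // Strongly typed constructors avoid reflection entirely.
        logger.Info("request handled",
            zap.String("method", "PUT"),
            zap.Int("status", 200),
            zap.Duration("took", 3*time.Millisecond),
        )

        // Any picks the best representation and only falls back to Reflect
        // for types it does not recognize, such as this map.
        logger.Info("request handled", zap.Any("peer", map[string]int{"port": 2379}))
    }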

+ 39 - 0
vendor/go.uber.org/zap/flag.go

@@ -0,0 +1,39 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"flag"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// LevelFlag uses the standard library's flag.Var to declare a global flag
+// with the specified name, default, and usage guidance. The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+	lvl := defaultLevel
+	flag.Var(&lvl, name, usage)
+	return &lvl
+}
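
A sketch of wiring LevelFlag into a command-line tool; the flag name and log message are examples only:

    package main

    import (
        "flag"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    var logLevel = zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")

    func main() {
        flag.Parse()

        cfg := zap.NewProductionConfig()
        cfg.Level = zap.NewAtomicLevelAt(*logLevel)

        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()

        logger.Info("starting", zap.Stringer("level", *logLevel))
    }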

+ 139 - 0
vendor/go.uber.org/zap/global.go

@@ -0,0 +1,139 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+const (
+	_stdLogDefaultDepth = 2
+	_loggerWriterDepth  = 2
+)
+
+var (
+	_globalMu sync.RWMutex
+	_globalL  = NewNop()
+	_globalS  = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+	_globalMu.RLock()
+	l := _globalL
+	_globalMu.RUnlock()
+	return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+	_globalMu.RLock()
+	s := _globalS
+	_globalMu.RUnlock()
+	return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
+func ReplaceGlobals(logger *Logger) func() {
+	_globalMu.Lock()
+	prev := _globalL
+	_globalL = logger
+	_globalS = logger.Sugar()
+	_globalMu.Unlock()
+	return func() { ReplaceGlobals(prev) }
+}
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
+func NewStdLog(l *Logger) *log.Logger {
+	logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+	f := logger.Info
+	return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
+
+// NewStdLogAt returns a *log.Logger which writes to the supplied zap Logger at
+// the required level.
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
+	logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+	var logFunc func(string, ...zapcore.Field)
+	switch level {
+	case DebugLevel:
+		logFunc = logger.Debug
+	case InfoLevel:
+		logFunc = logger.Info
+	case WarnLevel:
+		logFunc = logger.Warn
+	case ErrorLevel:
+		logFunc = logger.Error
+	case DPanicLevel:
+		logFunc = logger.DPanic
+	case PanicLevel:
+		logFunc = logger.Panic
+	case FatalLevel:
+		logFunc = logger.Fatal
+	default:
+		return nil, fmt.Errorf("unrecognized level: %q", level)
+	}
+	return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
+}
+
+// RedirectStdLog redirects output from the standard library's package-global
+// logger to the supplied logger at InfoLevel. Since zap already handles caller
+// annotations, timestamps, etc., it automatically disables the standard
+// library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLog(l *Logger) func() {
+	flags := log.Flags()
+	prefix := log.Prefix()
+	log.SetFlags(0)
+	log.SetPrefix("")
+	logFunc := l.WithOptions(
+		AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth),
+	).Info
+	log.SetOutput(&loggerWriter{logFunc})
+	return func() {
+		log.SetFlags(flags)
+		log.SetPrefix(prefix)
+		log.SetOutput(os.Stderr)
+	}
+}
+
+type loggerWriter struct {
+	logFunc func(msg string, fields ...zapcore.Field)
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+	p = bytes.TrimSpace(p)
+	l.logFunc(string(p))
+	return len(p), nil
+}
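
How the global helpers above tend to be combined, as a sketch rather than anything prescribed by this change:

    package main

    import (
        "log"

        "go.uber.org/zap"
    )

    func main() {
        logger, err := zap.NewProduction()
        if err != nil {
            log.Fatal(err)
        }
        defer logger.Sync()

        // Route zap.L()/zap.S() and the standard library's logger through
        // this logger; both helpers return undo functions.
        undoGlobals := zap.ReplaceGlobals(logger)
        undoStdLog := zap.RedirectStdLog(logger)
        defer undoGlobals()
        defer undoStdLog()

        zap.L().Info("global logger replaced")
        log.Print("stdlib output now flows through zap at InfoLevel")
    }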

+ 81 - 0
vendor/go.uber.org/zap/http_handler.go

@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// GET requests return a JSON description of the current logging level. PUT
+// requests change the logging level and expect a payload like:
+//   {"level":"info"}
+//
+// It's perfectly safe to change the logging level while a program is running.
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	type errorResponse struct {
+		Error string `json:"error"`
+	}
+	type payload struct {
+		Level *zapcore.Level `json:"level"`
+	}
+
+	enc := json.NewEncoder(w)
+
+	switch r.Method {
+
+	case "GET":
+		current := lvl.Level()
+		enc.Encode(payload{Level: &current})
+
+	case "PUT":
+		var req payload
+
+		if errmess := func() string {
+			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+				return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
+			}
+			if req.Level == nil {
+				return "Must specify a logging level."
+			}
+			return ""
+		}(); errmess != "" {
+			w.WriteHeader(http.StatusBadRequest)
+			enc.Encode(errorResponse{Error: errmess})
+			return
+		}
+
+		lvl.SetLevel(*req.Level)
+		enc.Encode(req)
+
+	default:
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		enc.Encode(errorResponse{
+			Error: "Only GET and PUT are supported.",
+		})
+	}
+}
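Because AtomicLevel satisfies http.Handler, the endpoint above can be mounted directly on a mux. A hedged sketch; the /log/level path and listen address are arbitrary choices, not mandated by zap:

package main

import (
	"log"
	"net/http"

	"go.uber.org/zap"
)

func main() {
	alvl := zap.NewAtomicLevelAt(zap.InfoLevel)

	mux := http.NewServeMux()
	// GET /log/level               -> {"level":"info"}
	// PUT /log/level {"level":"debug"} changes the level at runtime.
	mux.Handle("/log/level", alvl)

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}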

+ 31 - 0
vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go

@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffer.NewPool.
+package bufferpool
+
+import "go.uber.org/zap/buffer"
+
+var (
+	_pool = buffer.NewPool()
+	// Get retrieves a buffer from the pool, creating one if necessary.
+	Get = _pool.Get
+)
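The package is internal, so code outside zap cannot import it; the doc comment points at the public buffer.NewPool instead. A small sketch of that public equivalent, with a pool variable name of our own choosing:

package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

var pool = buffer.NewPool()

func main() {
	buf := pool.Get() // *buffer.Buffer, reset and ready to use
	defer buf.Free()  // return it to the pool when done
	buf.AppendString("hello ")
	buf.AppendInt(42)
	fmt.Println(buf.String())
}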

+ 44 - 0
vendor/go.uber.org/zap/internal/color/color.go

@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package color adds coloring functionality for TTY output.
+package color
+
+import "fmt"
+
+// Foreground colors.
+const (
+	Black Color = iota + 30
+	Red
+	Green
+	Yellow
+	Blue
+	Magenta
+	Cyan
+	White
+)
+
+// Color represents a text color.
+type Color uint8
+
+// Add adds the coloring to the given string.
+func (c Color) Add(s string) string {
+	return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
+}

+ 64 - 0
vendor/go.uber.org/zap/internal/exit/exit.go

@@ -0,0 +1,64 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package exit provides stubs so that unit tests can exercise code that calls
+// os.Exit(1).
+package exit
+
+import "os"
+
+var real = func() { os.Exit(1) }
+
+// Exit normally terminates the process by calling os.Exit(1). If the package
+// is stubbed, it instead records a call in the testing spy.
+func Exit() {
+	real()
+}
+
+// A StubbedExit is a testing fake for os.Exit.
+type StubbedExit struct {
+	Exited bool
+	prev   func()
+}
+
+// Stub substitutes a fake for the call to os.Exit(1).
+func Stub() *StubbedExit {
+	s := &StubbedExit{prev: real}
+	real = s.exit
+	return s
+}
+
+// WithStub runs the supplied function with Exit stubbed. It returns the stub
+// used, so that users can test whether the process would have crashed.
+func WithStub(f func()) *StubbedExit {
+	s := Stub()
+	defer s.Unstub()
+	f()
+	return s
+}
+
+// Unstub restores the previous exit function.
+func (se *StubbedExit) Unstub() {
+	real = se.prev
+}
+
+func (se *StubbedExit) exit() {
+	se.Exited = true
+}
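This package is also internal, so only zap's own code (for example its Fatal-level tests) can import it. A sketch of the intended test-time usage, under that assumption:

package exit_test

import (
	"testing"

	"go.uber.org/zap/internal/exit"
)

func TestExitIsStubbed(t *testing.T) {
	// WithStub swaps the real os.Exit(1) call for a spy, runs f, and
	// restores the original behavior before returning.
	stub := exit.WithStub(func() {
		exit.Exit() // would normally terminate the process
	})
	if !stub.Exited {
		t.Fatal("expected the stub to record an exit")
	}
}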

+ 132 - 0
vendor/go.uber.org/zap/level.go

@@ -0,0 +1,132 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"go.uber.org/atomic"
+	"go.uber.org/zap/zapcore"
+)
+
+const (
+	// DebugLevel logs are typically voluminous, and are usually disabled in
+	// production.
+	DebugLevel = zapcore.DebugLevel
+	// InfoLevel is the default logging priority.
+	InfoLevel = zapcore.InfoLevel
+	// WarnLevel logs are more important than Info, but don't need individual
+	// human review.
+	WarnLevel = zapcore.WarnLevel
+	// ErrorLevel logs are high-priority. If an application is running smoothly,
+	// it shouldn't generate any error-level logs.
+	ErrorLevel = zapcore.ErrorLevel
+	// DPanicLevel logs are particularly important errors. In development the
+	// logger panics after writing the message.
+	DPanicLevel = zapcore.DPanicLevel
+	// PanicLevel logs a message, then panics.
+	PanicLevel = zapcore.PanicLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel = zapcore.FatalLevel
+)
+
+// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
+// an anonymous function.
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+	l *atomic.Int32
+}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+	return AtomicLevel{
+		l: atomic.NewInt32(int32(InfoLevel)),
+	}
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+	a := NewAtomicLevel()
+	a.SetLevel(l)
+	return a
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+	return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+	return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+	lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+	return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+	if lvl.l == nil {
+		lvl.l = &atomic.Int32{}
+	}
+
+	var l zapcore.Level
+	if err := l.UnmarshalText(text); err != nil {
+		return err
+	}
+
+	lvl.SetLevel(l)
+	return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+	return lvl.Level().MarshalText()
+}
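Putting the pieces together, an AtomicLevel can gate a core and be flipped while the program runs. A minimal sketch; the encoder config and sink are our choices:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	alvl := zap.NewAtomicLevelAt(zap.InfoLevel)
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		alvl, // AtomicLevel satisfies zapcore.LevelEnabler
	)
	logger := zap.New(core)

	logger.Debug("dropped: debug is below the current level")
	alvl.SetLevel(zap.DebugLevel)
	logger.Debug("now written: the level was raised at runtime")
}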

+ 305 - 0
vendor/go.uber.org/zap/logger.go

@@ -0,0 +1,305 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+	"time"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// A Logger provides fast, leveled, structured logging. All methods are safe
+// for concurrent use.
+//
+// The Logger is designed for contexts in which every microsecond and every
+// allocation matters, so its API intentionally favors performance and type
+// safety over brevity. For most applications, the SugaredLogger strikes a
+// better balance between performance and ergonomics.
+type Logger struct {
+	core zapcore.Core
+
+	development bool
+	name        string
+	errorOutput zapcore.WriteSyncer
+
+	addCaller bool
+	addStack  zapcore.LevelEnabler
+
+	callerSkip int
+}
+
+// New constructs a new Logger from the provided zapcore.Core and Options. If
+// the passed zapcore.Core is nil, it falls back to using a no-op
+// implementation.
+//
+// This is the most flexible way to construct a Logger, but also the most
+// verbose. For typical use cases, the highly-opinionated presets
+// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
+// more convenient.
+//
+// For sample code, see the package-level AdvancedConfiguration example.
+func New(core zapcore.Core, options ...Option) *Logger {
+	if core == nil {
+		return NewNop()
+	}
+	log := &Logger{
+		core:        core,
+		errorOutput: zapcore.Lock(os.Stderr),
+		addStack:    zapcore.FatalLevel + 1,
+	}
+	return log.WithOptions(options...)
+}
+
+// NewNop returns a no-op Logger. It never writes out logs or internal errors,
+// and it never runs user-defined hooks.
+//
+// Using WithOptions to replace the Core or error output of a no-op Logger can
+// re-enable logging.
+func NewNop() *Logger {
+	return &Logger{
+		core:        zapcore.NewNopCore(),
+		errorOutput: zapcore.AddSync(ioutil.Discard),
+		addStack:    zapcore.FatalLevel + 1,
+	}
+}
+
+// NewProduction builds a sensible production Logger that writes InfoLevel and
+// above logs to standard error as JSON.
+//
+// It's a shortcut for NewProductionConfig().Build(...Option).
+func NewProduction(options ...Option) (*Logger, error) {
+	return NewProductionConfig().Build(options...)
+}
+
+// NewDevelopment builds a development Logger that writes DebugLevel and above
+// logs to standard error in a human-friendly format.
+//
+// It's a shortcut for NewDevelopmentConfig().Build(...Option).
+func NewDevelopment(options ...Option) (*Logger, error) {
+	return NewDevelopmentConfig().Build(options...)
+}
+
+// NewExample builds a Logger that's designed for use in zap's testable
+// examples. It writes DebugLevel and above logs to standard out as JSON, but
+// omits the timestamp and calling function to keep example output
+// short and deterministic.
+func NewExample(options ...Option) *Logger {
+	encoderCfg := zapcore.EncoderConfig{
+		MessageKey:     "msg",
+		LevelKey:       "level",
+		NameKey:        "logger",
+		EncodeLevel:    zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.ISO8601TimeEncoder,
+		EncodeDuration: zapcore.StringDurationEncoder,
+	}
+	core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
+	return New(core).WithOptions(options...)
+}
+
+// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+func (log *Logger) Sugar() *SugaredLogger {
+	core := log.clone()
+	core.callerSkip += 2
+	return &SugaredLogger{core}
+}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (log *Logger) Named(s string) *Logger {
+	if s == "" {
+		return log
+	}
+	l := log.clone()
+	if log.name == "" {
+		l.name = s
+	} else {
+		l.name = strings.Join([]string{l.name, s}, ".")
+	}
+	return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (log *Logger) WithOptions(opts ...Option) *Logger {
+	c := log.clone()
+	for _, opt := range opts {
+		opt.apply(c)
+	}
+	return c
+}
+
+// With creates a child logger and adds structured context to it. Fields added
+// to the child don't affect the parent, and vice versa.
+func (log *Logger) With(fields ...zapcore.Field) *Logger {
+	if len(fields) == 0 {
+		return log
+	}
+	l := log.clone()
+	l.core = l.core.With(fields)
+	return l
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	return log.check(lvl, msg)
+}
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...zapcore.Field) {
+	if ce := log.check(DebugLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...zapcore.Field) {
+	if ce := log.check(InfoLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...zapcore.Field) {
+	if ce := log.check(WarnLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...zapcore.Field) {
+	if ce := log.check(ErrorLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...zapcore.Field) {
+	if ce := log.check(DPanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...zapcore.Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...zapcore.Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// check must always be called directly by a method in the Logger interface
+	// (e.g., Check, Info, Fatal).
+	const callerSkipOffset = 2
+
+	// Create a basic checked entry through the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       time.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.Should(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		ce = ce.Should(ent, zapcore.WriteThenFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.Should(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+	if log.addCaller {
+		ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
+		if !ce.Entry.Caller.Defined {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+			log.errorOutput.Sync()
+		}
+	}
+	if log.addStack.Enabled(ce.Entry.Level) {
+		ce.Entry.Stack = Stack("").String
+	}
+
+	return ce
+}
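Typical construction goes through the presets rather than New directly. A brief, hedged sketch of everyday use; the field names and messages are illustrative:

package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	// Sync flushes any buffered entries; call it before the program exits.
	defer logger.Sync()

	logger.Info("request handled",
		zap.String("method", "GET"),
		zap.Int("status", 200),
	)

	// With returns a child logger; fields added here do not affect the parent.
	reqLogger := logger.With(zap.String("request_id", "abc123"))
	reqLogger.Warn("slow response")
}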

+ 109 - 0
vendor/go.uber.org/zap/options.go

@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "go.uber.org/zap/zapcore"
+
+// An Option configures a Logger.
+type Option interface {
+	apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+	f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = f(log.core)
+	})
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = zapcore.RegisterHooks(log.core, hooks...)
+	})
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...zapcore.Field) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = log.core.With(fs)
+	})
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+	return optionFunc(func(log *Logger) {
+		log.errorOutput = w
+	})
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+	return optionFunc(func(log *Logger) {
+		log.development = true
+	})
+}
+
+// AddCaller configures the Logger to annotate each message with the filename
+// and line number of zap's caller.
+func AddCaller() Option {
+	return optionFunc(func(log *Logger) {
+		log.addCaller = true
+	})
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+	return optionFunc(func(log *Logger) {
+		log.callerSkip += skip
+	})
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		log.addStack = lvl
+	})
+}
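AddCallerSkip matters whenever zap is hidden behind helper functions. A sketch of why, with names of our own choosing:

package main

import "go.uber.org/zap"

// logf is a hypothetical one-level wrapper around zap. Without AddCallerSkip(1),
// every entry would report logf itself as the caller instead of logf's caller.
func logf(l *zap.Logger, msg string) {
	l.WithOptions(zap.AddCallerSkip(1)).Info(msg)
}

func main() {
	logger, _ := zap.NewProduction() // the production preset enables caller annotation
	defer logger.Sync()
	logf(logger, "caller points at main, not at logf")
}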

+ 126 - 0
vendor/go.uber.org/zap/stacktrace.go

@@ -0,0 +1,126 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"runtime"
+	"strings"
+	"sync"
+
+	"go.uber.org/zap/internal/bufferpool"
+)
+
+const _zapPackage = "go.uber.org/zap"
+
+var (
+	_stacktracePool = sync.Pool{
+		New: func() interface{} {
+			return newProgramCounters(64)
+		},
+	}
+
+	// We add "." and "/" suffixes to the package name to ensure we only match
+	// the exact package and not any package with the same prefix.
+	_zapStacktracePrefixes       = addPrefix(_zapPackage, ".", "/")
+	_zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...)
+)
+
+func takeStacktrace() string {
+	buffer := bufferpool.Get()
+	defer buffer.Free()
+	programCounters := _stacktracePool.Get().(*programCounters)
+	defer _stacktracePool.Put(programCounters)
+
+	var numFrames int
+	for {
+		// Skip the call to runtime.Callers and takeStacktrace so that the
+		// program counters start at the caller of takeStacktrace.
+		numFrames = runtime.Callers(2, programCounters.pcs)
+		if numFrames < len(programCounters.pcs) {
+			break
+		}
+		// Don't put the too-short counter slice back into the pool; this lets
+		// the pool adjust if we consistently take deep stacktraces.
+		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+	}
+
+	i := 0
+	skipZapFrames := true // skip all consecutive zap frames at the beginning.
+	frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+
+	// Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+	// adds noise, since it's only either runtime.main or runtime.goexit.
+	for frame, more := frames.Next(); more; frame, more = frames.Next() {
+		if skipZapFrames && isZapFrame(frame.Function) {
+			continue
+		} else {
+			skipZapFrames = false
+		}
+
+		if i != 0 {
+			buffer.AppendByte('\n')
+		}
+		i++
+		buffer.AppendString(frame.Function)
+		buffer.AppendByte('\n')
+		buffer.AppendByte('\t')
+		buffer.AppendString(frame.File)
+		buffer.AppendByte(':')
+		buffer.AppendInt(int64(frame.Line))
+	}
+
+	return buffer.String()
+}
+
+func isZapFrame(function string) bool {
+	for _, prefix := range _zapStacktracePrefixes {
+		if strings.HasPrefix(function, prefix) {
+			return true
+		}
+	}
+
+	// We can't use a prefix match here since the location of the vendor
+	// directory affects the prefix. Instead we do a contains match.
+	for _, contains := range _zapStacktraceVendorContains {
+		if strings.Contains(function, contains) {
+			return true
+		}
+	}
+
+	return false
+}
+
+type programCounters struct {
+	pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+	return &programCounters{make([]uintptr, size)}
+}
+
+func addPrefix(prefix string, ss ...string) []string {
+	withPrefix := make([]string, len(ss))
+	for i, s := range ss {
+		withPrefix[i] = prefix + s
+	}
+	return withPrefix
+}
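takeStacktrace itself is unexported; it surfaces through the public Stack field constructor and the AddStacktrace option. A hedged sketch of both entry points:

package main

import "go.uber.org/zap"

func main() {
	// AddStacktrace captures a trace for every entry at or above the given level.
	logger, _ := zap.NewProduction(zap.AddStacktrace(zap.ErrorLevel))
	defer logger.Sync()

	logger.Error("automatic stacktrace via AddStacktrace")

	// zap.Stack attaches a trace to a single entry, regardless of level.
	logger.Info("explicit stacktrace field", zap.Stack("stack"))
}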

+ 304 - 0
vendor/go.uber.org/zap/sugar.go

@@ -0,0 +1,304 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"fmt"
+
+	"go.uber.org/zap/zapcore"
+
+	"go.uber.org/multierr"
+)
+
+const (
+	_oddNumberErrMsg    = "Ignored key without a value."
+	_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes three methods: one for loosely-typed
+// structured logging, one for println-style formatting, and one for
+// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
+// output with Infow ("info with" structured context), Info, or Infof.
+type SugaredLogger struct {
+	base *Logger
+}
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+	base := s.base.clone()
+	base.callerSkip -= 2
+	return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+	return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed zapcore.Field objects and loosely-typed key-value
+// pairs. When processing pairs, the first element of the pair is used as the
+// field key and the second as the field value.
+//
+// For example,
+//   sugaredLogger.With(
+//     "hello", "world",
+//     "failure", errors.New("oh no"),
+//     Stack(),
+//     "count", 42,
+//     "user", User{Name: "alice"},
+//  )
+// is the equivalent of
+//   unsugared.With(
+//     String("hello", "world"),
+//     String("failure", "oh no"),
+//     Stack(),
+//     Int("count", 42),
+//     Object("user", User{Name: "alice"}),
+//   )
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+	return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// Debug uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+	s.log(DebugLevel, "", args, nil)
+}
+
+// Info uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Info(args ...interface{}) {
+	s.log(InfoLevel, "", args, nil)
+}
+
+// Warn uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+	s.log(WarnLevel, "", args, nil)
+}
+
+// Error uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Error(args ...interface{}) {
+	s.log(ErrorLevel, "", args, nil)
+}
+
+// DPanic uses fmt.Sprint to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+	s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic uses fmt.Sprint to construct and log a message, then panics.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+	s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+	s.log(FatalLevel, "", args, nil)
+}
+
+// Debugf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+	s.log(DebugLevel, template, args, nil)
+}
+
+// Infof uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+	s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+	s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+	s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf uses fmt.Sprintf to log a templated message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+	s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf uses fmt.Sprintf to log a templated message, then panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+	s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+	s.log(FatalLevel, template, args, nil)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+//  s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+	s.log(DebugLevel, msg, nil, keysAndValues)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+	s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+	s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+	s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+	s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+	s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+	s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+	return s.base.Sync()
+}
+
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+	// If logging at this level is completely disabled, skip the overhead of
+	// string formatting.
+	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+		return
+	}
+
+	// Format with Sprint, Sprintf, or neither.
+	msg := template
+	if msg == "" && len(fmtArgs) > 0 {
+		msg = fmt.Sprint(fmtArgs...)
+	} else if msg != "" && len(fmtArgs) > 0 {
+		msg = fmt.Sprintf(template, fmtArgs...)
+	}
+
+	if ce := s.base.Check(lvl, msg); ce != nil {
+		ce.Write(s.sweetenFields(context)...)
+	}
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []zapcore.Field {
+	if len(args) == 0 {
+		return nil
+	}
+
+	// Allocate enough space for the worst case; if users pass only structured
+	// fields, we shouldn't penalize them with extra allocations.
+	fields := make([]zapcore.Field, 0, len(args))
+	var invalid invalidPairs
+
+	for i := 0; i < len(args); {
+		// This is a strongly-typed field. Consume it and move on.
+		if f, ok := args[i].(zapcore.Field); ok {
+			fields = append(fields, f)
+			i++
+			continue
+		}
+
+		// Make sure this element isn't a dangling key.
+		if i == len(args)-1 {
+			s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i]))
+			break
+		}
+
+		// Consume this value and the next, treating them as a key-value pair. If the
+		// key isn't a string, add this pair to the slice of invalid pairs.
+		key, val := args[i], args[i+1]
+		if keyStr, ok := key.(string); !ok {
+			// Subsequent errors are likely, so allocate once up front.
+			if cap(invalid) == 0 {
+				invalid = make(invalidPairs, 0, len(args)/2)
+			}
+			invalid = append(invalid, invalidPair{i, key, val})
+		} else {
+			fields = append(fields, Any(keyStr, val))
+		}
+		i += 2
+	}
+
+	// If we encountered any invalid key-value pairs, log an error.
+	if len(invalid) > 0 {
+		s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid))
+	}
+	return fields
+}
+
+type invalidPair struct {
+	position   int
+	key, value interface{}
+}
+
+func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	enc.AddInt64("position", int64(p.position))
+	Any("key", p.key).AddTo(enc)
+	Any("value", p.value).AddTo(enc)
+	return nil
+}
+
+type invalidPairs []invalidPair
+
+func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+	var err error
+	for i := range ps {
+		err = multierr.Append(err, enc.AppendObject(ps[i]))
+	}
+	return err
+}
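A short sketch of the three call styles the SugaredLogger exposes per level, plus round-tripping back to the strongly-typed Logger; the messages and keys are illustrative:

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	sugar := logger.Sugar()
	sugar.Info("plain Sprint-style message")
	sugar.Infof("printf-style: retried %d times", 3)
	sugar.Infow("structured, loosely typed",
		"user", "alice",
		"attempt", 3,
	)

	// Desugar recovers the original *zap.Logger when performance matters again.
	strict := sugar.Desugar()
	strict.Info("back to strongly-typed fields", zap.Int("attempt", 3))
}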

+ 27 - 0
vendor/go.uber.org/zap/time.go

@@ -0,0 +1,27 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "time"
+
+func timeToMillis(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond)
+}

+ 96 - 0
vendor/go.uber.org/zap/writer.go

@@ -0,0 +1,96 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"io/ioutil"
+	"os"
+
+	"go.uber.org/zap/zapcore"
+
+	"go.uber.org/multierr"
+)
+
+// Open is a high-level wrapper that takes a variadic number of paths, opens or
+// creates each of the specified files, and combines them into a locked
+// WriteSyncer. It also returns any error encountered and a function to close
+// any opened files.
+//
+// Passing no paths returns a no-op WriteSyncer. The special paths "stdout" and
+// "stderr" are interpreted as os.Stdout and os.Stderr, respectively.
+func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
+	writers, close, err := open(paths)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	writer := CombineWriteSyncers(writers...)
+	return writer, close, nil
+}
+
+func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
+	var openErr error
+	writers := make([]zapcore.WriteSyncer, 0, len(paths))
+	files := make([]*os.File, 0, len(paths))
+	close := func() {
+		for _, f := range files {
+			f.Close()
+		}
+	}
+	for _, path := range paths {
+		switch path {
+		case "stdout":
+			writers = append(writers, os.Stdout)
+			// Don't close standard out.
+			continue
+		case "stderr":
+			writers = append(writers, os.Stderr)
+			// Don't close standard error.
+			continue
+		}
+		f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
+		openErr = multierr.Append(openErr, err)
+		if err == nil {
+			writers = append(writers, f)
+			files = append(files, f)
+		}
+	}
+
+	if openErr != nil {
+		close()
+		return writers, nil, openErr
+	}
+
+	return writers, close, nil
+}
+
+// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
+// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
+// WriteSyncer.
+//
+// It's provided purely as a convenience; the result is no different from
+// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
+	if len(writers) == 0 {
+		return zapcore.AddSync(ioutil.Discard)
+	}
+	return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
+}
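A sketch of Open combining a file sink with standard error; the file path is an arbitrary example:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// "stderr" is interpreted specially; the file is opened (or created) in
	// append mode. The returned cleanup closes only the real files.
	ws, cleanup, err := zap.Open("stderr", "/tmp/zap-example.log")
	if err != nil {
		panic(err)
	}
	defer cleanup()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws, // already locked by CombineWriteSyncers inside Open
		zap.InfoLevel,
	)
	zap.New(core).Info("written to both stderr and the log file")
}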

+ 147 - 0
vendor/go.uber.org/zap/zapcore/console_encoder.go

@@ -0,0 +1,147 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"fmt"
+	"sync"
+
+	"go.uber.org/zap/buffer"
+	"go.uber.org/zap/internal/bufferpool"
+)
+
+var _sliceEncoderPool = sync.Pool{
+	New: func() interface{} {
+		return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
+	},
+}
+
+func getSliceEncoder() *sliceArrayEncoder {
+	return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+}
+
+func putSliceEncoder(e *sliceArrayEncoder) {
+	e.elems = e.elems[:0]
+	_sliceEncoderPool.Put(e)
+}
+
+type consoleEncoder struct {
+	*jsonEncoder
+}
+
+// NewConsoleEncoder creates an encoder whose output is designed for human,
+// rather than machine, consumption. It serializes the core log entry data
+// (message, level, timestamp, etc.) in a plain-text format and leaves the
+// structured context as JSON.
+//
+// Note that although the console encoder doesn't use the keys specified in the
+// encoder configuration, it will omit any element whose key is set to the empty
+// string.
+func NewConsoleEncoder(cfg EncoderConfig) Encoder {
+	return consoleEncoder{newJSONEncoder(cfg, true)}
+}
+
+func (c consoleEncoder) Clone() Encoder {
+	return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
+}
+
+func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+	line := bufferpool.Get()
+
+	// We don't want the entry's metadata to be quoted and escaped (if it's
+	// encoded as strings), which means that we can't use the JSON encoder. The
+	// simplest option is to use the memory encoder and fmt.Fprint.
+	//
+	// If this ever becomes a performance bottleneck, we can implement
+	// ArrayEncoder for our plain-text format.
+	arr := getSliceEncoder()
+	if c.TimeKey != "" && c.EncodeTime != nil {
+		c.EncodeTime(ent.Time, arr)
+	}
+	if c.LevelKey != "" && c.EncodeLevel != nil {
+		c.EncodeLevel(ent.Level, arr)
+	}
+	if ent.LoggerName != "" && c.NameKey != "" {
+		nameEncoder := c.EncodeName
+
+		if nameEncoder == nil {
+			// Fall back to FullNameEncoder for backward compatibility.
+			nameEncoder = FullNameEncoder
+		}
+
+		nameEncoder(ent.LoggerName, arr)
+	}
+	if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil {
+		c.EncodeCaller(ent.Caller, arr)
+	}
+	for i := range arr.elems {
+		if i > 0 {
+			line.AppendByte('\t')
+		}
+		fmt.Fprint(line, arr.elems[i])
+	}
+	putSliceEncoder(arr)
+
+	// Add the message itself.
+	if c.MessageKey != "" {
+		c.addTabIfNecessary(line)
+		line.AppendString(ent.Message)
+	}
+
+	// Add any structured context.
+	c.writeContext(line, fields)
+
+	// If there's no stacktrace key, honor that; this allows users to force
+	// single-line output.
+	if ent.Stack != "" && c.StacktraceKey != "" {
+		line.AppendByte('\n')
+		line.AppendString(ent.Stack)
+	}
+
+	if c.LineEnding != "" {
+		line.AppendString(c.LineEnding)
+	} else {
+		line.AppendString(DefaultLineEnding)
+	}
+	return line, nil
+}
+
+func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
+	context := c.jsonEncoder.Clone().(*jsonEncoder)
+	defer context.buf.Free()
+
+	addFields(context, extra)
+	context.closeOpenNamespaces()
+	if context.buf.Len() == 0 {
+		return
+	}
+
+	c.addTabIfNecessary(line)
+	line.AppendByte('{')
+	line.Write(context.buf.Bytes())
+	line.AppendByte('}')
+}
+
+func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) {
+	if line.Len() > 0 {
+		line.AppendByte('\t')
+	}
+}
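A sketch of wiring the console encoder into a development-style core; the development encoder config is a convenient default, not a requirement:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zap.DebugLevel)
	logger := zap.New(core)
	defer logger.Sync()

	// Entry metadata is tab-separated plain text; fields stay JSON-encoded.
	logger.Debug("console output", zap.String("component", "example"))
}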

Some files were not shown because too many files changed in this diff