Selaa lähdekoodia

Merge pull request #7347 from gyuho/static-check

*: add 'staticcheck' to 'test'
Gyu-Ho Lee 8 vuotta sitten
vanhempi
commit
e5d94a296f

+ 1 - 0
.travis.yml

@@ -44,6 +44,7 @@ before_install:
  - go get -v github.com/chzchzchz/goword
  - go get -v github.com/chzchzchz/goword
  - go get -v honnef.co/go/tools/cmd/gosimple
  - go get -v honnef.co/go/tools/cmd/gosimple
  - go get -v honnef.co/go/tools/cmd/unused
  - go get -v honnef.co/go/tools/cmd/unused
+ - go get -v honnef.co/go/tools/cmd/staticcheck
 
 
 # disable godep restore override
 # disable godep restore override
 install:
 install:

+ 2 - 1
auth/store_test.go

@@ -345,7 +345,7 @@ func TestRoleRevokePermission(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
+	_, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -359,6 +359,7 @@ func TestRoleRevokePermission(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
+	var r *pb.AuthRoleGetResponse
 	r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
 	r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)

+ 2 - 2
etcdctl/ctlv2/command/role_commands.go

@@ -117,13 +117,13 @@ func actionRoleAdd(c *cli.Context) error {
 	api, role := mustRoleAPIAndName(c)
 	api, role := mustRoleAPIAndName(c)
 	ctx, cancel := contextWithTotalTimeout(c)
 	ctx, cancel := contextWithTotalTimeout(c)
 	defer cancel()
 	defer cancel()
-	currentRole, err := api.GetRole(ctx, role)
+	currentRole, _ := api.GetRole(ctx, role)
 	if currentRole != nil {
 	if currentRole != nil {
 		fmt.Fprintf(os.Stderr, "Role %s already exists\n", role)
 		fmt.Fprintf(os.Stderr, "Role %s already exists\n", role)
 		os.Exit(1)
 		os.Exit(1)
 	}
 	}
 
 
-	err = api.AddRole(ctx, role)
+	err := api.AddRole(ctx, role)
 	if err != nil {
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err.Error())
 		fmt.Fprintln(os.Stderr, err.Error())
 		os.Exit(1)
 		os.Exit(1)

+ 1 - 1
etcdctl/ctlv3/command/printer_protobuf.go

@@ -60,5 +60,5 @@ func printPB(v interface{}) {
 		fmt.Fprintf(os.Stderr, "%v\n", err)
 		fmt.Fprintf(os.Stderr, "%v\n", err)
 		return
 		return
 	}
 	}
-	fmt.Printf(string(b))
+	fmt.Print(string(b))
 }
 }

+ 3 - 2
etcdserver/raft.go

@@ -113,7 +113,7 @@ type raftNode struct {
 	readStateC chan raft.ReadState
 	readStateC chan raft.ReadState
 
 
 	// utility
 	// utility
-	ticker <-chan time.Time
+	ticker *time.Ticker
 	// contention detectors for raft heartbeat message
 	// contention detectors for raft heartbeat message
 	td          *contention.TimeoutDetector
 	td          *contention.TimeoutDetector
 	heartbeat   time.Duration // for logging
 	heartbeat   time.Duration // for logging
@@ -143,7 +143,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {
 
 
 		for {
 		for {
 			select {
 			select {
-			case <-r.ticker:
+			case <-r.ticker.C:
 				r.Tick()
 				r.Tick()
 			case rd := <-r.Ready():
 			case rd := <-r.Ready():
 				if rd.SoftState != nil {
 				if rd.SoftState != nil {
@@ -303,6 +303,7 @@ func (r *raftNode) stop() {
 
 
 func (r *raftNode) onStop() {
 func (r *raftNode) onStop() {
 	r.Stop()
 	r.Stop()
+	r.ticker.Stop()
 	r.transport.Stop()
 	r.transport.Stop()
 	if err := r.storage.Close(); err != nil {
 	if err := r.storage.Close(); err != nil {
 		plog.Panicf("raft close storage error: %v", err)
 		plog.Panicf("raft close storage error: %v", err)

+ 1 - 0
etcdserver/raft_test.go

@@ -158,6 +158,7 @@ func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
 		storage:     mockstorage.NewStorageRecorder(""),
 		storage:     mockstorage.NewStorageRecorder(""),
 		raftStorage: raft.NewMemoryStorage(),
 		raftStorage: raft.NewMemoryStorage(),
 		transport:   rafthttp.NewNopTransporter(),
 		transport:   rafthttp.NewNopTransporter(),
+		ticker:      &time.Ticker{},
 	}}
 	}}
 	srv.r.start(nil)
 	srv.r.start(nil)
 	n.readyc <- raft.Ready{}
 	n.readyc <- raft.Ready{}

+ 10 - 8
etcdserver/server.go

@@ -220,7 +220,7 @@ type EtcdServer struct {
 	stats  *stats.ServerStats
 	stats  *stats.ServerStats
 	lstats *stats.LeaderStats
 	lstats *stats.LeaderStats
 
 
-	SyncTicker <-chan time.Time
+	SyncTicker *time.Ticker
 	// compactor is used to auto-compact the KV.
 	// compactor is used to auto-compact the KV.
 	compactor *compactor.Periodic
 	compactor *compactor.Periodic
 
 
@@ -416,7 +416,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 		r: raftNode{
 		r: raftNode{
 			isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
 			isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
 			Node:        n,
 			Node:        n,
-			ticker:      time.Tick(heartbeat),
+			ticker:      time.NewTicker(heartbeat),
 			// set up contention detectors for raft heartbeat message.
 			// set up contention detectors for raft heartbeat message.
 			// expect to send a heartbeat within 2 heartbeat intervals.
 			// expect to send a heartbeat within 2 heartbeat intervals.
 			td:          contention.NewTimeoutDetector(2 * heartbeat),
 			td:          contention.NewTimeoutDetector(2 * heartbeat),
@@ -431,7 +431,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 		cluster:       cl,
 		cluster:       cl,
 		stats:         sstats,
 		stats:         sstats,
 		lstats:        lstats,
 		lstats:        lstats,
-		SyncTicker:    time.Tick(500 * time.Millisecond),
+		SyncTicker:    time.NewTicker(500 * time.Millisecond),
 		peerRt:        prt,
 		peerRt:        prt,
 		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
 		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
 		forceVersionC: make(chan struct{}),
 		forceVersionC: make(chan struct{}),
@@ -606,7 +606,7 @@ type raftReadyHandler struct {
 }
 }
 
 
 func (s *EtcdServer) run() {
 func (s *EtcdServer) run() {
-	snap, err := s.r.raftStorage.Snapshot()
+	sn, err := s.r.raftStorage.Snapshot()
 	if err != nil {
 	if err != nil {
 		plog.Panicf("get snapshot from raft storage error: %v", err)
 		plog.Panicf("get snapshot from raft storage error: %v", err)
 	}
 	}
@@ -637,7 +637,7 @@ func (s *EtcdServer) run() {
 				}
 				}
 				setSyncC(nil)
 				setSyncC(nil)
 			} else {
 			} else {
-				setSyncC(s.SyncTicker)
+				setSyncC(s.SyncTicker.C)
 				if s.compactor != nil {
 				if s.compactor != nil {
 					s.compactor.Resume()
 					s.compactor.Resume()
 				}
 				}
@@ -664,9 +664,9 @@ func (s *EtcdServer) run() {
 	// asynchronously accept apply packets, dispatch progress in-order
 	// asynchronously accept apply packets, dispatch progress in-order
 	sched := schedule.NewFIFOScheduler()
 	sched := schedule.NewFIFOScheduler()
 	ep := etcdProgress{
 	ep := etcdProgress{
-		confState: snap.Metadata.ConfState,
-		snapi:     snap.Metadata.Index,
-		appliedi:  snap.Metadata.Index,
+		confState: sn.Metadata.ConfState,
+		snapi:     sn.Metadata.Index,
+		appliedi:  sn.Metadata.Index,
 	}
 	}
 
 
 	defer func() {
 	defer func() {
@@ -679,6 +679,8 @@ func (s *EtcdServer) run() {
 		// wait for gouroutines before closing raft so wal stays open
 		// wait for gouroutines before closing raft so wal stays open
 		s.wg.Wait()
 		s.wg.Wait()
 
 
+		s.SyncTicker.Stop()
+
 		// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
 		// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
 		// by adding a peer after raft stops the transport
 		// by adding a peer after raft stops the transport
 		s.r.stop()
 		s.r.stop()

+ 56 - 35
etcdserver/server_test.go

@@ -173,11 +173,13 @@ func TestApplyRepeat(t *testing.T) {
 			raftStorage: raft.NewMemoryStorage(),
 			raftStorage: raft.NewMemoryStorage(),
 			storage:     mockstorage.NewStorageRecorder(""),
 			storage:     mockstorage.NewStorageRecorder(""),
 			transport:   rafthttp.NewNopTransporter(),
 			transport:   rafthttp.NewNopTransporter(),
+			ticker:      &time.Ticker{},
 		},
 		},
-		Cfg:      &ServerConfig{},
-		store:    st,
-		cluster:  cl,
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		Cfg:        &ServerConfig{},
+		store:      st,
+		cluster:    cl,
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster}
 	s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster}
 	s.start()
 	s.start()
@@ -635,9 +637,11 @@ func TestDoProposal(t *testing.T) {
 				storage:     mockstorage.NewStorageRecorder(""),
 				storage:     mockstorage.NewStorageRecorder(""),
 				raftStorage: raft.NewMemoryStorage(),
 				raftStorage: raft.NewMemoryStorage(),
 				transport:   rafthttp.NewNopTransporter(),
 				transport:   rafthttp.NewNopTransporter(),
+				ticker:      &time.Ticker{},
 			},
 			},
-			store:    st,
-			reqIDGen: idutil.NewGenerator(0, time.Time{}),
+			store:      st,
+			reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+			SyncTicker: &time.Ticker{},
 		}
 		}
 		srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
 		srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
 		srv.start()
 		srv.start()
@@ -788,6 +792,7 @@ func TestSyncTimeout(t *testing.T) {
 func TestSyncTrigger(t *testing.T) {
 func TestSyncTrigger(t *testing.T) {
 	n := newReadyNode()
 	n := newReadyNode()
 	st := make(chan time.Time, 1)
 	st := make(chan time.Time, 1)
+	tk := &time.Ticker{C: st}
 	srv := &EtcdServer{
 	srv := &EtcdServer{
 		Cfg: &ServerConfig{TickMs: 1},
 		Cfg: &ServerConfig{TickMs: 1},
 		r: raftNode{
 		r: raftNode{
@@ -795,9 +800,10 @@ func TestSyncTrigger(t *testing.T) {
 			raftStorage: raft.NewMemoryStorage(),
 			raftStorage: raft.NewMemoryStorage(),
 			transport:   rafthttp.NewNopTransporter(),
 			transport:   rafthttp.NewNopTransporter(),
 			storage:     mockstorage.NewStorageRecorder(""),
 			storage:     mockstorage.NewStorageRecorder(""),
+			ticker:      &time.Ticker{},
 		},
 		},
 		store:      mockstore.NewNop(),
 		store:      mockstore.NewNop(),
-		SyncTicker: st,
+		SyncTicker: tk,
 		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
 		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
 	}
 	}
 
 
@@ -910,9 +916,11 @@ func TestTriggerSnap(t *testing.T) {
 			raftStorage: raft.NewMemoryStorage(),
 			raftStorage: raft.NewMemoryStorage(),
 			storage:     p,
 			storage:     p,
 			transport:   rafthttp.NewNopTransporter(),
 			transport:   rafthttp.NewNopTransporter(),
+			ticker:      &time.Ticker{},
 		},
 		},
-		store:    st,
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		store:      st,
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
 	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
 
 
@@ -979,9 +987,11 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
 			storage:     mockstorage.NewStorageRecorder(testdir),
 			storage:     mockstorage.NewStorageRecorder(testdir),
 			raftStorage: rs,
 			raftStorage: rs,
 			msgSnapC:    make(chan raftpb.Message, maxInFlightMsgSnap),
 			msgSnapC:    make(chan raftpb.Message, maxInFlightMsgSnap),
+			ticker:      &time.Ticker{},
 		},
 		},
-		store:   st,
-		cluster: cl,
+		store:      st,
+		cluster:    cl,
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster}
 	s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster}
 
 
@@ -1059,11 +1069,13 @@ func TestAddMember(t *testing.T) {
 			raftStorage: raft.NewMemoryStorage(),
 			raftStorage: raft.NewMemoryStorage(),
 			storage:     mockstorage.NewStorageRecorder(""),
 			storage:     mockstorage.NewStorageRecorder(""),
 			transport:   rafthttp.NewNopTransporter(),
 			transport:   rafthttp.NewNopTransporter(),
+			ticker:      &time.Ticker{},
 		},
 		},
-		Cfg:      &ServerConfig{},
-		store:    st,
-		cluster:  cl,
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		Cfg:        &ServerConfig{},
+		store:      st,
+		cluster:    cl,
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	s.start()
 	s.start()
 	m := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"foo"}}}
 	m := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"foo"}}}
@@ -1099,11 +1111,13 @@ func TestRemoveMember(t *testing.T) {
 			raftStorage: raft.NewMemoryStorage(),
 			raftStorage: raft.NewMemoryStorage(),
 			storage:     mockstorage.NewStorageRecorder(""),
 			storage:     mockstorage.NewStorageRecorder(""),
 			transport:   rafthttp.NewNopTransporter(),
 			transport:   rafthttp.NewNopTransporter(),
+			ticker:      &time.Ticker{},
 		},
 		},
-		Cfg:      &ServerConfig{},
-		store:    st,
-		cluster:  cl,
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		Cfg:        &ServerConfig{},
+		store:      st,
+		cluster:    cl,
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	s.start()
 	s.start()
 	err := s.RemoveMember(context.TODO(), 1234)
 	err := s.RemoveMember(context.TODO(), 1234)
@@ -1138,10 +1152,12 @@ func TestUpdateMember(t *testing.T) {
 			raftStorage: raft.NewMemoryStorage(),
 			raftStorage: raft.NewMemoryStorage(),
 			storage:     mockstorage.NewStorageRecorder(""),
 			storage:     mockstorage.NewStorageRecorder(""),
 			transport:   rafthttp.NewNopTransporter(),
 			transport:   rafthttp.NewNopTransporter(),
+			ticker:      &time.Ticker{},
 		},
 		},
-		store:    st,
-		cluster:  cl,
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		store:      st,
+		cluster:    cl,
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	s.start()
 	s.start()
 	wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
 	wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
@@ -1173,11 +1189,12 @@ func TestPublish(t *testing.T) {
 		readych:    make(chan struct{}),
 		readych:    make(chan struct{}),
 		Cfg:        &ServerConfig{TickMs: 1},
 		Cfg:        &ServerConfig{TickMs: 1},
 		id:         1,
 		id:         1,
-		r:          raftNode{Node: n},
+		r:          raftNode{Node: n, ticker: &time.Ticker{}},
 		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
 		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
 		cluster:    &membership.RaftCluster{},
 		cluster:    &membership.RaftCluster{},
 		w:          w,
 		w:          w,
 		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
 		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	srv.publish(time.Hour)
 	srv.publish(time.Hour)
 
 
@@ -1216,13 +1233,15 @@ func TestPublishStopped(t *testing.T) {
 		r: raftNode{
 		r: raftNode{
 			Node:      newNodeNop(),
 			Node:      newNodeNop(),
 			transport: rafthttp.NewNopTransporter(),
 			transport: rafthttp.NewNopTransporter(),
+			ticker:    &time.Ticker{},
 		},
 		},
-		cluster:  &membership.RaftCluster{},
-		w:        mockwait.NewNop(),
-		done:     make(chan struct{}),
-		stopping: make(chan struct{}),
-		stop:     make(chan struct{}),
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		cluster:    &membership.RaftCluster{},
+		w:          mockwait.NewNop(),
+		done:       make(chan struct{}),
+		stopping:   make(chan struct{}),
+		stop:       make(chan struct{}),
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	close(srv.stopping)
 	close(srv.stopping)
 	srv.publish(time.Hour)
 	srv.publish(time.Hour)
@@ -1232,11 +1251,12 @@ func TestPublishStopped(t *testing.T) {
 func TestPublishRetry(t *testing.T) {
 func TestPublishRetry(t *testing.T) {
 	n := newNodeRecorderStream()
 	n := newNodeRecorderStream()
 	srv := &EtcdServer{
 	srv := &EtcdServer{
-		Cfg:      &ServerConfig{TickMs: 1},
-		r:        raftNode{Node: n},
-		w:        mockwait.NewNop(),
-		stopping: make(chan struct{}),
-		reqIDGen: idutil.NewGenerator(0, time.Time{}),
+		Cfg:        &ServerConfig{TickMs: 1},
+		r:          raftNode{Node: n, ticker: &time.Ticker{}},
+		w:          mockwait.NewNop(),
+		stopping:   make(chan struct{}),
+		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	// expect multiple proposals from retrying
 	// expect multiple proposals from retrying
 	ch := make(chan struct{})
 	ch := make(chan struct{})
@@ -1270,11 +1290,12 @@ func TestUpdateVersion(t *testing.T) {
 	srv := &EtcdServer{
 	srv := &EtcdServer{
 		id:         1,
 		id:         1,
 		Cfg:        &ServerConfig{TickMs: 1},
 		Cfg:        &ServerConfig{TickMs: 1},
-		r:          raftNode{Node: n},
+		r:          raftNode{Node: n, ticker: &time.Ticker{}},
 		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
 		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
 		cluster:    &membership.RaftCluster{},
 		cluster:    &membership.RaftCluster{},
 		w:          w,
 		w:          w,
 		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
 		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
+		SyncTicker: &time.Ticker{},
 	}
 	}
 	srv.updateClusterVersion("2.0.0")
 	srv.updateClusterVersion("2.0.0")
 
 

+ 1 - 1
integration/cluster.go

@@ -603,7 +603,7 @@ func (m *member) Launch() error {
 	if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
 	if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
 		return fmt.Errorf("failed to initialize the etcd server: %v", err)
 		return fmt.Errorf("failed to initialize the etcd server: %v", err)
 	}
 	}
-	m.s.SyncTicker = time.Tick(500 * time.Millisecond)
+	m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
 	m.s.Start()
 	m.s.Start()
 
 
 	m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}
 	m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}

+ 1 - 1
integration/v3_lease_test.go

@@ -105,7 +105,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
 	}
 	}
 
 
 	// create duplicate fixed lease
 	// create duplicate fixed lease
-	lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
+	_, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
 		context.TODO(),
 		context.TODO(),
 		&pb.LeaseGrantRequest{ID: 1, TTL: 1})
 		&pb.LeaseGrantRequest{ID: 1, TTL: 1})
 	if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {
 	if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {

+ 2 - 1
lease/lessor_test.go

@@ -55,11 +55,12 @@ func TestLessorGrant(t *testing.T) {
 		t.Errorf("term = %v, want at least %v", l.Remaining(), minLeaseTTLDuration-time.Second)
 		t.Errorf("term = %v, want at least %v", l.Remaining(), minLeaseTTLDuration-time.Second)
 	}
 	}
 
 
-	nl, err := le.Grant(1, 1)
+	_, err = le.Grant(1, 1)
 	if err == nil {
 	if err == nil {
 		t.Errorf("allocated the same lease")
 		t.Errorf("allocated the same lease")
 	}
 	}
 
 
+	var nl *Lease
 	nl, err = le.Grant(2, 1)
 	nl, err = le.Grant(2, 1)
 	if err != nil {
 	if err != nil {
 		t.Errorf("could not grant lease 2 (%v)", err)
 		t.Errorf("could not grant lease 2 (%v)", err)

+ 0 - 1
mvcc/kvstore_bench_test.go

@@ -108,7 +108,6 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
 		}
 		}
 	}
 	}
 	b.ResetTimer()
 	b.ResetTimer()
-	s = NewStore(be, &lease.FakeLessor{}, &i)
 }
 }
 
 
 func BenchmarkStoreRestoreRevs1(b *testing.B) {
 func BenchmarkStoreRestoreRevs1(b *testing.B) {

+ 4 - 0
pkg/transport/keepalive_listener_test.go

@@ -45,6 +45,10 @@ func TestNewKeepAliveListener(t *testing.T) {
 	ln.Close()
 	ln.Close()
 
 
 	ln, err = net.Listen("tcp", "127.0.0.1:0")
 	ln, err = net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("unexpected Listen error: %v", err)
+	}
+
 	// tls
 	// tls
 	tmp, err := createTempFile([]byte("XXX"))
 	tmp, err := createTempFile([]byte("XXX"))
 	if err != nil {
 	if err != nil {

+ 1 - 0
raft/node_test.go

@@ -301,6 +301,7 @@ func TestNodeProposeAddDuplicateNode(t *testing.T) {
 	n.Campaign(context.TODO())
 	n.Campaign(context.TODO())
 	rdyEntries := make([]raftpb.Entry, 0)
 	rdyEntries := make([]raftpb.Entry, 0)
 	ticker := time.NewTicker(time.Millisecond * 100)
 	ticker := time.NewTicker(time.Millisecond * 100)
+	defer ticker.Stop()
 	done := make(chan struct{})
 	done := make(chan struct{})
 	stop := make(chan struct{})
 	stop := make(chan struct{})
 	applyConfChan := make(chan struct{})
 	applyConfChan := make(chan struct{})

+ 3 - 2
rafthttp/stream.go

@@ -142,7 +142,8 @@ func (cw *streamWriter) run() {
 		flusher    http.Flusher
 		flusher    http.Flusher
 		batched    int
 		batched    int
 	)
 	)
-	tickc := time.Tick(ConnReadTimeout / 3)
+	tickc := time.NewTicker(ConnReadTimeout / 3)
+	defer tickc.Stop()
 	unflushed := 0
 	unflushed := 0
 
 
 	plog.Infof("started streaming with peer %s (writer)", cw.peerID)
 	plog.Infof("started streaming with peer %s (writer)", cw.peerID)
@@ -214,7 +215,7 @@ func (cw *streamWriter) run() {
 				plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
 				plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
 			}
 			}
 			plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
 			plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
-			heartbeatc, msgc = tickc, cw.msgc
+			heartbeatc, msgc = tickc.C, cw.msgc
 		case <-cw.stopc:
 		case <-cw.stopc:
 			if cw.close() {
 			if cw.close() {
 				plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
 				plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)

+ 4 - 0
store/store_test.go

@@ -348,6 +348,7 @@ func TestStoreUpdateValueTTL(t *testing.T) {
 	var eidx uint64 = 2
 	var eidx uint64 = 2
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
 	_, err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
 	_, err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
+	assert.Nil(t, err, "")
 	e, _ := s.Get("/foo", false, false)
 	e, _ := s.Get("/foo", false, false)
 	assert.Equal(t, *e.Node.Value, "baz", "")
 	assert.Equal(t, *e.Node.Value, "baz", "")
 	assert.Equal(t, e.EtcdIndex, eidx, "")
 	assert.Equal(t, e.EtcdIndex, eidx, "")
@@ -368,6 +369,7 @@ func TestStoreUpdateDirTTL(t *testing.T) {
 	s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent})
 	e, err := s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
 	e, err := s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
+	assert.Nil(t, err, "")
 	assert.Equal(t, e.Node.Dir, true, "")
 	assert.Equal(t, e.Node.Dir, true, "")
 	assert.Equal(t, e.EtcdIndex, eidx, "")
 	assert.Equal(t, e.EtcdIndex, eidx, "")
 	e, _ = s.Get("/foo/bar", false, false)
 	e, _ = s.Get("/foo/bar", false, false)
@@ -911,6 +913,7 @@ func TestStoreRecover(t *testing.T) {
 	s.Update("/foo/x", "barbar", TTLOptionSet{ExpireTime: Permanent})
 	s.Update("/foo/x", "barbar", TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: Permanent})
 	b, err := s.Save()
 	b, err := s.Save()
+	assert.Nil(t, err, "")
 
 
 	s2 := newStore()
 	s2 := newStore()
 	s2.Recovery(b)
 	s2.Recovery(b)
@@ -940,6 +943,7 @@ func TestStoreRecoverWithExpiration(t *testing.T) {
 	s.Create("/foo/x", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo/x", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
 	s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: fc.Now().Add(5 * time.Millisecond)})
 	s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: fc.Now().Add(5 * time.Millisecond)})
 	b, err := s.Save()
 	b, err := s.Save()
+	assert.Nil(t, err, "")
 
 
 	time.Sleep(10 * time.Millisecond)
 	time.Sleep(10 * time.Millisecond)
 
 

+ 1 - 1
store/watcher_hub.go

@@ -116,7 +116,7 @@ func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeInde
 }
 }
 
 
 func (wh *watcherHub) add(e *Event) {
 func (wh *watcherHub) add(e *Event) {
-	e = wh.EventHistory.addEvent(e)
+	wh.EventHistory.addEvent(e)
 }
 }
 
 
 // notify function accepts an event and notify to the watchers.
 // notify function accepts an event and notify to the watchers.

+ 22 - 3
test

@@ -35,7 +35,7 @@ TESTABLE_AND_FORMATTABLE=`echo "$TEST_PKGS" | egrep -v "$INTEGRATION_PKGS"`
 
 
 # TODO: 'client' pkg fails with gosimple from generated files
 # TODO: 'client' pkg fails with gosimple from generated files
 # TODO: 'rafttest' is failing with unused
 # TODO: 'rafttest' is failing with unused
-GOSIMPLE_UNUSED_PATHS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'`
+STATIC_ANALYSIS_PATHS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'`
 
 
 if [ -z "$GOARCH" ]; then
 if [ -z "$GOARCH" ]; then
 	GOARCH=$(go env GOARCH);
 	GOARCH=$(go env GOARCH);
@@ -232,7 +232,7 @@ function fmt_pass {
 
 
 	if which gosimple >/dev/null; then
 	if which gosimple >/dev/null; then
 		echo "Checking gosimple..."
 		echo "Checking gosimple..."
-		simplResult=`gosimple ${GOSIMPLE_UNUSED_PATHS} 2>&1 || true`
+		simplResult=`gosimple ${STATIC_ANALYSIS_PATHS} 2>&1 || true`
 		if [ -n "${simplResult}" ]; then
 		if [ -n "${simplResult}" ]; then
 			echo -e "gosimple checking failed:\n${simplResult}"
 			echo -e "gosimple checking failed:\n${simplResult}"
 			exit 255
 			exit 255
@@ -243,7 +243,7 @@ function fmt_pass {
 
 
 	if which unused >/dev/null; then
 	if which unused >/dev/null; then
 		echo "Checking unused..."
 		echo "Checking unused..."
-		unusedResult=`unused ${GOSIMPLE_UNUSED_PATHS} 2>&1 || true`
+		unusedResult=`unused ${STATIC_ANALYSIS_PATHS} 2>&1 || true`
 		if [ -n "${unusedResult}" ]; then
 		if [ -n "${unusedResult}" ]; then
 			echo -e "unused checking failed:\n${unusedResult}"
 			echo -e "unused checking failed:\n${unusedResult}"
 			exit 255
 			exit 255
@@ -252,6 +252,25 @@ function fmt_pass {
 		echo "Skipping unused..."
 		echo "Skipping unused..."
 	fi
 	fi
 
 
+	if which staticcheck >/dev/null; then
+		echo "Checking staticcheck..."
+		staticcheckResult=`staticcheck ${STATIC_ANALYSIS_PATHS} 2>&1 || true`
+		# TODO: resolve these after go1.8 migration
+		# See https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck
+		STATIC_CHECK_MASK="SA(1016|1019|2002)"
+		if [ -n "${staticcheckResult}" ]; then
+			# NOTE(review): 'continue' is only valid inside a loop, so an empty
+			# result must be handled with a plain guard; egrep reads the result
+			# from stdin rather than treating the text as a filename.
+			if echo "${staticcheckResult}" | egrep -qv "$STATIC_CHECK_MASK"; then
+				echo -e "staticcheck checking failed:\n${staticcheckResult}"
+				exit 255
+			else
+				echo -e "staticcheck warning:\n${staticcheckResult}"
+			fi
+		fi
+	else
+		echo "Skipping staticcheck..."
+	fi
+
 	echo "Checking for license header..."
 	echo "Checking for license header..."
 	licRes=$(for file in $(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*'); do
 	licRes=$(for file in $(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*'); do
 			head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e "  ${file}"
 			head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e "  ${file}"

+ 5 - 1
wal/repair_test.go

@@ -49,7 +49,11 @@ func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expect
 	defer os.RemoveAll(p)
 	defer os.RemoveAll(p)
 	// create WAL
 	// create WAL
 	w, err := Create(p, nil)
 	w, err := Create(p, nil)
-	defer w.Close()
+	defer func() {
+		if err = w.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}

+ 10 - 2
wal/wal_test.go

@@ -546,7 +546,11 @@ func TestReleaseLockTo(t *testing.T) {
 	defer os.RemoveAll(p)
 	defer os.RemoveAll(p)
 	// create WAL
 	// create WAL
 	w, err := Create(p, nil)
 	w, err := Create(p, nil)
-	defer w.Close()
+	defer func() {
+		if err = w.Close(); err != nil {
+			t.Fatal(err)
+		}
+	}()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -712,7 +716,11 @@ func TestOpenOnTornWrite(t *testing.T) {
 	}
 	}
 	defer os.RemoveAll(p)
 	defer os.RemoveAll(p)
 	w, err := Create(p, nil)
 	w, err := Create(p, nil)
-	defer w.Close()
+	defer func() {
+		if err = w.Close(); err != nil && err != os.ErrInvalid {
+			t.Fatal(err)
+		}
+	}()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}