
functional/tester: rename files, add LEADER_SNAPSHOT case

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
Gyuho Lee, 7 years ago
commit 6b128bfb3b

+ 5 - 5
functional/tester/failure.go

@@ -69,11 +69,11 @@ type failureFollower struct {
 }
 
 func (f *failureFollower) updateIndex(clus *Cluster) error {
-	idx, err := clus.GetLeader()
+	lead, err := clus.GetLeader()
 	if err != nil {
 		return err
 	}
-	f.lead = idx
+	f.lead = lead
 
 	n := len(clus.Members)
 	if f.last == -1 { // first run
@@ -119,12 +119,12 @@ type failureLeader struct {
 }
 
 func (f *failureLeader) updateIndex(clus *Cluster) error {
-	idx, err := clus.GetLeader()
+	lead, err := clus.GetLeader()
 	if err != nil {
 		return err
 	}
-	f.lead = idx
-	f.last = idx
+	f.lead = lead
+	f.last = lead
 	return nil
 }
 
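The renamed variable feeds the follower case's round-robin target selection, which is not shown in this hunk. Below is a minimal sketch of that selection, assuming f.last rotates over member indexes while skipping the leader index stored in f.lead; nextFollower is a hypothetical helper, not part of this commit:

package tester

// nextFollower is a hypothetical illustration of how a follower failure
// case can advance round-robin over n member indexes while never landing
// on the current leader index.
func nextFollower(last, lead, n int) int {
	next := (last + 1) % n // advance to the next member
	if next == lead {
		next = (next + 1) % n // skip the leader; target the following member
	}
	return next
}

The visible context line `if f.last == -1 { // first run` suggests the real code additionally seeds f.last on the first round before falling into this rotation.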

+ 0 - 232
functional/tester/failure_case_sigquit_remove_quorum.go

@@ -1,232 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tester
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/coreos/etcd/clientv3"
-	"github.com/coreos/etcd/functional/rpcpb"
-
-	"go.uber.org/zap"
-)
-
-//  1. Download snapshot from node C, before destroying node A and B.
-//  2. Destroy node A and B, and make the whole cluster inoperable.
-//  3. Now node C cannot operate either.
-//  4. SIGTERM node C and remove its data directories.
-//  5. Restore a new seed member from node C's latest snapshot file.
-//  6. Add another member to establish 2-node cluster.
-//  7. Add another member to establish 3-node cluster.
-
-type fetchSnapshotAndFailureQuorum struct {
-	failureByFunc
-	injected map[int]struct{}
-}
-
-func (f *fetchSnapshotAndFailureQuorum) Inject(clus *Cluster) error {
-	// download snapshot first before destroying quorum
-	f.injected = pickQuorum(len(clus.Members))
-	for idx := range f.injected {
-		if err := f.injectMember(clus, idx); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (f *fetchSnapshotAndFailureQuorum) Recover(clus *Cluster) error {
-	for idx := range f.injected {
-		if err := f.recoverMember(clus, idx); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (f *fetchSnapshotAndFailureQuorum) Desc() string {
-	if f.desc != "" {
-		return f.desc
-	}
-	return f.failureCase.String()
-}
-
-func (f *fetchSnapshotAndFailureQuorum) FailureCase() rpcpb.FailureCase {
-	return f.failureCase
-}
-
-func inject_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster, idx1 int) error {
-	cli1, err := clus.Members[idx1].CreateEtcdClient()
-	if err != nil {
-		return err
-	}
-	defer cli1.Close()
-
-	var mresp *clientv3.MemberListResponse
-	mresp, err = cli1.MemberList(context.Background())
-	mss := []string{}
-	if err == nil && mresp != nil {
-		mss = describeMembers(mresp)
-	}
-	clus.lg.Info(
-		"member list before disastrous machine failure",
-		zap.String("request-to", clus.Members[idx1].EtcdClientEndpoint),
-		zap.Strings("members", mss),
-		zap.Error(err),
-	)
-	if err != nil {
-		return err
-	}
-
-	sresp, serr := cli1.Status(context.Background(), clus.Members[idx1].EtcdClientEndpoint)
-	if serr != nil {
-		return serr
-	}
-	id1 := sresp.Header.MemberId
-	is1 := fmt.Sprintf("%016x", id1)
-
-	clus.lg.Info(
-		"disastrous machine failure START",
-		zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
-		zap.String("target-member-id", is1),
-		zap.Error(err),
-	)
-	err = clus.sendOp(idx1, rpcpb.Operation_FETCH_SNAPSHOT)
-	clus.lg.Info(
-		"disastrous machine failure END",
-		zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
-		zap.String("target-member-id", is1),
-		zap.Error(err),
-	)
-	if err != nil {
-		return err
-	}
-
-	time.Sleep(2 * time.Second)
-
-	idx2 := (idx1 + 1) % len(clus.Members)
-	var cli2 *clientv3.Client
-	cli2, err = clus.Members[idx2].CreateEtcdClient()
-	if err != nil {
-		return err
-	}
-	defer cli2.Close()
-
-	// FIXME(bug): this may block forever during
-	// "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT"
-	// is the new leader too busy with snapshotting?
-	// is raft proposal dropped?
-	// enable client keepalive for failover?
-	clus.lg.Info(
-		"member remove after disaster START",
-		zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
-		zap.String("target-member-id", is1),
-		zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
-	)
-	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-	_, err = cli2.MemberRemove(ctx, id1)
-	cancel()
-	clus.lg.Info(
-		"member remove after disaster END",
-		zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
-		zap.String("target-member-id", is1),
-		zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
-		zap.Error(err),
-	)
-	if err != nil {
-		return err
-	}
-
-	time.Sleep(2 * time.Second)
-
-	mresp, err = cli2.MemberList(context.Background())
-	mss = []string{}
-	if err == nil && mresp != nil {
-		mss = describeMembers(mresp)
-	}
-	clus.lg.Info(
-		"member list after member remove",
-		zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
-		zap.Strings("members", mss),
-		zap.Error(err),
-	)
-	return err
-}
-
-func recover_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster, idx1 int) error {
-	idx2 := (idx1 + 1) % len(clus.Members)
-	cli2, err := clus.Members[idx2].CreateEtcdClient()
-	if err != nil {
-		return err
-	}
-	defer cli2.Close()
-
-	_, err = cli2.MemberAdd(context.Background(), clus.Members[idx1].Etcd.AdvertisePeerURLs)
-	clus.lg.Info(
-		"member add before fresh restart",
-		zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
-		zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
-		zap.Error(err),
-	)
-	if err != nil {
-		return err
-	}
-
-	time.Sleep(2 * time.Second)
-
-	clus.Members[idx1].Etcd.InitialClusterState = "existing"
-	err = clus.sendOp(idx1, rpcpb.Operation_RESTART_ETCD)
-	clus.lg.Info(
-		"fresh restart after member add",
-		zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint),
-		zap.Error(err),
-	)
-	if err != nil {
-		return err
-	}
-
-	time.Sleep(2 * time.Second)
-
-	var mresp *clientv3.MemberListResponse
-	mresp, err = cli2.MemberList(context.Background())
-	mss := []string{}
-	if err == nil && mresp != nil {
-		mss = describeMembers(mresp)
-	}
-	clus.lg.Info(
-		"member list after member add",
-		zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint),
-		zap.Strings("members", mss),
-		zap.Error(err),
-	)
-	return err
-}
-
-func new_FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Failure {
-	f := &fetchSnapshotAndFailureQuorum{
-		failureByFunc: failureByFunc{
-			failureCase:   rpcpb.FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
-			injectMember:  inject_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
-			recoverMember: recover_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
-		},
-		injected: make(map[int]struct{}),
-	}
-	return &failureDelay{
-		Failure:       f,
-		delayDuration: clus.GetFailureDelayDuration(),
-	}
-}

+ 0 - 0
functional/tester/failure_case_delay.go → functional/tester/failure_delay.go


+ 0 - 0
functional/tester/failure_case_external.go → functional/tester/failure_external.go


+ 0 - 0
functional/tester/failure_case_failpoints.go → functional/tester/failure_failpoints.go


+ 0 - 0
functional/tester/failure_case_network_blackhole.go → functional/tester/failure_network_blackhole.go


+ 0 - 0
functional/tester/failure_case_network_delay.go → functional/tester/failure_network_delay.go


+ 0 - 0
functional/tester/failure_case_no_fail.go → functional/tester/failure_no_fail.go


+ 0 - 0
functional/tester/failure_case_sigquit_remove.go → functional/tester/failure_sigquit_remove.go


+ 147 - 0
functional/tester/failure_sigquit_remove_quorum.go

@@ -0,0 +1,147 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tester
+
+import (
+	"context"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/functional/rpcpb"
+
+	"go.uber.org/zap"
+)
+
+//  1. Assume node C is the current leader with most up-to-date data.
+//  2. Download snapshot from node C, before destroying node A and B.
+//  3. Destroy node A and B, and make the whole cluster inoperable.
+//  4. Now node C cannot operate either.
+//  5. SIGTERM node C and remove its data directories.
+//  6. Restore a new seed member from node C's latest snapshot file.
+//  7. Add another member to establish 2-node cluster.
+//  8. Add another member to establish 3-node cluster.
+
+type fetchSnapshotAndFailureQuorum struct {
+	desc        string
+	failureCase rpcpb.FailureCase
+	injected    map[int]struct{}
+	snapshotted int
+}
+
+func (f *fetchSnapshotAndFailureQuorum) Inject(clus *Cluster) error {
+	//  1. Assume node C is the current leader with most up-to-date data.
+	lead, err := clus.GetLeader()
+	if err != nil {
+		return err
+	}
+	f.snapshotted = lead
+
+	//  2. Download snapshot from node C, before destroying node A and B.
+	clus.lg.Info(
+		"install snapshot on leader node START",
+		zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
+		zap.Error(err),
+	)
+	// fetch the snapshot; bail out if the agent returns no response or errors
+	var resp *rpcpb.Response
+	resp, err = clus.sendOpWithResp(lead, rpcpb.Operation_FETCH_SNAPSHOT)
+	if resp == nil || err != nil {
+		clus.lg.Info(
+			"install snapshot on leader node END",
+			zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
+			zap.Error(err),
+		)
+		return err
+	}
+	clus.lg.Info(
+		"install snapshot on leader node END",
+		zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
+		zap.String("member-name", resp.SnapshotInfo.MemberName),
+		zap.Strings("member-client-urls", resp.SnapshotInfo.MemberClientURLs),
+		zap.String("snapshot-path", resp.SnapshotInfo.SnapshotPath),
+		zap.String("snapshot-file-size", resp.SnapshotInfo.SnapshotFileSize),
+		zap.String("snapshot-total-size", resp.SnapshotInfo.SnapshotTotalSize),
+		zap.Int64("snapshot-total-key", resp.SnapshotInfo.SnapshotTotalKey),
+		zap.Int64("snapshot-hash", resp.SnapshotInfo.SnapshotHash),
+		zap.Int64("snapshot-revision", resp.SnapshotInfo.SnapshotRevision),
+		zap.String("took", resp.SnapshotInfo.Took),
+		zap.Error(err),
+	)
+	if err != nil {
+		return err
+	}
+
+	cli, err := clus.Members[lead].CreateEtcdClient()
+	if err != nil {
+		return err
+	}
+	defer cli.Close()
+	var mresp *clientv3.MemberListResponse
+	mresp, err = cli.MemberList(context.Background())
+	mss := []string{}
+	if err == nil && mresp != nil {
+		mss = describeMembers(mresp)
+	}
+	clus.lg.Info(
+		"member list before disastrous machine failure",
+		zap.String("request-to", clus.Members[lead].EtcdClientEndpoint),
+		zap.Strings("members", mss),
+		zap.Error(err),
+	)
+	if err != nil {
+		return err
+	}
+
+	//  3. Destroy node A and B, and make the whole cluster inoperable.
+	for {
+		f.injected = pickQuorum(len(clus.Members))
+		if _, ok := f.injected[lead]; !ok {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (f *fetchSnapshotAndFailureQuorum) Recover(clus *Cluster) error {
+	// for idx := range f.injected {
+	// 	if err := f.recoverMember(clus, idx); err != nil {
+	// 		return err
+	// 	}
+	// }
+	return nil
+}
+
+func (f *fetchSnapshotAndFailureQuorum) Desc() string {
+	if f.desc != "" {
+		return f.desc
+	}
+	return f.failureCase.String()
+}
+
+func (f *fetchSnapshotAndFailureQuorum) FailureCase() rpcpb.FailureCase {
+	return f.failureCase
+}
+
+func new_FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Failure {
+	f := &fetchSnapshotAndFailureQuorum{
+		failureCase: rpcpb.FailureCase_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
+		injected:    make(map[int]struct{}),
+		snapshotted: -1,
+	}
+	return &failureDelay{
+		Failure:       f,
+		delayDuration: clus.GetFailureDelayDuration(),
+	}
+}
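Inject above re-rolls pickQuorum until the leader falls outside the picked set, so the member whose snapshot was just fetched is not among those destroyed first. pickQuorum itself is defined elsewhere in the tester, not in this diff; a minimal sketch, assuming it returns a random set of (n/2)+1 distinct member indexes (enough simultaneous failures to break quorum):

package tester

import "math/rand"

// pickQuorum sketches the helper this file calls but does not define here:
// randomly choose (n/2)+1 distinct member indexes out of n, i.e. a
// majority whose simultaneous failure makes the cluster inoperable.
func pickQuorum(n int) map[int]struct{} {
	picked := make(map[int]struct{})
	for len(picked) < n/2+1 {
		picked[rand.Intn(n)] = struct{}{} // duplicates collapse in the map
	}
	return picked
}

Under this assumption the re-roll loop in Inject terminates whenever some majority excludes the leader, which holds for any cluster of three or more members.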

+ 0 - 0
functional/tester/failure_case_sigterm.go → functional/tester/failure_sigterm.go