@@ -105,12 +105,22 @@ func (s *stresser) Stress() error {
 			// as well. We want to keep stressing until the cluster elects a
 			// new leader and start processing requests again.
 			shouldContinue = true
+
+		case etcdserver.ErrTimeoutDueToLeaderFail.Error():
+			// This retries when a request is triggered at the same time as
+			// a leader failure and follower nodes receive timeout errors
+			// from losing their leader. Followers should retry to connect
+			// to the new leader.
+			shouldContinue = true
+
 		case etcdserver.ErrStopped.Error():
 			// one of the etcd nodes stopped from failure injection
 			shouldContinue = true
+
 		case transport.ErrConnClosing.Desc:
 			// server closed the transport (failure injected node)
 			shouldContinue = true
+
 		case rpctypes.ErrNotCapable.Error():
 			// capability check has not been done (in the beginning)
 			shouldContinue = true
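
For context on how these new cases fit into the stresser's retry logic, here is a minimal, self-contained sketch of the pattern the hunk extends: classify an error by its message and decide whether the stress loop should keep going. The error variables and the `shouldContinueStress` helper are illustrative stand-ins rather than etcd's actual identifiers; the real code switches on the etcdserver, transport, and rpctypes errors shown in the diff.

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the etcd/gRPC error values the real switch matches
// on (etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrStopped,
// transport.ErrConnClosing, rpctypes.ErrNotCapable). The names and messages
// here are hypothetical, not etcd's actual ones.
var (
	errTimeoutDueToLeaderFail = errors.New("request timed out, possibly due to previous leader failure")
	errStopped                = errors.New("server stopped")
	errConnClosing            = errors.New("transport is closing")
	errNotCapable             = errors.New("not capable")
)

// shouldContinueStress mirrors the shape of the switch in the hunk: errors that
// are expected under failure injection mean "keep stressing"; anything else is
// treated as fatal by the caller.
func shouldContinueStress(err error) bool {
	switch err.Error() {
	case errTimeoutDueToLeaderFail.Error():
		// Request raced with a leader failure; retry once a new leader is elected.
		return true
	case errStopped.Error():
		// One of the etcd nodes was stopped by failure injection.
		return true
	case errConnClosing.Error():
		// The server closed the transport (failure-injected node).
		return true
	case errNotCapable.Error():
		// Capability check has not completed yet (cluster start-up).
		return true
	}
	return false
}

func main() {
	for _, err := range []error{errStopped, errors.New("permission denied")} {
		fmt.Printf("%-24s continue=%v\n", err.Error(), shouldContinueStress(err))
	}
}
```

Each transient case sets the continue flag for the same reason the corresponding comment in the hunk gives; any unrecognized error falls through and stops the stresser.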