// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tester

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/coreos/etcd/pkg/debugutil"
	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/zap"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	yaml "gopkg.in/yaml.v2"
)

// Cluster defines tester cluster.
type Cluster struct {
	lg *zap.Logger

	agentConns    []*grpc.ClientConn
	agentClients  []rpcpb.TransportClient
	agentStreams  []rpcpb.Transport_TransportClient
	agentRequests []*rpcpb.Request

	testerHTTPServer *http.Server

	Members []*rpcpb.Member `yaml:"agent-configs"`
	Tester  *rpcpb.Tester   `yaml:"tester-config"`

	failures    []Failure
	rateLimiter *rate.Limiter
	stresser    Stresser
	checker     Checker

	currentRevision int64
	rd              int
	cs              int
}
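
// A minimal configuration sketch for the two YAML sections above. Only
// "agent-configs" and "tester-config" come from the struct tags; the nested
// keys shown (agent-addr, base-dir, etcd-log-path, tester-addr, stress-qps,
// failure-cases) are assumed from rpcpb.Member/rpcpb.Tester and the values
// are illustrative only:
//
//	agent-configs:
//	  - agent-addr: 127.0.0.1:19027
//	    base-dir: /tmp/etcd-functional-1
//	    etcd-log-path: /tmp/etcd-functional-1/etcd.log
//	tester-config:
//	  tester-addr: 127.0.0.1:9028
//	  stress-qps: 1000
//	  failure-cases:
//	    - KILL_ONE_FOLLOWER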

// newCluster reads and validates a tester configuration file.
func newCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
	bts, err := ioutil.ReadFile(fpath)
	if err != nil {
		return nil, err
	}
	lg.Info("opened configuration file", zap.String("path", fpath))

	clus := &Cluster{lg: lg}
	if err = yaml.Unmarshal(bts, clus); err != nil {
		return nil, err
	}

	for i := range clus.Members {
		if clus.Members[i].BaseDir == "" {
			return nil, fmt.Errorf("Members[%d].BaseDir cannot be empty (got %q)", i, clus.Members[i].BaseDir)
		}
		if clus.Members[i].EtcdLogPath == "" {
			return nil, fmt.Errorf("Members[%d].EtcdLogPath cannot be empty (got %q)", i, clus.Members[i].EtcdLogPath)
		}
		if clus.Members[i].Etcd.Name == "" {
			return nil, fmt.Errorf("'--name' cannot be empty (got %+v)", clus.Members[i])
		}
		if clus.Members[i].Etcd.DataDir == "" {
			return nil, fmt.Errorf("'--data-dir' cannot be empty (got %+v)", clus.Members[i])
		}
		if clus.Members[i].Etcd.SnapshotCount == 0 {
			return nil, fmt.Errorf("'--snapshot-count' cannot be 0 (got %+v)", clus.Members[i].Etcd.SnapshotCount)
		}
		if clus.Members[i].Etcd.WALDir == "" {
			clus.Members[i].Etcd.WALDir = filepath.Join(clus.Members[i].Etcd.DataDir, "member", "wal")
		}

		port := ""
		listenClientPorts := make([]string, len(clus.Members[i].Etcd.ListenClientURLs))
		for j, u := range clus.Members[i].Etcd.ListenClientURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--listen-client-urls' has invalid URL %q", u)
			}
			listenClientPorts[j], err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--listen-client-urls' has no port %q", u)
			}
		}
		for j, u := range clus.Members[i].Etcd.AdvertiseClientURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--advertise-client-urls' has invalid URL %q", u)
			}
			port, err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--advertise-client-urls' has no port %q", u)
			}
			if clus.Members[i].EtcdClientProxy && listenClientPorts[j] == port {
				return nil, fmt.Errorf("clus.Members[%d] requires client port proxy, but advertise port %q conflicts with listener port %q", i, port, listenClientPorts[j])
			}
		}
		listenPeerPorts := make([]string, len(clus.Members[i].Etcd.ListenPeerURLs))
		for j, u := range clus.Members[i].Etcd.ListenPeerURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--listen-peer-urls' has invalid URL %q", u)
			}
			listenPeerPorts[j], err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--listen-peer-urls' has no port %q", u)
			}
		}
		for j, u := range clus.Members[i].Etcd.InitialAdvertisePeerURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--initial-advertise-peer-urls' has invalid URL %q", u)
			}
			port, err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--initial-advertise-peer-urls' has no port %q", u)
			}
			if clus.Members[i].EtcdPeerProxy && listenPeerPorts[j] == port {
				return nil, fmt.Errorf("clus.Members[%d] requires peer port proxy, but advertise port %q conflicts with listener port %q", i, port, listenPeerPorts[j])
			}
		}

		if !strings.HasPrefix(clus.Members[i].EtcdLogPath, clus.Members[i].BaseDir) {
			return nil, fmt.Errorf("EtcdLogPath must be prefixed with BaseDir (got %q)", clus.Members[i].EtcdLogPath)
		}
		if !strings.HasPrefix(clus.Members[i].Etcd.DataDir, clus.Members[i].BaseDir) {
			return nil, fmt.Errorf("Etcd.DataDir must be prefixed with BaseDir (got %q)", clus.Members[i].Etcd.DataDir)
		}
		// TODO: support separate WALDir that can be handled via failure-archive
		if !strings.HasPrefix(clus.Members[i].Etcd.WALDir, clus.Members[i].BaseDir) {
			return nil, fmt.Errorf("Etcd.WALDir must be prefixed with BaseDir (got %q)", clus.Members[i].Etcd.WALDir)
		}
	}

	if len(clus.Tester.FailureCases) == 0 {
		return nil, errors.New("FailureCases not found")
	}
	for _, v := range clus.Tester.FailureCases {
		if _, ok := rpcpb.FailureCase_value[v]; !ok {
			return nil, fmt.Errorf("%q is not defined in 'rpcpb.FailureCase_value'", v)
		}
	}
	for _, v := range clus.Tester.StressTypes {
		if _, ok := rpcpb.StressType_value[v]; !ok {
			return nil, fmt.Errorf("StressType is unknown; got %q", v)
		}
	}
	if clus.Tester.StressKeySuffixRangeTxn > 100 {
		return nil, fmt.Errorf("StressKeySuffixRangeTxn maximum value is 100, got %v", clus.Tester.StressKeySuffixRangeTxn)
	}
	if clus.Tester.StressKeyTxnOps > 64 {
		return nil, fmt.Errorf("StressKeyTxnOps maximum value is 64, got %v", clus.Tester.StressKeyTxnOps)
	}

	return clus, nil
}

// TODO: status handler

var dialOpts = []grpc.DialOption{
	grpc.WithInsecure(),
	grpc.WithTimeout(5 * time.Second),
	grpc.WithBlock(),
}
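
// Note: grpc.WithBlock makes grpc.Dial wait until the underlying connection
// to the agent is actually established (bounded by grpc.WithTimeout to five
// seconds), so an unreachable agent fails fast at startup rather than on the
// first RPC.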

// NewCluster creates a tester cluster client from a tester configuration file.
func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
	clus, err := newCluster(lg, fpath)
	if err != nil {
		return nil, err
	}

	clus.agentConns = make([]*grpc.ClientConn, len(clus.Members))
	clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
	clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
	clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
	clus.failures = make([]Failure, 0)

	for i, ap := range clus.Members {
		var err error
		clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
		if err != nil {
			return nil, err
		}
		clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
		clus.lg.Info("connected", zap.String("agent-address", ap.AgentAddr))

		clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
		if err != nil {
			return nil, err
		}
		clus.lg.Info("created stream", zap.String("agent-address", ap.AgentAddr))
	}

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	if clus.Tester.EnablePprof {
		for p, h := range debugutil.PProfHandlers() {
			mux.Handle(p, h)
		}
	}
	clus.testerHTTPServer = &http.Server{
		Addr:    clus.Tester.TesterAddr,
		Handler: mux,
	}
	go clus.serveTesterServer()

	clus.updateFailures()

	clus.rateLimiter = rate.NewLimiter(
		rate.Limit(int(clus.Tester.StressQPS)),
		int(clus.Tester.StressQPS),
	)
	clus.updateStresserChecker()

	return clus, nil
}
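
// A typical lifecycle, as a minimal sketch (error handling elided; the
// config path is hypothetical, and the surrounding tester binary is assumed
// to drive the failure rounds between Bootstrap and DestroyEtcdAgents):
//
//	clus, err := NewCluster(lg, "tester.yaml")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer clus.DestroyEtcdAgents()
//	if err = clus.Bootstrap(); err != nil {
//		log.Fatal(err)
//	}
//	if err = clus.WaitHealth(); err != nil {
//		log.Fatal(err)
//	}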

func (clus *Cluster) serveTesterServer() {
	clus.lg.Info(
		"started tester HTTP server",
		zap.String("tester-address", clus.Tester.TesterAddr),
	)
	err := clus.testerHTTPServer.ListenAndServe()
	clus.lg.Info(
		"tester HTTP server returned",
		zap.String("tester-address", clus.Tester.TesterAddr),
		zap.Error(err),
	)
	if err != nil && err != http.ErrServerClosed {
		clus.lg.Fatal("tester HTTP errored", zap.Error(err))
	}
}

func (clus *Cluster) updateFailures() {
	for _, cs := range clus.Tester.FailureCases {
		switch cs {
		case "KILL_ONE_FOLLOWER":
			clus.failures = append(clus.failures, newFailureKillOneFollower())
		case "KILL_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.failures = append(clus.failures, newFailureKillOneFollowerUntilTriggerSnapshot())
		case "KILL_LEADER":
			clus.failures = append(clus.failures, newFailureKillLeader())
		case "KILL_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.failures = append(clus.failures, newFailureKillLeaderUntilTriggerSnapshot())
		case "KILL_QUORUM":
			clus.failures = append(clus.failures, newFailureKillQuorum())
		case "KILL_ALL":
			clus.failures = append(clus.failures, newFailureKillAll())
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOneFollower(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOneFollowerUntilTriggerSnapshot())
		case "BLACKHOLE_PEER_PORT_TX_RX_LEADER":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxLeader(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxLeaderUntilTriggerSnapshot())
		case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxQuorum(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxAll(clus))
		case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollower(clus))
		case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneFollowerUntilTriggerSnapshot())
		case "DELAY_PEER_PORT_TX_RX_LEADER":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeader(clus))
		case "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeaderUntilTriggerSnapshot())
		case "DELAY_PEER_PORT_TX_RX_QUORUM":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxQuorum(clus))
		case "DELAY_PEER_PORT_TX_RX_ALL":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxAll(clus))
		case "NO_FAIL_WITH_STRESS":
			clus.failures = append(clus.failures, newFailureNoFailWithStress(clus))
		case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":
			clus.failures = append(clus.failures, newFailureNoFailWithNoStressForLiveness(clus))
		case "EXTERNAL":
			clus.failures = append(clus.failures, newFailureExternal(clus.Tester.ExternalExecPath))
		case "FAILPOINTS":
			fpFailures, fperr := failpointFailures(clus)
			if len(fpFailures) == 0 {
				clus.lg.Info("no failpoints found!", zap.Error(fperr))
			}
			clus.failures = append(clus.failures, fpFailures...)
		}
	}
}

func (clus *Cluster) failureStrings() (fs []string) {
	fs = make([]string, len(clus.failures))
	for i := range clus.failures {
		fs[i] = clus.failures[i].Desc()
	}
	return fs
}

func (clus *Cluster) shuffleFailures() {
	rand.Seed(time.Now().UnixNano())
	offset := rand.Intn(1000)
	n := len(clus.failures)
	cp := coprime(n)

	fs := make([]Failure, n)
	for i := 0; i < n; i++ {
		fs[i] = clus.failures[(cp*i+offset)%n]
	}
	clus.failures = fs
	clus.lg.Info("shuffled test failure cases", zap.Int("total", n))
}

/*
Two integers x and y are coprime when GCD(x, y) == 1. Striding through the
failure slice by a coprime of n yields a permutation:

	x1 = (coprime of n * idx1 + offset) % n
	x2 = (coprime of n * idx2 + offset) % n
	(x2 - x1) % n = (coprime of n * (idx2 - idx1)) % n

Because the stride shares no factor with n, the right-hand side is non-zero
whenever idx2 - idx1 == 1, so consecutive x's are guaranteed to be distinct.
*/
func coprime(n int) int {
	coprime := 1
	for i := n / 2; i < n; i++ {
		if gcd(i, n) == 1 {
			coprime = i
			break
		}
	}
	return coprime
}
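
// As a worked example: coprime(7) returns 3 (the first i >= 7/2 with
// gcd(i, 7) == 1). With, say, an offset of 2, shuffleFailures then visits
// indexes (3*i + 2) % 7 = 2, 5, 1, 4, 0, 3, 6 for i = 0..6: a full
// permutation, so every failure case still runs exactly once per round.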

func gcd(x, y int) int {
	if y == 0 {
		return x
	}
	return gcd(y, x%y)
}

func (clus *Cluster) updateStresserChecker() {
	cs := &compositeStresser{}
	for _, m := range clus.Members {
		cs.stressers = append(cs.stressers, newStresser(clus, m))
	}
	clus.stresser = cs

	if clus.Tester.ConsistencyCheck {
		clus.checker = newHashChecker(clus.lg, hashAndRevGetter(clus))
		if schk := cs.Checker(); schk != nil {
			clus.checker = newCompositeChecker([]Checker{clus.checker, schk})
		}
	} else {
		clus.checker = newNoChecker()
	}

	clus.lg.Info(
		"updated stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
}

func (clus *Cluster) checkConsistency() (err error) {
	defer func() {
		if err != nil {
			return
		}
		if err = clus.updateRevision(); err != nil {
			clus.lg.Warn(
				"updateRevision failed",
				zap.Error(err),
			)
			return
		}
	}()

	if err = clus.checker.Check(); err != nil {
		clus.lg.Warn(
			"consistency check FAIL",
			zap.Int("round", clus.rd),
			zap.Int("case", clus.cs),
			zap.Error(err),
		)
		return err
	}
	clus.lg.Info(
		"consistency check ALL PASS",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.String("desc", clus.failures[clus.cs].Desc()),
	)
	return err
}

// Bootstrap bootstraps the etcd cluster the very first time.
// After this, just continue to call kill/restart.
func (clus *Cluster) Bootstrap() error {
	// this is the only time a request is created from scratch
	return clus.broadcastOperation(rpcpb.Operation_InitialStartEtcd)
}

// FailArchive sends "FailArchive" operation.
func (clus *Cluster) FailArchive() error {
	return clus.broadcastOperation(rpcpb.Operation_FailArchive)
}

// Restart sends "Restart" operation.
func (clus *Cluster) Restart() error {
	return clus.broadcastOperation(rpcpb.Operation_RestartEtcd)
}

func (clus *Cluster) broadcastOperation(op rpcpb.Operation) error {
	for i := range clus.agentStreams {
		err := clus.sendOperation(i, op)
		if err != nil {
			if op == rpcpb.Operation_DestroyEtcdAgent &&
				strings.Contains(err.Error(), "rpc error: code = Unavailable desc = transport is closing") {
				// agent server has already closed;
				// so this error is expected
				clus.lg.Info(
					"successfully destroyed",
					zap.String("member", clus.Members[i].EtcdClientEndpoint),
				)
				continue
			}
			return err
		}
	}
	return nil
}

func (clus *Cluster) sendOperation(idx int, op rpcpb.Operation) error {
	if op == rpcpb.Operation_InitialStartEtcd {
		clus.agentRequests[idx] = &rpcpb.Request{
			Operation: op,
			Member:    clus.Members[idx],
			Tester:    clus.Tester,
		}
	} else {
		clus.agentRequests[idx].Operation = op
	}

	err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
	clus.lg.Info(
		"sent request",
		zap.String("operation", op.String()),
		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
		zap.Error(err),
	)
	if err != nil {
		return err
	}

	resp, err := clus.agentStreams[idx].Recv()
	if resp != nil {
		clus.lg.Info(
			"received response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Bool("success", resp.Success),
			zap.String("status", resp.Status),
			zap.Error(err),
		)
	} else {
		clus.lg.Info(
			"received empty response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Error(err),
		)
	}
	if err != nil {
		return err
	}

	if !resp.Success {
		err = errors.New(resp.Status)
	}
	return err
}

// DestroyEtcdAgents terminates all tester connections to agents and etcd servers.
func (clus *Cluster) DestroyEtcdAgents() {
	err := clus.broadcastOperation(rpcpb.Operation_DestroyEtcdAgent)
	if err != nil {
		clus.lg.Warn("destroying etcd/agents FAIL", zap.Error(err))
	} else {
		clus.lg.Info("destroying etcd/agents PASS")
	}

	for i, conn := range clus.agentConns {
		err := conn.Close()
		clus.lg.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
	}

	if clus.testerHTTPServer != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		err := clus.testerHTTPServer.Shutdown(ctx)
		cancel()
		clus.lg.Info("closed tester HTTP server", zap.String("tester-address", clus.Tester.TesterAddr), zap.Error(err))
	}
}

// WaitHealth ensures all members are healthy
// by writing a test key to the etcd cluster.
func (clus *Cluster) WaitHealth() error {
	var err error
	// wait up to 60s for the cluster to become healthy.
	// TODO: set it to a reasonable value. It is set that high because
	// a follower may take a long time to catch up with the leader when
	// rebooted under reasonable workload (https://github.com/coreos/etcd/issues/2698)
	for i := 0; i < 60; i++ {
		for _, m := range clus.Members {
			if err = m.WriteHealthKey(); err != nil {
				clus.lg.Warn(
					"health check FAIL",
					zap.Int("retries", i),
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Error(err),
				)
				break
			}
			clus.lg.Info(
				"health check PASS",
				zap.Int("retries", i),
				zap.String("endpoint", m.EtcdClientEndpoint),
			)
		}
		if err == nil {
			clus.lg.Info(
				"health check ALL PASS",
				zap.Int("round", clus.rd),
				zap.Int("case", clus.cs),
			)
			return nil
		}
		time.Sleep(time.Second)
	}
	return err
}

// GetLeader returns the index of the leader member,
// or an error if no leader is found.
func (clus *Cluster) GetLeader() (int, error) {
	for i, m := range clus.Members {
		isLeader, err := m.IsLeader()
		if isLeader || err != nil {
			return i, err
		}
	}
	return 0, fmt.Errorf("no leader found")
}

// maxRev returns the maximum revision found on the cluster.
func (clus *Cluster) maxRev() (rev int64, err error) {
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members))
	for i := range clus.Members {
		go func(m *rpcpb.Member) {
			mrev, merr := m.Rev(ctx)
			revc <- mrev
			errc <- merr
		}(clus.Members[i])
	}
	for i := 0; i < len(clus.Members); i++ {
		if merr := <-errc; merr != nil {
			err = merr
		}
		if mrev := <-revc; mrev > rev {
			rev = mrev
		}
	}
	return rev, err
}

func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
	revs := make(map[string]int64)
	hashes := make(map[string]int64)
	for _, m := range clus.Members {
		rev, hash, err := m.RevHash()
		if err != nil {
			return nil, nil, err
		}
		revs[m.EtcdClientEndpoint] = rev
		hashes[m.EtcdClientEndpoint] = hash
	}
	return revs, hashes, nil
}

func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range clus.Members {
		clus.lg.Info(
			"compact START",
			zap.String("endpoint", m.EtcdClientEndpoint),
			zap.Int64("compact-revision", rev),
			zap.Duration("timeout", timeout),
		)
		now := time.Now()
		cerr := m.Compact(rev, timeout)
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				// once the first member has compacted this revision, the
				// remaining members may report it as already compacted
				clus.lg.Info(
					"compact error is ignored",
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Int64("compact-revision", rev),
					zap.Error(cerr),
				)
			} else {
				clus.lg.Warn(
					"compact FAIL",
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Int64("compact-revision", rev),
					zap.Error(cerr),
				)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			clus.lg.Info(
				"compact PASS",
				zap.String("endpoint", m.EtcdClientEndpoint),
				zap.Int64("compact-revision", rev),
				zap.Duration("timeout", timeout),
				zap.Duration("took", time.Since(now)),
			)
		}
	}
	return err
}

func (clus *Cluster) checkCompact(rev int64) error {
	if rev == 0 {
		return nil
	}
	for _, m := range clus.Members {
		if err := m.CheckCompact(rev); err != nil {
			return err
		}
	}
	return nil
}

func (clus *Cluster) defrag() error {
	for _, m := range clus.Members {
		if err := m.Defrag(); err != nil {
			clus.lg.Warn(
				"defrag FAIL",
				zap.String("endpoint", m.EtcdClientEndpoint),
				zap.Error(err),
			)
			return err
		}
		clus.lg.Info(
			"defrag PASS",
			zap.String("endpoint", m.EtcdClientEndpoint),
		)
	}
	clus.lg.Info(
		"defrag ALL PASS",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	return nil
}

// GetFailureDelayDuration computes failure delay duration.
func (clus *Cluster) GetFailureDelayDuration() time.Duration {
	return time.Duration(clus.Tester.FailureDelayMs) * time.Millisecond
}

// Report reports the number of modified keys.
func (clus *Cluster) Report() int64 {
	return clus.stresser.ModifiedKeys()
}