cluster.go

// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tester

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/url"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/functional/rpcpb"
	"github.com/coreos/etcd/pkg/debugutil"
	"github.com/coreos/etcd/pkg/fileutil"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/zap"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"
)
// Cluster defines a tester cluster.
type Cluster struct {
	lg *zap.Logger

	agentConns    []*grpc.ClientConn
	agentClients  []rpcpb.TransportClient
	agentStreams  []rpcpb.Transport_TransportClient
	agentRequests []*rpcpb.Request

	testerHTTPServer *http.Server

	Members []*rpcpb.Member `yaml:"agent-configs"`
	Tester  *rpcpb.Tester   `yaml:"tester-config"`

	cases []Case

	rateLimiter *rate.Limiter
	stresser    Stresser
	checkers    []Checker

	currentRevision int64
	rd              int
	cs              int
}
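// The yaml tags above ("agent-configs", "tester-config") define the top-level
// layout of the tester configuration file read by NewCluster. A minimal sketch
// of that shape follows; the inner keys and values are illustrative assumptions,
// not the exact rpcpb field tags:
//
//	agent-configs:
//	  - agent-addr: "127.0.0.1:19027"          # hypothetical value
//	    etcd-client-endpoint: "127.0.0.1:1379" # hypothetical value
//	tester-config:
//	  addr: "127.0.0.1:9028"                   # hypothetical value
//	  stress-qps: 1000                         # hypothetical value
//	  cases: ["SIGTERM_ONE_FOLLOWER"]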
var dialOpts = []grpc.DialOption{
	grpc.WithInsecure(),
	grpc.WithTimeout(5 * time.Second),
	grpc.WithBlock(),
}
// NewCluster creates a tester cluster from a tester configuration file,
// dialing a gRPC connection and transport stream to each agent.
func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
	clus, err := read(lg, fpath)
	if err != nil {
		return nil, err
	}

	clus.agentConns = make([]*grpc.ClientConn, len(clus.Members))
	clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
	clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
	clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
	clus.cases = make([]Case, 0)

	for i, ap := range clus.Members {
		var err error
		clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
		if err != nil {
			return nil, err
		}
		clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
		clus.lg.Info("connected", zap.String("agent-address", ap.AgentAddr))

		clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
		if err != nil {
			return nil, err
		}
		clus.lg.Info("created stream", zap.String("agent-address", ap.AgentAddr))
	}

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	if clus.Tester.EnablePprof {
		for p, h := range debugutil.PProfHandlers() {
			mux.Handle(p, h)
		}
	}
	clus.testerHTTPServer = &http.Server{
		Addr:    clus.Tester.Addr,
		Handler: mux,
	}
	go clus.serveTesterServer()

	clus.updateCases()

	clus.rateLimiter = rate.NewLimiter(
		rate.Limit(int(clus.Tester.StressQPS)),
		int(clus.Tester.StressQPS),
	)

	clus.setStresserChecker()

	return clus, nil
}
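// A minimal usage sketch for NewCluster, assuming a zap logger and a tester
// configuration file path (the "functional.yaml" path below is hypothetical);
// the Send_* calls are the exported entry points defined later in this file:
//
//	lg, _ := zap.NewProduction()
//	clus, err := NewCluster(lg, "functional.yaml")
//	if err != nil {
//		lg.Fatal("failed to create tester cluster", zap.Error(err))
//	}
//	defer clus.Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT()
//	if err = clus.Send_INITIAL_START_ETCD(); err != nil {
//		lg.Fatal("failed to bootstrap etcd cluster", zap.Error(err))
//	}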
func (clus *Cluster) serveTesterServer() {
	clus.lg.Info(
		"started tester HTTP server",
		zap.String("tester-address", clus.Tester.Addr),
	)
	err := clus.testerHTTPServer.ListenAndServe()
	clus.lg.Info(
		"tester HTTP server returned",
		zap.String("tester-address", clus.Tester.Addr),
		zap.Error(err),
	)
	if err != nil && err != http.ErrServerClosed {
		clus.lg.Fatal("tester HTTP errored", zap.Error(err))
	}
}
func (clus *Cluster) updateCases() {
	for _, cs := range clus.Tester.Cases {
		switch cs {
		case "SIGTERM_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_ONE_FOLLOWER(clus))
		case "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGTERM_LEADER":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_LEADER(clus))
		case "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGTERM_QUORUM":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_QUORUM(clus))
		case "SIGTERM_ALL":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_ALL(clus))
		case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus))
		case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGQUIT_AND_REMOVE_LEADER":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_LEADER(clus))
		case "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
  161. case "BLACKHOLE_PEER_PORT_TX_RX_LEADER":
  162. clus.cases = append(clus.cases,
  163. new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus))
  164. case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
  165. clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
  167. case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM":
  168. clus.cases = append(clus.cases,
  169. new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus))
  170. case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
  171. clus.cases = append(clus.cases,
  172. new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus))
  173. case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
  174. clus.cases = append(clus.cases,
  175. new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, false))
  176. case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
  177. clus.cases = append(clus.cases,
  178. new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, true))
  179. case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
  180. clus.cases = append(clus.cases,
  181. new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
  182. case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
  183. clus.cases = append(clus.cases,
  184. new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
  185. case "DELAY_PEER_PORT_TX_RX_LEADER":
  186. clus.cases = append(clus.cases,
  187. new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, false))
  188. case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER":
  189. clus.cases = append(clus.cases,
  190. new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, true))
  191. case "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
  192. clus.cases = append(clus.cases,
  193. new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
  194. case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
  195. clus.cases = append(clus.cases,
  196. new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
  197. case "DELAY_PEER_PORT_TX_RX_QUORUM":
  198. clus.cases = append(clus.cases,
  199. new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, false))
  200. case "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM":
  201. clus.cases = append(clus.cases,
  202. new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, true))
  203. case "DELAY_PEER_PORT_TX_RX_ALL":
  204. clus.cases = append(clus.cases,
  205. new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, false))
  206. case "RANDOM_DELAY_PEER_PORT_TX_RX_ALL":
  207. clus.cases = append(clus.cases,
  208. new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, true))
  209. case "NO_FAIL_WITH_STRESS":
  210. clus.cases = append(clus.cases,
  211. new_Case_NO_FAIL_WITH_STRESS(clus))
  212. case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":
  213. clus.cases = append(clus.cases,
  214. new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus))
  215. case "EXTERNAL":
  216. clus.cases = append(clus.cases,
  217. new_Case_EXTERNAL(clus.Tester.ExternalExecPath))
  218. case "FAILPOINTS":
  219. fpFailures, fperr := failpointFailures(clus)
  220. if len(fpFailures) == 0 {
  221. clus.lg.Info("no failpoints found!", zap.Error(fperr))
  222. }
  223. clus.cases = append(clus.cases,
  224. fpFailures...)
  225. }
  226. }
  227. }
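// The case names matched above come straight from the tester configuration.
// A hedged sketch of such a list (the "cases" key is assumed from
// Tester.Cases; any subset of the strings in the switch above works the same way):
//
//	cases:
//	  - SIGTERM_ONE_FOLLOWER
//	  - SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT
//	  - BLACKHOLE_PEER_PORT_TX_RX_QUORUM
//	  - NO_FAIL_WITH_STRESS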
func (clus *Cluster) failureStrings() (fs []string) {
	fs = make([]string, len(clus.cases))
	for i := range clus.cases {
		fs[i] = clus.cases[i].Desc()
	}
	return fs
}
// UpdateDelayLatencyMs updates the delay latency with a random value
// within the election timeout.
func (clus *Cluster) UpdateDelayLatencyMs() {
	rand.Seed(time.Now().UnixNano())
	clus.Tester.UpdatedDelayLatencyMs = uint32(rand.Int63n(clus.Members[0].Etcd.ElectionTimeoutMs))

	minLatRv := clus.Tester.DelayLatencyMsRv + clus.Tester.DelayLatencyMsRv/5
	if clus.Tester.UpdatedDelayLatencyMs <= minLatRv {
		clus.Tester.UpdatedDelayLatencyMs += minLatRv
	}
}
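// A worked example of the adjustment above, with hypothetical numbers:
// with ElectionTimeoutMs = 1000 and DelayLatencyMsRv = 500, minLatRv is
// 500 + 500/5 = 600. A random draw of 200 is at or below 600, so the updated
// latency becomes 200 + 600 = 800 ms; a draw of 900 is kept as-is.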
func (clus *Cluster) setStresserChecker() {
	css := &compositeStresser{}
	lss := []*leaseStresser{}
	rss := []*runnerStresser{}
	for _, m := range clus.Members {
		sss := newStresser(clus, m)
		css.stressers = append(css.stressers, &compositeStresser{sss})
		for _, s := range sss {
			if v, ok := s.(*leaseStresser); ok {
				lss = append(lss, v)
				clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint))
			}
			if v, ok := s.(*runnerStresser); ok {
				rss = append(rss, v)
  259. clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint))
			}
		}
	}
	clus.stresser = css

	for _, cs := range clus.Tester.Checkers {
		switch cs {
		case "KV_HASH":
			clus.checkers = append(clus.checkers, newKVHashChecker(clus.lg, hashRevGetter(clus)))

		case "LEASE_EXPIRE":
			for _, ls := range lss {
				clus.checkers = append(clus.checkers, newLeaseExpireChecker(ls))
			}

		case "RUNNER":
			for _, rs := range rss {
				clus.checkers = append(clus.checkers, newRunnerChecker(rs.errc))
			}

		case "NO_CHECK":
			clus.checkers = append(clus.checkers, newNoChecker())
		}
	}

	clus.lg.Info("updated stressers")
}
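// The checker names above also come from the tester configuration. A hedged
// sketch of such a list (the "checkers" key is assumed from Tester.Checkers):
//
//	checkers:
//	  - KV_HASH
//	  - LEASE_EXPIRE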
func (clus *Cluster) runCheckers() (err error) {
	defer func() {
		if err != nil {
			return
		}
		if err = clus.updateRevision(); err != nil {
			clus.lg.Warn(
				"updateRevision failed",
				zap.Error(err),
			)
			return
		}
	}()

	for _, chk := range clus.checkers {
		if err = chk.Check(); err != nil {
			clus.lg.Warn(
				"consistency check FAIL",
				zap.String("checker", chk.Type().String()),
				zap.Int("round", clus.rd),
				zap.Int("case", clus.cs),
				zap.Error(err),
			)
			return err
		}
	}
	clus.lg.Info(
		"consistency check ALL PASS",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.String("desc", clus.cases[clus.cs].Desc()),
	)
	return err
}
// Send_INITIAL_START_ETCD bootstraps the etcd cluster for the very first time.
// After this, just continue to call kill/restart.
func (clus *Cluster) Send_INITIAL_START_ETCD() error {
	// this is the only time a request is created from scratch
	return clus.broadcast(rpcpb.Operation_INITIAL_START_ETCD)
}
// send_SIGQUIT_ETCD_AND_ARCHIVE_DATA sends the "SIGQUIT_ETCD_AND_ARCHIVE_DATA" operation.
func (clus *Cluster) send_SIGQUIT_ETCD_AND_ARCHIVE_DATA() error {
	return clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA)
}

// send_RESTART_ETCD sends the restart operation.
func (clus *Cluster) send_RESTART_ETCD() error {
	return clus.broadcast(rpcpb.Operation_RESTART_ETCD)
}
func (clus *Cluster) broadcast(op rpcpb.Operation) error {
	var wg sync.WaitGroup
	wg.Add(len(clus.agentStreams))

	errc := make(chan error, len(clus.agentStreams))
	for i := range clus.agentStreams {
		go func(idx int, o rpcpb.Operation) {
			defer wg.Done()
			errc <- clus.sendOp(idx, o)
		}(i, op)
	}
	wg.Wait()
	close(errc)

	errs := []string{}
	for err := range errc {
		if err == nil {
			continue
		}

		if err != nil {
			destroyed := false
			if op == rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT {
				if err == io.EOF {
					destroyed = true
				}
				if strings.Contains(err.Error(),
					"rpc error: code = Unavailable desc = transport is closing") {
					// agent server has already closed,
					// so this error is expected
					destroyed = true
				}
				if strings.Contains(err.Error(),
					"desc = os: process already finished") {
					destroyed = true
				}
			}
			if !destroyed {
				errs = append(errs, err.Error())
			}
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.New(strings.Join(errs, ", "))
}
func (clus *Cluster) sendOp(idx int, op rpcpb.Operation) error {
	_, err := clus.sendOpWithResp(idx, op)
	return err
}
func (clus *Cluster) sendOpWithResp(idx int, op rpcpb.Operation) (*rpcpb.Response, error) {
	// maintain the initial member object
	// throughout the test time
	clus.agentRequests[idx] = &rpcpb.Request{
		Operation: op,
		Member:    clus.Members[idx],
		Tester:    clus.Tester,
	}

	err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
	clus.lg.Info(
		"sent request",
		zap.String("operation", op.String()),
		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
		zap.Error(err),
	)
	if err != nil {
		return nil, err
	}

	resp, err := clus.agentStreams[idx].Recv()
	if resp != nil {
		clus.lg.Info(
			"received response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Bool("success", resp.Success),
			zap.String("status", resp.Status),
			zap.Error(err),
		)
	} else {
		clus.lg.Info(
			"received empty response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Error(err),
		)
	}
	if err != nil {
		return nil, err
	}

	if !resp.Success {
		return nil, errors.New(resp.Status)
	}

	m, secure := clus.Members[idx], false
	for _, cu := range m.Etcd.AdvertiseClientURLs {
		u, err := url.Parse(cu)
		if err != nil {
			return nil, err
		}
		if u.Scheme == "https" { // TODO: handle unix
			secure = true
		}
	}

	// store TLS assets from agents/servers onto disk
	if secure && (op == rpcpb.Operation_INITIAL_START_ETCD || op == rpcpb.Operation_RESTART_ETCD) {
		dirClient := filepath.Join(
			clus.Tester.DataDir,
			clus.Members[idx].Etcd.Name,
			"fixtures",
			"client",
		)
		if err = fileutil.TouchDirAll(dirClient); err != nil {
			return nil, err
		}

		clientCertData := []byte(resp.Member.ClientCertData)
		if len(clientCertData) == 0 {
			return nil, fmt.Errorf("got empty client cert from %q", m.EtcdClientEndpoint)
		}
		clientCertPath := filepath.Join(dirClient, "cert.pem")
		if err = ioutil.WriteFile(clientCertPath, clientCertData, 0644); err != nil { // overwrite if exists
			return nil, err
		}
		resp.Member.ClientCertPath = clientCertPath
		clus.lg.Info(
			"saved client cert file",
			zap.String("path", clientCertPath),
		)

		clientKeyData := []byte(resp.Member.ClientKeyData)
		if len(clientKeyData) == 0 {
			return nil, fmt.Errorf("got empty client key from %q", m.EtcdClientEndpoint)
		}
		clientKeyPath := filepath.Join(dirClient, "key.pem")
		if err = ioutil.WriteFile(clientKeyPath, clientKeyData, 0644); err != nil { // overwrite if exists
			return nil, err
		}
		resp.Member.ClientKeyPath = clientKeyPath
		clus.lg.Info(
			"saved client key file",
			zap.String("path", clientKeyPath),
		)

		clientTrustedCAData := []byte(resp.Member.ClientTrustedCAData)
		if len(clientTrustedCAData) != 0 {
			// TODO: disable this when auto TLS is deprecated
			clientTrustedCAPath := filepath.Join(dirClient, "ca.pem")
			if err = ioutil.WriteFile(clientTrustedCAPath, clientTrustedCAData, 0644); err != nil { // overwrite if exists
				return nil, err
			}
			resp.Member.ClientTrustedCAPath = clientTrustedCAPath
			clus.lg.Info(
				"saved client trusted CA file",
				zap.String("path", clientTrustedCAPath),
			)
		}

		// no need to store peer certs for tester clients
		clus.Members[idx] = resp.Member
	}

	return resp, nil
}
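// For reference, the TLS assets written above end up on the tester host under
// a layout like the following (the data dir and member name are illustrative
// placeholders taken from the tester and etcd configs):
//
//	<tester-data-dir>/<etcd-name>/fixtures/client/cert.pem
//	<tester-data-dir>/<etcd-name>/fixtures/client/key.pem
//	<tester-data-dir>/<etcd-name>/fixtures/client/ca.pem   (only when a trusted CA is returned)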
// Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT terminates all tester connections to agents and etcd servers.
func (clus *Cluster) Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT() {
	err := clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT)
	if err != nil {
		clus.lg.Warn("destroying etcd/agents FAIL", zap.Error(err))
	} else {
		clus.lg.Info("destroying etcd/agents PASS")
	}

	for i, conn := range clus.agentConns {
		err := conn.Close()
		clus.lg.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
	}

	if clus.testerHTTPServer != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		err := clus.testerHTTPServer.Shutdown(ctx)
		cancel()
		clus.lg.Info("closed tester HTTP server", zap.String("tester-address", clus.Tester.Addr), zap.Error(err))
	}
}
// WaitHealth ensures all members are healthy
// by writing a test key to the etcd cluster.
func (clus *Cluster) WaitHealth() error {
	var err error
	// wait 60s to check cluster health.
	// TODO: set it to a reasonable value. It is set that high because a
	// follower may take a long time to catch up with the leader when
	// rebooted under a reasonable workload (https://github.com/coreos/etcd/issues/2698)
	for i := 0; i < 60; i++ {
		for _, m := range clus.Members {
			if err = m.WriteHealthKey(); err != nil {
				clus.lg.Warn(
					"health check FAIL",
					zap.Int("retries", i),
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Error(err),
				)
				break
			}
			clus.lg.Info(
				"health check PASS",
				zap.Int("retries", i),
				zap.String("endpoint", m.EtcdClientEndpoint),
			)
		}
		if err == nil {
			clus.lg.Info("health check ALL PASS")
			return nil
		}
		time.Sleep(time.Second)
	}
	return err
}
// GetLeader returns the index of the leader, along with an error if any.
func (clus *Cluster) GetLeader() (int, error) {
	for i, m := range clus.Members {
		isLeader, err := m.IsLeader()
		if isLeader || err != nil {
			return i, err
		}
	}
	return 0, fmt.Errorf("no leader found")
}
// maxRev returns the maximum revision found on the cluster.
func (clus *Cluster) maxRev() (rev int64, err error) {
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members))
	for i := range clus.Members {
		go func(m *rpcpb.Member) {
			mrev, merr := m.Rev(ctx)
			revc <- mrev
			errc <- merr
		}(clus.Members[i])
	}
	for i := 0; i < len(clus.Members); i++ {
		if merr := <-errc; merr != nil {
			err = merr
		}
		if mrev := <-revc; mrev > rev {
			rev = mrev
		}
	}
	return rev, err
}
func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
	revs := make(map[string]int64)
	hashes := make(map[string]int64)
	for _, m := range clus.Members {
		rev, hash, err := m.RevHash()
		if err != nil {
			return nil, nil, err
		}
		revs[m.EtcdClientEndpoint] = rev
		hashes[m.EtcdClientEndpoint] = hash
	}
	return revs, hashes, nil
}
func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range clus.Members {
		clus.lg.Info(
			"compact START",
			zap.String("endpoint", m.EtcdClientEndpoint),
			zap.Int64("compact-revision", rev),
			zap.Duration("timeout", timeout),
		)
		now := time.Now()
		cerr := m.Compact(rev, timeout)
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				clus.lg.Info(
					"compact error is ignored",
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Int64("compact-revision", rev),
					zap.Error(cerr),
				)
			} else {
				clus.lg.Warn(
					"compact FAIL",
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Int64("compact-revision", rev),
					zap.Error(cerr),
				)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			clus.lg.Info(
				"compact PASS",
				zap.String("endpoint", m.EtcdClientEndpoint),
				zap.Int64("compact-revision", rev),
				zap.Duration("timeout", timeout),
				zap.Duration("took", time.Since(now)),
			)
		}
	}
	return err
}
func (clus *Cluster) checkCompact(rev int64) error {
	if rev == 0 {
		return nil
	}
	for _, m := range clus.Members {
		if err := m.CheckCompact(rev); err != nil {
			return err
		}
	}
	return nil
}
func (clus *Cluster) defrag() error {
	for _, m := range clus.Members {
		if err := m.Defrag(); err != nil {
			clus.lg.Warn(
				"defrag FAIL",
				zap.String("endpoint", m.EtcdClientEndpoint),
				zap.Error(err),
			)
			return err
		}
		clus.lg.Info(
			"defrag PASS",
			zap.String("endpoint", m.EtcdClientEndpoint),
		)
	}
	clus.lg.Info(
		"defrag ALL PASS",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.Int("case-total", len(clus.cases)),
	)
	return nil
}
// GetCaseDelayDuration computes failure delay duration.
func (clus *Cluster) GetCaseDelayDuration() time.Duration {
	return time.Duration(clus.Tester.CaseDelayMs) * time.Millisecond
}

// Report reports the number of modified keys.
func (clus *Cluster) Report() int64 {
	return clus.stresser.ModifiedKeys()
}