cluster.go

// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tester

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/debugutil"
	"github.com/coreos/etcd/tools/functional-tester/rpcpb"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/zap"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	yaml "gopkg.in/yaml.v2"
)
// Cluster defines tester cluster.
type Cluster struct {
	logger *zap.Logger

	agentConns    []*grpc.ClientConn
	agentClients  []rpcpb.TransportClient
	agentStreams  []rpcpb.Transport_TransportClient
	agentRequests []*rpcpb.Request

	testerHTTPServer *http.Server

	Members []*rpcpb.Member `yaml:"agent-configs"`
	Tester  *rpcpb.Tester   `yaml:"tester-config"`

	failures    []Failure
	rateLimiter *rate.Limiter
	stresser    Stresser
	checker     Checker

	currentRevision int64
	rd              int
	cs              int
}
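// newCluster parses the YAML configuration file at fpath into a Cluster
// ("agent-configs" populates Members, "tester-config" populates Tester)
// and validates member directories, URLs, and tester limits.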
func newCluster(logger *zap.Logger, fpath string) (*Cluster, error) {
	logger.Info("reading configuration file", zap.String("path", fpath))
	bts, err := ioutil.ReadFile(fpath)
	if err != nil {
		return nil, err
	}
	logger.Info("opened configuration file", zap.String("path", fpath))
	clus := &Cluster{logger: logger}
	if err = yaml.Unmarshal(bts, clus); err != nil {
		return nil, err
	}

	for i := range clus.Members {
		if clus.Members[i].BaseDir == "" {
			return nil, fmt.Errorf("Members[%d].BaseDir cannot be empty (got %q)", i, clus.Members[i].BaseDir)
		}
		if clus.Members[i].EtcdLogPath == "" {
			return nil, fmt.Errorf("Members[%d].EtcdLogPath cannot be empty (got %q)", i, clus.Members[i].EtcdLogPath)
		}
		if clus.Members[i].Etcd.Name == "" {
			return nil, fmt.Errorf("'--name' cannot be empty (got %+v)", clus.Members[i])
		}
		if clus.Members[i].Etcd.DataDir == "" {
			return nil, fmt.Errorf("'--data-dir' cannot be empty (got %+v)", clus.Members[i])
		}
		if clus.Members[i].Etcd.SnapshotCount == 0 {
			return nil, fmt.Errorf("'--snapshot-count' cannot be 0 (got %+v)", clus.Members[i].Etcd.SnapshotCount)
		}
		if clus.Members[i].Etcd.WALDir == "" {
			clus.Members[i].Etcd.WALDir = filepath.Join(clus.Members[i].Etcd.DataDir, "member", "wal")
		}

		port := ""
		listenClientPorts := make([]string, len(clus.Members[i].Etcd.ListenClientURLs))
		for j, u := range clus.Members[i].Etcd.ListenClientURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--listen-client-urls' has invalid URL %q", u)
			}
			listenClientPorts[j], err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--listen-client-urls' has no port %q", u)
			}
		}
		for j, u := range clus.Members[i].Etcd.AdvertiseClientURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--advertise-client-urls' has invalid URL %q", u)
			}
			port, err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--advertise-client-urls' has no port %q", u)
			}
			// compare against the listen URL at the same index
			if clus.Members[i].EtcdClientProxy && listenClientPorts[j] == port {
				return nil, fmt.Errorf("clus.Members[%d] requires client port proxy, but advertise port %q conflicts with listener port %q", i, port, listenClientPorts[j])
			}
		}
		listenPeerPorts := make([]string, len(clus.Members[i].Etcd.ListenPeerURLs))
		for j, u := range clus.Members[i].Etcd.ListenPeerURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--listen-peer-urls' has invalid URL %q", u)
			}
			listenPeerPorts[j], err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--listen-peer-urls' has no port %q", u)
			}
		}
		for j, u := range clus.Members[i].Etcd.InitialAdvertisePeerURLs {
			if !isValidURL(u) {
				return nil, fmt.Errorf("'--initial-advertise-peer-urls' has invalid URL %q", u)
			}
			port, err = getPort(u)
			if err != nil {
				return nil, fmt.Errorf("'--initial-advertise-peer-urls' has no port %q", u)
			}
			// compare against the listen URL at the same index
			if clus.Members[i].EtcdPeerProxy && listenPeerPorts[j] == port {
				return nil, fmt.Errorf("clus.Members[%d] requires peer port proxy, but advertise port %q conflicts with listener port %q", i, port, listenPeerPorts[j])
			}
		}

		if !strings.HasPrefix(clus.Members[i].EtcdLogPath, clus.Members[i].BaseDir) {
			return nil, fmt.Errorf("EtcdLogPath must be prefixed with BaseDir (got %q)", clus.Members[i].EtcdLogPath)
		}
		if !strings.HasPrefix(clus.Members[i].Etcd.DataDir, clus.Members[i].BaseDir) {
			return nil, fmt.Errorf("Etcd.DataDir must be prefixed with BaseDir (got %q)", clus.Members[i].Etcd.DataDir)
		}
		// TODO: support separate WALDir that can be handled via failure-archive
		if !strings.HasPrefix(clus.Members[i].Etcd.WALDir, clus.Members[i].BaseDir) {
			return nil, fmt.Errorf("Etcd.WALDir must be prefixed with BaseDir (got %q)", clus.Members[i].Etcd.WALDir)
		}

		if len(clus.Tester.FailureCases) == 0 {
			return nil, errors.New("FailureCases not found")
		}
	}

	for _, v := range clus.Tester.FailureCases {
		if _, ok := rpcpb.FailureCase_value[v]; !ok {
			return nil, fmt.Errorf("%q is not defined in 'rpcpb.FailureCase_value'", v)
		}
	}
	for _, v := range clus.Tester.StressTypes {
		if _, ok := rpcpb.StressType_value[v]; !ok {
			return nil, fmt.Errorf("StressType is unknown; got %q", v)
		}
	}
	if clus.Tester.StressKeySuffixRangeTxn > 100 {
		return nil, fmt.Errorf("StressKeySuffixRangeTxn maximum value is 100, got %v", clus.Tester.StressKeySuffixRangeTxn)
	}
	if clus.Tester.StressKeyTxnOps > 64 {
		return nil, fmt.Errorf("StressKeyTxnOps maximum value is 64, got %v", clus.Tester.StressKeyTxnOps)
	}

	return clus, err
}
// TODO: status handler

var dialOpts = []grpc.DialOption{
	grpc.WithInsecure(),
	grpc.WithTimeout(5 * time.Second),
	grpc.WithBlock(),
}
// NewCluster creates a client from a tester configuration.
func NewCluster(logger *zap.Logger, fpath string) (*Cluster, error) {
	clus, err := newCluster(logger, fpath)
	if err != nil {
		return nil, err
	}

	clus.agentConns = make([]*grpc.ClientConn, len(clus.Members))
	clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
	clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
	clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
	clus.failures = make([]Failure, 0)

	for i, ap := range clus.Members {
		logger.Info("connecting", zap.String("agent-address", ap.AgentAddr))
		var err error
		clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
		if err != nil {
			return nil, err
		}
		clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
		logger.Info("connected", zap.String("agent-address", ap.AgentAddr))

		logger.Info("creating stream", zap.String("agent-address", ap.AgentAddr))
		clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
		if err != nil {
			return nil, err
		}
		logger.Info("created stream", zap.String("agent-address", ap.AgentAddr))
	}

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	if clus.Tester.EnablePprof {
		for p, h := range debugutil.PProfHandlers() {
			mux.Handle(p, h)
		}
	}
	clus.testerHTTPServer = &http.Server{
		Addr:    clus.Tester.TesterAddr,
		Handler: mux,
	}
	go clus.serveTesterServer()

	for _, cs := range clus.Tester.FailureCases {
		switch cs {
		case "KILL_ONE_FOLLOWER":
			clus.failures = append(clus.failures, newFailureKillOne()) // TODO
		case "KILL_LEADER":
			clus.failures = append(clus.failures, newFailureKillLeader())
		case "KILL_ONE_FOLLOWER_FOR_LONG":
			clus.failures = append(clus.failures, newFailureKillOneForLongTime()) // TODO
		case "KILL_LEADER_FOR_LONG":
			clus.failures = append(clus.failures, newFailureKillLeaderForLongTime())
		case "KILL_QUORUM":
			clus.failures = append(clus.failures, newFailureKillQuorum())
		case "KILL_ALL":
			clus.failures = append(clus.failures, newFailureKillAll())
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOne()) // TODO
		case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_ONE":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxOne()) // TODO
		case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
			clus.failures = append(clus.failures, newFailureBlackholePeerPortTxRxAll())
		case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxOneMember()) // TODO
		case "DELAY_PEER_PORT_TX_RX_LEADER":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxLeader()) // TODO
		case "DELAY_PEER_PORT_TX_RX_ALL":
			clus.failures = append(clus.failures, newFailureDelayPeerPortTxRxAll()) // TODO
		case "FAILPOINTS":
			fpFailures, fperr := failpointFailures(clus)
			if len(fpFailures) == 0 {
				clus.logger.Info("no failpoints found!", zap.Error(fperr))
			}
			clus.failures = append(clus.failures, fpFailures...)
		case "NO_FAIL":
			clus.failures = append(clus.failures, newFailureNoOp())
		case "EXTERNAL":
			clus.failures = append(clus.failures, newFailureExternal(clus.Tester.ExternalExecPath))
		default:
			return nil, fmt.Errorf("unknown failure %q", cs)
		}
	}

	clus.rateLimiter = rate.NewLimiter(
		rate.Limit(int(clus.Tester.StressQPS)),
		int(clus.Tester.StressQPS),
	)

	clus.updateStresserChecker()

	return clus, nil
}
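// serveTesterServer runs the tester HTTP server that exposes Prometheus
// metrics and, if enabled, pprof handlers.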
func (clus *Cluster) serveTesterServer() {
	clus.logger.Info(
		"started tester HTTP server",
		zap.String("tester-address", clus.Tester.TesterAddr),
	)
	err := clus.testerHTTPServer.ListenAndServe()
	clus.logger.Info(
		"tester HTTP server returned",
		zap.String("tester-address", clus.Tester.TesterAddr),
		zap.Error(err),
	)
	if err != nil && err != http.ErrServerClosed {
		clus.logger.Fatal("tester HTTP errored", zap.Error(err))
	}
}
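// updateStresserChecker rebuilds the composite stresser and the hash checker
// for the current set of members.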
func (clus *Cluster) updateStresserChecker() {
	clus.logger.Info(
		"updating stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)

	cs := &compositeStresser{}
	for idx := range clus.Members {
		cs.stressers = append(cs.stressers, newStresser(clus, idx))
	}
	clus.stresser = cs

	clus.checker = newHashChecker(clus.logger, hashAndRevGetter(clus))
	if schk := cs.Checker(); schk != nil {
		clus.checker = newCompositeChecker([]Checker{clus.checker, schk})
	}

	clus.logger.Info(
		"updated stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
}
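// startStresser starts all stressers against the cluster.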
func (clus *Cluster) startStresser() (err error) {
	clus.logger.Info(
		"starting stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	err = clus.stresser.Stress()
	clus.logger.Info(
		"started stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	return err
}
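// closeStresser stops and closes all stressers.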
func (clus *Cluster) closeStresser() {
	clus.logger.Info(
		"closing stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	clus.stresser.Close()
	clus.logger.Info(
		"closed stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
}
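// pauseStresser temporarily suspends all stressers without closing them.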
func (clus *Cluster) pauseStresser() {
	clus.logger.Info(
		"pausing stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	clus.stresser.Pause()
	clus.logger.Info(
		"paused stressers",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
}
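// checkConsistency runs the configured checkers against the cluster and,
// on success, updates the current revision and restarts the stressers.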
func (clus *Cluster) checkConsistency() (err error) {
	defer func() {
		if err != nil {
			return
		}
		if err = clus.updateRevision(); err != nil {
			clus.logger.Warn(
				"updateRevision failed",
				zap.Error(err),
			)
			return
		}
		err = clus.startStresser()
	}()

	clus.logger.Info(
		"checking consistency and invariant of cluster",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.String("desc", clus.failures[clus.cs].Desc()),
	)
	if err = clus.checker.Check(); err != nil {
		clus.logger.Warn(
			"checker.Check failed",
			zap.Error(err),
		)
		return err
	}
	clus.logger.Info(
		"checked consistency and invariant of cluster",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
		zap.String("desc", clus.failures[clus.cs].Desc()),
	)

	return err
}
// Bootstrap bootstraps etcd cluster the very first time.
// After this, just continue to call kill/restart.
func (clus *Cluster) Bootstrap() error {
	// this is the only time that creates request from scratch
	return clus.broadcastOperation(rpcpb.Operation_InitialStartEtcd)
}

// FailArchive sends "FailArchive" operation.
func (clus *Cluster) FailArchive() error {
	return clus.broadcastOperation(rpcpb.Operation_FailArchive)
}

// Restart sends "Restart" operation.
func (clus *Cluster) Restart() error {
	return clus.broadcastOperation(rpcpb.Operation_RestartEtcd)
}
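// broadcastOperation sends the given operation to every agent, tolerating
// "transport is closing" errors while agents are being destroyed.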
func (clus *Cluster) broadcastOperation(op rpcpb.Operation) error {
	for i := range clus.agentStreams {
		err := clus.sendOperation(i, op)
		if err != nil {
			if op == rpcpb.Operation_DestroyEtcdAgent &&
				strings.Contains(err.Error(), "rpc error: code = Unavailable desc = transport is closing") {
				// agent server has already closed;
				// so this error is expected
				clus.logger.Info(
					"successfully destroyed",
					zap.String("member", clus.Members[i].EtcdClientEndpoint),
				)
				continue
			}
			return err
		}
	}
	return nil
}
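// sendOperation sends one operation to the agent at index idx over its
// transport stream and waits for the response.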
func (clus *Cluster) sendOperation(idx int, op rpcpb.Operation) error {
	if op == rpcpb.Operation_InitialStartEtcd {
		clus.agentRequests[idx] = &rpcpb.Request{
			Operation: op,
			Member:    clus.Members[idx],
			Tester:    clus.Tester,
		}
	} else {
		clus.agentRequests[idx].Operation = op
	}

	clus.logger.Info(
		"sending request",
		zap.String("operation", op.String()),
		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
	)
	err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
	clus.logger.Info(
		"sent request",
		zap.String("operation", op.String()),
		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
		zap.Error(err),
	)
	if err != nil {
		return err
	}

	clus.logger.Info(
		"receiving response",
		zap.String("operation", op.String()),
		zap.String("from", clus.Members[idx].EtcdClientEndpoint),
	)
	resp, err := clus.agentStreams[idx].Recv()
	if resp != nil {
		clus.logger.Info(
			"received response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Bool("success", resp.Success),
			zap.String("status", resp.Status),
			zap.Error(err),
		)
	} else {
		clus.logger.Info(
			"received empty response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Error(err),
		)
	}
	if err != nil {
		return err
	}

	if !resp.Success {
		err = errors.New(resp.Status)
	}
	return err
}
// DestroyEtcdAgents terminates all tester connections to agents and etcd servers.
func (clus *Cluster) DestroyEtcdAgents() {
	clus.logger.Info("destroying etcd servers and agents")
	err := clus.broadcastOperation(rpcpb.Operation_DestroyEtcdAgent)
	if err != nil {
		clus.logger.Warn("failed to destroy etcd servers and agents", zap.Error(err))
	} else {
		clus.logger.Info("destroyed etcd servers and agents")
	}

	for i, conn := range clus.agentConns {
		clus.logger.Info("closing connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr))
		err := conn.Close()
		clus.logger.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
	}

	// TODO: closing stresser connections to etcd

	if clus.testerHTTPServer != nil {
		clus.logger.Info("closing tester HTTP server", zap.String("tester-address", clus.Tester.TesterAddr))
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		err := clus.testerHTTPServer.Shutdown(ctx)
		cancel()
		clus.logger.Info("closed tester HTTP server", zap.String("tester-address", clus.Tester.TesterAddr), zap.Error(err))
	}
}
// WaitHealth ensures all members are healthy
// by writing a test key to etcd cluster.
func (clus *Cluster) WaitHealth() error {
	var err error
	// wait 60s to check cluster health.
	// TODO: set it to a reasonable value. It is set that high because
	// a follower may take a long time to catch up with the leader after
	// a reboot under a reasonable workload (https://github.com/coreos/etcd/issues/2698)
	for i := 0; i < 60; i++ {
		for _, m := range clus.Members {
			clus.logger.Info(
				"writing health key",
				zap.Int("retries", i),
				zap.String("endpoint", m.EtcdClientEndpoint),
			)
			if err = m.WriteHealthKey(); err != nil {
				clus.logger.Warn(
					"writing health key failed",
					zap.Int("retries", i),
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Error(err),
				)
				break
			}
			clus.logger.Info(
				"successfully wrote health key",
				zap.Int("retries", i),
				zap.String("endpoint", m.EtcdClientEndpoint),
			)
		}
		if err == nil {
			clus.logger.Info(
				"writing health key success on all members",
				zap.Int("retries", i),
			)
			return nil
		}
		time.Sleep(time.Second)
	}
	return err
}
// GetLeader returns the index of leader and error if any.
func (clus *Cluster) GetLeader() (int, error) {
	for i, m := range clus.Members {
		isLeader, err := m.IsLeader()
		if isLeader || err != nil {
			return i, err
		}
	}
	return 0, fmt.Errorf("no leader found")
}
// maxRev returns the maximum revision found on the cluster.
func (clus *Cluster) maxRev() (rev int64, err error) {
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members))
	for i := range clus.Members {
		go func(m *rpcpb.Member) {
			mrev, merr := m.Rev(ctx)
			revc <- mrev
			errc <- merr
		}(clus.Members[i])
	}
	for i := 0; i < len(clus.Members); i++ {
		if merr := <-errc; merr != nil {
			err = merr
		}
		if mrev := <-revc; mrev > rev {
			rev = mrev
		}
	}
	return rev, err
}
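// getRevisionHash returns the store revision and hash of every member,
// keyed by client endpoint.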
func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
	revs := make(map[string]int64)
	hashes := make(map[string]int64)
	for _, m := range clus.Members {
		rev, hash, err := m.RevHash()
		if err != nil {
			return nil, nil, err
		}
		revs[m.EtcdClientEndpoint] = rev
		hashes[m.EtcdClientEndpoint] = hash
	}
	return revs, hashes, nil
}
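// compactKV issues a physical compaction at the given revision on every member.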
func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range clus.Members {
		conn, derr := m.DialEtcdGRPCServer()
		if derr != nil {
			clus.logger.Warn(
				"compactKV dial failed",
				zap.String("endpoint", m.EtcdClientEndpoint),
				zap.Error(derr),
			)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)

		clus.logger.Info(
			"starting compaction",
			zap.String("endpoint", m.EtcdClientEndpoint),
			zap.Int64("revision", rev),
			zap.Duration("timeout", timeout),
		)
		now := time.Now()
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
		cancel()
		conn.Close()

		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				clus.logger.Info(
					"compact error is ignored",
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Int64("revision", rev),
					zap.Error(cerr),
				)
			} else {
				clus.logger.Warn(
					"compact failed",
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Int64("revision", rev),
					zap.Error(cerr),
				)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			clus.logger.Info(
				"finished compaction",
				zap.String("endpoint", m.EtcdClientEndpoint),
				zap.Int64("revision", rev),
				zap.Duration("timeout", timeout),
				zap.Duration("took", time.Since(now)),
			)
		}
	}
	return err
}
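// checkCompact verifies that every member has been compacted up to the given revision.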
func (clus *Cluster) checkCompact(rev int64) error {
	if rev == 0 {
		return nil
	}
	for _, m := range clus.Members {
		if err := m.CheckCompact(rev); err != nil {
			return err
		}
	}
	return nil
}
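// defrag defragments the backend database of every member.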
func (clus *Cluster) defrag() error {
	clus.logger.Info(
		"defragmenting",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	for _, m := range clus.Members {
		if err := m.Defrag(); err != nil {
			clus.logger.Warn(
				"defrag failed",
				zap.Int("round", clus.rd),
				zap.Int("case", clus.cs),
				zap.Error(err),
			)
			return err
		}
	}
	clus.logger.Info(
		"defragmented",
		zap.Int("round", clus.rd),
		zap.Int("case", clus.cs),
	)
	return nil
}
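// Report returns the number of keys the stressers have modified so far.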
func (clus *Cluster) Report() int64 { return clus.stresser.ModifiedKeys() }