cluster.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"
	"math/rand"
	"net"
	"strings"
	"time"

	"golang.org/x/net/context"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
	"google.golang.org/grpc"
)

const (
	peerURLPort   = 2380
	failpointPort = 2381
)
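
// cluster manages the etcd members under test, one per agent endpoint,
// along with the stressers that drive load against them.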
type cluster struct {
	v2Only bool // to be deprecated

	datadir              string
	stressQPS            int
	stressKeySize        int
	stressKeySuffixRange int

	Size      int
	Stressers []Stresser
	Members   []*member
}
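
// ClusterStatus reports the status of each agent, keyed by member endpoint.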
type ClusterStatus struct {
	AgentStatuses map[string]client.Status
}

// newCluster starts and returns a new cluster. The caller should call
// Terminate when finished, to shut it down.
func newCluster(agentEndpoints []string, datadir string, stressQPS, stressKeySize, stressKeySuffixRange int, isV2Only bool) (*cluster, error) {
	c := &cluster{
		v2Only:               isV2Only,
		datadir:              datadir,
		stressQPS:            stressQPS,
		stressKeySize:        stressKeySize,
		stressKeySuffixRange: stressKeySuffixRange,
	}
	if err := c.bootstrap(agentEndpoints); err != nil {
		return nil, err
	}
	return c, nil
}
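
// bootstrap connects to each agent endpoint, starts an etcd member on every
// agent with a shared cluster token and --initial-cluster flag, and then
// launches one stresser per member.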
func (c *cluster) bootstrap(agentEndpoints []string) error {
	size := len(agentEndpoints)

	members := make([]*member, size)
	memberNameURLs := make([]string, size)
	for i, u := range agentEndpoints {
		agent, err := client.NewAgent(u)
		if err != nil {
			return err
		}
		host, _, err := net.SplitHostPort(u)
		if err != nil {
			return err
		}
		members[i] = &member{
			Agent:        agent,
			Endpoint:     u,
			Name:         fmt.Sprintf("etcd-%d", i),
			ClientURL:    fmt.Sprintf("http://%s:2379", host),
			PeerURL:      fmt.Sprintf("http://%s:%d", host, peerURLPort),
			FailpointURL: fmt.Sprintf("http://%s:%d", host, failpointPort),
		}
		memberNameURLs[i] = members[i].ClusterEntry()
	}
	clusterStr := strings.Join(memberNameURLs, ",")
	token := fmt.Sprint(rand.Int())

	for i, m := range members {
		flags := append(
			m.Flags(),
			"--data-dir", c.datadir,
			"--initial-cluster-token", token,
			"--initial-cluster", clusterStr)

		if _, err := m.Agent.Start(flags...); err != nil {
			// cleanup: terminate the members that already started
			for _, m := range members[:i] {
				m.Agent.Terminate()
			}
			return err
		}
	}

	// TODO: rate-limit stress traffic on the server side. Overly intensive
	// stressers can make an etcd member panic with an out-of-memory error.
	stressN := 100
	c.Stressers = make([]Stresser, len(members))
	for i, m := range members {
		if c.v2Only {
			c.Stressers[i] = &stresserV2{
				Endpoint:       m.ClientURL,
				KeySize:        c.stressKeySize,
				KeySuffixRange: c.stressKeySuffixRange,
				N:              stressN,
			}
		} else {
			c.Stressers[i] = &stresser{
				Endpoint:       m.grpcAddr(),
				KeySize:        c.stressKeySize,
				KeySuffixRange: c.stressKeySuffixRange,
				qps:            c.stressQPS,
				N:              stressN,
			}
		}
		go c.Stressers[i].Stress()
	}

	c.Size = size
	c.Members = members
	return nil
}
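
// Reset re-bootstraps the cluster on the agent endpoints the current
// members are attached to.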
func (c *cluster) Reset() error {
	eps := make([]string, len(c.Members))
	for i, m := range c.Members {
		eps[i] = m.Endpoint
	}
	return c.bootstrap(eps)
}
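
// WaitHealth sets a health key on every member, retrying once per second,
// and returns the last error if the cluster never becomes healthy.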
func (c *cluster) WaitHealth() error {
	var err error
	// Retry for up to 60s to check cluster health.
	// TODO: set it to a reasonable value. It is set that high because a
	// follower may take a long time to catch up with the leader after a
	// reboot under a reasonable workload (https://github.com/coreos/etcd/issues/2698).
	healthFunc := func(m *member) error { return m.SetHealthKeyV3() }
	if c.v2Only {
		healthFunc = func(m *member) error { return m.SetHealthKeyV2() }
	}
	for i := 0; i < 60; i++ {
		for _, m := range c.Members {
			if err = healthFunc(m); err != nil {
				break
			}
		}
		if err == nil {
			return nil
		}
		plog.Warningf("#%d setHealthKey error (%v)", i, err)
		time.Sleep(time.Second)
	}
	return err
}

// GetLeader returns the index of the leader member and an error, if any.
// In v2-only mode it always returns index 0.
func (c *cluster) GetLeader() (int, error) {
	if c.v2Only {
		return 0, nil
	}
	for i, m := range c.Members {
		isLeader, err := m.IsLeader()
		if isLeader || err != nil {
			return i, err
		}
	}
	return 0, fmt.Errorf("no leader found")
}
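
// Report returns the aggregate success and failure counts across all
// stressers.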
func (c *cluster) Report() (success, failure int) {
	for _, stress := range c.Stressers {
		s, f := stress.Report()
		success += s
		failure += f
	}
	return
}
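
// Cleanup asks every agent to clean up its member's state and cancels all
// stressers, returning the last cleanup error encountered.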
func (c *cluster) Cleanup() error {
	var lasterr error
	for _, m := range c.Members {
		if err := m.Agent.Cleanup(); err != nil {
			lasterr = err
		}
	}
	for _, s := range c.Stressers {
		s.Cancel()
	}
	return lasterr
}
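
// Terminate stops every member through its agent and cancels all stressers.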
func (c *cluster) Terminate() {
	for _, m := range c.Members {
		m.Agent.Terminate()
	}
	for _, s := range c.Stressers {
		s.Cancel()
	}
}
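
// Status queries every agent and returns the collected statuses, keyed by
// member endpoint; unreachable agents are reported with state "unknown".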
func (c *cluster) Status() ClusterStatus {
	cs := ClusterStatus{
		AgentStatuses: make(map[string]client.Status),
	}

	for _, m := range c.Members {
		s, err := m.Agent.Status()
		// TODO: add a.Desc() as a key of the map
		desc := m.Endpoint
		if err != nil {
			cs.AgentStatuses[desc] = client.Status{State: "unknown"}
			plog.Printf("failed to get the status of agent [%s]", desc)
			// keep the "unknown" entry instead of overwriting it with
			// the zero-value status below
			continue
		}
		cs.AgentStatuses[desc] = s
	}
	return cs
}
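
// getRevisionHash fetches the current revision and KV store hash from every
// member, keyed by client URL, so callers can check cross-member consistency.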
func (c *cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
	revs := make(map[string]int64)
	hashes := make(map[string]int64)
	for _, m := range c.Members {
		rev, hash, err := m.RevHash()
		if err != nil {
			return nil, nil, err
		}
		revs[m.ClientURL] = rev
		hashes[m.ClientURL] = hash
	}
	return revs, hashes, nil
}
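
// compactKV issues a physical compaction at revision rev against every
// member, tolerating "already compacted" responses from members after the
// first, and returns the last hard error encountered.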
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range c.Members {
		u := m.ClientURL
		conn, derr := m.dialGRPC()
		if derr != nil {
			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
		cancel()
		conn.Close()
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
			} else {
				plog.Warningf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
		}
	}
	return err
}
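
// checkCompact verifies that every member has compacted up to revision rev.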
func (c *cluster) checkCompact(rev int64) error {
	if rev == 0 {
		return nil
	}
	for _, m := range c.Members {
		if err := m.CheckCompact(rev); err != nil {
			return err
		}
	}
	return nil
}
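
// defrag defragments the backend database of every member, stopping at the
// first error.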
func (c *cluster) defrag() error {
	for _, m := range c.Members {
		if err := m.Defrag(); err != nil {
			return err
		}
	}
	return nil
}