cluster.go 7.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package main
  15. import (
  16. "fmt"
  17. "math/rand"
  18. "net"
  19. "strings"
  20. "time"
  21. "golang.org/x/net/context"
  22. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  23. "github.com/coreos/etcd/tools/functional-tester/etcd-agent/client"
  24. )
// Well-known ports each member listens on in addition to the client
// port (2379, hard-coded in bootstrap).
const (
	peerURLPort   = 2380 // peer (raft) traffic
	failpointPort = 2381 // failpoint HTTP endpoint
)
// cluster manages a set of etcd members launched through etcd-agents,
// plus one Stresser per member generating load against it.
type cluster struct {
	v2Only bool // to be deprecated

	datadir              string // --data-dir passed to every member
	stressQPS            int    // rate limit for the v3 stresser
	stressKeySize        int    // size of each stressed key's value
	stressKeySuffixRange int    // number of distinct stressed keys

	Size      int
	Stressers []Stresser
	Members   []*member
}
// ClusterStatus reports the status of every agent in the cluster,
// keyed by the member's agent endpoint.
type ClusterStatus struct {
	AgentStatuses map[string]client.Status
}
  42. // newCluster starts and returns a new cluster. The caller should call Terminate when finished, to shut it down.
  43. func newCluster(agentEndpoints []string, datadir string, stressQPS, stressKeySize, stressKeySuffixRange int, isV2Only bool) (*cluster, error) {
  44. c := &cluster{
  45. v2Only: isV2Only,
  46. datadir: datadir,
  47. stressQPS: stressQPS,
  48. stressKeySize: stressKeySize,
  49. stressKeySuffixRange: stressKeySuffixRange,
  50. }
  51. if err := c.bootstrap(agentEndpoints); err != nil {
  52. return nil, err
  53. }
  54. return c, nil
  55. }
  56. func (c *cluster) bootstrap(agentEndpoints []string) error {
  57. size := len(agentEndpoints)
  58. members := make([]*member, size)
  59. memberNameURLs := make([]string, size)
  60. for i, u := range agentEndpoints {
  61. agent, err := client.NewAgent(u)
  62. if err != nil {
  63. return err
  64. }
  65. host, _, err := net.SplitHostPort(u)
  66. if err != nil {
  67. return err
  68. }
  69. members[i] = &member{
  70. Agent: agent,
  71. Endpoint: u,
  72. Name: fmt.Sprintf("etcd-%d", i),
  73. ClientURL: fmt.Sprintf("http://%s:2379", host),
  74. PeerURL: fmt.Sprintf("http://%s:%d", host, peerURLPort),
  75. FailpointURL: fmt.Sprintf("http://%s:%d", host, failpointPort),
  76. }
  77. memberNameURLs[i] = members[i].ClusterEntry()
  78. }
  79. clusterStr := strings.Join(memberNameURLs, ",")
  80. token := fmt.Sprint(rand.Int())
  81. for i, m := range members {
  82. flags := append(
  83. m.Flags(),
  84. "--data-dir", c.datadir,
  85. "--initial-cluster-token", token,
  86. "--initial-cluster", clusterStr)
  87. if _, err := m.Agent.Start(flags...); err != nil {
  88. // cleanup
  89. for _, m := range members[:i] {
  90. m.Agent.Terminate()
  91. }
  92. return err
  93. }
  94. }
  95. // TODO: Too intensive stressers can panic etcd member with
  96. // 'out of memory' error. Put rate limits in server side.
  97. stressN := 100
  98. c.Stressers = make([]Stresser, len(members))
  99. for i, m := range members {
  100. if c.v2Only {
  101. c.Stressers[i] = &stresserV2{
  102. Endpoint: m.ClientURL,
  103. KeySize: c.stressKeySize,
  104. KeySuffixRange: c.stressKeySuffixRange,
  105. N: stressN,
  106. }
  107. } else {
  108. c.Stressers[i] = &stresser{
  109. Endpoint: m.grpcAddr(),
  110. KeySize: c.stressKeySize,
  111. KeySuffixRange: c.stressKeySuffixRange,
  112. qps: c.stressQPS,
  113. N: stressN,
  114. }
  115. }
  116. go c.Stressers[i].Stress()
  117. }
  118. c.Size = size
  119. c.Members = members
  120. return nil
  121. }
  122. func (c *cluster) Reset() error {
  123. eps := make([]string, len(c.Members))
  124. for i, m := range c.Members {
  125. eps[i] = m.Endpoint
  126. }
  127. return c.bootstrap(eps)
  128. }
  129. func (c *cluster) WaitHealth() error {
  130. var err error
  131. // wait 60s to check cluster health.
  132. // TODO: set it to a reasonable value. It is set that high because
  133. // follower may use long time to catch up the leader when reboot under
  134. // reasonable workload (https://github.com/coreos/etcd/issues/2698)
  135. healthFunc := func(m *member) error { return m.SetHealthKeyV3() }
  136. if c.v2Only {
  137. healthFunc = func(m *member) error { return m.SetHealthKeyV2() }
  138. }
  139. for i := 0; i < 60; i++ {
  140. for _, m := range c.Members {
  141. if err = healthFunc(m); err != nil {
  142. break
  143. }
  144. }
  145. if err == nil {
  146. return nil
  147. }
  148. plog.Warningf("#%d setHealthKey error (%v)", i, err)
  149. time.Sleep(time.Second)
  150. }
  151. return err
  152. }
  153. // GetLeader returns the index of leader and error if any.
  154. func (c *cluster) GetLeader() (int, error) {
  155. if c.v2Only {
  156. return 0, nil
  157. }
  158. for i, m := range c.Members {
  159. isLeader, err := m.IsLeader()
  160. if isLeader || err != nil {
  161. return i, err
  162. }
  163. }
  164. return 0, fmt.Errorf("no leader found")
  165. }
  166. func (c *cluster) Report() (success, failure int) {
  167. for _, stress := range c.Stressers {
  168. s, f := stress.Report()
  169. success += s
  170. failure += f
  171. }
  172. return
  173. }
  174. func (c *cluster) Cleanup() error {
  175. var lasterr error
  176. for _, m := range c.Members {
  177. if err := m.Agent.Cleanup(); err != nil {
  178. lasterr = err
  179. }
  180. }
  181. for _, s := range c.Stressers {
  182. s.Cancel()
  183. }
  184. return lasterr
  185. }
  186. func (c *cluster) Terminate() {
  187. for _, m := range c.Members {
  188. m.Agent.Terminate()
  189. }
  190. for _, s := range c.Stressers {
  191. s.Cancel()
  192. }
  193. }
  194. func (c *cluster) Status() ClusterStatus {
  195. cs := ClusterStatus{
  196. AgentStatuses: make(map[string]client.Status),
  197. }
  198. for _, m := range c.Members {
  199. s, err := m.Agent.Status()
  200. // TODO: add a.Desc() as a key of the map
  201. desc := m.Endpoint
  202. if err != nil {
  203. cs.AgentStatuses[desc] = client.Status{State: "unknown"}
  204. plog.Printf("failed to get the status of agent [%s]", desc)
  205. }
  206. cs.AgentStatuses[desc] = s
  207. }
  208. return cs
  209. }
  210. func (c *cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
  211. revs := make(map[string]int64)
  212. hashes := make(map[string]int64)
  213. for _, m := range c.Members {
  214. rev, hash, err := m.RevHash()
  215. if err != nil {
  216. return nil, nil, err
  217. }
  218. revs[m.ClientURL] = rev
  219. hashes[m.ClientURL] = hash
  220. }
  221. return revs, hashes, nil
  222. }
// compactKV issues a physical compaction at revision rev against every
// member. Failures on one member do not stop the loop; the last error seen
// (dial or compaction) is returned. rev <= 0 is a no-op.
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}
	for i, m := range c.Members {
		u := m.ClientURL
		conn, derr := m.dialGRPC()
		if derr != nil {
			// record the dial error and move on to the next member
			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		succeed := true
		if cerr != nil {
			// "already compacted" on a member after the first (i > 0) is
			// treated as success — presumably the compaction has already
			// propagated; on the first member it is a real failure.
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
			} else {
				plog.Warningf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
		}
	}
	return err
}
  257. func (c *cluster) checkCompact(rev int64) error {
  258. if rev == 0 {
  259. return nil
  260. }
  261. for _, m := range c.Members {
  262. if err := m.CheckCompact(rev); err != nil {
  263. return err
  264. }
  265. }
  266. return nil
  267. }
  268. func (c *cluster) defrag() error {
  269. for _, m := range c.Members {
  270. if err := m.Defrag(); err != nil {
  271. return err
  272. }
  273. }
  274. return nil
  275. }