stresser.go 9.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package main
  15. import (
  16. "fmt"
  17. "math/rand"
  18. "net"
  19. "net/http"
  20. "sync"
  21. "time"
  22. clientV2 "github.com/coreos/etcd/client"
  23. "github.com/coreos/etcd/etcdserver"
  24. "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
  25. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  26. "golang.org/x/net/context"
  27. "golang.org/x/time/rate"
  28. "google.golang.org/grpc"
  29. "google.golang.org/grpc/grpclog"
  30. "google.golang.org/grpc/transport"
  31. )
// init routes gRPC's internal logging through the tester's logger (plog)
// so client-side gRPC messages share the tester's log stream.
func init() {
	grpclog.SetLogger(plog)
}
  35. type stressFunc func(ctx context.Context) error
// stressEntry pairs a stress operation with the relative weight used when
// randomly selecting which operation to run next.
type stressEntry struct {
	weight float32 // relative selection weight; weights need not sum to 1
	f      stressFunc
}
// stressTable supports weighted random selection over a fixed set of
// stress operations.
type stressTable struct {
	entries    []stressEntry
	sumWeights float32 // cached total of all entry weights
}
  44. func createStressTable(entries []stressEntry) *stressTable {
  45. st := stressTable{entries: entries}
  46. for _, entry := range st.entries {
  47. st.sumWeights += entry.weight
  48. }
  49. return &st
  50. }
  51. func (st *stressTable) choose() stressFunc {
  52. v := rand.Float32() * st.sumWeights
  53. var sum float32
  54. var idx int
  55. for i := range st.entries {
  56. sum += st.entries[i].weight
  57. if sum >= v {
  58. idx = i
  59. break
  60. }
  61. }
  62. return st.entries[idx].f
  63. }
  64. func newStressPut(kvc pb.KVClient, keySuffixRange, keySize int) stressFunc {
  65. return func(ctx context.Context) error {
  66. _, err := kvc.Put(ctx, &pb.PutRequest{
  67. Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
  68. Value: randBytes(keySize),
  69. }, grpc.FailFast(false))
  70. return err
  71. }
  72. }
  73. func newStressRange(kvc pb.KVClient, keySuffixRange int) stressFunc {
  74. return func(ctx context.Context) error {
  75. _, err := kvc.Range(ctx, &pb.RangeRequest{
  76. Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
  77. }, grpc.FailFast(false))
  78. return err
  79. }
  80. }
  81. func newStressRangeInterval(kvc pb.KVClient, keySuffixRange int) stressFunc {
  82. return func(ctx context.Context) error {
  83. start := rand.Intn(keySuffixRange)
  84. end := start + 500
  85. _, err := kvc.Range(ctx, &pb.RangeRequest{
  86. Key: []byte(fmt.Sprintf("foo%016x", start)),
  87. RangeEnd: []byte(fmt.Sprintf("foo%016x", end)),
  88. }, grpc.FailFast(false))
  89. return err
  90. }
  91. }
  92. func newStressDelete(kvc pb.KVClient, keySuffixRange int) stressFunc {
  93. return func(ctx context.Context) error {
  94. _, err := kvc.DeleteRange(ctx, &pb.DeleteRangeRequest{
  95. Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
  96. }, grpc.FailFast(false))
  97. return err
  98. }
  99. }
  100. func newStressDeleteInterval(kvc pb.KVClient, keySuffixRange int) stressFunc {
  101. return func(ctx context.Context) error {
  102. start := rand.Intn(keySuffixRange)
  103. end := start + 500
  104. _, err := kvc.DeleteRange(ctx, &pb.DeleteRangeRequest{
  105. Key: []byte(fmt.Sprintf("foo%016x", start)),
  106. RangeEnd: []byte(fmt.Sprintf("foo%016x", end)),
  107. }, grpc.FailFast(false))
  108. return err
  109. }
  110. }
// Stresser generates load against an etcd cluster member.
type Stresser interface {
	// Stress starts to stress the etcd cluster.
	Stress() error
	// Cancel cancels the stress test on the etcd cluster.
	Cancel()
	// Report reports the success and failure counts of the stress test.
	Report() (success int, failure int)
}
// stresser stresses a single member over the etcd v3 gRPC API with a
// weighted mix of Put/Range/DeleteRange operations.
type stresser struct {
	Endpoint string // gRPC address of the target member

	keyLargeSize   int // value size for the occasional large put
	keySize        int // value size for regular puts
	keySuffixRange int // number of distinct "foo%016x" key suffixes

	N int // number of concurrent worker goroutines

	// mu guards wg, cancel, conn, success, and failure.
	mu sync.Mutex

	wg          *sync.WaitGroup
	rateLimiter *rate.Limiter // shared limiter; must be set before Stress
	cancel      func()
	conn        *grpc.ClientConn

	success int // requests that completed without error
	failure int // requests that returned an error

	stressTable *stressTable // weighted operation table built by Stress
}
// Stress dials the member, builds the weighted operation table, and
// launches s.N worker goroutines that issue requests until Cancel is
// called. It returns an error only if the initial dial fails.
func (s *stresser) Stress() error {
	if s.rateLimiter == nil {
		panic("expect rateLimiter to be set")
	}
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(s.N)
	// Publish conn/cancel/wg under the lock so a concurrent Cancel
	// observes a consistent triple.
	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.mu.Unlock()
	kvc := pb.NewKVClient(conn)
	// Weighted mix: regular puts dominate; large puts are scaled by
	// keySize/keyLargeSize (presumably to keep byte volume comparable —
	// TODO confirm), with reads and deletes at low weight.
	var stressEntries = []stressEntry{
		{weight: 0.7, f: newStressPut(kvc, s.keySuffixRange, s.keySize)},
		{
			weight: 0.7 * float32(s.keySize) / float32(s.keyLargeSize),
			f:      newStressPut(kvc, s.keySuffixRange, s.keyLargeSize),
		},
		{weight: 0.07, f: newStressRange(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressRangeInterval(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDelete(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDeleteInterval(kvc, s.keySuffixRange)},
	}
	s.stressTable = createStressTable(stressEntries)
	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}
	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
// run is one stress worker: it rate-limits itself, executes one randomly
// chosen operation with a 10-second timeout, and classifies any error as
// either transient (retry: cluster churn from failure injection) or
// terminal (return: cancellation, or an unrecognized error).
func (s *stresser) run(ctx context.Context) {
	defer s.wg.Done()
	for {
		if err := s.rateLimiter.Wait(ctx); err == context.Canceled {
			return
		}
		// TODO: 10-second is enough timeout to cover leader failure
		// and immediate leader election. Find out what other cases this
		// could be timed out.
		sctx, scancel := context.WithTimeout(ctx, 10*time.Second)
		err := s.stressTable.choose()(sctx)
		scancel()
		if err != nil {
			// Every error counts as a failure, even the ones retried below.
			s.mu.Lock()
			s.failure++
			s.mu.Unlock()
			// Errors are matched by gRPC error description string.
			switch grpc.ErrorDesc(err) {
			case context.DeadlineExceeded.Error():
				// This retries when request is triggered at the same time as
				// leader failure. When we terminate the leader, the request to
				// that leader cannot be processed, and times out. Also requests
				// to followers cannot be forwarded to the old leader, so timing out
				// as well. We want to keep stressing until the cluster elects a
				// new leader and start processing requests again.
				continue
			case etcdserver.ErrTimeoutDueToLeaderFail.Error(), etcdserver.ErrTimeout.Error():
				// This retries when request is triggered at the same time as
				// leader failure and follower nodes receive time out errors
				// from losing their leader. Followers should retry to connect
				// to the new leader.
				continue
			case etcdserver.ErrStopped.Error():
				// one of the etcd nodes stopped from failure injection
				continue
			case transport.ErrConnClosing.Desc:
				// server closed the transport (failure injected node)
				continue
			case rpctypes.ErrNotCapable.Error():
				// capability check has not been done (in the beginning)
				continue
			case rpctypes.ErrTooManyRequests.Error():
				// hitting the recovering member.
				continue
			case context.Canceled.Error():
				// from stresser.Cancel method:
				return
			case grpc.ErrClientConnClosing.Error():
				// from stresser.Cancel method:
				return
			}
			// Unrecognized error: log the running counters and stop this worker.
			su, fa := s.Report()
			plog.Warningf("stresser %v (success %d, failure %d) exited with error (%v)", s.Endpoint, su, fa, err)
			return
		}
		s.mu.Lock()
		s.success++
		s.mu.Unlock()
	}
}
  229. func (s *stresser) Cancel() {
  230. s.mu.Lock()
  231. s.cancel()
  232. s.conn.Close()
  233. wg := s.wg
  234. s.mu.Unlock()
  235. wg.Wait()
  236. plog.Printf("stresser %q is canceled", s.Endpoint)
  237. }
  238. func (s *stresser) Report() (int, int) {
  239. s.mu.Lock()
  240. defer s.mu.Unlock()
  241. return s.success, s.failure
  242. }
// stresserV2 stresses a member through the etcd v2 client API with
// concurrent Set requests.
type stresserV2 struct {
	Endpoint string // client URL of the target member

	keySize        int // value size for each Set
	keySuffixRange int // number of distinct "foo%016x" key suffixes
	N              int // number of concurrent worker goroutines

	// mu guards failure and success.
	mu      sync.Mutex
	failure int
	success int

	cancel func() // set by Stress; stops all workers
}
// Stress launches s.N goroutines that repeatedly Set random keys via the
// etcd v2 client. Note: unlike stresser.Stress, this blocks until Cancel
// is called (ctx is done) and then returns nil.
func (s *stresserV2) Stress() error {
	cfg := clientV2.Config{
		Endpoints: []string{s.Endpoint},
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			// One idle connection per worker so goroutines don't contend.
			MaxIdleConnsPerHost: s.N,
		},
	}
	c, err := clientV2.New(cfg)
	if err != nil {
		return err
	}
	kv := clientV2.NewKeysAPI(c)
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel
	for i := 0; i < s.N; i++ {
		go func() {
			for {
				setctx, setcancel := context.WithTimeout(ctx, clientV2.DefaultRequestTimeout)
				key := fmt.Sprintf("foo%016x", rand.Intn(s.keySuffixRange))
				_, err := kv.Set(setctx, key, string(randBytes(s.keySize)), nil)
				setcancel()
				// Workers exit only on cancellation; other errors are counted.
				if err == context.Canceled {
					return
				}
				s.mu.Lock()
				if err != nil {
					s.failure++
				} else {
					s.success++
				}
				s.mu.Unlock()
			}
		}()
	}
	// Block until Cancel fires the context.
	<-ctx.Done()
	return nil
}
// Cancel signals all v2 stress goroutines to stop. Unlike stresser.Cancel,
// it does not wait for them to exit.
func (s *stresserV2) Cancel() {
	s.cancel()
}
  297. func (s *stresserV2) Report() (success int, failure int) {
  298. s.mu.Lock()
  299. defer s.mu.Unlock()
  300. return s.success, s.failure
  301. }
  302. func randBytes(size int) []byte {
  303. data := make([]byte, size)
  304. for i := 0; i < size; i++ {
  305. data[i] = byte(int('a') + rand.Intn(26))
  306. }
  307. return data
  308. }
  309. // nopStresser implements Stresser that does nothing
  310. type nopStresser struct {
  311. start time.Time
  312. qps int
  313. }
  314. func (s *nopStresser) Stress() error { return nil }
  315. func (s *nopStresser) Cancel() {}
  316. func (s *nopStresser) Report() (int, int) {
  317. return int(time.Since(s.start).Seconds()) * s.qps, 0
  318. }
// stressConfig carries the tunables shared by all stresser builders.
type stressConfig struct {
	qps            int  // rate limit and burst for the v3 stresser's limiter
	keyLargeSize   int  // large-value put size (v3 stresser only)
	keySize        int  // regular value size
	keySuffixRange int  // number of distinct keys to touch
	v2             bool // use the etcd v2 API instead of v3
}
  326. type stressBuilder func(m *member) Stresser
  327. func newStressBuilder(s string, sc *stressConfig) stressBuilder {
  328. switch s {
  329. case "nop":
  330. return func(*member) Stresser {
  331. return &nopStresser{
  332. start: time.Now(),
  333. qps: sc.qps,
  334. }
  335. }
  336. case "default":
  337. // TODO: Too intensive stressers can panic etcd member with
  338. // 'out of memory' error. Put rate limits in server side.
  339. stressN := 100
  340. l := rate.NewLimiter(rate.Limit(sc.qps), sc.qps)
  341. return func(m *member) Stresser {
  342. if sc.v2 {
  343. return &stresserV2{
  344. Endpoint: m.ClientURL,
  345. keySize: sc.keySize,
  346. keySuffixRange: sc.keySuffixRange,
  347. N: stressN,
  348. }
  349. } else {
  350. return &stresser{
  351. Endpoint: m.grpcAddr(),
  352. keyLargeSize: sc.keyLargeSize,
  353. keySize: sc.keySize,
  354. keySuffixRange: sc.keySuffixRange,
  355. N: stressN,
  356. rateLimiter: l,
  357. }
  358. }
  359. }
  360. default:
  361. plog.Panicf("unknown stresser type: %s\n", s)
  362. }
  363. return nil // never reach here
  364. }