watchable_store_bench_test.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"math/rand"
	"os"
	"testing"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/storage/backend"
)

// BenchmarkWatchableStoreUnsyncedCancel benchmarks the cancel function
// performance for unsynced watchers in a WatchableStore. It creates k*N
// watchers to populate unsynced with a reasonably large number of watchers,
// and measures the time it takes to cancel N watchers out of the k*N. The
// performance is expected to differ depending on the unsynced member
// implementation.
// TODO: k is an arbitrary constant. We need to figure out what factor
// to use to simulate real-world use cases.
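//
// An example invocation that runs only this benchmark (a sketch; run it from
// this package's directory, or adjust the package path as needed):
//
//	go test -run '^$' -bench BenchmarkWatchableStoreUnsyncedCancel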
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{})

	// manually create watchableStore instead of calling newWatchableStore,
	// because newWatchableStore runs syncWatchersLoop, which periodically
	// syncs the watchers in the unsynced map. We want to keep the watchers
	// in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we set the watchers' startRev to 1 to force
	// them to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

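	// A single watch stream is shared by all the watchers created below;
	// each Watch call returns a WatchID that is later used to cancel that
	// watcher.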
	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	// randomly cancel N watchers so the benchmark is not biased towards
	// data structures with an order, such as slice.
	ix := rand.Perm(watcherN)

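	// Reset the timer so that the setup above (backend creation and the k*N
	// Watch calls) is excluded from the measurement; ReportAllocs reports
	// allocations per cancel operation.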
	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}

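// BenchmarkWatchableStoreSyncedCancel benchmarks the cancel function
// performance for synced watchers in a WatchableStore. Watchers are created
// with a startRev of 0 so that they stay in the synced watcher group, and
// the benchmark measures the time it takes to cancel them.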
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i] = w.Watch(testKey, nil, 0)
	}

	// randomly cancel watchers so the benchmark is not biased towards
	// data structures with an order, such as slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()
	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}