watchable_store_bench_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"math/rand"
	"os"
	"testing"

	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/mvcc/backend"
	"go.etcd.io/etcd/pkg/traceutil"

	"go.uber.org/zap"
)

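// BenchmarkWatchableStorePut measures the cost of Put on a watchable store
// with no watchers registered. Keys and values are pre-generated 64-byte
// slices so that test-data creation stays outside the timed loop.
//
// These benchmarks can be run with the standard Go tooling from this
// package's directory, e.g.
// `go test -run=NONE -bench=BenchmarkWatchableStore -benchmem`.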
func BenchmarkWatchableStorePut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

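	// exclude the setup above (backend creation and key/value generation)
	// from the benchmark timing, and report allocations per operation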
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

// BenchmarkWatchableStoreTxnPut benchmarks Put operations wrapped in an
// explicit transaction begin and end, where the transaction adds
// synchronization overhead such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
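	// fakeConsistentIndex provides a consistent index for the store;
	// BenchmarkWatchableStorePut above passes nil for this argument instead.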
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
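	// every iteration opens a write transaction, performs a single Put, and
	// ends the transaction, so the txn begin/end overhead is part of the
	// measured cost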
	for i := 0; i < b.N; i++ {
		txn := s.Write(traceutil.TODO())
		txn.Put(keys[i], vals[i], lease.NoLease)
		txn.End()
	}
}

// BenchmarkWatchableStoreWatchPutSync benchmarks the case of
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchPutSync(b *testing.B) {
	benchmarkWatchableStoreWatchPut(b, true)
}

// BenchmarkWatchableStoreWatchPutUnsync benchmarks the case of
// many unsynced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
	benchmarkWatchableStoreWatchPut(b, false)
}

func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	k := []byte("testkey")
	v := []byte("testval")

	rev := int64(0)
	if !synced {
		// non-0 value to keep watchers in unsynced
		rev = 1
	}

	w := s.NewWatchStream()
	defer w.Close()
	watchIDs := make([]WatchID, b.N)
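	// register b.N watchers on the same key; WatchID 0 lets the stream
	// auto-assign IDs, and a nil end key watches the single key k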
	for i := range watchIDs {
		watchIDs[i], _ = w.Watch(0, k, nil, rev)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// trigger watchers
	s.Put(k, v, lease.NoLease)
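	// the single Put should produce one watch response per registered
	// watcher; drain them all, then verify no extra events arrive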
	for range watchIDs {
		<-w.Chan()
	}
	select {
	case wc := <-w.Chan():
		b.Fatalf("unexpected data %v", wc)
	default:
	}
}

// BenchmarkWatchableStoreUnsyncedCancel benchmarks cancel performance for
// unsynced watchers in a watchableStore. It creates k*N watchers to
// populate unsynced with a reasonably large number of watchers, and
// measures the time it takes to cancel N watchers out of the k*N.
// The performance is expected to differ depending on the unsynced member
// implementation.
// TODO: k is an arbitrary constant. We need to figure out what factor
// we should use to simulate real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})

	// manually create watchableStore instead of calling newWatchableStore
	// because newWatchableStore starts a syncWatchersLoop goroutine that
	// periodically syncs watchers in the unsynced map. We want to keep
	// watchers in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}
	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// and force watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
	}

	// randomly cancel N watchers so the measurement is not biased towards
	// data structures with an order, such as slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}

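// BenchmarkWatchableStoreSyncedCancel measures cancellation cost for
// watchers in the synced watcher group: it registers one million watchers
// on a single key with startRev 0 so they are synced immediately, and then
// cancels every one of them in random order inside the timed section.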
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 0)
	}

	// randomly cancel watchers to make it not biased towards
	// data structures with an order, such as slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()
	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}