watchable_store_bench_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc

import (
	"math/rand"
	"os"
	"testing"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
)
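
// BenchmarkWatchableStorePut measures the cost of writing b.N key/value
// pairs directly with Put; no watchers are registered on the store.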
func BenchmarkWatchableStorePut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(be, &lease.FakeLessor{}, nil)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

// BenchmarkWatchableStoreTxnPut benchmarks the Put operation with
// transaction begin and end, where the transaction involves some
// synchronization operations, such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(be, &lease.FakeLessor{}, &i)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		id := s.TxnBegin()
		if _, err := s.TxnPut(id, keys[i], vals[i], lease.NoLease); err != nil {
			plog.Fatalf("txn put error: %v", err)
		}
		s.TxnEnd(id)
	}
}

// BenchmarkWatchableStoreWatchSyncPut benchmarks the case of
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchSyncPut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{}, nil)
	defer cleanup(s, be, tmpPath)

	k := []byte("testkey")
	v := []byte("testval")

	w := s.NewWatchStream()
	defer w.Close()
	watchIDs := make([]WatchID, b.N)
	for i := range watchIDs {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(k, nil, 1)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// trigger watchers
	s.Put(k, v, lease.NoLease)
	for range watchIDs {
		<-w.Chan()
	}

	select {
	case wc := <-w.Chan():
		b.Fatalf("unexpected data %v", wc)
	default:
	}
}

// BenchmarkWatchableStoreUnsyncedCancel benchmarks cancel performance
// for unsynced watchers in a WatchableStore. It creates k*N watchers
// to populate unsynced with a reasonably large number of watchers, and
// measures the time it takes to cancel N watchers out of the k*N. The
// performance is expected to differ depending on the unsynced member
// implementation.
// TODO: k is an arbitrary constant. We need to figure out what factor
// we should use to simulate real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{}, nil)

	// manually create watchableStore instead of using newWatchableStore
	// because newWatchableStore runs the syncWatchersLoop method, which
	// periodically syncs watchers in the unsynced map. We want to keep
	// watchers in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we set the watcher's startRev to 1 to force
	// the watchers into unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	// randomly cancel N watchers so the benchmark is not biased towards
	// data structures with an order, such as a slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}
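
// BenchmarkWatchableStoreSyncedCancel measures the cost of canceling
// watchers that stay in the synced watcher group (created with startRev 0),
// canceling them in random order.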
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i] = w.Watch(testKey, nil, 0)
	}

	// randomly cancel watchers so the benchmark is not biased towards
	// data structures with an order, such as a slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()
	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}
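
// Note: assuming the standard Go toolchain, these benchmarks can be run from
// the repository root with something like:
//
//	go test -run '^$' -bench 'BenchmarkWatchableStore' -benchmem ./mvcc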