kvstore_bench_test.go
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package mvcc
  15. import (
  16. "sync/atomic"
  17. "testing"
  18. "go.etcd.io/etcd/lease"
  19. "go.etcd.io/etcd/mvcc/backend"
  20. "go.etcd.io/etcd/pkg/traceutil"
  21. "go.uber.org/zap"
  22. )
  23. type fakeConsistentIndex uint64
  24. func (i *fakeConsistentIndex) ConsistentIndex() uint64 {
  25. return atomic.LoadUint64((*uint64)(i))
  26. }
  27. func BenchmarkStorePut(b *testing.B) {
  28. var i fakeConsistentIndex
  29. be, tmpPath := backend.NewDefaultTmpBackend()
  30. s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
  31. defer cleanup(s, be, tmpPath)
  32. // arbitrary number of bytes
  33. bytesN := 64
  34. keys := createBytesSlice(bytesN, b.N)
  35. vals := createBytesSlice(bytesN, b.N)
  36. b.ResetTimer()
  37. for i := 0; i < b.N; i++ {
  38. s.Put(keys[i], vals[i], lease.NoLease)
  39. }
  40. }
  41. func BenchmarkStoreRangeKey1(b *testing.B) { benchmarkStoreRange(b, 1) }
  42. func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
  43. func benchmarkStoreRange(b *testing.B, n int) {
  44. var i fakeConsistentIndex
  45. be, tmpPath := backend.NewDefaultTmpBackend()
  46. s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
  47. defer cleanup(s, be, tmpPath)
  48. // 64 byte key/val
  49. keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
  50. for i := range keys {
  51. s.Put(keys[i], val[0], lease.NoLease)
  52. }
  53. // Force into boltdb tx instead of backend read tx.
  54. s.Commit()
  55. var begin, end []byte
  56. if n == 1 {
  57. begin, end = keys[0], nil
  58. } else {
  59. begin, end = []byte{}, []byte{}
  60. }
  61. b.ReportAllocs()
  62. b.ResetTimer()
  63. for i := 0; i < b.N; i++ {
  64. s.Range(begin, end, RangeOptions{})
  65. }
  66. }
  67. func BenchmarkConsistentIndex(b *testing.B) {
  68. fci := fakeConsistentIndex(10)
  69. be, tmpPath := backend.NewDefaultTmpBackend()
  70. s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &fci, StoreConfig{})
  71. defer cleanup(s, be, tmpPath)
  72. tx := s.b.BatchTx()
  73. tx.Lock()
  74. s.saveIndex(tx)
  75. tx.Unlock()
  76. b.ReportAllocs()
  77. b.ResetTimer()
  78. for i := 0; i < b.N; i++ {
  79. s.ConsistentIndex()
  80. }
  81. }
  82. // BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key
  83. func BenchmarkStorePutUpdate(b *testing.B) {
  84. var i fakeConsistentIndex
  85. be, tmpPath := backend.NewDefaultTmpBackend()
  86. s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
  87. defer cleanup(s, be, tmpPath)
  88. // arbitrary number of bytes
  89. keys := createBytesSlice(64, 1)
  90. vals := createBytesSlice(1024, 1)
  91. b.ResetTimer()
  92. for i := 0; i < b.N; i++ {
  93. s.Put(keys[0], vals[0], lease.NoLease)
  94. }
  95. }
  96. // BenchmarkStoreTxnPut benchmarks the Put operation
  97. // with transaction begin and end, where transaction involves
  98. // some synchronization operations, such as mutex locking.
  99. func BenchmarkStoreTxnPut(b *testing.B) {
  100. var i fakeConsistentIndex
  101. be, tmpPath := backend.NewDefaultTmpBackend()
  102. s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
  103. defer cleanup(s, be, tmpPath)
  104. // arbitrary number of bytes
  105. bytesN := 64
  106. keys := createBytesSlice(bytesN, b.N)
  107. vals := createBytesSlice(bytesN, b.N)
  108. b.ResetTimer()
  109. b.ReportAllocs()
  110. for i := 0; i < b.N; i++ {
  111. txn := s.Write(traceutil.TODO())
  112. txn.Put(keys[i], vals[i], lease.NoLease)
  113. txn.End()
  114. }
  115. }
  116. // benchmarkStoreRestore benchmarks the restore operation
  117. func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
  118. var i fakeConsistentIndex
  119. be, tmpPath := backend.NewDefaultTmpBackend()
  120. s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
  121. // use closure to capture 's' to pick up the reassignment
  122. defer func() { cleanup(s, be, tmpPath) }()
  123. // arbitrary number of bytes
  124. bytesN := 64
  125. keys := createBytesSlice(bytesN, b.N)
  126. vals := createBytesSlice(bytesN, b.N)
  127. for i := 0; i < b.N; i++ {
  128. for j := 0; j < revsPerKey; j++ {
  129. txn := s.Write(traceutil.TODO())
  130. txn.Put(keys[i], vals[i], lease.NoLease)
  131. txn.End()
  132. }
  133. }
  134. s.Close()
  135. b.ReportAllocs()
  136. b.ResetTimer()
  137. s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
  138. }
  139. func BenchmarkStoreRestoreRevs1(b *testing.B) {
  140. benchmarkStoreRestore(1, b)
  141. }
  142. func BenchmarkStoreRestoreRevs10(b *testing.B) {
  143. benchmarkStoreRestore(10, b)
  144. }
  145. func BenchmarkStoreRestoreRevs20(b *testing.B) {
  146. benchmarkStoreRestore(20, b)
  147. }