kvstore_bench_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"sync/atomic"
	"testing"

	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/mvcc/backend"
	"go.uber.org/zap"
)
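
// fakeConsistentIndex is a stub consistent-index source for the benchmarks;
// ConsistentIndex simply reads an atomic counter.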
type fakeConsistentIndex uint64

func (i *fakeConsistentIndex) ConsistentIndex() uint64 {
	return atomic.LoadUint64((*uint64)(i))
}
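
// BenchmarkStorePut measures one Put per iteration using pre-generated
// 64-byte keys and values.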
func BenchmarkStorePut(b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

func BenchmarkStoreRangeKey1(b *testing.B)   { benchmarkStoreRange(b, 1) }
func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
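
// benchmarkStoreRange measures Range against a store pre-populated with n
// 64-byte keys: a single-key range when n == 1, otherwise a range over the
// whole keyspace.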
func benchmarkStoreRange(b *testing.B, n int) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
	defer cleanup(s, be, tmpPath)

	// 64 byte key/val
	keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
	for i := range keys {
		s.Put(keys[i], val[0], lease.NoLease)
	}
	// Force into boltdb tx instead of backend read tx.
	s.Commit()

	var begin, end []byte
	if n == 1 {
		begin, end = keys[0], nil
	} else {
		begin, end = []byte{}, []byte{}
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Range(begin, end, RangeOptions{})
	}
}
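
// BenchmarkConsistentIndex measures reading the consistent index back from
// the store after it has been persisted once via saveIndex.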
func BenchmarkConsistentIndex(b *testing.B) {
	fci := fakeConsistentIndex(10)
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &fci)
	defer cleanup(s, be, tmpPath)

	tx := s.b.BatchTx()
	tx.Lock()
	s.saveIndex(tx)
	tx.Unlock()

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.ConsistentIndex()
	}
}

// BenchmarkStorePutUpdate is the same as BenchmarkStorePut, but repeatedly
// updates a single key instead of writing distinct keys.
func BenchmarkStorePutUpdate(b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	keys := createBytesSlice(64, 1)
	vals := createBytesSlice(1024, 1)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Put(keys[0], vals[0], lease.NoLease)
	}
}

// BenchmarkStoreTxnPut benchmarks the Put operation
// with transaction begin and end, where the transaction involves
// some synchronization operations, such as mutex locking.
func BenchmarkStoreTxnPut(b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		txn := s.Write()
		txn.Put(keys[i], vals[i], lease.NoLease)
		txn.End()
	}
}

// benchmarkStoreRestore benchmarks the restore operation
func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
	// use closure to capture 's' to pick up the reassignment
	defer func() { cleanup(s, be, tmpPath) }()

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	for i := 0; i < b.N; i++ {
		for j := 0; j < revsPerKey; j++ {
			txn := s.Write()
			txn.Put(keys[i], vals[i], lease.NoLease)
			txn.End()
		}
	}
	s.Close()

	b.ReportAllocs()
	b.ResetTimer()
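	// NewStore rebuilds the store state from the existing backend; this
	// restore is what the benchmark measures.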
	s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
}
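
// Restore benchmarks with 1, 10, and 20 revisions per key.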
func BenchmarkStoreRestoreRevs1(b *testing.B) {
	benchmarkStoreRestore(1, b)
}

func BenchmarkStoreRestoreRevs10(b *testing.B) {
	benchmarkStoreRestore(10, b)
}

func BenchmarkStoreRestoreRevs20(b *testing.B) {
	benchmarkStoreRestore(20, b)
}