// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"math/rand"
	"os"
	"testing"

	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/mvcc/backend"
	"go.etcd.io/etcd/pkg/traceutil"

	"go.uber.org/zap"
)
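
// BenchmarkWatchableStorePut benchmarks Put operations on a watchable
// store without an explicit transaction wrapper.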
func BenchmarkWatchableStorePut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

// BenchmarkWatchableStoreTxnPut benchmarks the Put operation with
// transaction begin and end, where the transaction involves
// synchronization operations such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		txn := s.Write(traceutil.TODO())
		txn.Put(keys[i], vals[i], lease.NoLease)
		txn.End()
	}
}

// BenchmarkWatchableStoreWatchPutSync benchmarks the case of
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchPutSync(b *testing.B) {
	benchmarkWatchableStoreWatchPut(b, true)
}

// BenchmarkWatchableStoreWatchPutUnsync benchmarks the case of
// many unsynced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
	benchmarkWatchableStoreWatchPut(b, false)
}

func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	k := []byte("testkey")
	v := []byte("testval")

	rev := int64(0)
	if !synced {
		// non-0 value to keep watchers in unsynced
		rev = 1
	}
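	// a startRev of 0 watches from the current revision, keeping watchers
	// in the synced group; a startRev at or below the current revision
	// places them in unsynced instead.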

	w := s.NewWatchStream()
	defer w.Close()

	watchIDs := make([]WatchID, b.N)
	for i := range watchIDs {
		watchIDs[i], _ = w.Watch(0, k, nil, rev)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// trigger watchers
	s.Put(k, v, lease.NoLease)
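
	// the single Put above produces one event per watcher; drain all
	// b.N responses from the watch stream's channel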
	for range watchIDs {
		<-w.Chan()
	}

	select {
	case wc := <-w.Chan():
		b.Fatalf("unexpected data %v", wc)
	default:
	}
}

// BenchmarkWatchableStoreUnsyncedCancel benchmarks cancel function
// performance for unsynced watchers in a WatchableStore. It creates k*N
// watchers to populate the unsynced map with a reasonably large number
// of watchers, and measures the time it takes to cancel N watchers out
// of the k*N. The performance is expected to differ depending on the
// unsynced member implementation.
// TODO: k is an arbitrary constant. We need to figure out what factor
// we should use to simulate real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore runs the syncWatchersLoop method, which
	// periodically syncs the watchers in the unsynced map. We want to
	// keep watchers in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watchers' startRev to 1,
	// forcing them to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
	}

	// randomly cancel N watchers so the benchmark is not biased towards
	// data structures with an order, such as a slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}
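
// BenchmarkWatchableStoreSyncedCancel measures the cost of canceling
// watchers kept in the synced watcher group; a startRev of 0 watches
// from the current revision, so the watchers go straight to synced.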
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 0)
	}

	// randomly cancel watchers so the benchmark is not biased towards
	// data structures with an order, such as a slice.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()
	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}