Merge pull request #7859 from heyitsanthony/cache-consistent-get

mvcc: cache consistent index
Anthony Romano
commit 6846e49edf
2 changed files with 33 additions and 3 deletions
  1. mvcc/kvstore.go (+15 −3)
  2. mvcc/kvstore_bench_test.go (+18 −0)
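The change in a nutshell: ConsistentIndex() used to read the consistent_index key out of the boltdb backend on every call, taking the batch-transaction lock each time. This commit adds a consistentIndex field to store that caches the value via sync/atomic: saveIndex refreshes the cache whenever it persists the index, and ConsistentIndex() answers from the cache once it is non-zero (0 doubles as the "not cached" sentinel, which is harmless because the slow path also returns 0 in that case). A minimal standalone sketch of the pattern, with a hypothetical readFromBackend standing in for the locked boltdb lookup:

package main

import "sync/atomic"

type store struct {
	// Kept 64-bit aligned for sync/atomic; the first word of an
	// allocated struct is guaranteed to be aligned.
	consistentIndex uint64
}

// readFromBackend is a hypothetical stand-in for the locked boltdb
// lookup of the "consistent_index" key.
func (s *store) readFromBackend() uint64 { return 10 }

// ConsistentIndex serves from the cache when warm; otherwise it
// falls through to the backend and populates the cache.
func (s *store) ConsistentIndex() uint64 {
	if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
		return ci // fast path: no backend lock taken
	}
	v := s.readFromBackend()
	atomic.StoreUint64(&s.consistentIndex, v)
	return v
}

// saveIndex is the write path: persist the index, then refresh the
// cache so subsequent reads stay on the fast path.
func (s *store) saveIndex(ci uint64) {
	// (persist ci to the backend here)
	atomic.StoreUint64(&s.consistentIndex, ci)
}

func main() {
	s := &store{}
	_ = s.ConsistentIndex() // cold: falls through to the backend
	s.saveIndex(11)
	_ = s.ConsistentIndex() // warm: a single atomic load
}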

+ 15 - 3
mvcc/kvstore.go

@@ -19,6 +19,7 @@ import (
 	"errors"
 	"math"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/coreos/etcd/lease"
@@ -67,6 +68,10 @@ type store struct {
 	ReadView
 	WriteView

+	// consistentIndex caches the "consistent_index" key's value. Accessed
+	// through atomics so must be 64-bit aligned.
+	consistentIndex uint64
+
 	// mu read locks for txns and write locks for non-txn store changes.
 	mu sync.RWMutex

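The alignment note in the new comment matters on 32-bit platforms: sync/atomic only guarantees atomic access to 64-bit words that are 64-bit aligned, and only the first word of an allocated struct, array, or slice can be relied on to be aligned. Here the field sits after two interface values (16 bytes even on 32-bit builds), so 8-byte alignment is preserved. A sketch of the rule, with hypothetical layouts:

package main

import "sync/atomic"

// good: the uint64 is the first word of the struct, so it is
// 64-bit aligned even on 32-bit platforms.
type goodLayout struct {
	counter uint64
	flag    uint32
}

// risky: on a 32-bit platform flag leaves counter at a 4-byte
// offset, and a 64-bit atomic on it can panic at runtime.
type riskyLayout struct {
	flag    uint32
	counter uint64
}

func main() {
	g := &goodLayout{}
	atomic.AddUint64(&g.counter, 1) // always safe

	r := &riskyLayout{}
	atomic.AddUint64(&r.counter, 1) // may panic on 386/ARM
}

(Since Go 1.19, atomic.Uint64 sidesteps this by enforcing its own alignment, but this code predates it.)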
@@ -234,6 +239,7 @@ func (s *store) Restore(b backend.Backend) error {
 	close(s.stopc)
 	s.fifoSched.Stop()

+	atomic.StoreUint64(&s.consistentIndex, 0)
 	s.b = b
 	s.kvindex = newTreeIndex()
 	s.currentRev = 1
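Restore swaps in a fresh backend, so the cached value may be stale; zeroing it forces the next ConsistentIndex() call down the slow path against the new backend. Extending the sketch above, the invalidation is just:

// Invalidate on backend swap: 0 means "not cached", so the next
// ConsistentIndex() call re-reads from the new backend.
func (s *store) restore() {
	atomic.StoreUint64(&s.consistentIndex, 0)
}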
@@ -380,14 +386,18 @@ func (s *store) saveIndex(tx backend.BatchTx) {
 		return
 	}
 	bs := s.bytesBuf8
-	binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex())
+	ci := s.ig.ConsistentIndex()
+	binary.BigEndian.PutUint64(bs, ci)
 	// put the index into the underlying backend
 	// tx has been locked in TxnBegin, so there is no need to lock it again
 	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+	atomic.StoreUint64(&s.consistentIndex, ci)
 }

 func (s *store) ConsistentIndex() uint64 {
-	// TODO: cache index in a uint64 field?
+	if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
+		return ci
+	}
 	tx := s.b.BatchTx()
 	tx.Lock()
 	defer tx.Unlock()
@@ -395,7 +405,9 @@ func (s *store) ConsistentIndex() uint64 {
 	if len(vs) == 0 {
 		return 0
 	}
-	return binary.BigEndian.Uint64(vs[0])
+	v := binary.BigEndian.Uint64(vs[0])
+	atomic.StoreUint64(&s.consistentIndex, v)
+	return v
 }

 // appendMarkTombstone appends tombstone mark to normal revision bytes.

+ 18 - 0
mvcc/kvstore_bench_test.go

@@ -45,6 +45,24 @@ func BenchmarkStorePut(b *testing.B) {
 	}
 }

+func BenchmarkConsistentIndex(b *testing.B) {
+	fci := fakeConsistentIndex(10)
+	be, tmpPath := backend.NewDefaultTmpBackend()
+	s := NewStore(be, &lease.FakeLessor{}, &fci)
+	defer cleanup(s, be, tmpPath)
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	s.saveIndex(tx)
+	tx.Unlock()
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.ConsistentIndex()
+	}
+}
+
 // BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key
 func BenchmarkStorePutUpdate(b *testing.B) {
 	var i fakeConsistentIndex
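Note that the new benchmark calls saveIndex once before the timed loop, so the cache is already warm and the loop measures only the atomic-load fast path; with b.ReportAllocs it should report zero allocations per op. Assuming a checkout of the etcd repository, it can be run with something like:

go test -run NONE -bench BenchmarkConsistentIndex ./mvcc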