Browse Source

storage: decouple default parameters and storage creation

newStore() uses constants for some important parameters
e.g. batchInerval. It is not suitable for other storage users
(e.g. tools/benchmark). This commit decouples the default parameters
from storage creation. etcd should use a newly added function
newDefaultStore() for creating a store with default parameters.
Hitoshi Mitake 10 years ago
parent
commit
0b2d31f3bc

+ 18 - 18
storage/kv_test.go

@@ -89,7 +89,7 @@ func TestKVRange(t *testing.T)    { testKVRange(t, normalRangeFunc) }
 func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
 
 func testKVRange(t *testing.T, f rangeFunc) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"))
@@ -156,7 +156,7 @@ func TestKVRangeRev(t *testing.T)    { testKVRangeRev(t, normalRangeFunc) }
 func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) }
 
 func testKVRangeRev(t *testing.T, f rangeFunc) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"))
@@ -198,7 +198,7 @@ func TestKVRangeBadRev(t *testing.T)    { testKVRangeBadRev(t, normalRangeFunc)
 func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) }
 
 func testKVRangeBadRev(t *testing.T, f rangeFunc) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"))
@@ -230,7 +230,7 @@ func TestKVRangeLimit(t *testing.T)    { testKVRangeLimit(t, normalRangeFunc) }
 func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
 
 func testKVRangeLimit(t *testing.T, f rangeFunc) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"))
@@ -274,7 +274,7 @@ func TestKVPutMultipleTimes(t *testing.T)    { testKVPutMultipleTimes(t, normalP
 func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) }
 
 func testKVPutMultipleTimes(t *testing.T, f putFunc) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	for i := 0; i < 10; i++ {
@@ -335,7 +335,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 	}
 
 	for i, tt := range tests {
-		s := newStore(tmpPath)
+		s := newDefaultStore(tmpPath)
 
 		s.Put([]byte("foo"), []byte("bar"))
 		s.Put([]byte("foo1"), []byte("bar1"))
@@ -354,7 +354,7 @@ func TestKVDeleteMultipleTimes(t *testing.T)    { testKVDeleteMultipleTimes(t, n
 func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) }
 
 func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"))
@@ -374,7 +374,7 @@ func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
 
 // test that range, put, delete on single key in sequence repeatedly works correctly.
 func TestKVOperationInSequence(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	for i := 0; i < 10; i++ {
@@ -420,7 +420,7 @@ func TestKVOperationInSequence(t *testing.T) {
 }
 
 func TestKVTxnBlockNonTnxOperations(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	tests := []func(){
@@ -451,7 +451,7 @@ func TestKVTxnBlockNonTnxOperations(t *testing.T) {
 }
 
 func TestKVTxnWrongID(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	id := s.TxnBegin()
@@ -487,7 +487,7 @@ func TestKVTxnWrongID(t *testing.T) {
 
 // test that txn range, put, delete on single key in sequence repeatedly works correctly.
 func TestKVTnxOperationInSequence(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	for i := 0; i < 10; i++ {
@@ -542,7 +542,7 @@ func TestKVTnxOperationInSequence(t *testing.T) {
 }
 
 func TestKVCompactReserveLastValue(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar0"))
@@ -595,7 +595,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
 }
 
 func TestKVCompactBad(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar0"))
@@ -627,7 +627,7 @@ func TestKVHash(t *testing.T) {
 
 	for i := 0; i < len(hashes); i++ {
 		var err error
-		kv := newStore(tmpPath)
+		kv := newDefaultStore(tmpPath)
 		kv.Put([]byte("foo0"), []byte("bar0"))
 		kv.Put([]byte("foo1"), []byte("bar0"))
 		hashes[i], err = kv.Hash()
@@ -663,7 +663,7 @@ func TestKVRestore(t *testing.T) {
 		},
 	}
 	for i, tt := range tests {
-		s := newStore(tmpPath)
+		s := newDefaultStore(tmpPath)
 		tt(s)
 		var kvss [][]storagepb.KeyValue
 		for k := int64(0); k < 10; k++ {
@@ -672,7 +672,7 @@ func TestKVRestore(t *testing.T) {
 		}
 		s.Close()
 
-		ns := newStore(tmpPath)
+		ns := newDefaultStore(tmpPath)
 		ns.Restore()
 		// wait for possible compaction to finish
 		testutil.WaitSchedule()
@@ -690,7 +690,7 @@ func TestKVRestore(t *testing.T) {
 }
 
 func TestKVSnapshot(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"))
@@ -714,7 +714,7 @@ func TestKVSnapshot(t *testing.T) {
 	}
 	f.Close()
 
-	ns := newStore("new_test")
+	ns := newDefaultStore("new_test")
 	defer cleanup(ns, "new_test")
 	ns.Restore()
 	kvs, rev, err := ns.Range([]byte("a"), []byte("z"), 0, 0)

+ 5 - 1
storage/kvstore.go

@@ -65,7 +65,7 @@ type store struct {
 	stopc chan struct{}
 }
 
-func newStore(path string) *store {
+func NewStore(path string, batchInterval time.Duration, batchLimit int) KV {
 	s := &store{
 		b:              backend.New(path, batchInterval, batchLimit),
 		kvindex:        newTreeIndex(),
@@ -84,6 +84,10 @@ func newStore(path string) *store {
 	return s
 }
 
+func newDefaultStore(path string) *store {
+	return (NewStore(path, batchInterval, batchLimit)).(*store)
+}
+
 func (s *store) Rev() int64 {
 	s.mu.Lock()
 	defer s.mu.Unlock()

+ 2 - 2
storage/kvstore_bench_test.go

@@ -20,7 +20,7 @@ import (
 )
 
 func BenchmarkStorePut(b *testing.B) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer os.Remove(tmpPath)
 
 	// arbitrary number of bytes
@@ -38,7 +38,7 @@ func BenchmarkStorePut(b *testing.B) {
 // with transaction begin and end, where transaction involves
 // some synchronization operations, such as mutex locking.
 func BenchmarkStoreTxnPut(b *testing.B) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	// arbitrary number of bytes

+ 1 - 1
storage/kvstore_compaction_test.go

@@ -58,7 +58,7 @@ func TestScheduleCompaction(t *testing.T) {
 		},
 	}
 	for i, tt := range tests {
-		s := newStore(tmpPath)
+		s := newDefaultStore(tmpPath)
 		tx := s.b.BatchTx()
 
 		tx.Lock()

+ 5 - 5
storage/kvstore_test.go

@@ -29,7 +29,7 @@ import (
 )
 
 func TestStoreRev(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer os.Remove(tmpPath)
 
 	for i := 0; i < 3; i++ {
@@ -354,7 +354,7 @@ func TestStoreRestore(t *testing.T) {
 }
 
 func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
-	s0 := newStore(tmpPath)
+	s0 := newDefaultStore(tmpPath)
 	defer os.Remove(tmpPath)
 
 	s0.Put([]byte("foo"), []byte("bar"))
@@ -371,7 +371,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
 
 	s0.Close()
 
-	s1 := newStore(tmpPath)
+	s1 := newDefaultStore(tmpPath)
 	s1.Restore()
 
 	// wait for scheduled compaction to be finished
@@ -409,7 +409,7 @@ func TestTxnPut(t *testing.T) {
 	keys := createBytesSlice(bytesN, sliceN)
 	vals := createBytesSlice(bytesN, sliceN)
 
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer cleanup(s, tmpPath)
 
 	for i := 0; i < sliceN; i++ {
@@ -429,7 +429,7 @@ func TestTxnPut(t *testing.T) {
 }
 
 func TestTxnBlockBackendForceCommit(t *testing.T) {
-	s := newStore(tmpPath)
+	s := newDefaultStore(tmpPath)
 	defer os.Remove(tmpPath)
 
 	id := s.TxnBegin()

+ 1 - 1
storage/watchable_store.go

@@ -55,7 +55,7 @@ type watchableStore struct {
 
 func newWatchableStore(path string) *watchableStore {
 	s := &watchableStore{
-		store:    newStore(path),
+		store:    newDefaultStore(path),
 		unsynced: make(map[*watching]struct{}),
 		synced:   make(map[string]map[*watching]struct{}),
 		stopc:    make(chan struct{}),

+ 1 - 1
storage/watchable_store_bench_test.go

@@ -33,7 +33,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
 	// method to sync watchers in unsynced map. We want to keep watchers
 	// in unsynced for this benchmark.
 	s := &watchableStore{
-		store:    newStore(tmpPath),
+		store:    newDefaultStore(tmpPath),
 		unsynced: make(map[*watching]struct{}),
 
 		// to make the test not crash from assigning to nil map.

+ 2 - 2
storage/watchable_store_test.go

@@ -66,7 +66,7 @@ func TestCancelUnsynced(t *testing.T) {
 	// method to sync watchers in unsynced map. We want to keep watchers
 	// in unsynced to test if syncWatchers works as expected.
 	s := &watchableStore{
-		store:    newStore(tmpPath),
+		store:    newDefaultStore(tmpPath),
 		unsynced: make(map[*watching]struct{}),
 
 		// to make the test not crash from assigning to nil map.
@@ -120,7 +120,7 @@ func TestCancelUnsynced(t *testing.T) {
 // watchings to synced.
 func TestSyncWatchings(t *testing.T) {
 	s := &watchableStore{
-		store:    newStore(tmpPath),
+		store:    newDefaultStore(tmpPath),
 		unsynced: make(map[*watching]struct{}),
 		synced:   make(map[string]map[*watching]struct{}),
 	}