@@ -76,7 +76,7 @@ func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
 
 func testKVRange(t *testing.T, f rangeFunc) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	kvs := put3TestKVs(s)
@@ -142,7 +142,7 @@ func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
 
 func testKVRangeRev(t *testing.T, f rangeFunc) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	kvs := put3TestKVs(s)
@@ -178,7 +178,7 @@ func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
 
 func testKVRangeBadRev(t *testing.T, f rangeFunc) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	put3TestKVs(s)
@@ -211,7 +211,7 @@ func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
 
 func testKVRangeLimit(t *testing.T, f rangeFunc) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	kvs := put3TestKVs(s)
@@ -252,7 +252,7 @@ func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutF
 
 func testKVPutMultipleTimes(t *testing.T, f putFunc) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < 10; i++ {
@@ -314,7 +314,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
 
 	for i, tt := range tests {
 		b, tmpPath := backend.NewDefaultTmpBackend()
-		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 
 		s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
 		s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
@@ -334,7 +334,7 @@ func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, t
 
 func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@@ -355,7 +355,7 @@ func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
 
 // test that range, put, delete on single key in sequence repeatedly works correctly.
 func TestKVOperationInSequence(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < 10; i++ {
@@ -402,7 +402,7 @@ func TestKVOperationInSequence(t *testing.T) {
 
 func TestKVTxnBlockWriteOperations(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 
 	tests := []func(){
 		func() { s.Put([]byte("foo"), nil, lease.NoLease) },
@@ -435,7 +435,7 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
 
 func TestKVTxnNonBlockRange(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	txn := s.Write()
@@ -456,7 +456,7 @@ func TestKVTxnNonBlockRange(t *testing.T) {
 
 // test that txn range, put, delete on single key in sequence repeatedly works correctly.
 func TestKVTxnOperationInSequence(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	for i := 0; i < 10; i++ {
@@ -506,7 +506,7 @@ func TestKVTxnOperationInSequence(t *testing.T) {
 
 func TestKVCompactReserveLastValue(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar0"), 1)
@@ -560,7 +560,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
 
 func TestKVCompactBad(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
@@ -593,7 +593,7 @@ func TestKVHash(t *testing.T) {
 	for i := 0; i < len(hashes); i++ {
 		var err error
 		b, tmpPath := backend.NewDefaultTmpBackend()
-		kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+		kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 		kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
 		kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
 		hashes[i], _, err = kv.Hash()
@@ -631,7 +631,7 @@ func TestKVRestore(t *testing.T) {
 	}
 	for i, tt := range tests {
 		b, tmpPath := backend.NewDefaultTmpBackend()
-		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+		s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 		tt(s)
 		var kvss [][]mvccpb.KeyValue
 		for k := int64(0); k < 10; k++ {
@@ -643,7 +643,7 @@ func TestKVRestore(t *testing.T) {
 		s.Close()
 
 		// ns should recover the the previous state from backend.
-		ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+		ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 
 		if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
 			t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
@@ -675,7 +675,7 @@ func readGaugeInt(g prometheus.Gauge) int {
 
 func TestKVSnapshot(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer cleanup(s, b, tmpPath)
 
 	wkvs := put3TestKVs(s)
@@ -695,7 +695,7 @@ func TestKVSnapshot(t *testing.T) {
 	}
 	f.Close()
 
-	ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+	ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
 	defer ns.Close()
 	r, err := ns.Range([]byte("a"), []byte("z"), RangeOptions{})
 	if err != nil {
@@ -711,7 +711,7 @@ func TestKVSnapshot(t *testing.T) {
 
 func TestWatchableKVWatch(t *testing.T) {
 	b, tmpPath := backend.NewDefaultTmpBackend()
-	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
+	s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
 	defer cleanup(s, b, tmpPath)
 
 	w := s.NewWatchStream()
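
Note: the change above is mechanical. Every constructor call site gains a trailing StoreConfig argument, passed as the zero value StoreConfig{}, so the zero value is evidently expected to behave like the old default configuration. A minimal sketch of a caller that overrides a setting instead of taking the default; the field name CompactionBatchLimit is an assumption about StoreConfig, not something this diff shows:

	// Sketch: construct a store for a test with a non-default config.
	// CompactionBatchLimit is a hypothetical field name here; the diff
	// only demonstrates the zero value StoreConfig{}.
	b, tmpPath := backend.NewDefaultTmpBackend()
	cfg := StoreConfig{CompactionBatchLimit: 1000}
	s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, cfg)
	defer cleanup(s, b, tmpPath)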