kv_test.go 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package mvcc
  15. import (
  16. "fmt"
  17. "os"
  18. "reflect"
  19. "testing"
  20. "time"
  21. "github.com/coreos/etcd/lease"
  22. "github.com/coreos/etcd/mvcc/backend"
  23. "github.com/coreos/etcd/mvcc/mvccpb"
  24. "github.com/coreos/etcd/pkg/testutil"
  25. )
// Functional tests for features implemented in v3 store. It treats v3 store
// as a black box, and tests it by feeding the input and validating the output.
// TODO: add similar tests on operations in one txn/rev
type (
	// rangeFunc ranges over [key, end) on a KV with the given options.
	rangeFunc func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error)
	// putFunc writes key/value under a lease and returns the new revision.
	putFunc func(kv KV, key, value []byte, lease lease.LeaseID) int64
	// deleteRangeFunc deletes [key, end), returning the number of deleted
	// keys and the resulting store revision.
	deleteRangeFunc func(kv KV, key, end []byte) (n, rev int64)
)
  34. var (
  35. normalRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
  36. return kv.Range(key, end, ro)
  37. }
  38. txnRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
  39. txn := kv.Read()
  40. defer txn.End()
  41. return txn.Range(key, end, ro)
  42. }
  43. normalPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
  44. return kv.Put(key, value, lease)
  45. }
  46. txnPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
  47. txn := kv.Write()
  48. defer txn.End()
  49. return txn.Put(key, value, lease)
  50. }
  51. normalDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
  52. return kv.DeleteRange(key, end)
  53. }
  54. txnDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
  55. txn := kv.Write()
  56. defer txn.End()
  57. return txn.DeleteRange(key, end)
  58. }
  59. )
  60. func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) }
  61. func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
  62. func testKVRange(t *testing.T, f rangeFunc) {
  63. b, tmpPath := backend.NewDefaultTmpBackend()
  64. s := NewStore(b, &lease.FakeLessor{}, nil)
  65. defer cleanup(s, b, tmpPath)
  66. kvs := put3TestKVs(s)
  67. wrev := int64(4)
  68. tests := []struct {
  69. key, end []byte
  70. wkvs []mvccpb.KeyValue
  71. }{
  72. // get no keys
  73. {
  74. []byte("doo"), []byte("foo"),
  75. nil,
  76. },
  77. // get no keys when key == end
  78. {
  79. []byte("foo"), []byte("foo"),
  80. nil,
  81. },
  82. // get no keys when ranging single key
  83. {
  84. []byte("doo"), nil,
  85. nil,
  86. },
  87. // get all keys
  88. {
  89. []byte("foo"), []byte("foo3"),
  90. kvs,
  91. },
  92. // get partial keys
  93. {
  94. []byte("foo"), []byte("foo1"),
  95. kvs[:1],
  96. },
  97. // get single key
  98. {
  99. []byte("foo"), nil,
  100. kvs[:1],
  101. },
  102. // get entire keyspace
  103. {
  104. []byte(""), []byte(""),
  105. kvs,
  106. },
  107. }
  108. for i, tt := range tests {
  109. r, err := f(s, tt.key, tt.end, RangeOptions{})
  110. if err != nil {
  111. t.Fatal(err)
  112. }
  113. if r.Rev != wrev {
  114. t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev)
  115. }
  116. if !reflect.DeepEqual(r.KVs, tt.wkvs) {
  117. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
  118. }
  119. }
  120. }
  121. func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) }
  122. func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
  123. func testKVRangeRev(t *testing.T, f rangeFunc) {
  124. b, tmpPath := backend.NewDefaultTmpBackend()
  125. s := NewStore(b, &lease.FakeLessor{}, nil)
  126. defer cleanup(s, b, tmpPath)
  127. kvs := put3TestKVs(s)
  128. tests := []struct {
  129. rev int64
  130. wrev int64
  131. wkvs []mvccpb.KeyValue
  132. }{
  133. {-1, 4, kvs},
  134. {0, 4, kvs},
  135. {2, 4, kvs[:1]},
  136. {3, 4, kvs[:2]},
  137. {4, 4, kvs},
  138. }
  139. for i, tt := range tests {
  140. r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev})
  141. if err != nil {
  142. t.Fatal(err)
  143. }
  144. if r.Rev != tt.wrev {
  145. t.Errorf("#%d: rev = %d, want %d", i, r.Rev, tt.wrev)
  146. }
  147. if !reflect.DeepEqual(r.KVs, tt.wkvs) {
  148. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
  149. }
  150. }
  151. }
  152. func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) }
  153. func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
  154. func testKVRangeBadRev(t *testing.T, f rangeFunc) {
  155. b, tmpPath := backend.NewDefaultTmpBackend()
  156. s := NewStore(b, &lease.FakeLessor{}, nil)
  157. defer cleanup(s, b, tmpPath)
  158. put3TestKVs(s)
  159. if _, err := s.Compact(4); err != nil {
  160. t.Fatalf("compact error (%v)", err)
  161. }
  162. tests := []struct {
  163. rev int64
  164. werr error
  165. }{
  166. {-1, nil}, // <= 0 is most recent store
  167. {0, nil},
  168. {1, ErrCompacted},
  169. {2, ErrCompacted},
  170. {4, nil},
  171. {5, ErrFutureRev},
  172. {100, ErrFutureRev},
  173. }
  174. for i, tt := range tests {
  175. _, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev})
  176. if err != tt.werr {
  177. t.Errorf("#%d: error = %v, want %v", i, err, tt.werr)
  178. }
  179. }
  180. }
  181. func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) }
  182. func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
  183. func testKVRangeLimit(t *testing.T, f rangeFunc) {
  184. b, tmpPath := backend.NewDefaultTmpBackend()
  185. s := NewStore(b, &lease.FakeLessor{}, nil)
  186. defer cleanup(s, b, tmpPath)
  187. kvs := put3TestKVs(s)
  188. wrev := int64(4)
  189. tests := []struct {
  190. limit int64
  191. wkvs []mvccpb.KeyValue
  192. }{
  193. // no limit
  194. {-1, kvs},
  195. // no limit
  196. {0, kvs},
  197. {1, kvs[:1]},
  198. {2, kvs[:2]},
  199. {3, kvs},
  200. {100, kvs},
  201. }
  202. for i, tt := range tests {
  203. r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Limit: tt.limit})
  204. if err != nil {
  205. t.Fatalf("#%d: range error (%v)", i, err)
  206. }
  207. if !reflect.DeepEqual(r.KVs, tt.wkvs) {
  208. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
  209. }
  210. if r.Rev != wrev {
  211. t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev)
  212. }
  213. if r.Count != len(kvs) {
  214. t.Errorf("#%d: count = %d, want %d", i, r.Count, len(kvs))
  215. }
  216. }
  217. }
  218. func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalPutFunc) }
  219. func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) }
  220. func testKVPutMultipleTimes(t *testing.T, f putFunc) {
  221. b, tmpPath := backend.NewDefaultTmpBackend()
  222. s := NewStore(b, &lease.FakeLessor{}, nil)
  223. defer cleanup(s, b, tmpPath)
  224. for i := 0; i < 10; i++ {
  225. base := int64(i + 1)
  226. rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(base))
  227. if rev != base+1 {
  228. t.Errorf("#%d: rev = %d, want %d", i, rev, base+1)
  229. }
  230. r, err := s.Range([]byte("foo"), nil, RangeOptions{})
  231. if err != nil {
  232. t.Fatal(err)
  233. }
  234. wkvs := []mvccpb.KeyValue{
  235. {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: base + 1, Version: base, Lease: base},
  236. }
  237. if !reflect.DeepEqual(r.KVs, wkvs) {
  238. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
  239. }
  240. }
  241. }
  242. func TestKVDeleteRange(t *testing.T) { testKVDeleteRange(t, normalDeleteRangeFunc) }
  243. func TestKVTxnDeleteRange(t *testing.T) { testKVDeleteRange(t, txnDeleteRangeFunc) }
  244. func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
  245. tests := []struct {
  246. key, end []byte
  247. wrev int64
  248. wN int64
  249. }{
  250. {
  251. []byte("foo"), nil,
  252. 5, 1,
  253. },
  254. {
  255. []byte("foo"), []byte("foo1"),
  256. 5, 1,
  257. },
  258. {
  259. []byte("foo"), []byte("foo2"),
  260. 5, 2,
  261. },
  262. {
  263. []byte("foo"), []byte("foo3"),
  264. 5, 3,
  265. },
  266. {
  267. []byte("foo3"), []byte("foo8"),
  268. 4, 0,
  269. },
  270. {
  271. []byte("foo3"), nil,
  272. 4, 0,
  273. },
  274. }
  275. for i, tt := range tests {
  276. b, tmpPath := backend.NewDefaultTmpBackend()
  277. s := NewStore(b, &lease.FakeLessor{}, nil)
  278. s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
  279. s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
  280. s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease)
  281. n, rev := f(s, tt.key, tt.end)
  282. if n != tt.wN || rev != tt.wrev {
  283. t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev)
  284. }
  285. cleanup(s, b, tmpPath)
  286. }
  287. }
  288. func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, normalDeleteRangeFunc) }
  289. func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) }
  290. func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
  291. b, tmpPath := backend.NewDefaultTmpBackend()
  292. s := NewStore(b, &lease.FakeLessor{}, nil)
  293. defer cleanup(s, b, tmpPath)
  294. s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
  295. n, rev := f(s, []byte("foo"), nil)
  296. if n != 1 || rev != 3 {
  297. t.Fatalf("n = %d, rev = %d, want (%d, %d)", n, rev, 1, 3)
  298. }
  299. for i := 0; i < 10; i++ {
  300. n, rev := f(s, []byte("foo"), nil)
  301. if n != 0 || rev != 3 {
  302. t.Fatalf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 0, 3)
  303. }
  304. }
  305. }
  306. // test that range, put, delete on single key in sequence repeatedly works correctly.
  307. func TestKVOperationInSequence(t *testing.T) {
  308. b, tmpPath := backend.NewDefaultTmpBackend()
  309. s := NewStore(b, &lease.FakeLessor{}, nil)
  310. defer cleanup(s, b, tmpPath)
  311. for i := 0; i < 10; i++ {
  312. base := int64(i*2 + 1)
  313. // put foo
  314. rev := s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
  315. if rev != base+1 {
  316. t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
  317. }
  318. r, err := s.Range([]byte("foo"), nil, RangeOptions{Rev: base + 1})
  319. if err != nil {
  320. t.Fatal(err)
  321. }
  322. wkvs := []mvccpb.KeyValue{
  323. {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
  324. }
  325. if !reflect.DeepEqual(r.KVs, wkvs) {
  326. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
  327. }
  328. if r.Rev != base+1 {
  329. t.Errorf("#%d: range rev = %d, want %d", i, rev, base+1)
  330. }
  331. // delete foo
  332. n, rev := s.DeleteRange([]byte("foo"), nil)
  333. if n != 1 || rev != base+2 {
  334. t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+2)
  335. }
  336. r, err = s.Range([]byte("foo"), nil, RangeOptions{Rev: base + 2})
  337. if err != nil {
  338. t.Fatal(err)
  339. }
  340. if r.KVs != nil {
  341. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil)
  342. }
  343. if r.Rev != base+2 {
  344. t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+2)
  345. }
  346. }
  347. }
// TestKVTxnBlockWriteOperations verifies that an open write txn blocks other
// write operations (Put, DeleteRange) until the txn is ended.
func TestKVTxnBlockWriteOperations(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	tests := []func(){
		func() { s.Put([]byte("foo"), nil, lease.NoLease) },
		func() { s.DeleteRange([]byte("foo"), nil) },
	}
	for i, tt := range tests {
		txn := s.Write()
		done := make(chan struct{}, 1)
		// Capturing the loop variable tt in the goroutine is safe here: the
		// second select below waits for the goroutine to signal done before
		// the next iteration reassigns tt.
		go func() {
			tt()
			done <- struct{}{}
		}()
		select {
		case <-done:
			t.Fatalf("#%d: operation failed to be blocked", i)
		case <-time.After(10 * time.Millisecond):
			// No completion within 10ms is taken as "blocked".
		}
		txn.End()
		// Ending the txn must promptly unblock the pending operation.
		select {
		case <-done:
		case <-time.After(10 * time.Second):
			testutil.FatalStack(t, fmt.Sprintf("#%d: operation failed to be unblocked", i))
		}
	}
	// only close backend when we know all the tx are finished
	cleanup(s, b, tmpPath)
}
  377. func TestKVTxnNonBlockRange(t *testing.T) {
  378. b, tmpPath := backend.NewDefaultTmpBackend()
  379. s := NewStore(b, &lease.FakeLessor{}, nil)
  380. defer cleanup(s, b, tmpPath)
  381. txn := s.Write()
  382. defer txn.End()
  383. donec := make(chan struct{})
  384. go func() {
  385. defer close(donec)
  386. s.Range([]byte("foo"), nil, RangeOptions{})
  387. }()
  388. select {
  389. case <-donec:
  390. case <-time.After(100 * time.Millisecond):
  391. t.Fatalf("range operation blocked on write txn")
  392. }
  393. }
// test that txn range, put, delete on single key in sequence repeatedly works correctly.
func TestKVTxnOperationInSequence(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(b, &lease.FakeLessor{}, nil)
	defer cleanup(s, b, tmpPath)
	for i := 0; i < 10; i++ {
		// One write txn per iteration; all operations inside it share the
		// same resulting revision base+1.
		txn := s.Write()
		base := int64(i + 1)
		// put foo
		rev := txn.Put([]byte("foo"), []byte("bar"), lease.NoLease)
		if rev != base+1 {
			t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1)
		}
		// Range inside the txn observes the pending put.
		r, err := txn.Range([]byte("foo"), nil, RangeOptions{Rev: base + 1})
		if err != nil {
			t.Fatal(err)
		}
		wkvs := []mvccpb.KeyValue{
			{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)},
		}
		if !reflect.DeepEqual(r.KVs, wkvs) {
			t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs)
		}
		if r.Rev != base+1 {
			t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
		}
		// delete foo
		// The delete happens in the same txn, so it reports the same
		// revision base+1 rather than a new one.
		n, rev := txn.DeleteRange([]byte("foo"), nil)
		if n != 1 || rev != base+1 {
			t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+1)
		}
		// After the pending delete, the key must no longer be visible.
		r, err = txn.Range([]byte("foo"), nil, RangeOptions{Rev: base + 1})
		if err != nil {
			t.Errorf("#%d: range error (%v)", i, err)
		}
		if r.KVs != nil {
			t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil)
		}
		if r.Rev != base+1 {
			t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1)
		}
		txn.End()
	}
}
  438. func TestKVCompactReserveLastValue(t *testing.T) {
  439. b, tmpPath := backend.NewDefaultTmpBackend()
  440. s := NewStore(b, &lease.FakeLessor{}, nil)
  441. defer cleanup(s, b, tmpPath)
  442. s.Put([]byte("foo"), []byte("bar0"), 1)
  443. s.Put([]byte("foo"), []byte("bar1"), 2)
  444. s.DeleteRange([]byte("foo"), nil)
  445. s.Put([]byte("foo"), []byte("bar2"), 3)
  446. // rev in tests will be called in Compact() one by one on the same store
  447. tests := []struct {
  448. rev int64
  449. // wanted kvs right after the compacted rev
  450. wkvs []mvccpb.KeyValue
  451. }{
  452. {
  453. 1,
  454. []mvccpb.KeyValue{
  455. {Key: []byte("foo"), Value: []byte("bar0"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
  456. },
  457. },
  458. {
  459. 2,
  460. []mvccpb.KeyValue{
  461. {Key: []byte("foo"), Value: []byte("bar1"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: 2},
  462. },
  463. },
  464. {
  465. 3,
  466. nil,
  467. },
  468. {
  469. 4,
  470. []mvccpb.KeyValue{
  471. {Key: []byte("foo"), Value: []byte("bar2"), CreateRevision: 5, ModRevision: 5, Version: 1, Lease: 3},
  472. },
  473. },
  474. }
  475. for i, tt := range tests {
  476. _, err := s.Compact(tt.rev)
  477. if err != nil {
  478. t.Errorf("#%d: unexpect compact error %v", i, err)
  479. }
  480. r, err := s.Range([]byte("foo"), nil, RangeOptions{Rev: tt.rev + 1})
  481. if err != nil {
  482. t.Errorf("#%d: unexpect range error %v", i, err)
  483. }
  484. if !reflect.DeepEqual(r.KVs, tt.wkvs) {
  485. t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs)
  486. }
  487. }
  488. }
  489. func TestKVCompactBad(t *testing.T) {
  490. b, tmpPath := backend.NewDefaultTmpBackend()
  491. s := NewStore(b, &lease.FakeLessor{}, nil)
  492. defer cleanup(s, b, tmpPath)
  493. s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
  494. s.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
  495. s.Put([]byte("foo"), []byte("bar2"), lease.NoLease)
  496. // rev in tests will be called in Compact() one by one on the same store
  497. tests := []struct {
  498. rev int64
  499. werr error
  500. }{
  501. {0, nil},
  502. {1, nil},
  503. {1, ErrCompacted},
  504. {4, nil},
  505. {5, ErrFutureRev},
  506. {100, ErrFutureRev},
  507. }
  508. for i, tt := range tests {
  509. _, err := s.Compact(tt.rev)
  510. if err != tt.werr {
  511. t.Errorf("#%d: compact error = %v, want %v", i, err, tt.werr)
  512. }
  513. }
  514. }
  515. func TestKVHash(t *testing.T) {
  516. hashes := make([]uint32, 3)
  517. for i := 0; i < len(hashes); i++ {
  518. var err error
  519. b, tmpPath := backend.NewDefaultTmpBackend()
  520. kv := NewStore(b, &lease.FakeLessor{}, nil)
  521. kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
  522. kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
  523. hashes[i], _, err = kv.Hash()
  524. if err != nil {
  525. t.Fatalf("failed to get hash: %v", err)
  526. }
  527. cleanup(kv, b, tmpPath)
  528. }
  529. for i := 1; i < len(hashes); i++ {
  530. if hashes[i-1] != hashes[i] {
  531. t.Errorf("hash[%d](%d) != hash[%d](%d)", i-1, hashes[i-1], i, hashes[i])
  532. }
  533. }
  534. }
  535. func TestKVRestore(t *testing.T) {
  536. tests := []func(kv KV){
  537. func(kv KV) {
  538. kv.Put([]byte("foo"), []byte("bar0"), 1)
  539. kv.Put([]byte("foo"), []byte("bar1"), 2)
  540. kv.Put([]byte("foo"), []byte("bar2"), 3)
  541. },
  542. func(kv KV) {
  543. kv.Put([]byte("foo"), []byte("bar0"), 1)
  544. kv.DeleteRange([]byte("foo"), nil)
  545. kv.Put([]byte("foo"), []byte("bar1"), 2)
  546. },
  547. func(kv KV) {
  548. kv.Put([]byte("foo"), []byte("bar0"), 1)
  549. kv.Put([]byte("foo"), []byte("bar1"), 2)
  550. kv.Compact(1)
  551. },
  552. }
  553. for i, tt := range tests {
  554. b, tmpPath := backend.NewDefaultTmpBackend()
  555. s := NewStore(b, &lease.FakeLessor{}, nil)
  556. tt(s)
  557. var kvss [][]mvccpb.KeyValue
  558. for k := int64(0); k < 10; k++ {
  559. r, _ := s.Range([]byte("a"), []byte("z"), RangeOptions{Rev: k})
  560. kvss = append(kvss, r.KVs)
  561. }
  562. s.Close()
  563. // ns should recover the the previous state from backend.
  564. ns := NewStore(b, &lease.FakeLessor{}, nil)
  565. // wait for possible compaction to finish
  566. testutil.WaitSchedule()
  567. var nkvss [][]mvccpb.KeyValue
  568. for k := int64(0); k < 10; k++ {
  569. r, _ := ns.Range([]byte("a"), []byte("z"), RangeOptions{Rev: k})
  570. nkvss = append(nkvss, r.KVs)
  571. }
  572. cleanup(ns, b, tmpPath)
  573. if !reflect.DeepEqual(nkvss, kvss) {
  574. t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
  575. }
  576. }
  577. }
  578. func TestKVSnapshot(t *testing.T) {
  579. b, tmpPath := backend.NewDefaultTmpBackend()
  580. s := NewStore(b, &lease.FakeLessor{}, nil)
  581. defer cleanup(s, b, tmpPath)
  582. wkvs := put3TestKVs(s)
  583. newPath := "new_test"
  584. f, err := os.Create(newPath)
  585. if err != nil {
  586. t.Fatal(err)
  587. }
  588. defer os.Remove(newPath)
  589. snap := s.b.Snapshot()
  590. defer snap.Close()
  591. _, err = snap.WriteTo(f)
  592. if err != nil {
  593. t.Fatal(err)
  594. }
  595. f.Close()
  596. ns := NewStore(b, &lease.FakeLessor{}, nil)
  597. defer ns.Close()
  598. r, err := ns.Range([]byte("a"), []byte("z"), RangeOptions{})
  599. if err != nil {
  600. t.Errorf("unexpect range error (%v)", err)
  601. }
  602. if !reflect.DeepEqual(r.KVs, wkvs) {
  603. t.Errorf("kvs = %+v, want %+v", r.KVs, wkvs)
  604. }
  605. if r.Rev != 4 {
  606. t.Errorf("rev = %d, want %d", r.Rev, 4)
  607. }
  608. }
// TestWatchableKVWatch feeds puts into a watchable store and verifies that
// watchers on ["foo", "fop") / ["foo1", "foo2") receive the matching events.
func TestWatchableKVWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
	defer cleanup(s, b, tmpPath)
	w := s.NewWatchStream()
	defer w.Close()
	// Watch ["foo", "fop") starting from the current revision (0).
	wid := w.Watch([]byte("foo"), []byte("fop"), 0)
	// wev lists the events expected from the three puts issued below.
	wev := []mvccpb.Event{
		{Type: mvccpb.PUT,
			Kv: &mvccpb.KeyValue{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    2,
				Version:        1,
				Lease:          1,
			},
		},
		{
			Type: mvccpb.PUT,
			Kv: &mvccpb.KeyValue{
				Key:            []byte("foo1"),
				Value:          []byte("bar1"),
				CreateRevision: 3,
				ModRevision:    3,
				Version:        1,
				Lease:          2,
			},
		},
		{
			Type: mvccpb.PUT,
			Kv: &mvccpb.KeyValue{
				Key:            []byte("foo1"),
				Value:          []byte("bar11"),
				CreateRevision: 3,
				ModRevision:    4,
				Version:        2,
				Lease:          3,
			},
		},
	}
	// First put must be delivered as wev[0].
	s.Put([]byte("foo"), []byte("bar"), 1)
	select {
	case resp := <-w.Chan():
		if resp.WatchID != wid {
			t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
		}
		ev := resp.Events[0]
		if !reflect.DeepEqual(ev, wev[0]) {
			t.Errorf("watched event = %+v, want %+v", ev, wev[0])
		}
	case <-time.After(5 * time.Second):
		// CPU might be too slow, and the routine is not able to switch around
		testutil.FatalStack(t, "failed to watch the event")
	}
	// Second put must be delivered as wev[1].
	s.Put([]byte("foo1"), []byte("bar1"), 2)
	select {
	case resp := <-w.Chan():
		if resp.WatchID != wid {
			t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
		}
		ev := resp.Events[0]
		if !reflect.DeepEqual(ev, wev[1]) {
			t.Errorf("watched event = %+v, want %+v", ev, wev[1])
		}
	case <-time.After(5 * time.Second):
		testutil.FatalStack(t, "failed to watch the event")
	}
	// Open a second stream watching ["foo1", "foo2") from revision 3, which
	// should replay the historical foo1 event (wev[1]) first.
	// NOTE(review): the deferred w.Close() above was bound to the first
	// stream; this second stream is never explicitly closed — presumably
	// reclaimed when the store is cleaned up, but verify.
	w = s.NewWatchStream()
	wid = w.Watch([]byte("foo1"), []byte("foo2"), 3)
	select {
	case resp := <-w.Chan():
		if resp.WatchID != wid {
			t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
		}
		ev := resp.Events[0]
		if !reflect.DeepEqual(ev, wev[1]) {
			t.Errorf("watched event = %+v, want %+v", ev, wev[1])
		}
	case <-time.After(5 * time.Second):
		testutil.FatalStack(t, "failed to watch the event")
	}
	// A live put on the second stream must be delivered as wev[2].
	s.Put([]byte("foo1"), []byte("bar11"), 3)
	select {
	case resp := <-w.Chan():
		if resp.WatchID != wid {
			t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid)
		}
		ev := resp.Events[0]
		if !reflect.DeepEqual(ev, wev[2]) {
			t.Errorf("watched event = %+v, want %+v", ev, wev[2])
		}
	case <-time.After(5 * time.Second):
		testutil.FatalStack(t, "failed to watch the event")
	}
}
// cleanup tears down a store created for a test. The order matters: the
// store is closed before the backend it wraps, and the backend's file is
// removed last.
func cleanup(s KV, b backend.Backend, path string) {
	s.Close()
	b.Close()
	os.Remove(path)
}
  710. func put3TestKVs(s KV) []mvccpb.KeyValue {
  711. s.Put([]byte("foo"), []byte("bar"), 1)
  712. s.Put([]byte("foo1"), []byte("bar1"), 2)
  713. s.Put([]byte("foo2"), []byte("bar2"), 3)
  714. return []mvccpb.KeyValue{
  715. {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1},
  716. {Key: []byte("foo1"), Value: []byte("bar1"), CreateRevision: 3, ModRevision: 3, Version: 1, Lease: 2},
  717. {Key: []byte("foo2"), Value: []byte("bar2"), CreateRevision: 4, ModRevision: 4, Version: 1, Lease: 3},
  718. }
  719. }