// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc

import (
	"bytes"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	w.Watch(testKey, nil, 0)
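	// A startRev of 0 means "watch from the next revision", so the watcher
	// is already caught up and is expected to go straight into the synced
	// watcher group rather than unsynced.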
	if !s.synced.contains(string(testKey)) {
		// the key must have had an entry in synced
		t.Errorf("existence = false, want true")
	}
}

func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	wt := w.Watch(testKey, nil, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if s.synced.contains(string(testKey)) {
		// the key should have been deleted
		t.Errorf("existence = true, want false")
	}
}

// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls the syncWatchers
	// method to sync watchers in the unsynced map. We want to keep
	// watchers in unsynced so we can verify that Cancel removes them.
	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This bumps the store's revision,
	// and later we can set the watchers' startRev to 1 to force
	// them into unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	// create watcherN watch ids to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use 1 to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}
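	// Each Watch above uses startRev 1, at or below the store's current
	// revision, so every watcher lands in the unsynced group; no sync loop
	// is running here, which lets Cancel be tested against unsynced.
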
	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After running CancelFunc
	//
	// unsynced should be empty
	// because cancel removes watchers from unsynced
	if size := s.unsynced.size(); size != 0 {
		t.Errorf("unsynced size = %d, want 0", size)
	}
}

// TestSyncWatchers populates an unsynced watcher map and tests the
// syncWatchers method to see if it correctly sends events to the channels
// of unsynced watchers and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// specify rev as 1 to keep watchers in unsynced
		w.Watch(testKey, nil, 1)
	}

	// Before running s.syncWatchers(), synced should be empty because we
	// manually populated unsynced only.
	sws := s.synced.watcherSetByKey(string(testKey))
	uws := s.unsynced.watcherSetByKey(string(testKey))

	if len(sws) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
	}
	// unsynced should not be empty because we manually populated unsynced only
	if len(uws) != watcherN {
		t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
	}

	// this should move all unsynced watchers to synced ones
	s.syncWatchers()
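	// syncWatchers scans the backend for events at or after each unsynced
	// watcher's minRev, sends them on the watch stream's channel, and then
	// moves the caught-up watchers into the synced group.
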
	sws = s.synced.watcherSetByKey(string(testKey))
	uws = s.unsynced.watcherSetByKey(string(testKey))

	// After running s.syncWatchers(), synced should not be empty because
	// syncWatchers populates synced in this test case.
	if len(sws) != watcherN {
		t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
	}

	// unsynced should be empty because syncWatchers is expected to move
	// all watchers from unsynced to synced in this test case.
	if len(uws) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(uws))
	}

	for w := range sws {
		if w.minRev != s.Rev()+1 {
			t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
		}
	}

	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	evs := (<-w.(*watchStream).ch).Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != mvccpb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}

// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	maxRev := 10
	compactRev := int64(5)
	for i := 0; i < maxRev; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}
	_, err := s.Compact(compactRev)
	if err != nil {
		t.Fatalf("failed to compact kv (%v)", err)
	}

	w := s.NewWatchStream()
	wt := w.Watch(testKey, nil, compactRev-1)
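	// The start revision (compactRev-1) has already been compacted away, so
	// the watcher cannot be served from history; the store is expected to
	// respond immediately with a non-zero CompactRevision instead of events.
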
	select {
	case resp := <-w.Chan():
		if resp.WatchID != wt {
			t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
		}
		if resp.CompactRevision == 0 {
			t.Errorf("resp.CompactRevision = %v, want %v", resp.CompactRevision, compactRev)
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("failed to receive response (timeout)")
	}
}

func TestWatchFutureRev(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	w := s.NewWatchStream()
	wrev := int64(10)
	w.Watch(testKey, nil, wrev)
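	// wrev is ahead of the store's current revision, so the watcher is kept
	// in the synced group and stays silent until the store reaches wrev.
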
	for i := 0; i < 10; i++ {
		rev := s.Put(testKey, testValue, lease.NoLease)
		if rev >= wrev {
			break
		}
	}

	select {
	case resp := <-w.Chan():
		if resp.Revision != wrev {
			t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("failed to get events from the response")
		}
		if resp.Events[0].Kv.ModRevision != wrev {
			t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second.")
	}
}

func TestWatchRestore(t *testing.T) {
	test := func(delay time.Duration) func(t *testing.T) {
		return func(t *testing.T) {
			b, tmpPath := backend.NewDefaultTmpBackend()
			s := newWatchableStore(b, &lease.FakeLessor{}, nil)
			defer cleanup(s, b, tmpPath)

			testKey := []byte("foo")
			testValue := []byte("bar")
			rev := s.Put(testKey, testValue, lease.NoLease)

			newBackend, newPath := backend.NewDefaultTmpBackend()
			newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil)
			defer cleanup(newStore, newBackend, newPath)

			w := newStore.NewWatchStream()
			w.Watch(testKey, nil, rev-1)

			time.Sleep(delay)

			newStore.Restore(b)
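			// Restore points newStore at the first backend's data, which
			// already contains the put at rev; the pending watcher should
			// then be caught up by the sync loop and receive that event.
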
			select {
			case resp := <-w.Chan():
				if resp.Revision != rev {
					t.Fatalf("rev = %d, want %d", resp.Revision, rev)
				}
				if len(resp.Events) != 1 {
					t.Fatalf("failed to get events from the response")
				}
				if resp.Events[0].Kv.ModRevision != rev {
					t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
				}
			case <-time.After(time.Second):
				t.Fatal("failed to receive event in 1 second.")
			}
		}
	}

	t.Run("Normal", test(0))
	t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
}

// TestWatchRestoreSyncedWatcher tests such a case that:
//   1. watcher is created with a future revision "startRev" (rev + 2)
//   2. watcher with a future revision is added to "synced" watcher group
//   3. restore/overwrite storage with snapshot of a higher last revision
//   4. restore operation moves "synced" to "unsynced" watcher group
//   5. choose the watcher from step 1, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
	b1, b1Path := backend.NewDefaultTmpBackend()
	s1 := newWatchableStore(b1, &lease.FakeLessor{}, nil)
	defer cleanup(s1, b1, b1Path)

	b2, b2Path := backend.NewDefaultTmpBackend()
	s2 := newWatchableStore(b2, &lease.FakeLessor{}, nil)
	defer cleanup(s2, b2, b2Path)

	testKey, testValue := []byte("foo"), []byte("bar")
	rev := s1.Put(testKey, testValue, lease.NoLease)
	startRev := rev + 2

	// create a watcher with a future revision
	// add to "synced" watcher group (startRev > s.store.currentRev)
	w1 := s1.NewWatchStream()
	w1.Watch(testKey, nil, startRev)

	// make "s2" end up with a higher last revision
	s2.Put(testKey, testValue, lease.NoLease)
	s2.Put(testKey, testValue, lease.NoLease)

	// overwrite storage with higher revisions
	if err := s1.Restore(b2); err != nil {
		t.Fatal(err)
	}

	// wait for next "syncWatchersLoop" iteration
	// and the unsynced watcher should be chosen
	time.Sleep(2 * time.Second)
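	// The sync loop wakes roughly every waitDuration (100ms by default in
	// this package), so a 2-second sleep covers several iterations.
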
	// trigger events for "startRev"
	s1.Put(testKey, testValue, lease.NoLease)

	select {
	case resp := <-w1.Chan():
		if resp.Revision != startRev {
			t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
		}
		if resp.Events[0].Kv.ModRevision != startRev {
			t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second")
	}
}

// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	oldMaxRevs := watchBatchMaxRevs
	defer func() {
		watchBatchMaxRevs = oldMaxRevs
		s.store.Close()
		os.Remove(tmpPath)
	}()
	batches := 3
	watchBatchMaxRevs = 4
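	// watchBatchMaxRevs caps how many distinct revisions an unsynced watcher
	// receives per response, so watchBatchMaxRevs*batches (12) puts should
	// arrive as 3 responses of 4 events each.
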
	v := []byte("foo")
	for i := 0; i < watchBatchMaxRevs*batches; i++ {
		s.Put(v, v, lease.NoLease)
	}

	w := s.NewWatchStream()
	w.Watch(v, nil, 1)
	for i := 0; i < batches; i++ {
		if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
			t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
		}
	}

	s.store.mu.Lock()
	defer s.store.mu.Unlock()
	if size := s.synced.size(); size != 1 {
		t.Errorf("synced size = %d, want 1", size)
	}
}

func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []mvccpb.Event{
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync []*watcher
		evs  []mvccpb.Event
		wwe  map[*watcher][]mvccpb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			nil,
			evs,
			map[*watcher][]mvccpb.Event{},
		},
		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			[]*watcher{ws[2]},
			evs[:1],
			map[*watcher][]mvccpb.Event{},
		},
		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			[]*watcher{ws[1]},
			evs[1:2],
			map[*watcher][]mvccpb.Event{
				ws[1]: evs[1:2],
			},
		},
		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return
		// wwe with the matching watcher
		{
			[]*watcher{ws[0], ws[2]},
			evs[2:],
			map[*watcher][]mvccpb.Event{
				ws[2]: evs[2:],
			},
		},
		// two watchers in sync that watch two different keys, two events
		// that match both keys should return wwe with both watchers
		{
			[]*watcher{ws[0], ws[1]},
			evs[:2],
			map[*watcher][]mvccpb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}
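	// newWatcherBatch is expected to map each watcher in the group to exactly
	// the events whose keys it watches, omitting watchers that match nothing.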
	for i, tt := range tests {
		wg := newWatcherGroup()
		for _, w := range tt.sync {
			wg.add(w)
		}

		gwe := newWatcherBatch(&wg, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, eb := range gwe {
			if len(eb.evs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
				t.Errorf("#%d: events got = %+v, want = %+v", i, eb.evs, tt.wwe[w])
			}
		}
	}
}