  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package mvcc
  15. import (
  16. "bytes"
  17. "fmt"
  18. "os"
  19. "reflect"
  20. "sync"
  21. "testing"
  22. "time"
  23. "github.com/coreos/etcd/lease"
  24. "github.com/coreos/etcd/mvcc/backend"
  25. "github.com/coreos/etcd/mvcc/mvccpb"
  26. )
  27. func TestWatch(t *testing.T) {
  28. b, tmpPath := backend.NewDefaultTmpBackend()
  29. s := newWatchableStore(b, &lease.FakeLessor{}, nil)
  30. defer func() {
  31. s.store.Close()
  32. os.Remove(tmpPath)
  33. }()
  34. testKey := []byte("foo")
  35. testValue := []byte("bar")
  36. s.Put(testKey, testValue, lease.NoLease)
  37. w := s.NewWatchStream()
  38. w.Watch(testKey, nil, 0)
  39. if !s.synced.contains(string(testKey)) {
  40. // the key must have had an entry in synced
  41. t.Errorf("existence = false, want true")
  42. }
  43. }
  44. func TestNewWatcherCancel(t *testing.T) {
  45. b, tmpPath := backend.NewDefaultTmpBackend()
  46. s := newWatchableStore(b, &lease.FakeLessor{}, nil)
  47. defer func() {
  48. s.store.Close()
  49. os.Remove(tmpPath)
  50. }()
  51. testKey := []byte("foo")
  52. testValue := []byte("bar")
  53. s.Put(testKey, testValue, lease.NoLease)
  54. w := s.NewWatchStream()
  55. wt := w.Watch(testKey, nil, 0)
  56. if err := w.Cancel(wt); err != nil {
  57. t.Error(err)
  58. }
  59. if s.synced.contains(string(testKey)) {
  60. // the key shoud have been deleted
  61. t.Errorf("existence = true, want false")
  62. }
  63. }
  64. // TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
  65. func TestCancelUnsynced(t *testing.T) {
  66. b, tmpPath := backend.NewDefaultTmpBackend()
  67. // manually create watchableStore instead of newWatchableStore
  68. // because newWatchableStore automatically calls syncWatchers
  69. // method to sync watchers in unsynced map. We want to keep watchers
  70. // in unsynced to test if syncWatchers works as expected.
  71. s := &watchableStore{
  72. store: NewStore(b, &lease.FakeLessor{}, nil),
  73. unsynced: newWatcherGroup(),
  74. // to make the test not crash from assigning to nil map.
  75. // 'synced' doesn't get populated in this test.
  76. synced: newWatcherGroup(),
  77. }
  78. defer func() {
  79. s.store.Close()
  80. os.Remove(tmpPath)
  81. }()
  82. // Put a key so that we can spawn watchers on that key.
  83. // (testKey in this test). This increases the rev to 1,
  84. // and later we can we set the watcher's startRev to 1,
  85. // and force watchers to be in unsynced.
  86. testKey := []byte("foo")
  87. testValue := []byte("bar")
  88. s.Put(testKey, testValue, lease.NoLease)
  89. w := s.NewWatchStream()
  90. // arbitrary number for watchers
  91. watcherN := 100
  92. // create watcherN of watch ids to cancel
  93. watchIDs := make([]WatchID, watcherN)
  94. for i := 0; i < watcherN; i++ {
  95. // use 1 to keep watchers in unsynced
  96. watchIDs[i] = w.Watch(testKey, nil, 1)
  97. }
  98. for _, idx := range watchIDs {
  99. if err := w.Cancel(idx); err != nil {
  100. t.Error(err)
  101. }
  102. }
  103. // After running CancelFunc
  104. //
  105. // unsynced should be empty
  106. // because cancel removes watcher from unsynced
  107. if size := s.unsynced.size(); size != 0 {
  108. t.Errorf("unsynced size = %d, want 0", size)
  109. }
  110. }
  111. // TestSyncWatchers populates unsynced watcher map and tests syncWatchers
  112. // method to see if it correctly sends events to channel of unsynced watchers
  113. // and moves these watchers to synced.
  114. func TestSyncWatchers(t *testing.T) {
  115. b, tmpPath := backend.NewDefaultTmpBackend()
  116. s := &watchableStore{
  117. store: NewStore(b, &lease.FakeLessor{}, nil),
  118. unsynced: newWatcherGroup(),
  119. synced: newWatcherGroup(),
  120. }
  121. defer func() {
  122. s.store.Close()
  123. os.Remove(tmpPath)
  124. }()
  125. testKey := []byte("foo")
  126. testValue := []byte("bar")
  127. s.Put(testKey, testValue, lease.NoLease)
  128. w := s.NewWatchStream()
  129. // arbitrary number for watchers
  130. watcherN := 100
  131. for i := 0; i < watcherN; i++ {
  132. // specify rev as 1 to keep watchers in unsynced
  133. w.Watch(testKey, nil, 1)
  134. }
  135. // Before running s.syncWatchers() synced should be empty because we manually
  136. // populate unsynced only
  137. sws := s.synced.watcherSetByKey(string(testKey))
  138. uws := s.unsynced.watcherSetByKey(string(testKey))
  139. if len(sws) != 0 {
  140. t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
  141. }
  142. // unsynced should not be empty because we manually populated unsynced only
  143. if len(uws) != watcherN {
  144. t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
  145. }
  146. // this should move all unsynced watchers to synced ones
  147. s.syncWatchers()
  148. sws = s.synced.watcherSetByKey(string(testKey))
  149. uws = s.unsynced.watcherSetByKey(string(testKey))
  150. // After running s.syncWatchers(), synced should not be empty because syncwatchers
  151. // populates synced in this test case
  152. if len(sws) != watcherN {
  153. t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
  154. }
  155. // unsynced should be empty because syncwatchers is expected to move all watchers
  156. // from unsynced to synced in this test case
  157. if len(uws) != 0 {
  158. t.Errorf("unsynced size = %d, want 0", len(uws))
  159. }
  160. for w := range sws {
  161. if w.minRev != s.Rev()+1 {
  162. t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
  163. }
  164. }
  165. if len(w.(*watchStream).ch) != watcherN {
  166. t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
  167. }
  168. evs := (<-w.(*watchStream).ch).Events
  169. if len(evs) != 1 {
  170. t.Errorf("len(evs) got = %d, want = 1", len(evs))
  171. }
  172. if evs[0].Type != mvccpb.PUT {
  173. t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
  174. }
  175. if !bytes.Equal(evs[0].Kv.Key, testKey) {
  176. t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
  177. }
  178. if !bytes.Equal(evs[0].Kv.Value, testValue) {
  179. t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
  180. }
  181. }
  182. // TestWatchCompacted tests a watcher that watches on a compacted revision.
  183. func TestWatchCompacted(t *testing.T) {
  184. b, tmpPath := backend.NewDefaultTmpBackend()
  185. s := newWatchableStore(b, &lease.FakeLessor{}, nil)
  186. defer func() {
  187. s.store.Close()
  188. os.Remove(tmpPath)
  189. }()
  190. testKey := []byte("foo")
  191. testValue := []byte("bar")
  192. maxRev := 10
  193. compactRev := int64(5)
  194. for i := 0; i < maxRev; i++ {
  195. s.Put(testKey, testValue, lease.NoLease)
  196. }
  197. _, err := s.Compact(compactRev)
  198. if err != nil {
  199. t.Fatalf("failed to compact kv (%v)", err)
  200. }
  201. w := s.NewWatchStream()
  202. wt := w.Watch(testKey, nil, compactRev-1)
  203. select {
  204. case resp := <-w.Chan():
  205. if resp.WatchID != wt {
  206. t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
  207. }
  208. if resp.CompactRevision == 0 {
  209. t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev)
  210. }
  211. case <-time.After(1 * time.Second):
  212. t.Fatalf("failed to receive response (timeout)")
  213. }
  214. }
  215. func TestWatchFutureRev(t *testing.T) {
  216. b, tmpPath := backend.NewDefaultTmpBackend()
  217. s := newWatchableStore(b, &lease.FakeLessor{}, nil)
  218. defer func() {
  219. s.store.Close()
  220. os.Remove(tmpPath)
  221. }()
  222. testKey := []byte("foo")
  223. testValue := []byte("bar")
  224. w := s.NewWatchStream()
  225. wrev := int64(10)
  226. w.Watch(testKey, nil, wrev)
  227. for i := 0; i < 10; i++ {
  228. rev := s.Put(testKey, testValue, lease.NoLease)
  229. if rev >= wrev {
  230. break
  231. }
  232. }
  233. select {
  234. case resp := <-w.Chan():
  235. if resp.Revision != wrev {
  236. t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
  237. }
  238. if len(resp.Events) != 1 {
  239. t.Fatalf("failed to get events from the response")
  240. }
  241. if resp.Events[0].Kv.ModRevision != wrev {
  242. t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
  243. }
  244. case <-time.After(time.Second):
  245. t.Fatal("failed to receive event in 1 second.")
  246. }
  247. }
  248. func TestWatchRestore(t *testing.T) {
  249. b, tmpPath := backend.NewDefaultTmpBackend()
  250. s := newWatchableStore(b, &lease.FakeLessor{}, nil)
  251. defer cleanup(s, b, tmpPath)
  252. testKey := []byte("foo")
  253. testValue := []byte("bar")
  254. rev := s.Put(testKey, testValue, lease.NoLease)
  255. newBackend, newPath := backend.NewDefaultTmpBackend()
  256. newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil)
  257. defer cleanup(newStore, newBackend, newPath)
  258. w := newStore.NewWatchStream()
  259. w.Watch(testKey, nil, rev-1)
  260. newStore.Restore(b)
  261. select {
  262. case resp := <-w.Chan():
  263. if resp.Revision != rev {
  264. t.Fatalf("rev = %d, want %d", resp.Revision, rev)
  265. }
  266. if len(resp.Events) != 1 {
  267. t.Fatalf("failed to get events from the response")
  268. }
  269. if resp.Events[0].Kv.ModRevision != rev {
  270. t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
  271. }
  272. case <-time.After(time.Second):
  273. t.Fatal("failed to receive event in 1 second.")
  274. }
  275. }
  276. // TestWatchBatchUnsynced tests batching on unsynced watchers
  277. func TestWatchBatchUnsynced(t *testing.T) {
  278. b, tmpPath := backend.NewDefaultTmpBackend()
  279. s := newWatchableStore(b, &lease.FakeLessor{}, nil)
  280. oldMaxRevs := watchBatchMaxRevs
  281. defer func() {
  282. watchBatchMaxRevs = oldMaxRevs
  283. s.store.Close()
  284. os.Remove(tmpPath)
  285. }()
  286. batches := 3
  287. watchBatchMaxRevs = 4
  288. v := []byte("foo")
  289. for i := 0; i < watchBatchMaxRevs*batches; i++ {
  290. s.Put(v, v, lease.NoLease)
  291. }
  292. w := s.NewWatchStream()
  293. w.Watch(v, nil, 1)
  294. for i := 0; i < batches; i++ {
  295. if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
  296. t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
  297. }
  298. }
  299. s.store.revMu.Lock()
  300. defer s.store.revMu.Unlock()
  301. if size := s.synced.size(); size != 1 {
  302. t.Errorf("synced size = %d, want 1", size)
  303. }
  304. }
  305. func TestNewMapwatcherToEventMap(t *testing.T) {
  306. k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
  307. v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")
  308. ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}
  309. evs := []mvccpb.Event{
  310. {
  311. Type: mvccpb.PUT,
  312. Kv: &mvccpb.KeyValue{Key: k0, Value: v0},
  313. },
  314. {
  315. Type: mvccpb.PUT,
  316. Kv: &mvccpb.KeyValue{Key: k1, Value: v1},
  317. },
  318. {
  319. Type: mvccpb.PUT,
  320. Kv: &mvccpb.KeyValue{Key: k2, Value: v2},
  321. },
  322. }
  323. tests := []struct {
  324. sync []*watcher
  325. evs []mvccpb.Event
  326. wwe map[*watcher][]mvccpb.Event
  327. }{
  328. // no watcher in sync, some events should return empty wwe
  329. {
  330. nil,
  331. evs,
  332. map[*watcher][]mvccpb.Event{},
  333. },
  334. // one watcher in sync, one event that does not match the key of that
  335. // watcher should return empty wwe
  336. {
  337. []*watcher{ws[2]},
  338. evs[:1],
  339. map[*watcher][]mvccpb.Event{},
  340. },
  341. // one watcher in sync, one event that matches the key of that
  342. // watcher should return wwe with that matching watcher
  343. {
  344. []*watcher{ws[1]},
  345. evs[1:2],
  346. map[*watcher][]mvccpb.Event{
  347. ws[1]: evs[1:2],
  348. },
  349. },
  350. // two watchers in sync that watches two different keys, one event
  351. // that matches the key of only one of the watcher should return wwe
  352. // with the matching watcher
  353. {
  354. []*watcher{ws[0], ws[2]},
  355. evs[2:],
  356. map[*watcher][]mvccpb.Event{
  357. ws[2]: evs[2:],
  358. },
  359. },
  360. // two watchers in sync that watches the same key, two events that
  361. // match the keys should return wwe with those two watchers
  362. {
  363. []*watcher{ws[0], ws[1]},
  364. evs[:2],
  365. map[*watcher][]mvccpb.Event{
  366. ws[0]: evs[:1],
  367. ws[1]: evs[1:2],
  368. },
  369. },
  370. }
  371. for i, tt := range tests {
  372. wg := newWatcherGroup()
  373. for _, w := range tt.sync {
  374. wg.add(w)
  375. }
  376. gwe := newWatcherBatch(&wg, tt.evs)
  377. if len(gwe) != len(tt.wwe) {
  378. t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
  379. }
  380. // compare gwe and tt.wwe
  381. for w, eb := range gwe {
  382. if len(eb.evs) != len(tt.wwe[w]) {
  383. t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
  384. }
  385. if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
  386. t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
  387. }
  388. }
  389. }
  390. }
// TestWatchVictims tests that watchable store delivers watch events
// when the watch channel is temporarily clogged with too many events.
func TestWatchVictims(t *testing.T) {
	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync

	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
		// restore the package-level tunables altered below
		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
	}()

	// Shrink the channel buffer and the per-sync watcher limit so the
	// channel clogs easily while events are being delivered.
	chanBufLen, maxWatchersPerSync = 1, 2
	numPuts := chanBufLen * 64
	testKey, testValue := []byte("foo"), []byte("bar")

	var wg sync.WaitGroup
	numWatches := maxWatchersPerSync * 128
	// buffered so watcher goroutines never block on error reporting
	errc := make(chan error, numWatches)
	wg.Add(numWatches)
	for i := 0; i < numWatches; i++ {
		go func() {
			w := s.NewWatchStream()
			// startRev 1 puts the watcher on the unsynced path
			w.Watch(testKey, nil, 1)
			defer func() {
				w.Close()
				wg.Done()
			}()
			tc := time.After(10 * time.Second)
			// first put lands at revision 2 (store starts at rev 1)
			evs, nextRev := 0, int64(2)
			for evs < numPuts {
				select {
				case <-tc:
					errc <- fmt.Errorf("time out")
					return
				case wr := <-w.Chan():
					evs += len(wr.Events)
					for _, ev := range wr.Events {
						// events must arrive in strict revision order,
						// even when rerouted through the victim list
						if ev.Kv.ModRevision != nextRev {
							errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
							return
						}
						nextRev++
					}
					// consume slowly to encourage channel clogging
					time.Sleep(time.Millisecond)
				}
			}
			if evs != numPuts {
				errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
				return
			}
			// no extra responses may be pending once all events arrived
			select {
			case <-w.Chan():
				errc <- fmt.Errorf("unexpected response")
			default:
			}
		}()
		// stagger watcher startup
		time.Sleep(time.Millisecond)
	}

	// Fire all puts concurrently to maximize pressure on the watch channels.
	var wgPut sync.WaitGroup
	wgPut.Add(numPuts)
	for i := 0; i < numPuts; i++ {
		go func() {
			defer wgPut.Done()
			s.Put(testKey, testValue, lease.NoLease)
		}()
	}
	wgPut.Wait()
	wg.Wait()

	// Surface the first error reported by any watcher goroutine, if any.
	select {
	case err := <-errc:
		t.Fatal(err)
	default:
	}
}
// TestStressWatchCancelClose tests closing a watch stream while
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil)
	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()
	testKey, testValue := []byte("foo"), []byte("bar")
	var wg sync.WaitGroup
	// readyc gates all goroutines so the Cancel/Close race starts at once
	readyc := make(chan struct{})
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			defer wg.Done()
			w := s.NewWatchStream()
			ids := make([]WatchID, 10)
			for i := range ids {
				ids[i] = w.Watch(testKey, nil, 0)
			}
			<-readyc
			// race Cancel on half the watches against Close of the stream
			wg.Add(1 + len(ids)/2)
			for i := range ids[:len(ids)/2] {
				go func(n int) {
					defer wg.Done()
					w.Cancel(ids[n])
				}(i)
			}
			go func() {
				defer wg.Done()
				w.Close()
			}()
		}()
	}
	close(readyc)
	// generate events concurrently with the cancel/close races
	for i := 0; i < 100; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}
	wg.Wait()
}