watchable_store_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/mvcc/backend"
	"go.etcd.io/etcd/mvcc/mvccpb"
	"go.etcd.io/etcd/pkg/traceutil"

	"go.uber.org/zap"
)

func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	w.Watch(0, testKey, nil, 0)

	if !s.synced.contains(string(testKey)) {
		// the key must have had an entry in synced
		t.Errorf("existence = false, want true")
	}
}

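// The receive side of the pattern exercised above (create a stream, register a
// watcher, then read responses from the stream's channel) can be sketched as a
// small helper. This helper is illustrative only and is not used by the tests
// in this file; the one-second timeout is an arbitrary assumption.
func receiveOneResponse(w WatchStream) (WatchResponse, error) {
	select {
	case resp := <-w.Chan():
		// a single WatchResponse may carry one or more events for the watcher
		return resp, nil
	case <-time.After(time.Second):
		return WatchResponse{}, fmt.Errorf("no watch response within 1s")
	}
}
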
func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	wt, _ := w.Watch(0, testKey, nil, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if s.synced.contains(string(testKey)) {
		// the key should have been deleted
		t.Errorf("existence = true, want false")
	}
}

// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls syncWatchers
	// method to sync watchers in unsynced map. We want to keep watchers
	// in unsynced to test if syncWatchers works as expected.
	s := &watchableStore{
		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// and force watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	// create watcherN of watch ids to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use 1 to keep watchers in unsynced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After running CancelFunc
	//
	// unsynced should be empty
	// because cancel removes watcher from unsynced
	if size := s.unsynced.size(); size != 0 {
		t.Errorf("unsynced size = %d, want 0", size)
	}
}

// TestSyncWatchers populates the unsynced watcher map and tests the syncWatchers
// method to see if it correctly sends events to the channels of unsynced watchers
// and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// specify rev as 1 to keep watchers in unsynced
		w.Watch(0, testKey, nil, 1)
	}

	// Before running s.syncWatchers(), synced should be empty because we manually
	// populated unsynced only.
	sws := s.synced.watcherSetByKey(string(testKey))
	uws := s.unsynced.watcherSetByKey(string(testKey))

	if len(sws) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
	}
	// unsynced should not be empty because we manually populated unsynced only
	if len(uws) != watcherN {
		t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
	}

	// this should move all unsynced watchers to synced ones
	s.syncWatchers()

	sws = s.synced.watcherSetByKey(string(testKey))
	uws = s.unsynced.watcherSetByKey(string(testKey))

	// After running s.syncWatchers(), synced should not be empty because syncWatchers
	// populates synced in this test case.
	if len(sws) != watcherN {
		t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
	}

	// unsynced should be empty because syncWatchers is expected to move all watchers
	// from unsynced to synced in this test case.
	if len(uws) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(uws))
	}

	for w := range sws {
		if w.minRev != s.Rev()+1 {
			t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
		}
	}

	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	evs := (<-w.(*watchStream).ch).Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != mvccpb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}

// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	maxRev := 10
	compactRev := int64(5)
	for i := 0; i < maxRev; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}
	_, err := s.Compact(traceutil.TODO(), compactRev)
	if err != nil {
		t.Fatalf("failed to compact kv (%v)", err)
	}

	w := s.NewWatchStream()
	wt, _ := w.Watch(0, testKey, nil, compactRev-1)

	select {
	case resp := <-w.Chan():
		if resp.WatchID != wt {
			t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
		}
		if resp.CompactRevision == 0 {
			t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev)
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("failed to receive response (timeout)")
	}
}

func TestWatchFutureRev(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	w := s.NewWatchStream()
	wrev := int64(10)
	w.Watch(0, testKey, nil, wrev)

	for i := 0; i < 10; i++ {
		rev := s.Put(testKey, testValue, lease.NoLease)
		if rev >= wrev {
			break
		}
	}

	select {
	case resp := <-w.Chan():
		if resp.Revision != wrev {
			t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("failed to get events from the response")
		}
		if resp.Events[0].Kv.ModRevision != wrev {
			t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second.")
	}
}

func TestWatchRestore(t *testing.T) {
	test := func(delay time.Duration) func(t *testing.T) {
		return func(t *testing.T) {
			b, tmpPath := backend.NewDefaultTmpBackend()
			s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
			defer cleanup(s, b, tmpPath)

			testKey := []byte("foo")
			testValue := []byte("bar")
			rev := s.Put(testKey, testValue, lease.NoLease)

			newBackend, newPath := backend.NewDefaultTmpBackend()
			newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, nil, StoreConfig{})
			defer cleanup(newStore, newBackend, newPath)

			w := newStore.NewWatchStream()
			w.Watch(0, testKey, nil, rev-1)

			time.Sleep(delay)

			newStore.Restore(b)
			select {
			case resp := <-w.Chan():
				if resp.Revision != rev {
					t.Fatalf("rev = %d, want %d", resp.Revision, rev)
				}
				if len(resp.Events) != 1 {
					t.Fatalf("failed to get events from the response")
				}
				if resp.Events[0].Kv.ModRevision != rev {
					t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
				}
			case <-time.After(time.Second):
				t.Fatal("failed to receive event in 1 second.")
			}
		}
	}

	t.Run("Normal", test(0))
	t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
}

// TestWatchRestoreSyncedWatcher tests such a case that:
//   1. watcher is created with a future revision "math.MaxInt64 - 2"
//   2. watcher with a future revision is added to "synced" watcher group
//   3. restore/overwrite storage with snapshot of a higher last revision
//   4. restore operation moves "synced" to "unsynced" watcher group
//   5. choose the watcher from step 1, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
	b1, b1Path := backend.NewDefaultTmpBackend()
	s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, nil, StoreConfig{})
	defer cleanup(s1, b1, b1Path)

	b2, b2Path := backend.NewDefaultTmpBackend()
	s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, nil, StoreConfig{})
	defer cleanup(s2, b2, b2Path)

	testKey, testValue := []byte("foo"), []byte("bar")
	rev := s1.Put(testKey, testValue, lease.NoLease)
	startRev := rev + 2

	// create a watcher with a future revision
	// add to "synced" watcher group (startRev > s.store.currentRev)
	w1 := s1.NewWatchStream()
	w1.Watch(0, testKey, nil, startRev)

	// make "s2" end up with a higher last revision
	s2.Put(testKey, testValue, lease.NoLease)
	s2.Put(testKey, testValue, lease.NoLease)

	// overwrite storage with higher revisions
	if err := s1.Restore(b2); err != nil {
		t.Fatal(err)
	}

	// wait for the next "syncWatchersLoop" iteration,
	// in which the unsynced watcher should be chosen
	time.Sleep(2 * time.Second)

	// trigger events for "startRev"
	s1.Put(testKey, testValue, lease.NoLease)

	select {
	case resp := <-w1.Chan():
		if resp.Revision != startRev {
			t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
		}
		if resp.Events[0].Kv.ModRevision != startRev {
			t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second")
	}
}

// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	oldMaxRevs := watchBatchMaxRevs
	defer func() {
		watchBatchMaxRevs = oldMaxRevs
		s.store.Close()
		os.Remove(tmpPath)
	}()
	batches := 3
	watchBatchMaxRevs = 4

	v := []byte("foo")
	for i := 0; i < watchBatchMaxRevs*batches; i++ {
		s.Put(v, v, lease.NoLease)
	}

	w := s.NewWatchStream()
	w.Watch(0, v, nil, 1)
	for i := 0; i < batches; i++ {
		if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
			t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
		}
	}

	s.store.revMu.Lock()
	defer s.store.revMu.Unlock()
	if size := s.synced.size(); size != 1 {
		t.Errorf("synced size = %d, want 1", size)
	}
}

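// A minimal sketch (not part of the original tests) of draining batched
// responses until an expected number of events has been received, mirroring
// what TestWatchBatchUnsynced does inline above. The ten-second guard timeout
// is an arbitrary assumption.
func drainEvents(w WatchStream, want int) (int, error) {
	got := 0
	timeout := time.After(10 * time.Second)
	for got < want {
		select {
		case resp := <-w.Chan():
			// each response carries at most watchBatchMaxRevs events
			got += len(resp.Events)
		case <-timeout:
			return got, fmt.Errorf("timed out after receiving %d/%d events", got, want)
		}
	}
	return got, nil
}
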
func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []mvccpb.Event{
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync []*watcher
		evs  []mvccpb.Event
		wwe  map[*watcher][]mvccpb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			nil,
			evs,
			map[*watcher][]mvccpb.Event{},
		},
		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			[]*watcher{ws[2]},
			evs[:1],
			map[*watcher][]mvccpb.Event{},
		},
		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			[]*watcher{ws[1]},
			evs[1:2],
			map[*watcher][]mvccpb.Event{
				ws[1]: evs[1:2],
			},
		},
		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return wwe
		// with the matching watcher
		{
			[]*watcher{ws[0], ws[2]},
			evs[2:],
			map[*watcher][]mvccpb.Event{
				ws[2]: evs[2:],
			},
		},
		// two watchers in sync that watch the same key, two events that
		// match the keys should return wwe with those two watchers
		{
			[]*watcher{ws[0], ws[1]},
			evs[:2],
			map[*watcher][]mvccpb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}

	for i, tt := range tests {
		wg := newWatcherGroup()
		for _, w := range tt.sync {
			wg.add(w)
		}

		gwe := newWatcherBatch(&wg, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, eb := range gwe {
			if len(eb.evs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
				t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
			}
		}
	}
}

// TestWatchVictims tests that watchable store delivers watch events
// when the watch channel is temporarily clogged with too many events.
func TestWatchVictims(t *testing.T) {
	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync

	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
	}()

	chanBufLen, maxWatchersPerSync = 1, 2
	numPuts := chanBufLen * 64
	testKey, testValue := []byte("foo"), []byte("bar")

	var wg sync.WaitGroup
	numWatches := maxWatchersPerSync * 128
	errc := make(chan error, numWatches)
	wg.Add(numWatches)
	for i := 0; i < numWatches; i++ {
		go func() {
			w := s.NewWatchStream()
			w.Watch(0, testKey, nil, 1)
			defer func() {
				w.Close()
				wg.Done()
			}()
			tc := time.After(10 * time.Second)
			evs, nextRev := 0, int64(2)
			for evs < numPuts {
				select {
				case <-tc:
					errc <- fmt.Errorf("time out")
					return
				case wr := <-w.Chan():
					evs += len(wr.Events)
					for _, ev := range wr.Events {
						if ev.Kv.ModRevision != nextRev {
							errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
							return
						}
						nextRev++
					}
					time.Sleep(time.Millisecond)
				}
			}
			if evs != numPuts {
				errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
				return
			}
			select {
			case <-w.Chan():
				errc <- fmt.Errorf("unexpected response")
			default:
			}
		}()
		time.Sleep(time.Millisecond)
	}

	var wgPut sync.WaitGroup
	wgPut.Add(numPuts)
	for i := 0; i < numPuts; i++ {
		go func() {
			defer wgPut.Done()
			s.Put(testKey, testValue, lease.NoLease)
		}()
	}
	wgPut.Wait()

	wg.Wait()
	select {
	case err := <-errc:
		t.Fatal(err)
	default:
	}
}

// TestStressWatchCancelClose tests closing a watch stream while
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey, testValue := []byte("foo"), []byte("bar")
	var wg sync.WaitGroup
	readyc := make(chan struct{})
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			defer wg.Done()
			w := s.NewWatchStream()
			ids := make([]WatchID, 10)
			for i := range ids {
				ids[i], _ = w.Watch(0, testKey, nil, 0)
			}
			<-readyc
			wg.Add(1 + len(ids)/2)
			for i := range ids[:len(ids)/2] {
				go func(n int) {
					defer wg.Done()
					w.Cancel(ids[n])
				}(i)
			}
			go func() {
				defer wg.Done()
				w.Close()
			}()
		}()
	}

	close(readyc)
	for i := 0; i < 100; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}

	wg.Wait()
}
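
// The restore tests above call a cleanup helper that is defined elsewhere in
// this package (kvstore_test.go in the etcd source tree). A minimal sketch of
// such a helper is below, assuming both the store and the backend expose Close;
// it is named differently here to avoid redeclaring the real one.
func cleanupStoreAndBackend(s KV, b backend.Backend, path string) {
	s.Close()       // close the store, flushing pending writes
	b.Close()       // close the backing bolt database
	os.Remove(path) // remove the temporary backend file
}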