// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/mvcc/backend"
	"go.etcd.io/etcd/mvcc/mvccpb"

	"go.uber.org/zap"
)

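// TestWatch tests that watching on a key adds the watcher to the synced watcher group.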
func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	w.Watch(0, testKey, nil, 0)

	if !s.synced.contains(string(testKey)) {
		// the key must have had an entry in synced
		t.Errorf("existence = false, want true")
	}
}

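// TestNewWatcherCancel tests that canceling a watcher removes it from the synced watcher group.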
func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	wt, _ := w.Watch(0, testKey, nil, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if s.synced.contains(string(testKey)) {
		// the key should have been deleted
		t.Errorf("existence = true, want false")
	}
}

// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls the syncWatchers
	// method to sync watchers in the unsynced map. We want to keep watchers
	// in unsynced to test if canceling unsynced watchers works as expected.
	s := &watchableStore{
		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watchers' startRev to 1,
	// forcing them to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	// create watcherN of watch ids to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use 1 to keep watchers in unsynced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After running CancelFunc
	//
	// unsynced should be empty
	// because cancel removes watcher from unsynced
	if size := s.unsynced.size(); size != 0 {
		t.Errorf("unsynced size = %d, want 0", size)
	}
}

// TestSyncWatchers populates the unsynced watcher map and tests the syncWatchers
// method to see if it correctly sends events to the channels of unsynced watchers
// and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number for watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// specify rev as 1 to keep watchers in unsynced
		w.Watch(0, testKey, nil, 1)
	}

	// Before running s.syncWatchers(), synced should be empty because we manually
	// populated unsynced only.
	sws := s.synced.watcherSetByKey(string(testKey))
	uws := s.unsynced.watcherSetByKey(string(testKey))

	if len(sws) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
	}
	// unsynced should not be empty because we manually populated unsynced only
	if len(uws) != watcherN {
		t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
	}

	// this should move all unsynced watchers to synced ones
	s.syncWatchers()

	sws = s.synced.watcherSetByKey(string(testKey))
	uws = s.unsynced.watcherSetByKey(string(testKey))

	// After running s.syncWatchers(), synced should not be empty because syncWatchers
	// populates synced in this test case.
	if len(sws) != watcherN {
		t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
	}

	// unsynced should be empty because syncWatchers is expected to move all watchers
	// from unsynced to synced in this test case.
	if len(uws) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(uws))
	}

	for w := range sws {
		if w.minRev != s.Rev()+1 {
			t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
		}
	}

	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	evs := (<-w.(*watchStream).ch).Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != mvccpb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}

// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	maxRev := 10
	compactRev := int64(5)
	for i := 0; i < maxRev; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}
	_, err := s.Compact(compactRev)
	if err != nil {
		t.Fatalf("failed to compact kv (%v)", err)
	}

	w := s.NewWatchStream()
	wt, _ := w.Watch(0, testKey, nil, compactRev-1)

	select {
	case resp := <-w.Chan():
		if resp.WatchID != wt {
			t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
		}
		if resp.CompactRevision == 0 {
			t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev)
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("failed to receive response (timeout)")
	}
}

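// TestWatchFutureRev tests that a watcher on a future revision only receives
// an event once puts advance the store to that revision.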
func TestWatchFutureRev(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	w := s.NewWatchStream()
	wrev := int64(10)
	w.Watch(0, testKey, nil, wrev)

	for i := 0; i < 10; i++ {
		rev := s.Put(testKey, testValue, lease.NoLease)
		if rev >= wrev {
			break
		}
	}

	select {
	case resp := <-w.Chan():
		if resp.Revision != wrev {
			t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("failed to get events from the response")
		}
		if resp.Events[0].Kv.ModRevision != wrev {
			t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second.")
	}
}

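// TestWatchRestore tests that a watcher registered before the store is restored
// from another backend still receives events for revisions present in the restored data.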
func TestWatchRestore(t *testing.T) {
	test := func(delay time.Duration) func(t *testing.T) {
		return func(t *testing.T) {
			b, tmpPath := backend.NewDefaultTmpBackend()
			s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
			defer cleanup(s, b, tmpPath)

			testKey := []byte("foo")
			testValue := []byte("bar")
			rev := s.Put(testKey, testValue, lease.NoLease)

			newBackend, newPath := backend.NewDefaultTmpBackend()
			newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, nil)
			defer cleanup(newStore, newBackend, newPath)

			w := newStore.NewWatchStream()
			w.Watch(0, testKey, nil, rev-1)

			time.Sleep(delay)

			newStore.Restore(b)
			select {
			case resp := <-w.Chan():
				if resp.Revision != rev {
					t.Fatalf("rev = %d, want %d", resp.Revision, rev)
				}
				if len(resp.Events) != 1 {
					t.Fatalf("failed to get events from the response")
				}
				if resp.Events[0].Kv.ModRevision != rev {
					t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
				}
			case <-time.After(time.Second):
				t.Fatal("failed to receive event in 1 second.")
			}
		}
	}

	t.Run("Normal", test(0))
	t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
}

// TestWatchRestoreSyncedWatcher tests such a case that:
//   1. a watcher is created with a future revision (startRev = current revision + 2)
//   2. the watcher with a future revision is added to the "synced" watcher group
//   3. storage is restored/overwritten with a snapshot of a higher last revision
//   4. the restore operation moves "synced" watchers to the "unsynced" watcher group
//   5. the watcher from step 1 must be chosen and synced without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
	b1, b1Path := backend.NewDefaultTmpBackend()
	s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, nil)
	defer cleanup(s1, b1, b1Path)

	b2, b2Path := backend.NewDefaultTmpBackend()
	s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, nil)
	defer cleanup(s2, b2, b2Path)

	testKey, testValue := []byte("foo"), []byte("bar")
	rev := s1.Put(testKey, testValue, lease.NoLease)
	startRev := rev + 2

	// create a watcher with a future revision
	// add to "synced" watcher group (startRev > s.store.currentRev)
	w1 := s1.NewWatchStream()
	w1.Watch(0, testKey, nil, startRev)

	// make "s2" end up with a higher last revision
	s2.Put(testKey, testValue, lease.NoLease)
	s2.Put(testKey, testValue, lease.NoLease)

	// overwrite storage with higher revisions
	if err := s1.Restore(b2); err != nil {
		t.Fatal(err)
	}

	// wait for next "syncWatchersLoop" iteration
	// and the unsynced watcher should be chosen
	time.Sleep(2 * time.Second)

	// trigger events for "startRev"
	s1.Put(testKey, testValue, lease.NoLease)

	select {
	case resp := <-w1.Chan():
		if resp.Revision != startRev {
			t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
		}
		if resp.Events[0].Kv.ModRevision != startRev {
			t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second")
	}
}

// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	oldMaxRevs := watchBatchMaxRevs
	defer func() {
		watchBatchMaxRevs = oldMaxRevs
		s.store.Close()
		os.Remove(tmpPath)
	}()
	batches := 3
	watchBatchMaxRevs = 4

	v := []byte("foo")
	for i := 0; i < watchBatchMaxRevs*batches; i++ {
		s.Put(v, v, lease.NoLease)
	}

	w := s.NewWatchStream()
	w.Watch(0, v, nil, 1)
	for i := 0; i < batches; i++ {
		if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
			t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
		}
	}

	s.store.revMu.Lock()
	defer s.store.revMu.Unlock()
	if size := s.synced.size(); size != 1 {
		t.Errorf("synced size = %d, want 1", size)
	}
}

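// TestNewMapwatcherToEventMap tests that newWatcherBatch maps each event to the
// watchers whose watched keys match that event.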
func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []mvccpb.Event{
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync []*watcher
		evs  []mvccpb.Event

		wwe map[*watcher][]mvccpb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			nil,
			evs,
			map[*watcher][]mvccpb.Event{},
		},

		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			[]*watcher{ws[2]},
			evs[:1],
			map[*watcher][]mvccpb.Event{},
		},

		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			[]*watcher{ws[1]},
			evs[1:2],
			map[*watcher][]mvccpb.Event{
				ws[1]: evs[1:2],
			},
		},

		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return wwe
		// with the matching watcher
		{
			[]*watcher{ws[0], ws[2]},
			evs[2:],
			map[*watcher][]mvccpb.Event{
				ws[2]: evs[2:],
			},
		},

		// two watchers in sync that watch two different keys, two events that
		// match those keys should return wwe with both watchers
		{
			[]*watcher{ws[0], ws[1]},
			evs[:2],
			map[*watcher][]mvccpb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}

	for i, tt := range tests {
		wg := newWatcherGroup()
		for _, w := range tt.sync {
			wg.add(w)
		}

		gwe := newWatcherBatch(&wg, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, eb := range gwe {
			if len(eb.evs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
				t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
			}
		}
	}
}

// TestWatchVictims tests that watchable store delivers watch events
// when the watch channel is temporarily clogged with too many events.
func TestWatchVictims(t *testing.T) {
	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync

	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
	}()

	chanBufLen, maxWatchersPerSync = 1, 2
	numPuts := chanBufLen * 64
	testKey, testValue := []byte("foo"), []byte("bar")

	var wg sync.WaitGroup
	numWatches := maxWatchersPerSync * 128
	errc := make(chan error, numWatches)
	wg.Add(numWatches)
	for i := 0; i < numWatches; i++ {
		go func() {
			w := s.NewWatchStream()
			w.Watch(0, testKey, nil, 1)
			defer func() {
				w.Close()
				wg.Done()
			}()
			tc := time.After(10 * time.Second)
			evs, nextRev := 0, int64(2)
			for evs < numPuts {
				select {
				case <-tc:
					errc <- fmt.Errorf("time out")
					return
				case wr := <-w.Chan():
					evs += len(wr.Events)
					for _, ev := range wr.Events {
						if ev.Kv.ModRevision != nextRev {
							errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
							return
						}
						nextRev++
					}
					time.Sleep(time.Millisecond)
				}
			}
			if evs != numPuts {
				errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
				return
			}
			select {
			case <-w.Chan():
				errc <- fmt.Errorf("unexpected response")
			default:
			}
		}()
		time.Sleep(time.Millisecond)
	}

	var wgPut sync.WaitGroup
	wgPut.Add(numPuts)
	for i := 0; i < numPuts; i++ {
		go func() {
			defer wgPut.Done()
			s.Put(testKey, testValue, lease.NoLease)
		}()
	}
	wgPut.Wait()

	wg.Wait()
	select {
	case err := <-errc:
		t.Fatal(err)
	default:
	}
}

// TestStressWatchCancelClose tests closing a watch stream while
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey, testValue := []byte("foo"), []byte("bar")
	var wg sync.WaitGroup
	readyc := make(chan struct{})
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			defer wg.Done()
			w := s.NewWatchStream()
			ids := make([]WatchID, 10)
			for i := range ids {
				ids[i], _ = w.Watch(0, testKey, nil, 0)
			}
			<-readyc
			wg.Add(1 + len(ids)/2)
			for i := range ids[:len(ids)/2] {
				go func(n int) {
					defer wg.Done()
					w.Cancel(ids[n])
				}(i)
			}
			go func() {
				defer wg.Done()
				w.Close()
			}()
		}()
	}

	close(readyc)
	for i := 0; i < 100; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}

	wg.Wait()
}