watchable_store_test.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"bytes"
	"os"
	"reflect"
	"testing"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/storage/backend"
	"github.com/coreos/etcd/storage/storagepb"
)
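
// TestWatch tests that watching a key adds the watcher to the store's
// synced watcher set.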
func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	w.Watch(testKey, true, 0)

	if _, ok := s.synced[string(testKey)]; !ok {
		// the key must have an entry in synced
		t.Errorf("existence = %v, want true", ok)
	}
}
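
// TestNewWatcherCancel tests that canceling a watcher removes it from the
// synced watcher set.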
func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	wt := w.Watch(testKey, true, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if _, ok := s.synced[string(testKey)]; ok {
		// the key should have been deleted from synced
		t.Errorf("existence = %v, want false", ok)
	}
}

// TestCancelUnsynced tests that canceling watchers removes them from the
// unsynced watcher set.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls the syncWatchers
	// method to sync watchers in the unsynced map. We want to keep
	// watchers in unsynced so that cancel has something to remove.
	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}),
		unsynced: make(watcherSetByKey),

		// to make the test not crash from assigning to a nil map.
		// 'synced' doesn't get populated in this test.
		synced: make(watcherSetByKey),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watchers' startRev to 1 to
	// force them to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	// create watcherN watch IDs to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use startRev 1 to keep the watchers in unsynced
		watchIDs[i] = w.Watch(testKey, true, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After canceling,
	//
	// unsynced should be empty
	// because cancel removes watchers from unsynced
	if len(s.unsynced) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(s.unsynced))
	}
}

// TestSyncWatchers populates the unsynced watcher map and tests the
// syncWatchers method to see if it correctly sends events to the channel
// of unsynced watchers and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}),
		unsynced: make(watcherSetByKey),
		synced:   make(watcherSetByKey),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// use startRev 1 to keep the watchers in unsynced
		w.Watch(testKey, true, 1)
	}

	// Before running s.syncWatchers(),
	//
	// synced should be empty
	// because we manually populated unsynced only
	if len(s.synced[string(testKey)]) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(s.synced[string(testKey)]))
	}
	// unsynced should not be empty
	// because we manually populated unsynced only
	if len(s.unsynced) == 0 {
		t.Errorf("unsynced size = %d, want %d", len(s.unsynced), watcherN)
	}

	// this should move all unsynced watchers to synced ones
	s.syncWatchers()

	// After running s.syncWatchers(),
	//
	// synced should not be empty
	// because syncWatchers populates synced in this test case
	if len(s.synced[string(testKey)]) == 0 {
		t.Errorf("synced[string(testKey)] size = 0, want %d", len(s.synced[string(testKey)]))
	}
	// unsynced should be empty
	// because syncWatchers is expected to move all watchers
	// from unsynced to synced in this test case
	if len(s.unsynced) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(s.unsynced))
	}

	// All of the watchers share one channel, so we only need to check
	// that one shared channel (see watcher.go for more detail).
	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	wr := <-w.(*watchStream).ch
	evs := wr.Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != storagepb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, storagepb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}
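
// TestNewMapwatcherToEventMap tests that newWatcherToEventMap maps each event
// only to the watchers whose watched key matches the event's key.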
func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync watcherSetByKey
		evs  []storagepb.Event
		wwe  map[*watcher][]storagepb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			watcherSetByKey{},
			evs,
			map[*watcher][]storagepb.Event{},
		},
		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			watcherSetByKey{
				string(k2): {ws[2]: struct{}{}},
			},
			evs[:1],
			map[*watcher][]storagepb.Event{},
		},
		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			watcherSetByKey{
				string(k1): {ws[1]: struct{}{}},
			},
			evs[1:2],
			map[*watcher][]storagepb.Event{
				ws[1]: evs[1:2],
			},
		},
		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return
		// wwe with the matching watcher
		{
			watcherSetByKey{
				string(k0): {ws[0]: struct{}{}},
				string(k2): {ws[2]: struct{}{}},
			},
			evs[2:],
			map[*watcher][]storagepb.Event{
				ws[2]: evs[2:],
			},
		},
		// two watchers in sync that watch two different keys, two events
		// that match those keys should return wwe with both watchers
		{
			watcherSetByKey{
				string(k0): {ws[0]: struct{}{}},
				string(k1): {ws[1]: struct{}{}},
			},
			evs[:2],
			map[*watcher][]storagepb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}

	for i, tt := range tests {
		gwe := newWatcherToEventMap(tt.sync, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, mevs := range gwe {
			if len(mevs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(mevs) got = %d, want = %d", i, len(mevs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(mevs, tt.wwe[w]) {
				t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
			}
		}
	}
}