watchable_store_test.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"bytes"
	"os"
	"reflect"
	"testing"

	"github.com/coreos/etcd/storage/backend"
	"github.com/coreos/etcd/storage/storagepb"
)
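
// TestWatch tests that watching a key adds an entry for
// its watcher in the synced map.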
func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, NoLease)

	w := s.NewWatchStream()
	w.Watch(testKey, true, 0)

	if _, ok := s.synced[string(testKey)]; !ok {
		// the key must have an entry in synced
		t.Errorf("existence = %v, want true", ok)
	}
}
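
// TestNewWatcherCancel tests that canceling a watcher
// removes it from the synced map.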
func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, NoLease)

	w := s.NewWatchStream()
	wt := w.Watch(testKey, true, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if _, ok := s.synced[string(testKey)]; ok {
		// the key should have been deleted
		t.Errorf("existence = %v, want false", ok)
	}
}

// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls the syncWatchers
	// method to sync watchers in the unsynced map. We want to keep
	// watchers in unsynced to test if Cancel correctly removes them.
	s := &watchableStore{
		store:    NewStore(b),
		unsynced: make(map[*watcher]struct{}),

		// to make the test not crash from assigning to nil map.
		// 'synced' doesn't get populated in this test.
		synced: make(map[string]map[*watcher]struct{}),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watchers' startRev to 1 to
	// force them into unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	// create watcherN watch IDs to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use startRev 1 to keep the watchers in unsynced
		watchIDs[i] = w.Watch(testKey, true, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After running CancelFunc
	//
	// unsynced should be empty
	// because Cancel removes watchers from unsynced
	if len(s.unsynced) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(s.unsynced))
	}
}

// TestSyncWatchers populates the unsynced watcher map and then calls the
// syncWatchers method to see if it correctly sends events to the channel
// of unsynced watchers and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(b),
		unsynced: make(map[*watcher]struct{}),
		synced:   make(map[string]map[*watcher]struct{}),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// use startRev 1 to keep the watchers in unsynced
		w.Watch(testKey, true, 1)
	}

	// Before running s.syncWatchers()
	//
	// synced should be empty
	// because we manually populated unsynced only
	if len(s.synced[string(testKey)]) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(s.synced[string(testKey)]))
	}
	// unsynced should not be empty
	// because we manually populated unsynced only
	if len(s.unsynced) == 0 {
		t.Errorf("unsynced size = %d, want %d", len(s.unsynced), watcherN)
	}

	// this should move all unsynced watchers to synced
	s.syncWatchers()

	// After running s.syncWatchers()
	//
	// synced should not be empty
	// because syncWatchers populates synced in this test case
	if len(s.synced[string(testKey)]) == 0 {
		t.Errorf("synced[string(testKey)] size = 0, want %d", len(s.synced[string(testKey)]))
	}
	// unsynced should be empty
	// because syncWatchers is expected to move all watchers
	// from unsynced to synced in this test case
	if len(s.unsynced) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(s.unsynced))
	}

	// All of the watchers share one channel, so we only need to check
	// that one shared channel (see watcher.go for more detail).
	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	wr := <-w.(*watchStream).ch
	evs := wr.Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != storagepb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, storagepb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}
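
// TestUnsafeAddWatcher tests that unsafeAddWatcher correctly updates
// the synced map as new watchers are added.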
func TestUnsafeAddWatcher(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, NoLease)

	size := 10
	ws := make([]*watcher, size)
	for i := 0; i < size; i++ {
		ws[i] = &watcher{
			key:    testKey,
			prefix: true,
			cur:    0,
		}
	}

	// test if unsafeAddWatcher correctly updates the
	// synced map when adding a new watcher.
	for i, wa := range ws {
		if err := unsafeAddWatcher(&s.synced, string(testKey), wa); err != nil {
			t.Errorf("#%d: error = %v, want nil", i, err)
		}
		if v, ok := s.synced[string(testKey)]; !ok {
			t.Errorf("#%d: ok = %v, want ok true", i, ok)
		} else {
			if len(v) != i+1 {
				t.Errorf("#%d: len(v) = %d, want %d", i, len(v), i+1)
			}
			if _, ok := v[wa]; !ok {
				t.Errorf("#%d: ok = %v, want ok true", i, ok)
			}
		}
	}
}
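
// TestNewMapwatcherToEventMap tests that newWatcherToEventMap maps each
// watcher in the synced map to exactly the events that match its key.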
func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync map[string]map[*watcher]struct{}
		evs  []storagepb.Event

		wwe map[*watcher][]storagepb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			map[string]map[*watcher]struct{}{},
			evs,
			map[*watcher][]storagepb.Event{},
		},
		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			map[string]map[*watcher]struct{}{
				string(k2): {ws[2]: struct{}{}},
			},
			evs[:1],
			map[*watcher][]storagepb.Event{},
		},
		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			map[string]map[*watcher]struct{}{
				string(k1): {ws[1]: struct{}{}},
			},
			evs[1:2],
			map[*watcher][]storagepb.Event{
				ws[1]: evs[1:2],
			},
		},
		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return
		// wwe with the matching watcher
		{
			map[string]map[*watcher]struct{}{
				string(k0): {ws[0]: struct{}{}},
				string(k2): {ws[2]: struct{}{}},
			},
			evs[2:],
			map[*watcher][]storagepb.Event{
				ws[2]: evs[2:],
			},
		},
		// two watchers in sync that watch two different keys, two events
		// that match those keys should return wwe with both watchers
		{
			map[string]map[*watcher]struct{}{
				string(k0): {ws[0]: struct{}{}},
				string(k1): {ws[1]: struct{}{}},
			},
			evs[:2],
			map[*watcher][]storagepb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}

	for i, tt := range tests {
		gwe := newWatcherToEventMap(tt.sync, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, mevs := range gwe {
			if len(mevs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(mevs) got = %d, want = %d", i, len(mevs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(mevs, tt.wwe[w]) {
				t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
			}
		}
	}
}