watchable_store.go
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"sync"
	"time"

	"github.com/coreos/etcd/internal/lease"
	"github.com/coreos/etcd/internal/mvcc/backend"
	"github.com/coreos/etcd/internal/mvcc/mvccpb"
)

// non-const so modifiable by tests
var (
	// chanBufLen is the length of the buffered chan
	// for sending out watched events.
	// TODO: find a good buf value. 1024 is just a random one that
	// seems to be reasonable.
	chanBufLen = 1024

	// maxWatchersPerSync is the number of watchers to sync in a single batch
	maxWatchersPerSync = 512
)
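
// watchable is the interface the watch machinery expects from the store:
// registering watchers, answering progress requests, and reporting the
// current revision.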
type watchable interface {
	watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
	progress(w *watcher)
	rev() int64
}

type watchableStore struct {
	*store

	// mu protects watcher groups and batches. It should never be locked
	// before locking store.mu to avoid deadlock.
	mu sync.RWMutex

	// victims are watcher batches that were blocked on the watch channel
	victims []watcherBatch
	victimc chan struct{}

	// contains all unsynced watchers that need to sync with events that have happened
	unsynced watcherGroup

	// contains all synced watchers that are in sync with the progress of the store.
	// The key of the map is the key that the watcher watches on.
	synced watcherGroup

	stopc chan struct{}
	wg    sync.WaitGroup
}

// cancelFunc updates unsynced and synced maps when running
// cancel operations.
type cancelFunc func()
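
// New creates a ConsistentWatchableKV on top of backend b, wiring the
// store's read/write views so mutations produce watch events.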
func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV {
	return newWatchableStore(b, le, ig)
}

func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore {
	s := &watchableStore{
		store:    NewStore(b, le, ig),
		victimc:  make(chan struct{}, 1),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
		stopc:    make(chan struct{}),
	}
	s.store.ReadView = &readView{s}
	s.store.WriteView = &writeView{s}
	if s.le != nil {
		// use this store as the deleter so revokes trigger watch events
		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
	}
	s.wg.Add(2)
	go s.syncWatchersLoop()
	go s.syncVictimsLoop()
	return s
}
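
// Close stops the background sync loops, waits for them to exit, and then
// closes the underlying store.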
func (s *watchableStore) Close() error {
	close(s.stopc)
	s.wg.Wait()
	return s.store.Close()
}
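
// NewWatchStream returns a stream with its own buffered response channel;
// all watchers created on the stream share that channel.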
func (s *watchableStore) NewWatchStream() WatchStream {
	watchStreamGauge.Inc()
	return &watchStream{
		watchable: s,
		ch:        make(chan WatchResponse, chanBufLen),
		cancels:   make(map[WatchID]cancelFunc),
		watchers:  make(map[WatchID]*watcher),
	}
}
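
// watch registers a watcher on the key (or the range [key, end)). A startRev
// of 0 or greater than the current revision starts the watcher synced;
// otherwise it joins the unsynced group and is caught up by syncWatchersLoop.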
func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
	wa := &watcher{
		key:    key,
		end:    end,
		minRev: startRev,
		id:     id,
		ch:     ch,
		fcs:    fcs,
	}

	s.mu.Lock()
	s.revMu.RLock()
	synced := startRev > s.store.currentRev || startRev == 0
	if synced {
		wa.minRev = s.store.currentRev + 1
		if startRev > wa.minRev {
			wa.minRev = startRev
		}
	}
	if synced {
		s.synced.add(wa)
	} else {
		slowWatcherGauge.Inc()
		s.unsynced.add(wa)
	}
	s.revMu.RUnlock()
	s.mu.Unlock()

	watcherGauge.Inc()

	return wa, func() { s.cancelWatcher(wa) }
}

// cancelWatcher removes references to the watcher from the watchableStore
func (s *watchableStore) cancelWatcher(wa *watcher) {
	for {
		s.mu.Lock()
		if s.unsynced.delete(wa) {
			slowWatcherGauge.Dec()
			break
		} else if s.synced.delete(wa) {
			break
		} else if wa.compacted {
			break
		} else if wa.ch == nil {
			// already canceled (e.g., cancel/close race)
			break
		}

		if !wa.victim {
			panic("watcher not victim but not in watch groups")
		}

		var victimBatch watcherBatch
		for _, wb := range s.victims {
			if wb[wa] != nil {
				victimBatch = wb
				break
			}
		}
		if victimBatch != nil {
			slowWatcherGauge.Dec()
			delete(victimBatch, wa)
			break
		}

		// victim being processed so not accessible; retry
		s.mu.Unlock()
		time.Sleep(time.Millisecond)
	}

	watcherGauge.Dec()
	wa.ch = nil
	s.mu.Unlock()
}
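
// Restore replaces the store contents with the data in backend b; any synced
// watchers are moved back to the unsynced group so they resync against the
// restored state.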
func (s *watchableStore) Restore(b backend.Backend) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	err := s.store.Restore(b)
	if err != nil {
		return err
	}

	for wa := range s.synced.watchers {
		s.unsynced.add(wa)
	}
	s.synced = newWatcherGroup()
	return nil
}

// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
func (s *watchableStore) syncWatchersLoop() {
	defer s.wg.Done()

	for {
		s.mu.RLock()
		st := time.Now()
		lastUnsyncedWatchers := s.unsynced.size()
		s.mu.RUnlock()

		unsyncedWatchers := 0
		if lastUnsyncedWatchers > 0 {
			unsyncedWatchers = s.syncWatchers()
		}
		syncDuration := time.Since(st)

		waitDuration := 100 * time.Millisecond
		// more work pending?
		if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
			// be fair to other store operations by yielding time taken
			waitDuration = syncDuration
		}

		select {
		case <-time.After(waitDuration):
		case <-s.stopc:
			return
		}
	}
}

// syncVictimsLoop tries to write precomputed watcher responses to
// watchers that had a blocked watcher channel
func (s *watchableStore) syncVictimsLoop() {
	defer s.wg.Done()

	for {
		for s.moveVictims() != 0 {
			// try to update all victim watchers
		}
		s.mu.RLock()
		isEmpty := len(s.victims) == 0
		s.mu.RUnlock()

		var tickc <-chan time.Time
		if !isEmpty {
			tickc = time.After(10 * time.Millisecond)
		}

		select {
		case <-tickc:
		case <-s.victimc:
		case <-s.stopc:
			return
		}
	}
}

// moveVictims tries to update watches with already pending event data
func (s *watchableStore) moveVictims() (moved int) {
	s.mu.Lock()
	victims := s.victims
	s.victims = nil
	s.mu.Unlock()

	var newVictim watcherBatch
	for _, wb := range victims {
		// try to send responses again
		for w, eb := range wb {
			// watcher has observed the store up to, but not including, w.minRev
			rev := w.minRev - 1
			if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
				pendingEventsGauge.Add(float64(len(eb.evs)))
			} else {
				if newVictim == nil {
					newVictim = make(watcherBatch)
				}
				newVictim[w] = eb
				continue
			}
			moved++
		}

		// assign completed victim watchers to unsync/sync
		s.mu.Lock()
		s.store.revMu.RLock()
		curRev := s.store.currentRev
		for w, eb := range wb {
			if newVictim != nil && newVictim[w] != nil {
				// couldn't send watch response; stays victim
				continue
			}
			w.victim = false
			if eb.moreRev != 0 {
				w.minRev = eb.moreRev
			}
			if w.minRev <= curRev {
				s.unsynced.add(w)
			} else {
				slowWatcherGauge.Dec()
				s.synced.add(w)
			}
		}
		s.store.revMu.RUnlock()
		s.mu.Unlock()
	}

	if len(newVictim) > 0 {
		s.mu.Lock()
		s.victims = append(s.victims, newVictim)
		s.mu.Unlock()
	}

	return moved
}

// syncWatchers syncs unsynced watchers by:
// 1. choose a set of watchers from the unsynced watcher group
// 2. iterate over the set to get the minimum revision and remove compacted watchers
// 3. use minimum revision to get all key-value pairs and send those events to watchers
// 4. remove synced watchers in set from unsynced group and move to synced group
func (s *watchableStore) syncWatchers() int {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.unsynced.size() == 0 {
		return 0
	}

	s.store.revMu.RLock()
	defer s.store.revMu.RUnlock()

	// in order to find key-value pairs from unsynced watchers, we need to
	// find min revision index, and these revisions can be used to
	// query the backend store of key-value pairs
	curRev := s.store.currentRev
	compactionRev := s.store.compactMainRev

	wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
	minBytes, maxBytes := newRevBytes(), newRevBytes()
	revToBytes(revision{main: minRev}, minBytes)
	revToBytes(revision{main: curRev + 1}, maxBytes)

	// UnsafeRange returns keys and values. In boltdb, keys are revisions and
	// values are the actual key-value pairs in the backend.
	tx := s.store.b.ReadTx()
	tx.Lock()
	revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
	evs := kvsToEvents(wg, revs, vs)
	tx.Unlock()

	var victims watcherBatch
	wb := newWatcherBatch(wg, evs)
	for w := range wg.watchers {
		w.minRev = curRev + 1

		eb, ok := wb[w]
		if !ok {
			// bring un-notified watcher to synced
			s.synced.add(w)
			s.unsynced.delete(w)
			continue
		}

		if eb.moreRev != 0 {
			w.minRev = eb.moreRev
		}

		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
			pendingEventsGauge.Add(float64(len(eb.evs)))
		} else {
			if victims == nil {
				victims = make(watcherBatch)
			}
			w.victim = true
		}

		if w.victim {
			victims[w] = eb
		} else {
			if eb.moreRev != 0 {
				// stay unsynced; more to read
				continue
			}
			s.synced.add(w)
		}
		s.unsynced.delete(w)
	}
	s.addVictim(victims)

	vsz := 0
	for _, v := range s.victims {
		vsz += len(v)
	}
	slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))

	return s.unsynced.size()
}

// kvsToEvents gets all events for the watchers from all key-value pairs
func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
	for i, v := range vals {
		var kv mvccpb.KeyValue
		if err := kv.Unmarshal(v); err != nil {
			plog.Panicf("cannot unmarshal event: %v", err)
		}

		if !wg.contains(string(kv.Key)) {
			continue
		}

		ty := mvccpb.PUT
		if isTombstone(revs[i]) {
			ty = mvccpb.DELETE
			// patch in mod revision so watchers won't skip
			kv.ModRevision = bytesToRev(revs[i]).main
		}
		evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
	}
	return evs
}

// notify sends the given events, which happened at the given rev, to the
// synced watchers watching the affected keys.
func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
	var victim watcherBatch
	for w, eb := range newWatcherBatch(&s.synced, evs) {
		if eb.revs != 1 {
			plog.Panicf("unexpected multiple revisions in notification")
		}
		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
			pendingEventsGauge.Add(float64(len(eb.evs)))
		} else {
			// move slow watcher to victims
			w.minRev = rev + 1
			if victim == nil {
				victim = make(watcherBatch)
			}
			w.victim = true
			victim[w] = eb
			s.synced.delete(w)
			slowWatcherGauge.Inc()
		}
	}
	s.addVictim(victim)
}
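
// addVictim queues a batch of blocked watchers and signals syncVictimsLoop
// to retry delivery.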
func (s *watchableStore) addVictim(victim watcherBatch) {
	if victim == nil {
		return
	}
	s.victims = append(s.victims, victim)
	select {
	case s.victimc <- struct{}{}:
	default:
	}
}

func (s *watchableStore) rev() int64 { return s.store.Rev() }
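
// progress sends an empty WatchResponse carrying the current revision to w,
// but only if w is still in the synced group.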
func (s *watchableStore) progress(w *watcher) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if _, ok := s.synced.watchers[w]; ok {
		w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
		// If the ch is full, this watcher is receiving events.
		// We do not need to send progress at all.
	}
}

type watcher struct {
	// the watcher key
	key []byte
	// end indicates the end of the range to watch.
	// If end is set, the watcher is on a range.
	end []byte

	// victim is set when ch is blocked and undergoing victim processing
	victim bool

	// compacted is set when the watcher is removed because of compaction
	compacted bool

	// minRev is the minimum revision update the watcher will accept
	minRev int64
	id     WatchID

	fcs []FilterFunc
	// a chan to send out the watch response.
	// The chan might be shared with other watchers.
	ch chan<- WatchResponse
}
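
// send applies the watcher's filters to wr and then attempts a non-blocking
// send on the watcher channel; it reports false if the channel is full.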
func (w *watcher) send(wr WatchResponse) bool {
	progressEvent := len(wr.Events) == 0

	if len(w.fcs) != 0 {
		ne := make([]mvccpb.Event, 0, len(wr.Events))
		for i := range wr.Events {
			filtered := false
			for _, filter := range w.fcs {
				if filter(wr.Events[i]) {
					filtered = true
					break
				}
			}
			if !filtered {
				ne = append(ne, wr.Events[i])
			}
		}
		wr.Events = ne
	}

	// if all events are filtered out, we should send nothing.
	if !progressEvent && len(wr.Events) == 0 {
		return true
	}
	select {
	case w.ch <- wr:
		return true
	default:
		return false
	}
}
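
// Illustrative sketch only (not part of the original file): how a caller
// might exercise the unexported watch API above. The backend and lessor
// values (b, le) are assumed to come from the surrounding test or server
// setup; a nil ConsistentIndexGetter is what the package's own tests pass.
//
//	s := newWatchableStore(b, le, nil)
//	defer s.Close()
//
//	ch := make(chan WatchResponse, chanBufLen)
//	// startRev 0 means "watch from the next revision", so the watcher
//	// starts in the synced group.
//	wa, cancel := s.watch([]byte("foo"), nil, 0, WatchID(1), ch)
//	defer cancel()
//
//	wr := <-ch // delivered by notify() on the next write to "foo"
//	_, _ = wa, wr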