watch.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/coreos/etcd/auth"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

type watchServer struct {
	clusterID int64
	memberID  int64
	raftTimer etcdserver.RaftTimer
	watchable mvcc.WatchableKV
	ag        AuthGetter
}
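
// NewWatchServer returns a watch server backed by the given etcd server,
// which supplies the cluster/member IDs, raft timer, watchable store, and
// auth getter used by each watch stream.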
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	return &watchServer{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		watchable: s.Watchable(),
		ag:        s,
	}
}

var (
	// External tests can read this with GetProgressReportInterval()
	// and change it to a small value via SetProgressReportInterval()
	// to finish quickly.
	progressReportInterval   = 10 * time.Minute
	progressReportIntervalMu sync.RWMutex
)

func GetProgressReportInterval() time.Duration {
	progressReportIntervalMu.RLock()
	defer progressReportIntervalMu.RUnlock()
	return progressReportInterval
}

func SetProgressReportInterval(newTimeout time.Duration) {
	progressReportIntervalMu.Lock()
	defer progressReportIntervalMu.Unlock()
	progressReportInterval = newTimeout
}
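
// For example, an external test might shrink the interval so progress
// notifications arrive quickly, restoring the previous value when done
// (the 100ms value here is only illustrative):
//
//	old := v3rpc.GetProgressReportInterval()
//	v3rpc.SetProgressReportInterval(100 * time.Millisecond)
//	defer v3rpc.SetProgressReportInterval(old)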

const (
	// We send ctrl responses inside the read loop. We do not want
	// sends to block reads, but we still want the ctrl responses we
	// send to be serialized. Thus we use a buffered chan to solve
	// the problem. A small buffer should be OK for most cases, since
	// we expect ctrl requests to be infrequent.
	ctrlStreamBufLen = 16
)

// serverWatchStream is an etcd server side stream. It receives requests
// from the client-side gRPC stream, receives watch events from mvcc.WatchStream,
// and creates responses that are forwarded to the gRPC stream.
// It also forwards control messages like watch created and canceled.
type serverWatchStream struct {
	clusterID int64
	memberID  int64
	raftTimer etcdserver.RaftTimer

	watchable mvcc.WatchableKV

	gRPCStream  pb.Watch_WatchServer
	watchStream mvcc.WatchStream
	ctrlStream  chan *pb.WatchResponse

	// mu protects progress, prevKV
	mu sync.Mutex
	// progress tracks the watchID that stream might need to send
	// progress to.
	// TODO: combine progress and prevKV into a single struct?
	progress map[mvcc.WatchID]bool
	prevKV   map[mvcc.WatchID]bool

	// closec indicates the stream is closed.
	closec chan struct{}

	// wg waits for the send loop to complete
	wg sync.WaitGroup

	ag AuthGetter
}
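
// Watch serves one bidirectional watch RPC: it builds a serverWatchStream
// for the incoming gRPC stream, runs the send loop in its own goroutine,
// and runs the receive loop until the stream fails or its context is done.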
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
	sws := serverWatchStream{
		clusterID:  ws.clusterID,
		memberID:   ws.memberID,
		raftTimer:  ws.raftTimer,
		watchable:  ws.watchable,
		gRPCStream: stream,

		watchStream: ws.watchable.NewWatchStream(),
		// chan for sending control responses like watcher created and canceled.
		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
		progress:   make(map[mvcc.WatchID]bool),
		prevKV:     make(map[mvcc.WatchID]bool),
		closec:     make(chan struct{}),

		ag: ws.ag,
	}

	sws.wg.Add(1)
	go func() {
		sws.sendLoop()
		sws.wg.Done()
	}()

	errc := make(chan error, 1)
	// Ideally recvLoop would also use sws.wg to signal its completion
	// but when stream.Context().Done() is closed, the stream's recv
	// may continue to block since it uses a different context, leading to
	// deadlock when calling sws.close().
	go func() {
		if rerr := sws.recvLoop(); rerr != nil {
			plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
			errc <- rerr
		}
	}()

	select {
	case err = <-errc:
		close(sws.ctrlStream)
	case <-stream.Context().Done():
		err = stream.Context().Err()
		// the only server-side cancellation is noleader for now.
		if err == context.Canceled {
			err = rpctypes.ErrGRPCNoLeader
		}
	}
	sws.close()
	return err
}
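
// isWatchPermitted reports whether the stream's auth info permits a range
// read over the key range named in the watch create request.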
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
	if err != nil {
		return false
	}
	if authInfo == nil {
		// if auth is enabled, IsRangePermitted() can cause an error
		authInfo = &auth.AuthInfo{}
	}
	return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
}
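
// recvLoop reads watch requests from the gRPC stream and dispatches them:
// create requests register a watcher on the mvcc watch stream and queue a
// "created" control response; cancel requests tear the watcher down and
// queue a "canceled" control response. It returns when the stream ends,
// errors, or a watch is denied by auth.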
func (sws *serverWatchStream) recvLoop() error {
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			if uv.CreateRequest == nil {
				break
			}

			creq := uv.CreateRequest
			if len(creq.Key) == 0 {
				// \x00 is the smallest key
				creq.Key = []byte{0}
			}
			if len(creq.RangeEnd) == 0 {
				// force nil since watchstream.Watch distinguishes
				// between nil and []byte{} for single key / >=
				creq.RangeEnd = nil
			}
			if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
				// support >= key queries
				creq.RangeEnd = []byte{}
			}

			if !sws.isWatchPermitted(creq) {
				wr := &pb.WatchResponse{
					Header:       sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId:      creq.WatchId,
					Canceled:     true,
					Created:      true,
					CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
				}

				select {
				case sws.ctrlStream <- wr:
				case <-sws.closec:
				}
				return nil
			}

			filters := FiltersFromRequest(creq)

			wsrev := sws.watchStream.Rev()
			rev := creq.StartRevision
			if rev == 0 {
				rev = wsrev + 1
			}
			id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...)
			if err == nil {
				sws.mu.Lock()
				if creq.ProgressNotify {
					sws.progress[id] = true
				}
				if creq.PrevKv {
					sws.prevKV[id] = true
				}
				sws.mu.Unlock()
			}
			wr := &pb.WatchResponse{
				Header:   sws.newResponseHeader(wsrev),
				WatchId:  int64(id),
				Created:  true,
				Canceled: err != nil,
			}
			if err != nil {
				wr.CancelReason = err.Error()
			}
			select {
			case sws.ctrlStream <- wr:
			case <-sws.closec:
				return nil
			}

		case *pb.WatchRequest_CancelRequest:
			if uv.CancelRequest != nil {
				id := uv.CancelRequest.WatchId
				err := sws.watchStream.Cancel(mvcc.WatchID(id))
				if err == nil {
					sws.ctrlStream <- &pb.WatchResponse{
						Header:   sws.newResponseHeader(sws.watchStream.Rev()),
						WatchId:  id,
						Canceled: true,
					}
					sws.mu.Lock()
					delete(sws.progress, mvcc.WatchID(id))
					delete(sws.prevKV, mvcc.WatchID(id))
					sws.mu.Unlock()
				}
			}
		default:
			// we probably should not shut down the entire stream when
			// we receive an invalid command.
			// so just do nothing instead.
			continue
		}
	}
}
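
// sendLoop forwards watch events and control responses to the gRPC stream.
// It buffers event responses for watchers whose "created" control response
// has not been sent yet, attaches previous key-values when requested, and
// requests progress notifications on a timer for watchers that asked for them.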
func (sws *serverWatchStream) sendLoop() {
	// watch ids that are currently active
	ids := make(map[mvcc.WatchID]struct{})
	// watch responses pending on a watch id creation message
	pending := make(map[mvcc.WatchID][]*pb.WatchResponse)

	interval := GetProgressReportInterval()
	progressTicker := time.NewTicker(interval)

	defer func() {
		progressTicker.Stop()
		// drain the chan to clean up pending events
		for ws := range sws.watchStream.Chan() {
			mvcc.ReportEventReceived(len(ws.Events))
		}
		for _, wrs := range pending {
			for _, ws := range wrs {
				mvcc.ReportEventReceived(len(ws.Events))
			}
		}
	}()

	for {
		select {
		case wresp, ok := <-sws.watchStream.Chan():
			if !ok {
				return
			}

			// TODO: evs is []mvccpb.Event type
			// either return []*mvccpb.Event from the mvcc package
			// or define protocol buffer with []mvccpb.Event.
			evs := wresp.Events
			events := make([]*mvccpb.Event, len(evs))
			sws.mu.Lock()
			needPrevKV := sws.prevKV[wresp.WatchID]
			sws.mu.Unlock()
			for i := range evs {
				events[i] = &evs[i]

				if needPrevKV {
					opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
					r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
					if err == nil && len(r.KVs) != 0 {
						events[i].PrevKv = &(r.KVs[0])
					}
				}
			}

			canceled := wresp.CompactRevision != 0
			wr := &pb.WatchResponse{
				Header:          sws.newResponseHeader(wresp.Revision),
				WatchId:         int64(wresp.WatchID),
				Events:          events,
				CompactRevision: wresp.CompactRevision,
				Canceled:        canceled,
			}

			if _, hasId := ids[wresp.WatchID]; !hasId {
				// buffer if id not yet announced
				wrs := append(pending[wresp.WatchID], wr)
				pending[wresp.WatchID] = wrs
				continue
			}

			mvcc.ReportEventReceived(len(evs))
			if err := sws.gRPCStream.Send(wr); err != nil {
				plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
				return
			}

			sws.mu.Lock()
			if len(evs) > 0 && sws.progress[wresp.WatchID] {
				// elide next progress update if sent a key update
				sws.progress[wresp.WatchID] = false
			}
			sws.mu.Unlock()

		case c, ok := <-sws.ctrlStream:
			if !ok {
				return
			}

			if err := sws.gRPCStream.Send(c); err != nil {
				plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
				return
			}

			// track id creation
			wid := mvcc.WatchID(c.WatchId)
			if c.Canceled {
				delete(ids, wid)
				continue
			}
			if c.Created {
				// flush buffered events
				ids[wid] = struct{}{}
				for _, v := range pending[wid] {
					mvcc.ReportEventReceived(len(v.Events))
					if err := sws.gRPCStream.Send(v); err != nil {
						plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
						return
					}
				}
				delete(pending, wid)
			}

		case <-progressTicker.C:
			sws.mu.Lock()
			for id, ok := range sws.progress {
				if ok {
					sws.watchStream.RequestProgress(id)
				}
				sws.progress[id] = true
			}
			sws.mu.Unlock()

		case <-sws.closec:
			return
		}
	}
}
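
// close shuts down the mvcc watch stream, signals the send loop via closec,
// and waits for the send loop to finish.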
func (sws *serverWatchStream) close() {
	sws.watchStream.Close()
	close(sws.closec)
	sws.wg.Wait()
}
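
// newResponseHeader builds a response header stamped with the cluster and
// member IDs, the given revision, and the current raft term.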
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
	return &pb.ResponseHeader{
		ClusterId: uint64(sws.clusterID),
		MemberId:  uint64(sws.memberID),
		Revision:  rev,
		RaftTerm:  sws.raftTimer.Term(),
	}
}
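
// An mvcc.FilterFunc returns true when an event should be filtered out, so
// filterNoDelete drops DELETE events and filterNoPut drops PUT events.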
func filterNoDelete(e mvccpb.Event) bool {
	return e.Type == mvccpb.DELETE
}

func filterNoPut(e mvccpb.Event) bool {
	return e.Type == mvccpb.PUT
}
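
// FiltersFromRequest translates the filter types named in a watch create
// request into the corresponding mvcc filter functions.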
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
	filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
	for _, ft := range creq.Filters {
		switch ft {
		case pb.WatchCreateRequest_NOPUT:
			filters = append(filters, filterNoPut)
		case pb.WatchCreateRequest_NODELETE:
			filters = append(filters, filterNoDelete)
		default:
		}
	}
	return filters
}