lease.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grpcproxy

import (
	"io"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

type leaseProxy struct {
	// leaseClient handles requests from LeaseGrant() that require a lease ID.
	leaseClient pb.LeaseClient

	lessor clientv3.Lease

	ctx context.Context

	leader *leader

	// mu protects adding outstanding leaseProxyStream through wg.
	mu sync.RWMutex

	// wg waits until all outstanding leaseProxyStream quit.
	wg sync.WaitGroup
}
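
// NewLeaseProxy wraps a clientv3.Client in a pb.LeaseServer that forwards
// lease RPCs to the etcd backend. The returned channel is closed once the
// leader watcher stops and every outstanding keepalive stream has exited.
//
// A minimal usage sketch (assuming an already-configured clientv3.Client c
// and grpc.Server srv; those names are illustrative, not part of this file):
//
//	leaseServer, leasec := NewLeaseProxy(c)
//	pb.RegisterLeaseServer(srv, leaseServer)
//	// ... serve ...
//	<-leasec // the proxy has fully shut down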
func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) {
	cctx, cancel := context.WithCancel(c.Ctx())
	lp := &leaseProxy{
		leaseClient: pb.NewLeaseClient(c.ActiveConnection()),
		lessor:      c.Lease,
		ctx:         cctx,
		leader:      newLeader(c.Ctx(), c.Watcher),
	}
	ch := make(chan struct{})
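	// Watch the leader: once it stops, cancel the proxy context if the
	// connection was lost, then wait for all keepalive streams to drain
	// before closing ch to signal full shutdown.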
	go func() {
		defer close(ch)
		<-lp.leader.stopNotify()
		lp.mu.Lock()
		select {
		case <-lp.ctx.Done():
		case <-lp.leader.disconnectNotify():
			cancel()
		}
		<-lp.ctx.Done()
		lp.mu.Unlock()
		lp.wg.Wait()
	}()
	return lp, ch
}

func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	rp, err := lp.leaseClient.LeaseGrant(ctx, cr)
	if err != nil {
		return nil, err
	}
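	// a successful grant means the cluster currently has a leader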
	lp.leader.gotLeader()
	return rp, nil
}

func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID))
	if err != nil {
		return nil, err
	}
	lp.leader.gotLeader()
	return (*pb.LeaseRevokeResponse)(r), nil
}

func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	var (
		r   *clientv3.LeaseTimeToLiveResponse
		err error
	)
	if rr.Keys {
		r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys())
	} else {
		r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID))
	}
	if err != nil {
		return nil, err
	}
	rp := &pb.LeaseTimeToLiveResponse{
		Header:     r.ResponseHeader,
		ID:         int64(r.ID),
		TTL:        r.TTL,
		GrantedTTL: r.GrantedTTL,
		Keys:       r.Keys,
	}
	return rp, nil
}
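
// LeaseKeepAlive proxies a bidirectional keepalive stream. It runs three
// goroutines per stream: one forwarding client requests (recvLoop), one
// forwarding backend responses (sendLoop), and one watching for leader loss
// or proxy shutdown; the stream is torn down as soon as any of them stops.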
func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
	lp.mu.Lock()
	select {
	case <-lp.ctx.Done():
		lp.mu.Unlock()
		return lp.ctx.Err()
	default:
		lp.wg.Add(1)
	}
	lp.mu.Unlock()
	ctx, cancel := context.WithCancel(stream.Context())
	lps := leaseProxyStream{
		stream:          stream,
		lessor:          lp.lessor,
		keepAliveLeases: make(map[int64]*atomicCounter),
		respc:           make(chan *pb.LeaseKeepAliveResponse),
		ctx:             ctx,
		cancel:          cancel,
	}
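	// errc is buffered so both the recv and send loops can report an error
	// without blocking even if nobody is listening anymore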
	errc := make(chan error, 2)
	var lostLeaderC <-chan struct{}
	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
		v := md[rpctypes.MetadataRequireLeaderKey]
		if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
			lostLeaderC = lp.leader.lostNotify()
			// if leader is known to be lost at creation time, avoid
			// letting events through at all
			select {
			case <-lostLeaderC:
				lp.wg.Done()
				return rpctypes.ErrNoLeader
			default:
			}
		}
	}
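	// stopc carries exactly one message from each of the three worker
	// goroutines below, so it is buffered to let all of them exit freely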
	stopc := make(chan struct{}, 3)
	go func() {
		defer func() { stopc <- struct{}{} }()
		if err := lps.recvLoop(); err != nil {
			errc <- err
		}
	}()
	go func() {
		defer func() { stopc <- struct{}{} }()
		if err := lps.sendLoop(); err != nil {
			errc <- err
		}
	}()
	// tears down the LeaseKeepAlive stream if the leader goes down or the
	// entire leaseProxy is terminated
	go func() {
		defer func() { stopc <- struct{}{} }()
		select {
		case <-lostLeaderC:
		case <-ctx.Done():
		case <-lp.ctx.Done():
		}
	}()
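	// wait for the first worker to stop or either loop to fail; the value
	// received from stopc is put back so the cleanup goroutine below can
	// still collect one message per worker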
	var err error
	select {
	case <-stopc:
		stopc <- struct{}{}
	case err = <-errc:
	}
	cancel()
	// the recv/send loops may only shut down after this function returns;
	// this goroutine waits for all three workers, then notifies the lease
	// proxy that the stream is through
	go func() {
		<-stopc
		<-stopc
		<-stopc
		lps.close()
		close(errc)
		lp.wg.Done()
	}()
	select {
	case <-lostLeaderC:
		return rpctypes.ErrNoLeader
	case <-lp.leader.disconnectNotify():
		return grpc.ErrClientConnClosing
	default:
		if err != nil {
			return err
		}
		return ctx.Err()
	}
}

type leaseProxyStream struct {
	stream pb.Lease_LeaseKeepAliveServer
	lessor clientv3.Lease
	// wg tracks keepAliveLoop goroutines
	wg sync.WaitGroup
	// mu protects keepAliveLeases
	mu sync.RWMutex
	// keepAliveLeases tracks, per lease, how many outstanding keepalive
	// requests are still waiting for a response
	keepAliveLeases map[int64]*atomicCounter
	// respc receives lease keepalive responses from the etcd backend
	respc chan *pb.LeaseKeepAliveResponse

	ctx    context.Context
	cancel context.CancelFunc
}
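
// recvLoop reads keepalive requests from the client stream, counting one
// needed response per request and spawning a keepAliveLoop for the first
// request seen on each lease ID.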
func (lps *leaseProxyStream) recvLoop() error {
	for {
		rr, err := lps.stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		lps.mu.Lock()
		neededResps, ok := lps.keepAliveLeases[rr.ID]
		if !ok {
			neededResps = &atomicCounter{}
			lps.keepAliveLeases[rr.ID] = neededResps
			lps.wg.Add(1)
			go func() {
				defer lps.wg.Done()
				if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil {
					lps.cancel()
				}
			}()
		}
		neededResps.add(1)
		lps.mu.Unlock()
	}
}
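
// keepAliveLoop relays keepalive responses for a single lease. It retires
// itself if no new client request arrives within one TTL of the last
// response, and falls back to a TimeToLive query to answer any requests
// still pending when the backend keepalive channel closes.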
func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error {
	cctx, ccancel := context.WithCancel(lps.ctx)
	defer ccancel()
	respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID))
	if err != nil {
		return err
	}
	// ticker fires when the loop hasn't received a keepalive request within the TTL
	var ticker <-chan time.Time
	for {
		select {
		case <-ticker:
			lps.mu.Lock()
			// if there are outstanding keepalive requests when the ticker
			// fires, don't stop the loop; keep processing them
			if neededResps.get() > 0 {
				lps.mu.Unlock()
				ticker = nil
				continue
			}
			delete(lps.keepAliveLeases, leaseID)
			lps.mu.Unlock()
			return nil
		case rp, ok := <-respc:
			if !ok {
				lps.mu.Lock()
				delete(lps.keepAliveLeases, leaseID)
				lps.mu.Unlock()
				if neededResps.get() == 0 {
					return nil
				}
				ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID))
				if err != nil {
					return err
				}
				r := &pb.LeaseKeepAliveResponse{
					Header: ttlResp.ResponseHeader,
					ID:     int64(ttlResp.ID),
					TTL:    ttlResp.TTL,
				}
				for neededResps.get() > 0 {
					select {
					case lps.respc <- r:
						neededResps.add(-1)
					case <-lps.ctx.Done():
						return nil
					}
				}
				return nil
			}
			if neededResps.get() == 0 {
				continue
			}
			ticker = time.After(time.Duration(rp.TTL) * time.Second)
			r := &pb.LeaseKeepAliveResponse{
				Header: rp.ResponseHeader,
				ID:     int64(rp.ID),
				TTL:    rp.TTL,
			}
			lps.replyToClient(r, neededResps)
		}
	}
}
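
// replyToClient hands r to the send loop once per pending request, giving up
// after 500ms so a stalled client can't wedge the keepalive loop.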
func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) {
	timer := time.After(500 * time.Millisecond)
	for neededResps.get() > 0 {
		select {
		case lps.respc <- r:
			neededResps.add(-1)
		case <-timer:
			return
		case <-lps.ctx.Done():
			return
		}
	}
}
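
// sendLoop forwards keepalive responses from respc to the client stream
// until the stream context is done or respc is closed by close().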
func (lps *leaseProxyStream) sendLoop() error {
	for {
		select {
		case lrp, ok := <-lps.respc:
			if !ok {
				return nil
			}
			if err := lps.stream.Send(lrp); err != nil {
				return err
			}
		case <-lps.ctx.Done():
			return lps.ctx.Err()
		}
	}
}

func (lps *leaseProxyStream) close() {
	lps.cancel()
	lps.wg.Wait()
	// only close the respc channel once all keepAliveLoop() goroutines have
	// finished; this ensures they never send on a closed channel
	close(lps.respc)
}
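
// atomicCounter is a simple goroutine-safe counter used to track how many
// keepalive responses are still owed on a lease.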
type atomicCounter struct {
	counter int64
}

func (ac *atomicCounter) add(delta int64) {
	atomic.AddInt64(&ac.counter, delta)
}

func (ac *atomicCounter) get() int64 {
	return atomic.LoadInt64(&ac.counter)
}