lease.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grpcproxy

import (
    "context"
    "io"
    "sync"
    "sync/atomic"
    "time"

    "go.etcd.io/etcd/clientv3"
    "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
    pb "go.etcd.io/etcd/etcdserver/etcdserverpb"

    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"
)

type leaseProxy struct {
    // leaseClient handles requests from LeaseGrant() that require a lease ID.
    leaseClient pb.LeaseClient

    lessor clientv3.Lease

    ctx context.Context

    leader *leader

    // mu protects adding outstanding leaseProxyStreams through wg.
    mu sync.RWMutex
    // wg waits until all outstanding leaseProxyStreams quit.
    wg sync.WaitGroup
}
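
// NewLeaseProxy creates a LeaseServer that forwards lease RPCs to the etcd
// cluster behind client c. The returned channel is closed once the proxy has
// shut down and all outstanding keepalive streams have exited.
//
// Typical registration (a sketch; assumes a *clientv3.Client `client` and a
// *grpc.Server `srv` are set up elsewhere):
//
//	leaseServer, leaseDonec := grpcproxy.NewLeaseProxy(client)
//	pb.RegisterLeaseServer(srv, leaseServer)
//	<-leaseDonec // closed after the proxy has fully shut down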
func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) {
    cctx, cancel := context.WithCancel(c.Ctx())
    lp := &leaseProxy{
        leaseClient: pb.NewLeaseClient(c.ActiveConnection()),
        lessor:      c.Lease,
        ctx:         cctx,
        leader:      newLeader(c.Ctx(), c.Watcher),
    }
    ch := make(chan struct{})
    go func() {
        defer close(ch)
        <-lp.leader.stopNotify()
        lp.mu.Lock()
        select {
        case <-lp.ctx.Done():
        case <-lp.leader.disconnectNotify():
            cancel()
        }
        <-lp.ctx.Done()
        lp.mu.Unlock()
        lp.wg.Wait()
    }()
    return lp, ch
}

func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
    rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.FailFast(false))
    if err != nil {
        return nil, err
    }
    lp.leader.gotLeader()
    return rp, nil
}

func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
    r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID))
    if err != nil {
        return nil, err
    }
    lp.leader.gotLeader()
    return (*pb.LeaseRevokeResponse)(r), nil
}

func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
    var (
        r   *clientv3.LeaseTimeToLiveResponse
        err error
    )
    if rr.Keys {
        r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys())
    } else {
        r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID))
    }
    if err != nil {
        return nil, err
    }
    rp := &pb.LeaseTimeToLiveResponse{
        Header:     r.ResponseHeader,
        ID:         int64(r.ID),
        TTL:        r.TTL,
        GrantedTTL: r.GrantedTTL,
        Keys:       r.Keys,
    }
    return rp, err
}

func (lp *leaseProxy) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
    r, err := lp.lessor.Leases(ctx)
    if err != nil {
        return nil, err
    }
    leases := make([]*pb.LeaseStatus, len(r.Leases))
    for i := range r.Leases {
        leases[i] = &pb.LeaseStatus{ID: int64(r.Leases[i].ID)}
    }
    rp := &pb.LeaseLeasesResponse{
        Header: r.ResponseHeader,
        Leases: leases,
    }
    return rp, err
}
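
// LeaseKeepAlive proxies a bidirectional keepalive stream: requests received
// from the client are multiplexed onto per-lease keepalive loops against the
// etcd cluster, and responses are forwarded back on the same stream. The
// stream is torn down if the leader is lost, the client disconnects, or the
// proxy itself shuts down.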
func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
    lp.mu.Lock()
    select {
    case <-lp.ctx.Done():
        lp.mu.Unlock()
        return lp.ctx.Err()
    default:
        lp.wg.Add(1)
    }
    lp.mu.Unlock()

    ctx, cancel := context.WithCancel(stream.Context())
    lps := leaseProxyStream{
        stream:          stream,
        lessor:          lp.lessor,
        keepAliveLeases: make(map[int64]*atomicCounter),
        respc:           make(chan *pb.LeaseKeepAliveResponse),
        ctx:             ctx,
        cancel:          cancel,
    }

    errc := make(chan error, 2)

    var lostLeaderC <-chan struct{}
    if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
        v := md[rpctypes.MetadataRequireLeaderKey]
        if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
            lostLeaderC = lp.leader.lostNotify()
            // if leader is known to be lost at creation time, avoid
            // letting events through at all
            select {
            case <-lostLeaderC:
                lp.wg.Done()
                return rpctypes.ErrNoLeader
            default:
            }
        }
    }

    stopc := make(chan struct{}, 3)
    go func() {
        defer func() { stopc <- struct{}{} }()
        if err := lps.recvLoop(); err != nil {
            errc <- err
        }
    }()
    go func() {
        defer func() { stopc <- struct{}{} }()
        if err := lps.sendLoop(); err != nil {
            errc <- err
        }
    }()

    // tears down the LeaseKeepAlive stream if the leader goes down or the entire leaseProxy is terminated.
    go func() {
        defer func() { stopc <- struct{}{} }()
        select {
        case <-lostLeaderC:
        case <-ctx.Done():
        case <-lp.ctx.Done():
        }
    }()

    var err error
    select {
    case <-stopc:
        stopc <- struct{}{}
    case err = <-errc:
    }
    cancel()

    // recv/send may only shut down after this function exits;
    // this goroutine notifies the lease proxy that the stream is through
    go func() {
        <-stopc
        <-stopc
        <-stopc
        lps.close()
        close(errc)
        lp.wg.Done()
    }()

    select {
    case <-lostLeaderC:
        return rpctypes.ErrNoLeader
    case <-lp.leader.disconnectNotify():
        return status.Error(codes.Canceled, "the client connection is closing")
    default:
        if err != nil {
            return err
        }
        return ctx.Err()
    }
}

type leaseProxyStream struct {
    stream pb.Lease_LeaseKeepAliveServer
    lessor clientv3.Lease
    // wg tracks keepAliveLoop goroutines
    wg sync.WaitGroup
    // mu protects keepAliveLeases
    mu sync.RWMutex
    // keepAliveLeases tracks, per lease, how many outstanding keepalive requests still need responses.
    keepAliveLeases map[int64]*atomicCounter
    // respc receives lease keepalive responses from the etcd backend
    respc chan *pb.LeaseKeepAliveResponse

    ctx    context.Context
    cancel context.CancelFunc
}
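
// recvLoop reads keepalive requests from the client stream. The first request
// seen for a lease ID starts a keepAliveLoop for that lease; subsequent
// requests only bump the outstanding-response counter.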
func (lps *leaseProxyStream) recvLoop() error {
    for {
        rr, err := lps.stream.Recv()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err
        }
        lps.mu.Lock()
        neededResps, ok := lps.keepAliveLeases[rr.ID]
        if !ok {
            neededResps = &atomicCounter{}
            lps.keepAliveLeases[rr.ID] = neededResps
            lps.wg.Add(1)
            go func() {
                defer lps.wg.Done()
                if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil {
                    lps.cancel()
                }
            }()
        }
        neededResps.add(1)
        lps.mu.Unlock()
    }
}
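
// keepAliveLoop runs a client keepalive for a single lease and forwards each
// response back to the client stream. It exits once no new keepalive request
// for the lease arrives within one TTL, or when the backend keepalive channel
// is closed.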
func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error {
    cctx, ccancel := context.WithCancel(lps.ctx)
    defer ccancel()
    respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID))
    if err != nil {
        return err
    }
    // ticker expires when the loop hasn't received a keepalive within the TTL
    var ticker <-chan time.Time
    for {
        select {
        case <-ticker:
            lps.mu.Lock()
            // if there are outstanding keepAlive reqs at the moment the ticker fires,
            // don't close keepAliveLoop(); let it continue to process the KeepAlive reqs.
            if neededResps.get() > 0 {
                lps.mu.Unlock()
                ticker = nil
                continue
            }
            delete(lps.keepAliveLeases, leaseID)
            lps.mu.Unlock()
            return nil
        case rp, ok := <-respc:
            if !ok {
                lps.mu.Lock()
                delete(lps.keepAliveLeases, leaseID)
                lps.mu.Unlock()
                if neededResps.get() == 0 {
                    return nil
                }
                ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID))
                if err != nil {
                    return err
                }
                r := &pb.LeaseKeepAliveResponse{
                    Header: ttlResp.ResponseHeader,
                    ID:     int64(ttlResp.ID),
                    TTL:    ttlResp.TTL,
                }
                for neededResps.get() > 0 {
                    select {
                    case lps.respc <- r:
                        neededResps.add(-1)
                    case <-lps.ctx.Done():
                        return nil
                    }
                }
                return nil
            }
            if neededResps.get() == 0 {
                continue
            }
            ticker = time.After(time.Duration(rp.TTL) * time.Second)
            r := &pb.LeaseKeepAliveResponse{
                Header: rp.ResponseHeader,
                ID:     int64(rp.ID),
                TTL:    rp.TTL,
            }
            lps.replyToClient(r, neededResps)
        }
    }
}
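
// replyToClient pushes a keepalive response onto respc for each outstanding
// request, giving up after 500ms or once the stream context is canceled.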
func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) {
    timer := time.After(500 * time.Millisecond)
    for neededResps.get() > 0 {
        select {
        case lps.respc <- r:
            neededResps.add(-1)
        case <-timer:
            return
        case <-lps.ctx.Done():
            return
        }
    }
}
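
// sendLoop drains respc and forwards each keepalive response to the client
// stream until respc is closed or the stream context is canceled.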
func (lps *leaseProxyStream) sendLoop() error {
    for {
        select {
        case lrp, ok := <-lps.respc:
            if !ok {
                return nil
            }
            if err := lps.stream.Send(lrp); err != nil {
                return err
            }
        case <-lps.ctx.Done():
            return lps.ctx.Err()
        }
    }
}

func (lps *leaseProxyStream) close() {
    lps.cancel()
    lps.wg.Wait()
    // only close the respc channel once all keepAliveLoop() goroutines have finished;
    // this ensures those goroutines don't send a response to a closed channel
    close(lps.respc)
}
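
// atomicCounter is a simple goroutine-safe counter; it tracks how many
// keepalive responses are still owed for a lease.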
type atomicCounter struct {
    counter int64
}

func (ac *atomicCounter) add(delta int64) {
    atomic.AddInt64(&ac.counter, delta)
}

func (ac *atomicCounter) get() int64 {
    return atomic.LoadInt64(&ac.counter)
}