lease.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grpcproxy

import (
	"context"
	"io"
	"sync"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)
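
// leaseProxy implements pb.LeaseServer, forwarding lease RPCs to the etcd
// cluster through a clientv3 client.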
type leaseProxy struct {
	// leaseClient handles req from LeaseGrant() that requires a lease ID.
	leaseClient pb.LeaseClient

	lessor clientv3.Lease

	ctx context.Context

	leader *leader

	// mu protects adding outstanding leaseProxyStream through wg.
	mu sync.RWMutex
	// wg waits until all outstanding leaseProxyStream quit.
	wg sync.WaitGroup
}
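
// NewLeaseProxy returns a pb.LeaseServer that proxies lease requests through
// the given client, along with a channel that is closed once the proxy has
// shut down and all outstanding keepalive streams have finished.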
func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) {
	cctx, cancel := context.WithCancel(c.Ctx())
	lp := &leaseProxy{
		leaseClient: pb.NewLeaseClient(c.ActiveConnection()),
		lessor:      c.Lease,
		ctx:         cctx,
		leader:      newLeader(c.Ctx(), c.Watcher),
	}
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		<-lp.leader.stopNotify()
		lp.mu.Lock()
		select {
		case <-lp.ctx.Done():
		case <-lp.leader.disconnectNotify():
			cancel()
		}
		<-lp.ctx.Done()
		lp.mu.Unlock()
		lp.wg.Wait()
	}()
	return lp, ch
}
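
// LeaseGrant forwards the grant request to the backend lease client directly
// so that a request carrying an explicit lease ID is preserved.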
func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.FailFast(false))
	if err != nil {
		return nil, err
	}
	lp.leader.gotLeader()
	return rp, nil
}
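
// LeaseRevoke revokes the given lease through the client's lessor.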
func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID))
	if err != nil {
		return nil, err
	}
	lp.leader.gotLeader()
	return (*pb.LeaseRevokeResponse)(r), nil
}
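
// LeaseTimeToLive returns the remaining TTL of the lease, including its
// attached keys when requested.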
func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	var (
		r   *clientv3.LeaseTimeToLiveResponse
		err error
	)
	if rr.Keys {
		r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys())
	} else {
		r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID))
	}
	if err != nil {
		return nil, err
	}
	rp := &pb.LeaseTimeToLiveResponse{
		Header:     r.ResponseHeader,
		ID:         int64(r.ID),
		TTL:        r.TTL,
		GrantedTTL: r.GrantedTTL,
		Keys:       r.Keys,
	}
	return rp, err
}
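
// LeaseLeases lists all leases currently held by the cluster.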
func (lp *leaseProxy) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
	r, err := lp.lessor.Leases(ctx)
	if err != nil {
		return nil, err
	}
	leases := make([]*pb.LeaseStatus, len(r.Leases))
	for i := range r.Leases {
		leases[i] = &pb.LeaseStatus{ID: int64(r.Leases[i].ID)}
	}
	rp := &pb.LeaseLeasesResponse{
		Header: r.ResponseHeader,
		Leases: leases,
	}
	return rp, err
}
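
// LeaseKeepAlive proxies a bidirectional keepalive stream: recvLoop forwards
// client requests to the backend and sendLoop returns responses, until the
// stream, the leader connection, or the proxy itself is torn down.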
func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
	lp.mu.Lock()
	select {
	case <-lp.ctx.Done():
		lp.mu.Unlock()
		return lp.ctx.Err()
	default:
		lp.wg.Add(1)
	}
	lp.mu.Unlock()

	ctx, cancel := context.WithCancel(stream.Context())
	lps := leaseProxyStream{
		stream:          stream,
		lessor:          lp.lessor,
		keepAliveLeases: make(map[int64]*atomicCounter),
		respc:           make(chan *pb.LeaseKeepAliveResponse),
		ctx:             ctx,
		cancel:          cancel,
	}

	errc := make(chan error, 2)

	var lostLeaderC <-chan struct{}
	if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
		v := md[rpctypes.MetadataRequireLeaderKey]
		if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
			lostLeaderC = lp.leader.lostNotify()
			// if leader is known to be lost at creation time, avoid
			// letting events through at all
			select {
			case <-lostLeaderC:
				lp.wg.Done()
				return rpctypes.ErrNoLeader
			default:
			}
		}
	}

	stopc := make(chan struct{}, 3)
	go func() {
		defer func() { stopc <- struct{}{} }()
		if err := lps.recvLoop(); err != nil {
			errc <- err
		}
	}()
	go func() {
		defer func() { stopc <- struct{}{} }()
		if err := lps.sendLoop(); err != nil {
			errc <- err
		}
	}()

	// tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated.
	go func() {
		defer func() { stopc <- struct{}{} }()
		select {
		case <-lostLeaderC:
		case <-ctx.Done():
		case <-lp.ctx.Done():
		}
	}()

	var err error
	select {
	case <-stopc:
		stopc <- struct{}{}
	case err = <-errc:
	}
	cancel()

	// recv/send may only shut down after function exits;
	// this goroutine notifies lease proxy that the stream is through
	go func() {
		<-stopc
		<-stopc
		<-stopc
		lps.close()
		close(errc)
		lp.wg.Done()
	}()

	select {
	case <-lostLeaderC:
		return rpctypes.ErrNoLeader
	case <-lp.leader.disconnectNotify():
		return grpc.ErrClientConnClosing
	default:
		if err != nil {
			return err
		}
		return ctx.Err()
	}
}
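
// leaseProxyStream carries the per-stream state of one proxied LeaseKeepAlive call.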
type leaseProxyStream struct {
	stream pb.Lease_LeaseKeepAliveServer

	lessor clientv3.Lease
	// wg tracks keepAliveLoop goroutines
	wg sync.WaitGroup
	// mu protects keepAliveLeases
	mu sync.RWMutex
	// keepAliveLeases tracks, per lease, how many outstanding keepalive requests still need responses.
	keepAliveLeases map[int64]*atomicCounter
	// respc receives lease keepalive responses from etcd backend
	respc chan *pb.LeaseKeepAliveResponse

	ctx    context.Context
	cancel context.CancelFunc
}
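
// recvLoop reads keepalive requests from the client stream, starting one
// keepAliveLoop per lease ID and counting the responses each lease still owes.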
func (lps *leaseProxyStream) recvLoop() error {
	for {
		rr, err := lps.stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		lps.mu.Lock()
		neededResps, ok := lps.keepAliveLeases[rr.ID]
		if !ok {
			neededResps = &atomicCounter{}
			lps.keepAliveLeases[rr.ID] = neededResps
			lps.wg.Add(1)
			go func() {
				defer lps.wg.Done()
				if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil {
					lps.cancel()
				}
			}()
		}
		neededResps.add(1)
		lps.mu.Unlock()
	}
}
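
// keepAliveLoop relays backend keepalive responses for a single lease and
// exits once a full TTL passes with no outstanding requests for it.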
func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error {
	cctx, ccancel := context.WithCancel(lps.ctx)
	defer ccancel()
	respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID))
	if err != nil {
		return err
	}
	// ticker expires when loop hasn't received keepalive within TTL
	var ticker <-chan time.Time
	for {
		select {
		case <-ticker:
			lps.mu.Lock()
			// if there are outstanding keepAlive reqs at the moment of ticker firing,
			// don't close keepAliveLoop(); let it continue to process the KeepAlive reqs.
			if neededResps.get() > 0 {
				lps.mu.Unlock()
				ticker = nil
				continue
			}
			delete(lps.keepAliveLeases, leaseID)
			lps.mu.Unlock()
			return nil
		case rp, ok := <-respc:
			if !ok {
				lps.mu.Lock()
				delete(lps.keepAliveLeases, leaseID)
				lps.mu.Unlock()
				if neededResps.get() == 0 {
					return nil
				}
				ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID))
				if err != nil {
					return err
				}
				r := &pb.LeaseKeepAliveResponse{
					Header: ttlResp.ResponseHeader,
					ID:     int64(ttlResp.ID),
					TTL:    ttlResp.TTL,
				}
				for neededResps.get() > 0 {
					select {
					case lps.respc <- r:
						neededResps.add(-1)
					case <-lps.ctx.Done():
						return nil
					}
				}
				return nil
			}
			if neededResps.get() == 0 {
				continue
			}
			ticker = time.After(time.Duration(rp.TTL) * time.Second)
			r := &pb.LeaseKeepAliveResponse{
				Header: rp.ResponseHeader,
				ID:     int64(rp.ID),
				TTL:    rp.TTL,
			}
			lps.replyToClient(r, neededResps)
		}
	}
}
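
// replyToClient sends the response once per outstanding request, giving up
// after 500ms or when the stream context is canceled.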
func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) {
	timer := time.After(500 * time.Millisecond)
	for neededResps.get() > 0 {
		select {
		case lps.respc <- r:
			neededResps.add(-1)
		case <-timer:
			return
		case <-lps.ctx.Done():
			return
		}
	}
}
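
// sendLoop drains respc, forwarding each keepalive response to the client stream.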
func (lps *leaseProxyStream) sendLoop() error {
	for {
		select {
		case lrp, ok := <-lps.respc:
			if !ok {
				return nil
			}
			if err := lps.stream.Send(lrp); err != nil {
				return err
			}
		case <-lps.ctx.Done():
			return lps.ctx.Err()
		}
	}
}
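
// close cancels the stream context, waits for every keepAliveLoop goroutine,
// and then closes respc.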
func (lps *leaseProxyStream) close() {
	lps.cancel()
	lps.wg.Wait()
	// only close respc channel if all the keepAliveLoop() goroutines have finished
	// this ensures those goroutines don't send resp to a closed resp channel
	close(lps.respc)
}
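
// atomicCounter is a small int64 counter safe for concurrent use.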
type atomicCounter struct {
	counter int64
}

func (ac *atomicCounter) add(delta int64) {
	atomic.AddInt64(&ac.counter, delta)
}

func (ac *atomicCounter) get() int64 {
	return atomic.LoadInt64(&ac.counter)
}