  1. // Copyright 2016 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package clientv3
  15. import (
  16. "context"
  17. "sync"
  18. "time"
  19. "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
  20. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  21. "google.golang.org/grpc/metadata"
  22. )
type (
	// LeaseRevokeResponse wraps the protobuf message LeaseRevokeResponse.
	LeaseRevokeResponse pb.LeaseRevokeResponse

	// LeaseID is the 64-bit identifier of a lease issued by the etcd server.
	LeaseID int64
)
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	// ID is the lease ID for the granted lease.
	ID LeaseID
	// TTL is the server-chosen lease time-to-live in seconds.
	TTL int64
	// Error is the error message carried by the server response, if any.
	Error string
}
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	// ID is the lease that was renewed.
	ID LeaseID
	// TTL is the new time-to-live for the lease in seconds.
	TTL int64
}
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	// ID is the lease being queried.
	ID LeaseID `json:"id"`
	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
	TTL int64 `json:"ttl"`
	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`
	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}
// LeaseStatus represents a lease status.
type LeaseStatus struct {
	// ID is the lease identifier.
	ID LeaseID `json:"id"`
	// TODO: TTL int64
}
// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
type LeaseLeasesResponse struct {
	*pb.ResponseHeader
	// Leases lists the status of every lease known to the server.
	Leases []LeaseStatus `json:"leases"`
}
const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
	defaultTTL = 5 * time.Second
	// leaseResponseChSize is a small buffer to store unsent lease responses.
	leaseResponseChSize = 16
	// NoLease is a lease ID for the absence of a lease.
	NoLease LeaseID = 0
	// retryConnWait is how long to wait before retrying a request due to an error.
	retryConnWait = 500 * time.Millisecond
)
  72. // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
  73. //
  74. // This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
  75. type ErrKeepAliveHalted struct {
  76. Reason error
  77. }
  78. func (e ErrKeepAliveHalted) Error() string {
  79. s := "etcdclient: leases keep alive halted"
  80. if e.Reason != nil {
  81. s += ": " + e.Reason.Error()
  82. }
  83. return s
  84. }
// Lease is the client API for lease operations: granting and revoking
// leases, querying their remaining lifetime, and keeping them alive.
type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// Leases retrieves all leases.
	Leases(ctx context.Context) (*LeaseLeasesResponse, error)

	// KeepAlive keeps the given lease alive forever. If the keepalive response
	// posted to the channel is not consumed immediately, the lease client will
	// continue sending keep alive requests to the etcd server at least every
	// second until latest response is consumed.
	//
	// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
	// alive stream is interrupted in some way the client cannot handle itself;
	// given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
	// from this closed channel is nil.
	//
	// If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
	// no leader") or canceled by the caller (e.g. context.Canceled), the error
	// is returned. Otherwise, it retries.
	//
	// TODO(v4.0): post errors to last keep alive message before closing
	// (see https://github.com/coreos/etcd/pull/7866)
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. The response corresponds to the
	// first message from calling KeepAlive. If the response has a recoverable
	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
	//
	// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}
// lessor implements Lease on top of a gRPC LeaseClient, multiplexing
// keepalives for many leases over a single stream.
type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

	// stream and streamCancel track the currently open keepalive stream;
	// resetRecv replaces them when the stream is re-established.
	stream       pb.Lease_LeaseKeepAliveClient
	streamCancel context.CancelFunc

	// stopCtx/stopCancel terminate the background loops when Close is called.
	stopCtx    context.Context
	stopCancel context.CancelFunc

	// keepAlives maps each lease ID to its multiplexed keepalive state.
	keepAlives map[LeaseID]*keepAlive

	// firstKeepAliveTimeout is the timeout for the first keepalive request
	// before the actual TTL is known to the lease client
	firstKeepAliveTimeout time.Duration

	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
	firstKeepAliveOnce sync.Once
}
// keepAlive multiplexes a keepalive for a lease over multiple channels.
// chs[i] and ctxs[i] belong to the same KeepAlive caller and are kept
// index-aligned.
type keepAlive struct {
	chs  []chan<- *LeaseKeepAliveResponse
	ctxs []context.Context
	// deadline is the time the keep alive channels close if no response
	deadline time.Time
	// nextKeepAlive is when to send the next keep alive message
	nextKeepAlive time.Time
	// donec is closed on lease revoke, expiration, or cancel.
	donec chan struct{}
}
  149. func NewLease(c *Client) Lease {
  150. return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
  151. }
  152. func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
  153. l := &lessor{
  154. donec: make(chan struct{}),
  155. keepAlives: make(map[LeaseID]*keepAlive),
  156. remote: remote,
  157. firstKeepAliveTimeout: keepAliveTimeout,
  158. }
  159. if l.firstKeepAliveTimeout == time.Second {
  160. l.firstKeepAliveTimeout = defaultTTL
  161. }
  162. reqLeaderCtx := WithRequireLeader(context.Background())
  163. l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
  164. return l
  165. }
  166. func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
  167. r := &pb.LeaseGrantRequest{TTL: ttl}
  168. resp, err := l.remote.LeaseGrant(ctx, r)
  169. if err == nil {
  170. gresp := &LeaseGrantResponse{
  171. ResponseHeader: resp.GetHeader(),
  172. ID: LeaseID(resp.ID),
  173. TTL: resp.TTL,
  174. Error: resp.Error,
  175. }
  176. return gresp, nil
  177. }
  178. return nil, toErr(ctx, err)
  179. }
  180. func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
  181. r := &pb.LeaseRevokeRequest{ID: int64(id)}
  182. resp, err := l.remote.LeaseRevoke(ctx, r)
  183. if err == nil {
  184. return (*LeaseRevokeResponse)(resp), nil
  185. }
  186. return nil, toErr(ctx, err)
  187. }
  188. func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
  189. r := toLeaseTimeToLiveRequest(id, opts...)
  190. resp, err := l.remote.LeaseTimeToLive(ctx, r)
  191. if err == nil {
  192. gresp := &LeaseTimeToLiveResponse{
  193. ResponseHeader: resp.GetHeader(),
  194. ID: LeaseID(resp.ID),
  195. TTL: resp.TTL,
  196. GrantedTTL: resp.GrantedTTL,
  197. Keys: resp.Keys,
  198. }
  199. return gresp, nil
  200. }
  201. return nil, toErr(ctx, err)
  202. }
  203. func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
  204. resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{})
  205. if err == nil {
  206. leases := make([]LeaseStatus, len(resp.Leases))
  207. for i := range resp.Leases {
  208. leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
  209. }
  210. return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
  211. }
  212. return nil, toErr(ctx, err)
  213. }
// KeepAlive registers a new response channel for id and lazily starts the
// shared receive/deadline goroutines on the first call. See the Lease
// interface for the full contract.
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		err := l.loopErr
		l.mu.Unlock()
		// closed channel signals the halt to the caller alongside the error
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
		ka = &keepAlive{
			chs:           []chan<- *LeaseKeepAliveResponse{ch},
			ctxs:          []context.Context{ctx},
			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
			nextKeepAlive: time.Now(),
			donec:         make(chan struct{}),
		}
		l.keepAlives[id] = ka
	} else {
		// add channel and context to existing keep alive
		ka.ctxs = append(ka.ctxs, ctx)
		ka.chs = append(ka.chs, ch)
	}
	l.mu.Unlock()
	// watches ctx so this caller's channel is closed/removed on cancellation
	go l.keepAliveCtxCloser(id, ctx, ka.donec)
	// the first KeepAlive call starts the shared stream goroutines;
	// later calls reuse them
	l.firstKeepAliveOnce.Do(func() {
		go l.recvKeepAliveLoop()
		go l.deadlineLoop()
	})
	return ch, nil
}
  250. func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
  251. for {
  252. resp, err := l.keepAliveOnce(ctx, id)
  253. if err == nil {
  254. if resp.TTL <= 0 {
  255. err = rpctypes.ErrLeaseNotFound
  256. }
  257. return resp, err
  258. }
  259. if isHaltErr(ctx, err) {
  260. return nil, toErr(ctx, err)
  261. }
  262. }
  263. }
// Close stops the background loops and waits for the receive loop (if it
// ever started) to finish tearing down.
func (l *lessor) Close() error {
	l.stopCancel()
	// close for synchronous teardown if stream goroutines never launched;
	// if KeepAlive already consumed the Once, recvKeepAliveLoop's deferred
	// cleanup owns closing donec instead.
	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
	<-l.donec
	return nil
}
// keepAliveCtxCloser waits for one KeepAlive caller's ctx to be canceled and
// then detaches that caller's channel from the lease's keepAlive state.
// donec/l.donec firing first means the keepalive ended some other way and
// cleanup happened elsewhere.
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}
	// close channel and remove context if still associated with keep alive;
	// chs and ctxs are index-aligned, so both are spliced at the same i
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove if no more listeners
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}
  299. // closeRequireLeader scans keepAlives for ctxs that have require leader
  300. // and closes the associated channels.
  301. func (l *lessor) closeRequireLeader() {
  302. l.mu.Lock()
  303. defer l.mu.Unlock()
  304. for _, ka := range l.keepAlives {
  305. reqIdxs := 0
  306. // find all required leader channels, close, mark as nil
  307. for i, ctx := range ka.ctxs {
  308. md, ok := metadata.FromOutgoingContext(ctx)
  309. if !ok {
  310. continue
  311. }
  312. ks := md[rpctypes.MetadataRequireLeaderKey]
  313. if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
  314. continue
  315. }
  316. close(ka.chs[i])
  317. ka.chs[i] = nil
  318. reqIdxs++
  319. }
  320. if reqIdxs == 0 {
  321. continue
  322. }
  323. // remove all channels that required a leader from keepalive
  324. newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
  325. newCtxs := make([]context.Context, len(newChs))
  326. newIdx := 0
  327. for i := range ka.chs {
  328. if ka.chs[i] == nil {
  329. continue
  330. }
  331. newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
  332. newIdx++
  333. }
  334. ka.chs, ka.ctxs = newChs, newCtxs
  335. }
  336. }
  337. func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
  338. cctx, cancel := context.WithCancel(ctx)
  339. defer cancel()
  340. stream, err := l.remote.LeaseKeepAlive(cctx)
  341. if err != nil {
  342. return nil, toErr(ctx, err)
  343. }
  344. err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
  345. if err != nil {
  346. return nil, toErr(ctx, err)
  347. }
  348. resp, rerr := stream.Recv()
  349. if rerr != nil {
  350. return nil, toErr(ctx, rerr)
  351. }
  352. karesp := &LeaseKeepAliveResponse{
  353. ResponseHeader: resp.GetHeader(),
  354. ID: LeaseID(resp.ID),
  355. TTL: resp.TTL,
  356. }
  357. return karesp, nil
  358. }
// recvKeepAliveLoop receives keepalive responses until the lessor is stopped
// or hits an unrecoverable error; on exit it records the error, closes donec,
// and tears down every outstanding keepAlive.
func (l *lessor) recvKeepAliveLoop() (gerr error) {
	defer func() {
		l.mu.Lock()
		close(l.donec)
		l.loopErr = gerr
		for _, ka := range l.keepAlives {
			ka.close()
		}
		l.keepAlives = make(map[LeaseID]*keepAlive)
		l.mu.Unlock()
	}()
	for {
		stream, err := l.resetRecv()
		if err != nil {
			if canceledByCaller(l.stopCtx, err) {
				return err
			}
		} else {
			// drain the stream until it breaks
			for {
				resp, err := stream.Recv()
				if err != nil {
					if canceledByCaller(l.stopCtx, err) {
						return err
					}
					// no leader: require-leader waiters cannot make
					// progress, so release them now
					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
						l.closeRequireLeader()
					}
					break
				}
				l.recvKeepAlive(resp)
			}
		}
		// stream broke for a recoverable reason; pause, then reconnect
		select {
		case <-time.After(retryConnWait):
			continue
		case <-l.stopCtx.Done():
			return l.stopCtx.Err()
		}
	}
}
// resetRecv opens a new lease stream and starts sending keep alive requests.
// Any previously open stream is canceled before the new one is installed.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
	sctx, cancel := context.WithCancel(l.stopCtx)
	stream, err := l.remote.LeaseKeepAlive(sctx)
	if err != nil {
		// stream never opened; release its context immediately
		cancel()
		return nil, err
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	// cancel the previous stream, if any, before swapping in the new one
	if l.stream != nil && l.streamCancel != nil {
		l.streamCancel()
	}
	l.streamCancel = cancel
	l.stream = stream
	// each stream gets its own sender goroutine; it exits when the
	// stream's context is done
	go l.sendKeepAliveLoop(stream)
	return stream, nil
}
  417. // recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
  418. func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
  419. karesp := &LeaseKeepAliveResponse{
  420. ResponseHeader: resp.GetHeader(),
  421. ID: LeaseID(resp.ID),
  422. TTL: resp.TTL,
  423. }
  424. l.mu.Lock()
  425. defer l.mu.Unlock()
  426. ka, ok := l.keepAlives[karesp.ID]
  427. if !ok {
  428. return
  429. }
  430. if karesp.TTL <= 0 {
  431. // lease expired; close all keep alive channels
  432. delete(l.keepAlives, karesp.ID)
  433. ka.close()
  434. return
  435. }
  436. // send update to all channels
  437. nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
  438. ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
  439. for _, ch := range ka.chs {
  440. select {
  441. case ch <- karesp:
  442. ka.nextKeepAlive = nextKeepAlive
  443. default:
  444. }
  445. }
  446. }
  447. // deadlineLoop reaps any keep alive channels that have not received a response
  448. // within the lease TTL
  449. func (l *lessor) deadlineLoop() {
  450. for {
  451. select {
  452. case <-time.After(time.Second):
  453. case <-l.donec:
  454. return
  455. }
  456. now := time.Now()
  457. l.mu.Lock()
  458. for id, ka := range l.keepAlives {
  459. if ka.deadline.Before(now) {
  460. // waited too long for response; lease may be expired
  461. ka.close()
  462. delete(l.keepAlives, id)
  463. }
  464. }
  465. l.mu.Unlock()
  466. }
  467. }
  468. // sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
  469. func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
  470. for {
  471. var tosend []LeaseID
  472. now := time.Now()
  473. l.mu.Lock()
  474. for id, ka := range l.keepAlives {
  475. if ka.nextKeepAlive.Before(now) {
  476. tosend = append(tosend, id)
  477. }
  478. }
  479. l.mu.Unlock()
  480. for _, id := range tosend {
  481. r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
  482. if err := stream.Send(r); err != nil {
  483. // TODO do something with this error?
  484. return
  485. }
  486. }
  487. select {
  488. case <-time.After(500 * time.Millisecond):
  489. case <-stream.Context().Done():
  490. return
  491. case <-l.donec:
  492. return
  493. case <-l.stopCtx.Done():
  494. return
  495. }
  496. }
  497. }
  498. func (ka *keepAlive) close() {
  499. close(ka.donec)
  500. for _, ch := range ka.chs {
  501. close(ch)
  502. }
  503. }