lease.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"context"
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type (
	LeaseRevokeResponse pb.LeaseRevokeResponse
	LeaseID             int64
)

// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	ID    LeaseID
	TTL   int64
	Error string
}

// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	ID  LeaseID
	TTL int64
}

// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	ID LeaseID `json:"id"`

	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease reports a TTL of -1.
	TTL int64 `json:"ttl"`

	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`

	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}

// LeaseStatus represents a lease status.
type LeaseStatus struct {
	ID LeaseID `json:"id"`
	// TODO: TTL int64
}

// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
type LeaseLeasesResponse struct {
	*pb.ResponseHeader
	Leases []LeaseStatus `json:"leases"`
}

const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
	defaultTTL = 5 * time.Second
	// NoLease is a lease ID for the absence of a lease.
	NoLease LeaseID = 0

	// retryConnWait is how long to wait before retrying a request after an error.
	retryConnWait = 500 * time.Millisecond
)

// LeaseResponseChSize is the size of the buffer used to store unsent lease responses.
// WARNING: DO NOT UPDATE.
// Only for testing purposes.
var LeaseResponseChSize = 16

// ErrKeepAliveHalted is returned if the client keepalive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
	Reason error
}

func (e ErrKeepAliveHalted) Error() string {
	s := "etcdclient: leases keep alive halted"
	if e.Reason != nil {
		s += ": " + e.Reason.Error()
	}
	return s
}

type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// Leases retrieves all leases.
	Leases(ctx context.Context) (*LeaseLeasesResponse, error)

	// KeepAlive keeps the given lease alive forever. If the keepalive response
	// posted to the channel is not consumed immediately, the lease client will
	// continue sending keep alive requests to the etcd server at least every
	// second until the latest response is consumed.
	//
	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
	// alive stream is interrupted in some way the client cannot handle itself, or
	// if the given context "ctx" is canceled or times out. "LeaseKeepAliveResponse"
	// values read from this closed channel are nil.
	//
	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver:
	// no leader") or is canceled by the caller (e.g. context.Canceled), the error
	// is returned. Otherwise, it retries. See the sketch below this interface for
	// typical consumption of the returned channel.
	//
	// TODO(v4.0): post errors to last keep alive message before closing
	// (see https://github.com/coreos/etcd/pull/7866)
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. The response corresponds to the
	// first message from calling KeepAlive. If the response has a recoverable
	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
	//
	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}
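
// keepLeaseAliveSketch is an illustrative sketch and not part of the original
// file: it shows one way a caller might drive the Lease API above, by granting
// a lease, starting KeepAlive, and draining the response channel until it
// closes. The function name, the ttl parameter, and the decision to discard
// responses are assumptions made purely for illustration.
func keepLeaseAliveSketch(ctx context.Context, lc Lease, ttl int64) error {
	g, err := lc.Grant(ctx, ttl)
	if err != nil {
		return err
	}
	ch, err := lc.KeepAlive(ctx, g.ID)
	if err != nil {
		return err
	}
	// Draining the channel lets the lessor keep renewing the lease at roughly
	// TTL/3 intervals; the channel closes once the stream breaks in a way the
	// client cannot recover from, or once ctx is canceled.
	for range ch {
	}
	return ctx.Err()
}

// lessor implements Lease, multiplexing keepalives for all granted leases
// over a single gRPC keepalive stream.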
type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

	stream       pb.Lease_LeaseKeepAliveClient
	streamCancel context.CancelFunc

	stopCtx    context.Context
	stopCancel context.CancelFunc

	keepAlives map[LeaseID]*keepAlive

	// firstKeepAliveTimeout is the timeout for the first keepalive request
	// before the actual TTL is known to the lease client
	firstKeepAliveTimeout time.Duration

	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
	firstKeepAliveOnce sync.Once

	callOpts []grpc.CallOption
}

// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
	chs  []chan<- *LeaseKeepAliveResponse
	ctxs []context.Context
	// deadline is the time the keepalive channels close if no response is received
	deadline time.Time
	// nextKeepAlive is when to send the next keep alive message
	nextKeepAlive time.Time
	// donec is closed on lease revoke, expiration, or cancel.
	donec chan struct{}
}
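
// NewLease creates a Lease client for the given etcd client. The timeout for
// the first keepalive request is the client's dial timeout plus one second.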
func NewLease(c *Client) Lease {
	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
}
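
// NewLeaseFromLeaseClient creates a Lease client from an existing pb.LeaseClient.
// keepAliveTimeout is the deadline for the first keepalive request; if it equals
// one second (as happens when NewLease is called with a zero dial timeout),
// defaultTTL is used instead.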
func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
	l := &lessor{
		donec:                 make(chan struct{}),
		keepAlives:            make(map[LeaseID]*keepAlive),
		remote:                remote,
		firstKeepAliveTimeout: keepAliveTimeout,
	}
	if l.firstKeepAliveTimeout == time.Second {
		l.firstKeepAliveTimeout = defaultTTL
	}
	if c != nil {
		l.callOpts = c.callOpts
	}
	reqLeaderCtx := WithRequireLeader(context.Background())
	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
	return l
}

func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
	r := &pb.LeaseGrantRequest{TTL: ttl}
	resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
	if err == nil {
		gresp := &LeaseGrantResponse{
			ResponseHeader: resp.GetHeader(),
			ID:             LeaseID(resp.ID),
			TTL:            resp.TTL,
			Error:          resp.Error,
		}
		return gresp, nil
	}
	return nil, toErr(ctx, err)
}

func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
	r := &pb.LeaseRevokeRequest{ID: int64(id)}
	resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
	if err == nil {
		return (*LeaseRevokeResponse)(resp), nil
	}
	return nil, toErr(ctx, err)
}

func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
	r := toLeaseTimeToLiveRequest(id, opts...)
	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
	if err == nil {
		gresp := &LeaseTimeToLiveResponse{
			ResponseHeader: resp.GetHeader(),
			ID:             LeaseID(resp.ID),
			TTL:            resp.TTL,
			GrantedTTL:     resp.GrantedTTL,
			Keys:           resp.Keys,
		}
		return gresp, nil
	}
	return nil, toErr(ctx, err)
}

func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
	resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
	if err == nil {
		leases := make([]LeaseStatus, len(resp.Leases))
		for i := range resp.Leases {
			leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
		}
		return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
	}
	return nil, toErr(ctx, err)
}

func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)

	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		err := l.loopErr
		l.mu.Unlock()
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
		ka = &keepAlive{
			chs:           []chan<- *LeaseKeepAliveResponse{ch},
			ctxs:          []context.Context{ctx},
			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
			nextKeepAlive: time.Now(),
			donec:         make(chan struct{}),
		}
		l.keepAlives[id] = ka
	} else {
		// add channel and context to existing keep alive
		ka.ctxs = append(ka.ctxs, ctx)
		ka.chs = append(ka.chs, ch)
	}
	l.mu.Unlock()

	go l.keepAliveCtxCloser(id, ctx, ka.donec)
	l.firstKeepAliveOnce.Do(func() {
		go l.recvKeepAliveLoop()
		go l.deadlineLoop()
	})

	return ch, nil
}

func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	for {
		resp, err := l.keepAliveOnce(ctx, id)
		if err == nil {
			if resp.TTL <= 0 {
				err = rpctypes.ErrLeaseNotFound
			}
			return resp, err
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) Close() error {
	l.stopCancel()
	// close for synchronous teardown if the stream goroutines were never launched
	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
	<-l.donec
	return nil
}
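
// keepAliveCtxCloser closes the keepalive channel registered under ctx once
// ctx is done, and drops the lease's keepAlive entry when no listeners remain.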
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}

	// close channel and remove context if still associated with keep alive
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove if there are no more listeners
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}

// closeRequireLeader scans keepAlives for contexts that require a leader
// and closes the associated channels.
func (l *lessor) closeRequireLeader() {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, ka := range l.keepAlives {
		reqIdxs := 0
		// find all required leader channels, close, mark as nil
		for i, ctx := range ka.ctxs {
			md, ok := metadata.FromOutgoingContext(ctx)
			if !ok {
				continue
			}
			ks := md[rpctypes.MetadataRequireLeaderKey]
			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
				continue
			}
			close(ka.chs[i])
			ka.chs[i] = nil
			reqIdxs++
		}
		if reqIdxs == 0 {
			continue
		}
		// remove all channels that required a leader from keepalive
		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
		newCtxs := make([]context.Context, len(newChs))
		newIdx := 0
		for i := range ka.chs {
			if ka.chs[i] == nil {
				continue
			}
			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
			newIdx++
		}
		ka.chs, ka.ctxs = newChs, newCtxs
	}
}
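
// keepAliveOnce opens a one-off keepalive stream, sends a single keepalive
// request for the given lease, and returns the first response.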
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
	if err != nil {
		return nil, toErr(ctx, err)
	}

	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
	if err != nil {
		return nil, toErr(ctx, err)
	}

	resp, rerr := stream.Recv()
	if rerr != nil {
		return nil, toErr(ctx, rerr)
	}

	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}
	return karesp, nil
}
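
// recvKeepAliveLoop receives keepalive responses for the lifetime of the
// lessor, re-establishing the stream after transient errors and dispatching
// responses to the registered keepAlive channels. On exit it closes donec,
// records the loop error, and closes all outstanding keepalive channels.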
func (l *lessor) recvKeepAliveLoop() (gerr error) {
	defer func() {
		l.mu.Lock()
		close(l.donec)
		l.loopErr = gerr
		for _, ka := range l.keepAlives {
			ka.close()
		}
		l.keepAlives = make(map[LeaseID]*keepAlive)
		l.mu.Unlock()
	}()

	for {
		stream, err := l.resetRecv()
		if err != nil {
			if canceledByCaller(l.stopCtx, err) {
				return err
			}
		} else {
			for {
				resp, err := stream.Recv()
				if err != nil {
					if canceledByCaller(l.stopCtx, err) {
						return err
					}
					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
						l.closeRequireLeader()
					}
					break
				}
				l.recvKeepAlive(resp)
			}
		}

		select {
		case <-time.After(retryConnWait):
			continue
		case <-l.stopCtx.Done():
			return l.stopCtx.Err()
		}
	}
}

// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
	sctx, cancel := context.WithCancel(l.stopCtx)
	stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
	if err != nil {
		cancel()
		return nil, err
	}

	l.mu.Lock()
	defer l.mu.Unlock()
	if l.stream != nil && l.streamCancel != nil {
		l.streamCancel()
	}

	l.streamCancel = cancel
	l.stream = stream

	go l.sendKeepAliveLoop(stream)
	return stream, nil
}

// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[karesp.ID]
	if !ok {
		return
	}

	if karesp.TTL <= 0 {
		// lease expired; close all keep alive channels
		delete(l.keepAlives, karesp.ID)
		ka.close()
		return
	}

	// send update to all channels
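	// The next keepalive is scheduled at roughly one third of the returned TTL,
	// while the expiry deadline is pushed out by the full TTL; e.g. a 9-second
	// TTL schedules renewal in about 3 seconds and reaping after 9 seconds.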
	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
	for _, ch := range ka.chs {
		select {
		case ch <- karesp:
		default:
		}
		// still advance in order to rate-limit keep-alive sends
		ka.nextKeepAlive = nextKeepAlive
	}
}

// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
	for {
		select {
		case <-time.After(time.Second):
		case <-l.donec:
			return
		}
		now := time.Now()
		l.mu.Lock()
		for id, ka := range l.keepAlives {
			if ka.deadline.Before(now) {
				// waited too long for response; lease may be expired
				ka.close()
				delete(l.keepAlives, id)
			}
		}
		l.mu.Unlock()
	}
}

// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
	for {
		var tosend []LeaseID

		now := time.Now()
		l.mu.Lock()
		for id, ka := range l.keepAlives {
			if ka.nextKeepAlive.Before(now) {
				tosend = append(tosend, id)
			}
		}
		l.mu.Unlock()

		for _, id := range tosend {
			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
			if err := stream.Send(r); err != nil {
				// TODO do something with this error?
				return
			}
		}

		select {
		case <-time.After(500 * time.Millisecond):
		case <-stream.Context().Done():
			return
		case <-l.donec:
			return
		case <-l.stopCtx.Done():
			return
		}
	}
}
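
// close closes every channel fanned out for this keepAlive and signals
// waiters by closing donec.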
func (ka *keepAlive) close() {
	close(ka.donec)
	for _, ch := range ka.chs {
		close(ch)
	}
}