lease.go 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578
  1. // Copyright 2016 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package clientv3
  15. import (
  16. "sync"
  17. "time"
  18. "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
  19. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  20. "golang.org/x/net/context"
  21. "google.golang.org/grpc"
  22. "google.golang.org/grpc/metadata"
  23. )
type (
	// LeaseRevokeResponse wraps the protobuf lease revoke response.
	LeaseRevokeResponse pb.LeaseRevokeResponse
	// LeaseID identifies a lease granted by the etcd cluster.
	LeaseID int64
)
// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	// ID is the lease ID for the granted lease.
	ID LeaseID
	// TTL is the server-chosen time-to-live, in seconds, for the lease.
	TTL int64
	// Error is the error message carried back by the server, if any.
	Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	// ID is the lease that was renewed.
	ID LeaseID
	// TTL is the new time-to-live, in seconds, for the lease.
	TTL int64
}
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	// ID is the lease the TTL information describes.
	ID LeaseID `json:"id"`
	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
	TTL int64 `json:"ttl"`
	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`
	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}
// LeaseStatus represents a lease status.
type LeaseStatus struct {
	// ID is the lease the status entry refers to.
	ID LeaseID `json:"id"`
	// TODO: TTL int64
}
// LeaseLeasesResponse is used to convert the protobuf lease list response.
type LeaseLeasesResponse struct {
	*pb.ResponseHeader
	// Leases lists the status of every lease known to the cluster.
	Leases []LeaseStatus `json:"leases"`
}
const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
	defaultTTL = 5 * time.Second
	// leaseResponseChSize is a small buffer to store unsent lease responses.
	leaseResponseChSize = 16
	// NoLease is a lease ID for the absence of a lease.
	NoLease LeaseID = 0
	// retryConnWait is how long to wait before retrying a request due to an error.
	retryConnWait = 500 * time.Millisecond
)
  73. // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
  74. //
  75. // This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
  76. type ErrKeepAliveHalted struct {
  77. Reason error
  78. }
  79. func (e ErrKeepAliveHalted) Error() string {
  80. s := "etcdclient: leases keep alive halted"
  81. if e.Reason != nil {
  82. s += ": " + e.Reason.Error()
  83. }
  84. return s
  85. }
// Lease is the client interface for granting, revoking, inspecting, and
// renewing etcd leases.
type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
	// Leases retrieves all leases.
	Leases(ctx context.Context) (*LeaseLeasesResponse, error)
	// KeepAlive keeps the given lease alive forever.
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
	// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
	// should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}
// lessor implements the Lease interface, multiplexing all KeepAlive
// subscriptions over a single gRPC lease stream.
type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

	// stream is the currently active keepalive stream; streamCancel tears
	// down that stream's context when a replacement is installed.
	stream       pb.Lease_LeaseKeepAliveClient
	streamCancel context.CancelFunc

	// stopCtx/stopCancel control the lifetime of the background loops.
	stopCtx    context.Context
	stopCancel context.CancelFunc

	// keepAlives tracks one multiplexed keepAlive entry per lease ID.
	keepAlives map[LeaseID]*keepAlive

	// firstKeepAliveTimeout is the timeout for the first keepalive request
	// before the actual TTL is known to the lease client
	firstKeepAliveTimeout time.Duration

	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
	firstKeepAliveOnce sync.Once
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
	// chs receives every keepalive response; ctxs holds the caller context
	// paired with the channel at the same index.
	chs  []chan<- *LeaseKeepAliveResponse
	ctxs []context.Context
	// deadline is the time the keep alive channels close if no response
	deadline time.Time
	// nextKeepAlive is when to send the next keep alive message
	nextKeepAlive time.Time
	// donec is closed on lease revoke, expiration, or cancel.
	donec chan struct{}
}
// NewLease returns a Lease backed by the client's retrying lease RPC stub;
// the first keepalive deadline is the dial timeout plus one second.
func NewLease(c *Client) Lease {
	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
}
  135. func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
  136. l := &lessor{
  137. donec: make(chan struct{}),
  138. keepAlives: make(map[LeaseID]*keepAlive),
  139. remote: remote,
  140. firstKeepAliveTimeout: keepAliveTimeout,
  141. }
  142. if l.firstKeepAliveTimeout == time.Second {
  143. l.firstKeepAliveTimeout = defaultTTL
  144. }
  145. reqLeaderCtx := WithRequireLeader(context.Background())
  146. l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
  147. return l
  148. }
  149. func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
  150. for {
  151. r := &pb.LeaseGrantRequest{TTL: ttl}
  152. resp, err := l.remote.LeaseGrant(ctx, r)
  153. if err == nil {
  154. gresp := &LeaseGrantResponse{
  155. ResponseHeader: resp.GetHeader(),
  156. ID: LeaseID(resp.ID),
  157. TTL: resp.TTL,
  158. Error: resp.Error,
  159. }
  160. return gresp, nil
  161. }
  162. if isHaltErr(ctx, err) {
  163. return nil, toErr(ctx, err)
  164. }
  165. }
  166. }
  167. func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
  168. for {
  169. r := &pb.LeaseRevokeRequest{ID: int64(id)}
  170. resp, err := l.remote.LeaseRevoke(ctx, r)
  171. if err == nil {
  172. return (*LeaseRevokeResponse)(resp), nil
  173. }
  174. if isHaltErr(ctx, err) {
  175. return nil, toErr(ctx, err)
  176. }
  177. }
  178. }
  179. func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
  180. for {
  181. r := toLeaseTimeToLiveRequest(id, opts...)
  182. resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
  183. if err == nil {
  184. gresp := &LeaseTimeToLiveResponse{
  185. ResponseHeader: resp.GetHeader(),
  186. ID: LeaseID(resp.ID),
  187. TTL: resp.TTL,
  188. GrantedTTL: resp.GrantedTTL,
  189. Keys: resp.Keys,
  190. }
  191. return gresp, nil
  192. }
  193. if isHaltErr(ctx, err) {
  194. return nil, toErr(ctx, err)
  195. }
  196. }
  197. }
  198. func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
  199. for {
  200. resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, grpc.FailFast(false))
  201. if err == nil {
  202. leases := make([]LeaseStatus, len(resp.Leases))
  203. for i := range resp.Leases {
  204. leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
  205. }
  206. return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
  207. }
  208. if isHaltErr(ctx, err) {
  209. return nil, toErr(ctx, err)
  210. }
  211. }
  212. }
// KeepAlive renews the lease forever, returning a channel that receives a
// response after each successful renewal. Multiple callers for the same
// lease ID share one keepAlive entry; each caller gets its own channel.
// If the background receive loop has already halted, the channel is
// returned closed along with ErrKeepAliveHalted.
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		err := l.loopErr
		l.mu.Unlock()
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
		ka = &keepAlive{
			chs:           []chan<- *LeaseKeepAliveResponse{ch},
			ctxs:          []context.Context{ctx},
			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
			nextKeepAlive: time.Now(),
			donec:         make(chan struct{}),
		}
		l.keepAlives[id] = ka
	} else {
		// add channel and context to existing keep alive
		ka.ctxs = append(ka.ctxs, ctx)
		ka.chs = append(ka.chs, ch)
	}
	l.mu.Unlock()

	// close this caller's channel when its ctx is canceled
	go l.keepAliveCtxCloser(id, ctx, ka.donec)
	// the background send/receive loops start lazily on the first KeepAlive
	l.firstKeepAliveOnce.Do(func() {
		go l.recvKeepAliveLoop()
		go l.deadlineLoop()
	})

	return ch, nil
}
  249. func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
  250. for {
  251. resp, err := l.keepAliveOnce(ctx, id)
  252. if err == nil {
  253. if resp.TTL <= 0 {
  254. err = rpctypes.ErrLeaseNotFound
  255. }
  256. return resp, err
  257. }
  258. if isHaltErr(ctx, err) {
  259. return nil, toErr(ctx, err)
  260. }
  261. }
  262. }
// Close stops the background keepalive loops and waits for them to exit.
// It is safe to call even if KeepAlive was never invoked: the sync.Once
// closes donec directly when the loops were never launched, so the final
// receive does not block.
func (l *lessor) Close() error {
	l.stopCancel()
	// close for synchronous teardown if stream goroutines never launched
	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
	<-l.donec
	return nil
}
// keepAliveCtxCloser waits for one KeepAlive caller's ctx to be canceled
// and then detaches that caller from the lease's keepAlive entry, closing
// its channel. It returns without action if the lease or the lessor is
// torn down first.
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}

	// close channel and remove context if still associated with keep alive
	// (chs and ctxs are parallel slices; remove the same index from both)
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove the keepalive entirely once no listeners remain
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}
  298. // closeRequireLeader scans all keep alives for ctxs that have require leader
  299. // and closes the associated channels.
  300. func (l *lessor) closeRequireLeader() {
  301. l.mu.Lock()
  302. defer l.mu.Unlock()
  303. for _, ka := range l.keepAlives {
  304. reqIdxs := 0
  305. // find all required leader channels, close, mark as nil
  306. for i, ctx := range ka.ctxs {
  307. md, ok := metadata.FromOutgoingContext(ctx)
  308. if !ok {
  309. continue
  310. }
  311. ks := md[rpctypes.MetadataRequireLeaderKey]
  312. if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
  313. continue
  314. }
  315. close(ka.chs[i])
  316. ka.chs[i] = nil
  317. reqIdxs++
  318. }
  319. if reqIdxs == 0 {
  320. continue
  321. }
  322. // remove all channels that required a leader from keepalive
  323. newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
  324. newCtxs := make([]context.Context, len(newChs))
  325. newIdx := 0
  326. for i := range ka.chs {
  327. if ka.chs[i] == nil {
  328. continue
  329. }
  330. newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
  331. newIdx++
  332. }
  333. ka.chs, ka.ctxs = newChs, newCtxs
  334. }
  335. }
  336. func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
  337. cctx, cancel := context.WithCancel(ctx)
  338. defer cancel()
  339. stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
  340. if err != nil {
  341. return nil, toErr(ctx, err)
  342. }
  343. err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
  344. if err != nil {
  345. return nil, toErr(ctx, err)
  346. }
  347. resp, rerr := stream.Recv()
  348. if rerr != nil {
  349. return nil, toErr(ctx, rerr)
  350. }
  351. karesp := &LeaseKeepAliveResponse{
  352. ResponseHeader: resp.GetHeader(),
  353. ID: LeaseID(resp.ID),
  354. TTL: resp.TTL,
  355. }
  356. return karesp, nil
  357. }
// recvKeepAliveLoop repeatedly (re)opens the keepalive stream and dispatches
// responses until the lessor is stopped. On exit it closes donec, records
// the loop error, and closes every outstanding keepAlive so subscribers
// observe the halt.
func (l *lessor) recvKeepAliveLoop() (gerr error) {
	defer func() {
		l.mu.Lock()
		close(l.donec)
		l.loopErr = gerr
		for _, ka := range l.keepAlives {
			ka.close()
		}
		l.keepAlives = make(map[LeaseID]*keepAlive)
		l.mu.Unlock()
	}()

	for {
		// open a new stream; resetRecv also starts the matching send loop
		stream, err := l.resetRecv()
		if err != nil {
			if canceledByCaller(l.stopCtx, err) {
				return err
			}
		} else {
			// drain responses until the stream errors out
			for {
				resp, err := stream.Recv()
				if err != nil {
					if canceledByCaller(l.stopCtx, err) {
						return err
					}
					// losing the leader halts require-leader subscribers only
					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
						l.closeRequireLeader()
					}
					break
				}
				l.recvKeepAlive(resp)
			}
		}

		// back off before reconnecting, unless the lessor was stopped
		select {
		case <-time.After(retryConnWait):
			continue
		case <-l.stopCtx.Done():
			return l.stopCtx.Err()
		}
	}
}
  398. // resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
// on it; any previously installed stream is canceled before the replacement
// is recorded.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
	sctx, cancel := context.WithCancel(l.stopCtx)
	stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
	if err != nil {
		cancel()
		return nil, err
	}

	l.mu.Lock()
	defer l.mu.Unlock()
	// tear down the old stream, if any, before swapping in the new one
	if l.stream != nil && l.streamCancel != nil {
		l.streamCancel()
	}
	l.streamCancel = cancel
	l.stream = stream

	go l.sendKeepAliveLoop(stream)
	return stream, nil
}
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse: it
// extends the deadline, schedules the next renewal at one third of the TTL,
// and fans the response out to every subscriber channel (non-blocking;
// slow subscribers simply miss this update).
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[karesp.ID]
	if !ok {
		return
	}

	if karesp.TTL <= 0 {
		// lease expired; close all keep alive channels
		delete(l.keepAlives, karesp.ID)
		ka.close()
		return
	}

	// send update to all channels; renew at a third of the TTL so there are
	// multiple chances before the lease expires
	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
	for _, ch := range ka.chs {
		select {
		case ch <- karesp:
			ka.nextKeepAlive = nextKeepAlive
		default:
			// subscriber buffer full; drop this update rather than block
		}
	}
}
  446. // deadlineLoop reaps any keep alive channels that have not received a response
  447. // within the lease TTL
  448. func (l *lessor) deadlineLoop() {
  449. for {
  450. select {
  451. case <-time.After(time.Second):
  452. case <-l.donec:
  453. return
  454. }
  455. now := time.Now()
  456. l.mu.Lock()
  457. for id, ka := range l.keepAlives {
  458. if ka.deadline.Before(now) {
  459. // waited too long for response; lease may be expired
  460. ka.close()
  461. delete(l.keepAlives, id)
  462. }
  463. }
  464. l.mu.Unlock()
  465. }
  466. }
  467. // sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
  468. func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
  469. for {
  470. var tosend []LeaseID
  471. now := time.Now()
  472. l.mu.Lock()
  473. for id, ka := range l.keepAlives {
  474. if ka.nextKeepAlive.Before(now) {
  475. tosend = append(tosend, id)
  476. }
  477. }
  478. l.mu.Unlock()
  479. for _, id := range tosend {
  480. r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
  481. if err := stream.Send(r); err != nil {
  482. // TODO do something with this error?
  483. return
  484. }
  485. }
  486. select {
  487. case <-time.After(500 * time.Millisecond):
  488. case <-stream.Context().Done():
  489. return
  490. case <-l.donec:
  491. return
  492. case <-l.stopCtx.Done():
  493. return
  494. }
  495. }
  496. }
  497. func (ka *keepAlive) close() {
  498. close(ka.donec)
  499. for _, ch := range ka.chs {
  500. close(ch)
  501. }
  502. }