lease.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type (
	LeaseRevokeResponse pb.LeaseRevokeResponse
	LeaseID             int64
)

// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	ID    LeaseID
	TTL   int64
	Error string
}

// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	ID  LeaseID
	TTL int64
}

// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	ID LeaseID `json:"id"`

	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
	TTL int64 `json:"ttl"`

	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`

	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}

const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
	defaultTTL = 5 * time.Second
	// leaseResponseChSize is a small buffer for unsent lease responses.
	leaseResponseChSize = 16
	// NoLease is a lease ID for the absence of a lease.
	NoLease LeaseID = 0
	// retryConnWait is how long to wait before retrying on a lost leader.
	retryConnWait = 500 * time.Millisecond
)

// ErrKeepAliveHalted is returned if the client keepalive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
	Reason error
}

func (e ErrKeepAliveHalted) Error() string {
	s := "etcdclient: leases keep alive halted"
	if e.Reason != nil {
		s += ": " + e.Reason.Error()
	}
	return s
}

type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// KeepAlive keeps the given lease alive forever.
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. In most cases, KeepAlive
	// should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}
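
// A minimal sketch of using this interface through a clientv3.Client, which
// embeds Lease; the endpoint address, key, and 5-second TTL are illustrative
// assumptions, not values mandated by this package:
//
//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
//	if err != nil { /* handle dial error */ }
//	defer cli.Close()
//
//	// grant a 5-second lease and attach a key to it
//	ctx := context.Background()
//	gresp, err := cli.Grant(ctx, 5)
//	if err != nil { /* handle error */ }
//	_, err = cli.Put(ctx, "/service/instance", "addr", clientv3.WithLease(gresp.ID))
//
//	// keep the lease alive until ctx is canceled or the lease is revoked
//	kaCh, err := cli.KeepAlive(ctx, gresp.ID)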

type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

	stream       pb.Lease_LeaseKeepAliveClient
	streamCancel context.CancelFunc

	stopCtx    context.Context
	stopCancel context.CancelFunc

	keepAlives map[LeaseID]*keepAlive

	// firstKeepAliveTimeout is the timeout for the first keepalive request
	// before the actual TTL is known to the lease client
	firstKeepAliveTimeout time.Duration

	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
	firstKeepAliveOnce sync.Once
}

// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
	chs  []chan<- *LeaseKeepAliveResponse
	ctxs []context.Context
	// deadline is the time the keep alive channels close if no response
	deadline time.Time
	// nextKeepAlive is when to send the next keep alive message
	nextKeepAlive time.Time
	// donec is closed on lease revoke, expiration, or cancel.
	donec chan struct{}
}

func NewLease(c *Client) Lease {
	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
}

func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
	l := &lessor{
		donec:                 make(chan struct{}),
		keepAlives:            make(map[LeaseID]*keepAlive),
		remote:                remote,
		firstKeepAliveTimeout: keepAliveTimeout,
	}
	// a zero DialTimeout in NewLease yields exactly one second here; fall
	// back to defaultTTL so the first keepalive is not cut off prematurely
	if l.firstKeepAliveTimeout == time.Second {
		l.firstKeepAliveTimeout = defaultTTL
	}
	reqLeaderCtx := WithRequireLeader(context.Background())
	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
	return l
}

func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
	for {
		r := &pb.LeaseGrantRequest{TTL: ttl}
		resp, err := l.remote.LeaseGrant(ctx, r)
		if err == nil {
			gresp := &LeaseGrantResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				Error:          resp.Error,
			}
			return gresp, nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
	for {
		r := &pb.LeaseRevokeRequest{ID: int64(id)}
		resp, err := l.remote.LeaseRevoke(ctx, r)
		if err == nil {
			return (*LeaseRevokeResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
	for {
		r := toLeaseTimeToLiveRequest(id, opts...)
		resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
		if err == nil {
			gresp := &LeaseTimeToLiveResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				GrantedTTL:     resp.GrantedTTL,
				Keys:           resp.Keys,
			}
			return gresp, nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		err := l.loopErr
		l.mu.Unlock()
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
		ka = &keepAlive{
			chs:           []chan<- *LeaseKeepAliveResponse{ch},
			ctxs:          []context.Context{ctx},
			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
			nextKeepAlive: time.Now(),
			donec:         make(chan struct{}),
		}
		l.keepAlives[id] = ka
	} else {
		// add channel and context to existing keep alive
		ka.ctxs = append(ka.ctxs, ctx)
		ka.chs = append(ka.chs, ch)
	}
	l.mu.Unlock()

	go l.keepAliveCtxCloser(id, ctx, ka.donec)
	l.firstKeepAliveOnce.Do(func() {
		go l.recvKeepAliveLoop()
		go l.deadlineLoop()
	})
	return ch, nil
}
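
// A minimal sketch of consuming the returned channel; the loop body is
// illustrative, not part of this package. The channel is closed when the
// lease expires, is revoked, or the keepalive loop halts, so ranging over
// it doubles as liveness detection:
//
//	kaCh, err := cli.KeepAlive(context.Background(), gresp.ID)
//	if err != nil { /* handle error */ }
//	for ka := range kaCh {
//		// one response per renewal; ka.TTL is the refreshed TTL in seconds
//		fmt.Println("renewed lease", ka.ID, "ttl:", ka.TTL)
//	}
//	// channel closed: lease is gone or the keepalive loop halted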

func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	for {
		resp, err := l.keepAliveOnce(ctx, id)
		if err == nil {
			if resp.TTL <= 0 {
				err = rpctypes.ErrLeaseNotFound
			}
			return resp, err
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}
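
// A minimal sketch of driving renewals by hand with KeepAliveOnce instead
// of the managed KeepAlive loop; the ticker period is an illustrative
// choice (roughly TTL/3, mirroring recvKeepAlive below):
//
//	ticker := time.NewTicker(time.Duration(gresp.TTL) * time.Second / 3)
//	defer ticker.Stop()
//	for range ticker.C {
//		if _, err := cli.KeepAliveOnce(context.Background(), gresp.ID); err != nil {
//			break // e.g. rpctypes.ErrLeaseNotFound once the lease expires
//		}
//	}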

func (l *lessor) Close() error {
	l.stopCancel()
	// close for synchronous teardown if stream goroutines never launched
	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
	<-l.donec
	return nil
}

func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}

	// close channel and remove context if still associated with keep alive
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove the keepalive if no more listeners remain
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}

// closeRequireLeader scans all keep alives for contexts that carry the
// require-leader metadata and closes the associated channels.
func (l *lessor) closeRequireLeader() {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, ka := range l.keepAlives {
		reqIdxs := 0
		// find all required leader channels, close, mark as nil
		for i, ctx := range ka.ctxs {
			md, ok := metadata.FromContext(ctx)
			if !ok {
				continue
			}
			ks := md[rpctypes.MetadataRequireLeaderKey]
			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
				continue
			}
			close(ka.chs[i])
			ka.chs[i] = nil
			reqIdxs++
		}
		if reqIdxs == 0 {
			continue
		}
		// remove all channels that required a leader from keepalive
		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
		newCtxs := make([]context.Context, len(newChs))
		newIdx := 0
		for i := range ka.chs {
			if ka.chs[i] == nil {
				continue
			}
			// keep each surviving channel paired with its own context
			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
			newIdx++
		}
		ka.chs, ka.ctxs = newChs, newCtxs
	}
}
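
// For reference, the require-leader metadata checked above is attached by
// WithRequireLeader; a sketch of a keepalive that should fail fast on a
// lost leader (the lease ID is assumed granted as in the earlier examples):
//
//	ctx := clientv3.WithRequireLeader(context.Background())
//	kaCh, err := cli.KeepAlive(ctx, gresp.ID)
//	// on leader loss, recvKeepAliveLoop calls closeRequireLeader and
//	// this channel is closed rather than silently stalling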

func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}

	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
	if err != nil {
		return nil, toErr(ctx, err)
	}

	resp, rerr := stream.Recv()
	if rerr != nil {
		return nil, toErr(ctx, rerr)
	}

	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}
	return karesp, nil
}

func (l *lessor) recvKeepAliveLoop() (gerr error) {
	defer func() {
		l.mu.Lock()
		close(l.donec)
		l.loopErr = gerr
		for _, ka := range l.keepAlives {
			ka.Close()
		}
		l.keepAlives = make(map[LeaseID]*keepAlive)
		l.mu.Unlock()
	}()

	stream, serr := l.resetRecv()
	for serr == nil {
		resp, err := stream.Recv()
		if err == nil {
			l.recvKeepAlive(resp)
			continue
		}
		err = toErr(l.stopCtx, err)
		if err == rpctypes.ErrNoLeader {
			l.closeRequireLeader()
			select {
			case <-time.After(retryConnWait):
			case <-l.stopCtx.Done():
				return err
			}
		} else if isHaltErr(l.stopCtx, err) {
			return err
		}
		stream, serr = l.resetRecv()
	}
	return serr
}

// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
	sctx, cancel := context.WithCancel(l.stopCtx)
	stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
	if err = toErr(sctx, err); err != nil {
		cancel()
		return nil, err
	}

	l.mu.Lock()
	defer l.mu.Unlock()
	if l.stream != nil && l.streamCancel != nil {
		l.streamCancel()
	}
	l.streamCancel = cancel
	l.stream = stream

	go l.sendKeepAliveLoop(stream)
	return stream, nil
}

// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[karesp.ID]
	if !ok {
		return
	}

	if karesp.TTL <= 0 {
		// lease expired; close all keep alive channels
		delete(l.keepAlives, karesp.ID)
		ka.Close()
		return
	}

	// send update to all channels; renew at a third of the TTL so several
	// attempts fit before the lease expires
	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3)
	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
	for _, ch := range ka.chs {
		select {
		case ch <- karesp:
			ka.nextKeepAlive = nextKeepAlive
		default:
			// drop the response if the receiver's buffer is full
		}
	}
}

// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
	for {
		select {
		case <-time.After(time.Second):
		case <-l.donec:
			return
		}
		now := time.Now()
		l.mu.Lock()
		for id, ka := range l.keepAlives {
			if ka.deadline.Before(now) {
				// waited too long for response; lease may be expired
				ka.Close()
				delete(l.keepAlives, id)
			}
		}
		l.mu.Unlock()
	}
}

// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
	for {
		var tosend []LeaseID

		now := time.Now()
		l.mu.Lock()
		for id, ka := range l.keepAlives {
			if ka.nextKeepAlive.Before(now) {
				tosend = append(tosend, id)
			}
		}
		l.mu.Unlock()

		for _, id := range tosend {
			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
			if err := stream.Send(r); err != nil {
				// TODO do something with this error?
				return
			}
		}

		select {
		case <-time.After(500 * time.Millisecond):
		case <-stream.Context().Done():
			return
		case <-l.donec:
			return
		case <-l.stopCtx.Done():
			return
		}
	}
}

func (ka *keepAlive) Close() {
	close(ka.donec)
	for _, ch := range ka.chs {
		close(ch)
	}
}