// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcproxy

import (
	"context"
	"math"
	"sync"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
)
const (
	// lostLeaderKey is the key recvLoop watches to detect leader loss.
	lostLeaderKey = "__lostleader" // watched to detect leader loss
	// retryPerSecond bounds how often recvLoop re-establishes its watch.
	retryPerSecond = 10
)
// leader tracks whether the etcd cluster currently has a leader, as
// observed through a require-leader watch (see recvLoop).
type leader struct {
	// ctx is the caller's context wrapped with clientv3.WithRequireLeader
	// (set in newLeader).
	ctx context.Context
	// w issues the watch used to detect leader loss.
	w clientv3.Watcher
	// mu protects leaderc updates.
	mu sync.RWMutex
	// leaderc is closed while the leader is considered lost and replaced
	// with a fresh channel once a leader is regained (see loseLeader /
	// gotLeader / lostNotify).
	leaderc chan struct{}
	// disconnc is closed when recvLoop sees the client connection close.
	disconnc chan struct{}
	// donec is closed when recvLoop exits.
	donec chan struct{}
}
  37. func newLeader(ctx context.Context, w clientv3.Watcher) *leader {
  38. l := &leader{
  39. ctx: clientv3.WithRequireLeader(ctx),
  40. w: w,
  41. leaderc: make(chan struct{}),
  42. disconnc: make(chan struct{}),
  43. donec: make(chan struct{}),
  44. }
  45. // begin assuming leader is lost
  46. close(l.leaderc)
  47. go l.recvLoop()
  48. return l
  49. }
// recvLoop repeatedly establishes a watch on lostLeaderKey and translates
// the watch's lifecycle into leader gained/lost transitions. It runs until
// l.ctx is canceled or the client connection closes, then closes l.donec.
func (l *leader) recvLoop() {
	defer close(l.donec)
	// Rate-limit watch re-creation so a flapping connection cannot spin.
	limiter := rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond)
	// Watch from a revision far in the future so no real event for the key
	// is ever delivered; the stream only signals create/cancel/close.
	rev := int64(math.MaxInt64 - 2)
	for limiter.Wait(l.ctx) == nil {
		wch := l.w.Watch(l.ctx, lostLeaderKey, clientv3.WithRev(rev), clientv3.WithCreatedNotify())
		// First receive is expected to be the created-notify response.
		cresp, ok := <-wch
		if !ok {
			// Stream closed before the watch was confirmed; retry.
			l.loseLeader()
			continue
		}
		if cresp.Err() != nil {
			l.loseLeader()
			// A closing client connection is terminal: signal disconnc and
			// stop the loop entirely.
			if rpctypes.ErrorDesc(cresp.Err()) == grpc.ErrClientConnClosing.Error() {
				close(l.disconnc)
				return
			}
			continue
		}
		// Watch established; since l.ctx carries require-leader metadata
		// (set in newLeader), a live watch implies a leader is present.
		l.gotLeader()
		// Block until the watch channel closes — presumably when the
		// server cancels the require-leader watch on leader loss or the
		// context ends; then mark the leader lost and retry.
		<-wch
		l.loseLeader()
	}
}
  74. func (l *leader) loseLeader() {
  75. l.mu.RLock()
  76. defer l.mu.RUnlock()
  77. select {
  78. case <-l.leaderc:
  79. default:
  80. close(l.leaderc)
  81. }
  82. }
  83. // gotLeader will force update the leadership status to having a leader.
  84. func (l *leader) gotLeader() {
  85. l.mu.Lock()
  86. defer l.mu.Unlock()
  87. select {
  88. case <-l.leaderc:
  89. l.leaderc = make(chan struct{})
  90. default:
  91. }
  92. }
  93. func (l *leader) disconnectNotify() <-chan struct{} { return l.disconnc }
  94. func (l *leader) stopNotify() <-chan struct{} { return l.donec }
  95. // lostNotify returns a channel that is closed if there has been
  96. // a leader loss not yet followed by a leader reacquire.
  97. func (l *leader) lostNotify() <-chan struct{} {
  98. l.mu.RLock()
  99. defer l.mu.RUnlock()
  100. return l.leaderc
  101. }