leader.go

// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grpcproxy

import (
	"math"
	"sync"

	"golang.org/x/net/context"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"

	"github.com/coreos/etcd/clientv3"
)

const (
	lostLeaderKey  = "__lostleader" // watched to detect leader loss
	retryPerSecond = 10
)

type leader struct {
	ctx context.Context
	w   clientv3.Watcher
	// mu protects leaderc updates.
	mu       sync.RWMutex
	leaderc  chan struct{} // closed while the leader is lost
	disconnc chan struct{} // closed when the client connection is closing
	donec    chan struct{} // closed when recvLoop exits
}

func newLeader(ctx context.Context, w clientv3.Watcher) *leader {
	l := &leader{
		ctx:      clientv3.WithRequireLeader(ctx),
		w:        w,
		leaderc:  make(chan struct{}),
		disconnc: make(chan struct{}),
		donec:    make(chan struct{}),
	}
	// begin assuming leader is lost
	close(l.leaderc)
	go l.recvLoop()
	return l
}
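
// recvLoop repeatedly opens a watch on a sentinel key at a revision far in
// the future, so no real events are ever delivered. Because the context
// requires a leader, the created notification confirms a leader exists and
// the stream closing signals leader loss; retries are rate limited.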
func (l *leader) recvLoop() {
	defer close(l.donec)

	limiter := rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond)
	rev := int64(math.MaxInt64 - 2)
	for limiter.Wait(l.ctx) == nil {
		wch := l.w.Watch(l.ctx, lostLeaderKey, clientv3.WithRev(rev), clientv3.WithCreatedNotify())
		cresp, ok := <-wch
		if !ok {
			l.loseLeader()
			continue
		}
		if cresp.Err() != nil {
			l.loseLeader()
			if grpc.ErrorDesc(cresp.Err()) == grpc.ErrClientConnClosing.Error() {
				close(l.disconnc)
				return
			}
			continue
		}
		l.gotLeader()
		<-wch
		l.loseLeader()
	}
}
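
// loseLeader closes leaderc, if not already closed, to mark the leader as lost.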
func (l *leader) loseLeader() {
	l.mu.RLock()
	defer l.mu.RUnlock()
	select {
	case <-l.leaderc:
	default:
		close(l.leaderc)
	}
}

// gotLeader force-updates the leadership status to having a leader.
func (l *leader) gotLeader() {
	l.mu.Lock()
	defer l.mu.Unlock()
	select {
	case <-l.leaderc:
		l.leaderc = make(chan struct{})
	default:
	}
}

// disconnectNotify returns a channel closed when the client connection is closing.
func (l *leader) disconnectNotify() <-chan struct{} { return l.disconnc }

// stopNotify returns a channel closed once recvLoop exits.
func (l *leader) stopNotify() <-chan struct{} { return l.donec }

// lostNotify returns a channel that is closed if there has been
// a leader loss not yet followed by a leader reacquisition.
func (l *leader) lostNotify() <-chan struct{} {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.leaderc
}
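
A minimal usage sketch, assuming a caller in the same package; checkLeader and its error text are hypothetical illustrations, not etcd API (the snippet also assumes an "errors" import):

// checkLeader is a hypothetical guard for proxied requests: it fails fast
// while the leader is lost and passes once leadership is reacquired, since
// lostNotify then returns a fresh, open channel.
func checkLeader(l *leader) error {
	select {
	case <-l.lostNotify():
		return errors.New("proxy: leader lost") // hypothetical error value
	default:
		return nil
	}
}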