@@ -29,11 +29,40 @@ import (
 // This error is returned only when opts.BlockingWait is true.
 var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
 
+type notifyMsg int
+
+const (
+	notifyReset notifyMsg = iota
+	notifyNext
+)
+
+type balancer interface {
+	grpc.Balancer
+	ConnectNotify() <-chan struct{}
+
+	endpoint(host string) string
+	endpoints() []string
+
+	// up is Up but includes whether the balancer will use the connection.
+	up(addr grpc.Address) (func(error), bool)
+
+	// updateAddrs changes the balancer's endpoints.
+	updateAddrs(endpoints ...string)
+	// ready returns a channel that closes when the balancer first connects.
+	ready() <-chan struct{}
+	// next forces the balancer to switch endpoints.
+	next()
+}
+
 // simpleBalancer does the bare minimum to expose multiple eps
 // to the grpc reconnection code path
 type simpleBalancer struct {
-	// addrs are the client's endpoints for grpc
+	// addrs are the client's endpoint addresses for grpc
 	addrs []grpc.Address
+
+	// eps holds the raw endpoints from the client
+	eps []string
+
 	// notifyCh notifies grpc of the set of addresses for connecting
 	notifyCh chan []grpc.Address
 
@@ -57,7 +86,7 @@ type simpleBalancer struct {
 	donec chan struct{}
 
 	// updateAddrsC notifies updateNotifyLoop to update addrs.
-	updateAddrsC chan struct{}
+	updateAddrsC chan notifyMsg
 
 	// grpc issues TLS cert checks using the string passed into dial so
 	// that string must be the host. To recover the full scheme://host URL,
@@ -72,20 +101,18 @@ type simpleBalancer struct {
 }
 
 func newSimpleBalancer(eps []string) *simpleBalancer {
-	notifyCh := make(chan []grpc.Address, 1)
-	addrs := make([]grpc.Address, len(eps))
-	for i := range eps {
-		addrs[i].Addr = getHost(eps[i])
-	}
+	notifyCh := make(chan []grpc.Address)
+	addrs := eps2addrs(eps)
 	sb := &simpleBalancer{
 		addrs:        addrs,
+		eps:          eps,
 		notifyCh:     notifyCh,
 		readyc:       make(chan struct{}),
 		upc:          make(chan struct{}),
 		stopc:        make(chan struct{}),
 		downc:        make(chan struct{}),
 		donec:        make(chan struct{}),
-		updateAddrsC: make(chan struct{}, 1),
+		updateAddrsC: make(chan notifyMsg),
 		host2ep:      getHost2ep(eps),
 	}
 	close(sb.downc)
@@ -101,12 +128,20 @@ func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
 	return b.upc
 }
 
-func (b *simpleBalancer) getEndpoint(host string) string {
+func (b *simpleBalancer) ready() <-chan struct{} { return b.readyc }
+
+func (b *simpleBalancer) endpoint(host string) string {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	return b.host2ep[host]
 }
 
+func (b *simpleBalancer) endpoints() []string {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.eps
+}
+
 func getHost2ep(eps []string) map[string]string {
 	hm := make(map[string]string, len(eps))
 	for i := range eps {
@@ -116,7 +151,7 @@ func getHost2ep(eps []string) map[string]string {
 	return hm
 }
 
-func (b *simpleBalancer) updateAddrs(eps []string) {
+func (b *simpleBalancer) updateAddrs(eps ...string) {
 	np := getHost2ep(eps)
 
 	b.mu.Lock()
@@ -135,27 +170,37 @@ func (b *simpleBalancer) updateAddrs(eps []string) {
 	}
 
 	b.host2ep = np
-
-	addrs := make([]grpc.Address, 0, len(eps))
-	for i := range eps {
-		addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
-	}
-	b.addrs = addrs
+	b.addrs, b.eps = eps2addrs(eps), eps
 
 	// updating notifyCh can trigger new connections,
 	// only update addrs if all connections are down
 	// or addrs does not include pinAddr.
-	update := !hasAddr(addrs, b.pinAddr)
+	update := !hasAddr(b.addrs, b.pinAddr)
 	b.mu.Unlock()
 
 	if update {
 		select {
-		case b.updateAddrsC <- struct{}{}:
+		case b.updateAddrsC <- notifyReset:
 		case <-b.stopc:
 		}
 	}
 }
 
+func (b *simpleBalancer) next() {
+	b.mu.RLock()
+	downc := b.downc
+	b.mu.RUnlock()
+	select {
+	case b.updateAddrsC <- notifyNext:
+	case <-b.stopc:
+	}
+	// wait until disconnect so new RPCs are not issued on old connection
+	select {
+	case <-downc:
+	case <-b.stopc:
+	}
+}
+
 func hasAddr(addrs []grpc.Address, targetAddr string) bool {
 	for _, addr := range addrs {
 		if targetAddr == addr.Addr {
@@ -192,11 +237,11 @@ func (b *simpleBalancer) updateNotifyLoop() {
 			default:
 			}
 		case downc == nil:
-			b.notifyAddrs()
+			b.notifyAddrs(notifyReset)
 			select {
 			case <-upc:
-			case <-b.updateAddrsC:
-				b.notifyAddrs()
+			case msg := <-b.updateAddrsC:
+				b.notifyAddrs(msg)
 			case <-b.stopc:
 				return
 			}
@@ -210,16 +255,24 @@ func (b *simpleBalancer) updateNotifyLoop() {
 			}
 			select {
 			case <-downc:
-			case <-b.updateAddrsC:
+				b.notifyAddrs(notifyReset)
+			case msg := <-b.updateAddrsC:
+				b.notifyAddrs(msg)
 			case <-b.stopc:
 				return
 			}
-			b.notifyAddrs()
 		}
 	}
 }
 
-func (b *simpleBalancer) notifyAddrs() {
+func (b *simpleBalancer) notifyAddrs(msg notifyMsg) {
+	if msg == notifyNext {
+		select {
+		case b.notifyCh <- []grpc.Address{}:
+		case <-b.stopc:
+			return
+		}
+	}
 	b.mu.RLock()
 	addrs := b.addrs
 	b.mu.RUnlock()
@@ -230,6 +283,11 @@ func (b *simpleBalancer) notifyAddrs() {
 }
 
 func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
+	f, _ := b.up(addr)
+	return f
+}
+
+func (b *simpleBalancer) up(addr grpc.Address) (func(error), bool) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
@@ -237,15 +295,15 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
 	// to "fix" it up at application layer. Otherwise, will panic
 	// if b.upc is already closed.
 	if b.closed {
-		return func(err error) {}
+		return func(err error) {}, false
 	}
 	// gRPC might call Up on a stale address.
 	// Prevent updating pinAddr with a stale address.
 	if !hasAddr(b.addrs, addr.Addr) {
-		return func(err error) {}
+		return func(err error) {}, false
 	}
 	if b.pinAddr != "" {
-		return func(err error) {}
+		return func(err error) {}, false
 	}
 	// notify waiting Get()s and pin first connected address
 	close(b.upc)
@@ -259,7 +317,7 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
 		close(b.downc)
 		b.pinAddr = ""
 		b.mu.Unlock()
-	}
+	}, true
 }
 
 func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
@@ -354,3 +412,11 @@ func getHost(ep string) string {
 	}
 	return url.Host
 }
+
+func eps2addrs(eps []string) []grpc.Address {
+	addrs := make([]grpc.Address, len(eps))
+	for i := range eps {
+		addrs[i].Addr = getHost(eps[i])
+	}
+	return addrs
+}