balancer_v1_wrapper.go 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334
  1. /*
  2. *
  3. * Copyright 2017 gRPC authors.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. *
  17. */
  18. package grpc
  19. import (
  20. "context"
  21. "sync"
  22. "google.golang.org/grpc/balancer"
  23. "google.golang.org/grpc/connectivity"
  24. "google.golang.org/grpc/grpclog"
  25. "google.golang.org/grpc/resolver"
  26. )
// balancerWrapperBuilder wraps a v1 Balancer so it can be used where a
// v2 balancer.Builder is expected.
type balancerWrapperBuilder struct {
	b Balancer // The v1 balancer.
}
  30. func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
  31. bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
  32. DialCreds: opts.DialCreds,
  33. Dialer: opts.Dialer,
  34. })
  35. _, pickfirst := bwb.b.(*pickFirst)
  36. bw := &balancerWrapper{
  37. balancer: bwb.b,
  38. pickfirst: pickfirst,
  39. cc: cc,
  40. targetAddr: opts.Target.Endpoint,
  41. startCh: make(chan struct{}),
  42. conns: make(map[resolver.Address]balancer.SubConn),
  43. connSt: make(map[balancer.SubConn]*scState),
  44. csEvltr: &balancer.ConnectivityStateEvaluator{},
  45. state: connectivity.Idle,
  46. }
  47. cc.UpdateBalancerState(connectivity.Idle, bw)
  48. go bw.lbWatcher()
  49. return bw
  50. }
// Name returns the name under which this wrapper builder is registered.
func (bwb *balancerWrapperBuilder) Name() string {
	return "wrapper"
}
// scState holds the v1-facing state tracked for a single SubConn.
type scState struct {
	addr Address // The v1 address type.
	s    connectivity.State
	// down is the teardown func returned by balancer.Up; it is invoked
	// when the SubConn transitions out of Ready.
	down func(error)
}
// balancerWrapper adapts a v1 Balancer to the v2 balancer.Balancer
// interface. It also serves as the picker (see Pick below).
type balancerWrapper struct {
	balancer  Balancer // The v1 balancer.
	pickfirst bool

	cc         balancer.ClientConn
	targetAddr string // Target without the scheme.

	mu     sync.Mutex
	conns  map[resolver.Address]balancer.SubConn // guarded by mu
	connSt map[balancer.SubConn]*scState         // guarded by mu
	// This channel is closed when handling the first resolver result.
	// lbWatcher blocks until this is closed, to avoid race between
	// - NewSubConn is created, cc wants to notify balancer of state changes;
	// - Build hasn't return, cc doesn't have access to balancer.
	startCh chan struct{}

	// To aggregate the connectivity state.
	csEvltr *balancer.ConnectivityStateEvaluator
	state   connectivity.State
}
// lbWatcher watches the Notify channel of the balancer and manages
// connections accordingly: it creates SubConns for newly notified
// addresses and removes SubConns whose addresses disappeared.
func (bw *balancerWrapper) lbWatcher() {
	// Block until the first resolver result has been handled (see
	// startCh doc on balancerWrapper) so Build has fully returned.
	<-bw.startCh
	notifyCh := bw.balancer.Notify()
	if notifyCh == nil {
		// There's no resolver in the balancer. Connect directly.
		a := resolver.Address{
			Addr: bw.targetAddr,
			Type: resolver.Backend,
		}
		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
		if err != nil {
			grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
		} else {
			bw.mu.Lock()
			bw.conns[a] = sc
			bw.connSt[sc] = &scState{
				addr: Address{Addr: bw.targetAddr},
				s:    connectivity.Idle,
			}
			bw.mu.Unlock()
			sc.Connect()
		}
		return
	}

	for addrs := range notifyCh {
		grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
		if bw.pickfirst {
			var (
				oldA  resolver.Address
				oldSC balancer.SubConn
			)
			// Fetch the (at most one) existing SubConn and its key.
			bw.mu.Lock()
			for oldA, oldSC = range bw.conns {
				break
			}
			bw.mu.Unlock()
			if len(addrs) <= 0 {
				if oldSC != nil {
					// Teardown old sc.
					bw.mu.Lock()
					delete(bw.conns, oldA)
					delete(bw.connSt, oldSC)
					bw.mu.Unlock()
					bw.cc.RemoveSubConn(oldSC)
				}
				continue
			}

			// Convert the v1 addresses to resolver addresses.
			var newAddrs []resolver.Address
			for _, a := range addrs {
				newAddr := resolver.Address{
					Addr:       a.Addr,
					Type:       resolver.Backend, // All addresses from balancer are all backends.
					ServerName: "",
					Metadata:   a.Metadata,
				}
				newAddrs = append(newAddrs, newAddr)
			}
			if oldSC == nil {
				// Create new sc.
				sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
				if err != nil {
					grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
				} else {
					bw.mu.Lock()
					// For pickfirst, there should be only one SubConn, so the
					// address doesn't matter. All states updating (up and down)
					// and picking should all happen on that only SubConn.
					bw.conns[resolver.Address{}] = sc
					bw.connSt[sc] = &scState{
						addr: addrs[0], // Use the first address.
						s:    connectivity.Idle,
					}
					bw.mu.Unlock()
					sc.Connect()
				}
			} else {
				// SubConn already exists; just point it at the new list.
				bw.mu.Lock()
				bw.connSt[oldSC].addr = addrs[0]
				bw.mu.Unlock()
				oldSC.UpdateAddresses(newAddrs)
			}
		} else {
			// Non-pickfirst: diff the new address set against the current
			// SubConns, then apply additions and removals.
			var (
				add []resolver.Address // Addresses need to setup connections.
				del []balancer.SubConn // Connections need to tear down.
			)
			resAddrs := make(map[resolver.Address]Address)
			for _, a := range addrs {
				resAddrs[resolver.Address{
					Addr:       a.Addr,
					Type:       resolver.Backend, // All addresses from balancer are all backends.
					ServerName: "",
					Metadata:   a.Metadata,
				}] = a
			}
			bw.mu.Lock()
			for a := range resAddrs {
				if _, ok := bw.conns[a]; !ok {
					add = append(add, a)
				}
			}
			for a, c := range bw.conns {
				if _, ok := resAddrs[a]; !ok {
					del = append(del, c)
					delete(bw.conns, a)
					// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
				}
			}
			bw.mu.Unlock()
			for _, a := range add {
				sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
				if err != nil {
					grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
				} else {
					bw.mu.Lock()
					bw.conns[a] = sc
					bw.connSt[sc] = &scState{
						addr: resAddrs[a],
						s:    connectivity.Idle,
					}
					bw.mu.Unlock()
					sc.Connect()
				}
			}
			for _, c := range del {
				bw.cc.RemoveSubConn(c)
			}
		}
	}
}
  208. func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
  209. bw.mu.Lock()
  210. defer bw.mu.Unlock()
  211. scSt, ok := bw.connSt[sc]
  212. if !ok {
  213. return
  214. }
  215. if s == connectivity.Idle {
  216. sc.Connect()
  217. }
  218. oldS := scSt.s
  219. scSt.s = s
  220. if oldS != connectivity.Ready && s == connectivity.Ready {
  221. scSt.down = bw.balancer.Up(scSt.addr)
  222. } else if oldS == connectivity.Ready && s != connectivity.Ready {
  223. if scSt.down != nil {
  224. scSt.down(errConnClosing)
  225. }
  226. }
  227. sa := bw.csEvltr.RecordTransition(oldS, s)
  228. if bw.state != sa {
  229. bw.state = sa
  230. }
  231. bw.cc.UpdateBalancerState(bw.state, bw)
  232. if s == connectivity.Shutdown {
  233. // Remove state for this sc.
  234. delete(bw.connSt, sc)
  235. }
  236. }
// HandleResolvedAddrs implements balancer.Balancer. Its only effect is
// to unblock lbWatcher on the first call; the addresses are ignored.
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	// Close startCh exactly once; subsequent calls fall through the
	// first select case.
	select {
	case <-bw.startCh:
	default:
		close(bw.startCh)
	}
	// There should be a resolver inside the balancer.
	// All updates here, if any, are ignored.
}
// Close closes the wrapped v1 balancer. It also closes startCh (if not
// already closed) so lbWatcher is not left blocked forever when Close
// happens before the first resolver result.
func (bw *balancerWrapper) Close() {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	// Close startCh exactly once (same idiom as HandleResolvedAddrs).
	select {
	case <-bw.startCh:
	default:
		close(bw.startCh)
	}
	bw.balancer.Close()
}
  258. // The picker is the balancerWrapper itself.
  259. // It either blocks or returns error, consistent with v1 balancer Get().
  260. func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
  261. failfast := true // Default failfast is true.
  262. if ss, ok := rpcInfoFromContext(ctx); ok {
  263. failfast = ss.failfast
  264. }
  265. a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
  266. if err != nil {
  267. return nil, nil, err
  268. }
  269. if p != nil {
  270. done = func(balancer.DoneInfo) { p() }
  271. defer func() {
  272. if err != nil {
  273. p()
  274. }
  275. }()
  276. }
  277. bw.mu.Lock()
  278. defer bw.mu.Unlock()
  279. if bw.pickfirst {
  280. // Get the first sc in conns.
  281. for _, sc := range bw.conns {
  282. return sc, done, nil
  283. }
  284. return nil, nil, balancer.ErrNoSubConnAvailable
  285. }
  286. sc, ok1 := bw.conns[resolver.Address{
  287. Addr: a.Addr,
  288. Type: resolver.Backend,
  289. ServerName: "",
  290. Metadata: a.Metadata,
  291. }]
  292. s, ok2 := bw.connSt[sc]
  293. if !ok1 || !ok2 {
  294. // This can only happen due to a race where Get() returned an address
  295. // that was subsequently removed by Notify. In this case we should
  296. // retry always.
  297. return nil, nil, balancer.ErrNoSubConnAvailable
  298. }
  299. switch s.s {
  300. case connectivity.Ready, connectivity.Idle:
  301. return sc, done, nil
  302. case connectivity.Shutdown, connectivity.TransientFailure:
  303. // If the returned sc has been shut down or is in transient failure,
  304. // return error, and this RPC will fail or wait for another picker (if
  305. // non-failfast).
  306. return nil, nil, balancer.ErrTransientFailure
  307. default:
  308. // For other states (connecting or unknown), the v1 balancer would
  309. // traditionally wait until ready and then issue the RPC. Returning
  310. // ErrNoSubConnAvailable will be a slight improvement in that it will
  311. // allow the balancer to choose another address in case others are
  312. // connected.
  313. return nil, nil, balancer.ErrNoSubConnAvailable
  314. }
  315. }