vendor: Bump to grpc v1.12.2

Joe Betz 7 years ago
parent commit 6572d605ad
37 changed files with 3881 additions and 1468 deletions
  1. 3 2
      Gopkg.lock
  2. 10 3
      vendor/google.golang.org/grpc/balancer.go
  3. 9 4
      vendor/google.golang.org/grpc/balancer/balancer.go
  4. 0 1
      vendor/google.golang.org/grpc/balancer/base/balancer.go
  5. 1 1
      vendor/google.golang.org/grpc/balancer_conn_wrappers.go
  6. 0 3
      vendor/google.golang.org/grpc/balancer_v1_wrapper.go
  7. 573 0
      vendor/google.golang.org/grpc/channelz/funcs.go
  8. 418 0
      vendor/google.golang.org/grpc/channelz/types.go
  9. 202 14
      vendor/google.golang.org/grpc/clientconn.go
  10. 1 1
      vendor/google.golang.org/grpc/encoding/encoding.go
  11. 37 0
      vendor/google.golang.org/grpc/envconfig.go
  12. 4 5
      vendor/google.golang.org/grpc/grpclb.go
  13. 297 113
      vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
  14. 24 12
      vendor/google.golang.org/grpc/grpclb_remote_balancer.go
  15. 124 0
      vendor/google.golang.org/grpc/grpclb_util.go
  16. 3 0
      vendor/google.golang.org/grpc/grpclog/grpclog.go
  17. 2 0
      vendor/google.golang.org/grpc/grpclog/logger.go
  18. 64 27
      vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
  19. 1 1
      vendor/google.golang.org/grpc/health/health.go
  20. 27 1
      vendor/google.golang.org/grpc/metadata/metadata.go
  21. 3 3
      vendor/google.golang.org/grpc/naming/dns_resolver.go
  22. 11 1
      vendor/google.golang.org/grpc/naming/naming.go
  23. 174 1
      vendor/google.golang.org/grpc/picker_wrapper.go
  24. 23 21
      vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
  25. 8 2
      vendor/google.golang.org/grpc/resolver/resolver.go
  26. 2 2
      vendor/google.golang.org/grpc/resolver_conn_wrapper.go
  27. 21 11
      vendor/google.golang.org/grpc/rpc_util.go
  28. 133 18
      vendor/google.golang.org/grpc/server.go
  29. 11 4
      vendor/google.golang.org/grpc/service_config.go
  30. 30 1
      vendor/google.golang.org/grpc/stream.go
  31. 769 0
      vendor/google.golang.org/grpc/transport/controlbuf.go
  32. 87 185
      vendor/google.golang.org/grpc/transport/flowcontrol.go
  33. 6 2
      vendor/google.golang.org/grpc/transport/handler_server.go
  34. 403 431
      vendor/google.golang.org/grpc/transport/http2_client.go
  35. 235 418
      vendor/google.golang.org/grpc/transport/http2_server.go
  36. 64 16
      vendor/google.golang.org/grpc/transport/http_util.go
  37. 101 164
      vendor/google.golang.org/grpc/transport/transport.go

+ 3 - 2
Gopkg.lock

@@ -343,6 +343,7 @@
     "balancer",
     "balancer",
     "balancer/base",
     "balancer/base",
     "balancer/roundrobin",
     "balancer/roundrobin",
+    "channelz",
     "codes",
     "codes",
     "connectivity",
     "connectivity",
     "credentials",
     "credentials",
@@ -365,8 +366,8 @@
     "tap",
     "tap",
     "transport"
     "transport"
   ]
   ]
-  revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
-  version = "v1.11.3"
+  revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b"
+  version = "v1.12.2"
 
 
 [[projects]]
 [[projects]]
   name = "gopkg.in/cheggaaa/pb.v1"
   name = "gopkg.in/cheggaaa/pb.v1"

+ 10 - 3
vendor/google.golang.org/grpc/balancer.go

@@ -32,7 +32,8 @@ import (
 )
 
 // Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Deprecated: please use package balancer.
 type Address struct {
 	// Addr is the server address on which a connection will be established.
 	Addr string
@@ -42,6 +43,8 @@ type Address struct {
 }
 
 // BalancerConfig specifies the configurations for Balancer.
+//
+// Deprecated: please use package balancer.
 type BalancerConfig struct {
 	// DialCreds is the transport credential the Balancer implementation can
 	// use to dial to a remote load balancer server. The Balancer implementations
@@ -54,7 +57,8 @@ type BalancerConfig struct {
 }
 
 // BalancerGetOptions configures a Get call.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Deprecated: please use package balancer.
 type BalancerGetOptions struct {
 	// BlockingWait specifies whether Get should block when there is no
 	// connected address.
@@ -62,7 +66,8 @@ type BalancerGetOptions struct {
 }
 
 // Balancer chooses network addresses for RPCs.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Deprecated: please use package balancer.
 type Balancer interface {
 	// Start does the initialization work to bootstrap a Balancer. For example,
 	// this function may start the name resolution and watch the updates. It will
@@ -135,6 +140,8 @@ func downErrorf(timeout, temporary bool, format string, a ...interface{}) downEr
 
 // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
 // the name resolution updates and updates the addresses available correspondingly.
+//
+// Deprecated: please use package balancer/roundrobin.
 func RoundRobin(r naming.Resolver) Balancer {
 	return &roundRobin{r: r}
 }

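The hunks above deprecate the pre-balancer-package API (Address, BalancerConfig, Balancer, RoundRobin) and point to package balancer and balancer/roundrobin instead. As a rough migration sketch, not part of this commit and using a placeholder target address and insecure transport purely for brevity, a client on this grpc version could select the registered round-robin balancer by name:

    package main

    import (
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/balancer/roundrobin" // registers the "round_robin" balancer in init()
    )

    func main() {
    	// WithBalancerName selects a balancer registered via balancer.Register,
    	// replacing the deprecated WithBalancer(grpc.RoundRobin(r)) pattern.
    	conn, err := grpc.Dial(
    		"localhost:50051", // placeholder target
    		grpc.WithInsecure(),
    		grpc.WithBalancerName(roundrobin.Name),
    	)
    	if err != nil {
    		log.Fatalf("dial: %v", err)
    	}
    	defer conn.Close()
    }
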
+ 9 - 4
vendor/google.golang.org/grpc/balancer/balancer.go

@@ -36,9 +36,12 @@ var (
 	m = make(map[string]Builder)
 )
 
-// Register registers the balancer builder to the balancer map.
-// b.Name (lowercased) will be used as the name registered with
-// this builder.
+// Register registers the balancer builder to the balancer map. b.Name
+// (lowercased) will be used as the name registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Balancers are
+// registered with the same name, the one registered last will take effect.
 func Register(b Builder) {
 	m[strings.ToLower(b.Name())] = b
 }
@@ -126,6 +129,8 @@ type BuildOptions struct {
 	// to a remote load balancer server. The Balancer implementations
 	// can ignore this if it doesn't need to talk to remote balancer.
 	Dialer func(context.Context, string) (net.Conn, error)
+	// ChannelzParentID is the entity parent's channelz unique identification number.
+	ChannelzParentID int64
 }
 
 // Builder creates a balancer.
@@ -160,7 +165,7 @@ var (
 )
 
 // Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot everytime its
+// Balancer is expected to generate a new picker from its snapshot every time its
 // internal state has changed.
 //
 // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().

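The expanded Register documentation above pins down the registration contract: init-time only, not thread-safe, lowercased name, last registration wins. A minimal sketch of a custom builder following that contract is below; the builder type, its "example_lb" name, and the stubbed-out Build are all invented for illustration:

    package examplelb

    import "google.golang.org/grpc/balancer"

    type exampleBuilder struct{}

    // Build would normally construct and return a working balancer.Balancer;
    // it is left as a stub here because only the registration pattern matters.
    func (exampleBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
    	return nil
    }

    func (exampleBuilder) Name() string { return "example_lb" }

    func init() {
    	// Called from init() only, per the NOTE added in this commit; the name is
    	// lowercased by Register, and a later Register with the same name wins.
    	balancer.Register(exampleBuilder{})
    }
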
+ 0 - 1
vendor/google.golang.org/grpc/balancer/base/balancer.go

@@ -146,7 +146,6 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv
 	}
 
 	b.cc.UpdateBalancerState(b.state, b.picker)
-	return
 }
 
 // Close is a nop because base balancer doesn't have internal state to clean up,

+ 1 - 1
vendor/google.golang.org/grpc/balancer_conn_wrappers.go

@@ -115,7 +115,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
 	return ccb
 }
 
-// watcher balancer functions sequencially, so the balancer can be implemeneted
+// watcher balancer functions sequentially, so the balancer can be implemented
 // lock-free.
 func (ccb *ccBalancerWrapper) watcher() {
 	for {

+ 0 - 3
vendor/google.golang.org/grpc/balancer_v1_wrapper.go

@@ -257,7 +257,6 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
 		// Remove state for this sc.
 		delete(bw.connSt, sc)
 	}
-	return
 }
 
 func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
@@ -270,7 +269,6 @@ func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
 	}
 	// There should be a resolver inside the balancer.
 	// All updates here, if any, are ignored.
-	return
 }
 
 func (bw *balancerWrapper) Close() {
@@ -282,7 +280,6 @@ func (bw *balancerWrapper) Close() {
 		close(bw.startCh)
 	}
 	bw.balancer.Close()
-	return
 }
 
 // The picker is the balancerWrapper itself.

+ 573 - 0
vendor/google.golang.org/grpc/channelz/funcs.go

@@ -0,0 +1,573 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"sort"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage = 50
+	curState     int32
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		NewChannelzStorage()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// dbWrapper wraps around a reference to internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorage initializes channelz data storage and id generator.
+//
+// Note: This function is exported for testing purpose only. User should not call
+// it in most cases.
+func NewChannelzStorage() {
+	db.set(&channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	})
+	idGen.reset()
+}
+
+// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
+// boolean indicating whether there's more top channels to be queried for.
+//
+// The arg id specifies that only top channel with id at or above it will be included
+// in the result. The returned slice is up to a length of EntryPerPage, and is
+// sorted in ascending id order.
+func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id)
+}
+
+// GetServers returns a slice of server's ServerMetric, along with a
+// boolean indicating whether there's more servers to be queried for.
+//
+// The arg id specifies that only server with id at or above it will be included
+// in the result. The returned slice is up to a length of EntryPerPage, and is
+// sorted in ascending id order.
+func GetServers(id int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id)
+}
+
+// GetServerSockets returns a slice of server's (identified by id) normal socket's
+// SocketMetric, along with a boolean indicating whether there's more sockets to
+// be queried for.
+//
+// The arg startID specifies that only sockets with id at or above it will be
+// included in the result. The returned slice is up to a length of EntryPerPage,
+// and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+	return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+	return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketInternalMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+	return db.get().GetSocket(id)
+}
+
+// RegisterChannel registers the given channel c in channelz database with ref
+// as its reference name, and add it to the child list of its parent (identified
+// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
+// assigned to this channel.
+func RegisterChannel(c Channel, pid int64, ref string) int64 {
+	id := idGen.genID()
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         pid,
+	}
+	if pid == 0 {
+		db.get().addChannel(id, cn, true, pid, ref)
+	} else {
+		db.get().addChannel(id, cn, false, pid, ref)
+	}
+	return id
+}
+
+// RegisterSubChannel registers the given channel c in channelz database with ref
+// as its reference name, and add it to the child list of its parent (identified
+// by pid). It returns the unique channelz tracking id assigned to this subchannel.
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a SubChannel's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid,
+	}
+	db.get().addSubChannel(id, sc, pid, ref)
+	return id
+}
+
+// RegisterServer registers the given server s in channelz database. It returns
+// the unique channelz tracking id assigned to this server.
+func RegisterServer(s Server, ref string) int64 {
+	id := idGen.genID()
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return id
+}
+
+// RegisterListenSocket registers the given listen socket s in channelz database
+// with ref as its reference name, and add it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this listen socket.
+func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a ListenSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addListenSocket(id, ls, pid, ref)
+	return id
+}
+
+// RegisterNormalSocket registers the given normal socket s in channelz database
+// with ref as its reference name, and add it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this normal socket.
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a NormalSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addNormalSocket(id, ns, pid, ref)
+	return id
+}
+
+// RemoveEntry removes the entry whose unique channelz tracking id is id from
+// the channelz database.
+func RemoveEntry(id int64) {
+	db.get().removeEntry(id)
+}
+
+// channelMap is the storage data structure for channelz.
+// Methods of channelMap can be divided into two categories with respect to locking.
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+// A method of the second type must always be called inside a method of the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+	c.mu.Lock()
+	cn.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+	c.mu.Lock()
+	sc.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not indeed delete the
+// entry, if it has to wait on the deletion of its children, or may lead to a chain
+// of entry deletion. For example, deleting the last socket of a gracefully shutting
+// down server will lead to the server being also deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	c.findEntry(id).triggerDelete()
+	c.mu.Unlock()
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	var v entry
+	var ok bool
+	if v, ok = c.channels[id]; ok {
+		return v
+	}
+	if v, ok = c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok = c.servers[id]; ok {
+		return v
+	}
+	if v, ok = c.listenSockets[id]; ok {
+		return v
+	}
+	if v, ok = c.normalSockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller
+// deleteEntry simply deletes an entry from the channelMap. Before calling this
+// method, caller must check this entry is ready to be deleted, i.e removeEntry()
+// has been called on it, and no children still exist.
+// Conditionals are ordered by the expected frequency of deletion of each entity
+// type, in order to optimize performance.
+func (c *channelMap) deleteEntry(id int64) {
+	var ok bool
+	if _, ok = c.normalSockets[id]; ok {
+		delete(c.normalSockets, id)
+		return
+	}
+	if _, ok = c.subChannels[id]; ok {
+		delete(c.subChannels, id)
+		return
+	}
+	if _, ok = c.channels[id]; ok {
+		delete(c.channels, id)
+		delete(c.topLevelChannels, id)
+		return
+	}
+	if _, ok = c.listenSockets[id]; ok {
+		delete(c.listenSockets, id)
+		return
+	}
+	if _, ok = c.servers[id]; ok {
+		delete(c.servers, id)
+		return
+	}
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int           { return len(s) }
+func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+	n := make(map[int64]string)
+	for k, v := range m {
+		n[k] = v
+	}
+	return n
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
+	c.mu.RLock()
+	l := len(c.topLevelChannels)
+	ids := make([]int64, 0, l)
+	cns := make([]*channel, 0, min(l, EntryPerPage))
+
+	for k := range c.topLevelChannels {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := 0
+	var end bool
+	var t []*ChannelMetric
+	for i, v := range ids[idx:] {
+		if count == EntryPerPage {
+			break
+		}
+		if cn, ok := c.channels[v]; ok {
+			cns = append(cns, cn)
+			t = append(t, &ChannelMetric{
+				NestedChans: copyMap(cn.nestedChans),
+				SubChans:    copyMap(cn.subChans),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, cn := range cns {
+		t[i].ChannelData = cn.c.ChannelzMetric()
+		t[i].ID = cn.id
+		t[i].RefName = cn.refName
+	}
+	return t, end
+}
+
+func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
+	c.mu.RLock()
+	l := len(c.servers)
+	ids := make([]int64, 0, l)
+	ss := make([]*server, 0, min(l, EntryPerPage))
+	for k := range c.servers {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := 0
+	var end bool
+	var s []*ServerMetric
+	for i, v := range ids[idx:] {
+		if count == EntryPerPage {
+			break
+		}
+		if svr, ok := c.servers[v]; ok {
+			ss = append(ss, svr)
+			s = append(s, &ServerMetric{
+				ListenSockets: copyMap(svr.listenSockets),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, svr := range ss {
+		s[i].ServerData = svr.s.ChannelzMetric()
+		s[i].ID = svr.id
+		s[i].RefName = svr.refName
+	}
+	return s, end
+}
+
+func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
+	var svr *server
+	var ok bool
+	c.mu.RLock()
+	if svr, ok = c.servers[id]; !ok {
+		// server with id doesn't exist.
+		c.mu.RUnlock()
+		return nil, true
+	}
+	svrskts := svr.sockets
+	l := len(svrskts)
+	ids := make([]int64, 0, l)
+	sks := make([]*normalSocket, 0, min(l, EntryPerPage))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort((int64Slice(ids)))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := 0
+	var end bool
+	for i, v := range ids[idx:] {
+		if count == EntryPerPage {
+			break
+		}
+		if ns, ok := c.normalSockets[v]; ok {
+			sks = append(sks, ns)
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+	var s []*SocketMetric
+	for _, ns := range sks {
+		sm := &SocketMetric{}
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		s = append(s, sm)
+	}
+	return s, end
+}
+
+func (c *channelMap) GetChannel(id int64) *ChannelMetric {
+	cm := &ChannelMetric{}
+	var cn *channel
+	var ok bool
+	c.mu.RLock()
+	if cn, ok = c.channels[id]; !ok {
+		// channel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.NestedChans = copyMap(cn.nestedChans)
+	cm.SubChans = copyMap(cn.subChans)
+	c.mu.RUnlock()
+	cm.ChannelData = cn.c.ChannelzMetric()
+	cm.ID = cn.id
+	cm.RefName = cn.refName
+	return cm
+}
+
+func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
+	cm := &SubChannelMetric{}
+	var sc *subChannel
+	var ok bool
+	c.mu.RLock()
+	if sc, ok = c.subChannels[id]; !ok {
+		// subchannel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.Sockets = copyMap(sc.sockets)
+	c.mu.RUnlock()
+	cm.ChannelData = sc.c.ChannelzMetric()
+	cm.ID = sc.id
+	cm.RefName = sc.refName
+	return cm
+}
+
+func (c *channelMap) GetSocket(id int64) *SocketMetric {
+	sm := &SocketMetric{}
+	c.mu.RLock()
+	if ls, ok := c.listenSockets[id]; ok {
+		c.mu.RUnlock()
+		sm.SocketData = ls.s.ChannelzMetric()
+		sm.ID = ls.id
+		sm.RefName = ls.refName
+		return sm
+	}
+	if ns, ok := c.normalSockets[id]; ok {
+		c.mu.RUnlock()
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		return sm
+	}
+	c.mu.RUnlock()
+	return nil
+}
+
+type idGenerator struct {
+	id int64
+}
+
+func (i *idGenerator) reset() {
+	atomic.StoreInt64(&i.id, 0)
+}
+
+func (i *idGenerator) genID() int64 {
+	return atomic.AddInt64(&i.id, 1)
+}

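As a usage sketch for the query API added above (not part of this commit): GetTopChannels pages through top-level channels in ascending id order, at most EntryPerPage per call, and, as implemented here, the returned boolean reports that the listing has reached its end.

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/channelz"
    )

    func main() {
    	channelz.TurnOn() // storage must be initialized before any query

    	var id int64 // start the listing from the lowest possible id
    	for {
    		metrics, end := channelz.GetTopChannels(id)
    		for _, m := range metrics {
    			fmt.Printf("top channel %d (%q): state=%v\n", m.ID, m.RefName, m.ChannelData.State)
    			id = m.ID + 1 // the next page resumes just past the last id seen
    		}
    		if end {
    			return
    		}
    	}
    }
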
+ 418 - 0
vendor/google.golang.org/grpc/channelz/types.go

@@ -0,0 +1,418 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id to child list
+	addChild(id int64, e entry)
+	// deleteChild deletes a child with channelz id to be id from child list
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from channelz database. However, if child
+	// list is not empty, then deletion from the database is on hold until the last
+	// child is deleted from database.
+	triggerDelete()
+	// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
+	// list is now empty. If both conditions are met, then delete self from database.
+	deleteSelfIfReady()
+}
+
+// dummyEntry is a fake entry to handle entry not found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under race condition.
+	// For example, there could be a race between ClientConn.Close() info being propagated
+	// to addrConn and http2Client. ClientConn.Close() cancels the context and causes
+	// http2Client to error out. The error info is then caught by the transport monitor
+	// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
+	// the addrConn will create a new transport. And when registering the new transport in
+	// channelz, its parent addrConn could have already been torn down and deleted
+	// from channelz tracking, and thus reach the code here.
+	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metric reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note current grpc implementation doesn't allow channel having sockets directly,
+	// therefore, this field is unused.
+	Sockets map[int64]string
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metric reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note current grpc implementation doesn't allow subchannel to have nested channels
+	// as children, therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note current grpc implementation doesn't allow subchannel to have subchannels
+	// as children, therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+}
+
+// ChannelInternalMetric defines the struct that the implementor of Channel interface
+// should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to.  May be absent
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+	//TODO: trace
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+}
+
+func (c *channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *subChannel:
+		c.subChans[id] = v.refName
+	case *channel:
+		c.nestedChans[id] = v.refName
+	default:
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) deleteSelfIfReady() {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return
+	}
+	c.cm.deleteEntry(c.id)
+	// not top channel
+	if c.pid != 0 {
+		c.cm.findEntry(c.pid).deleteChild(c.id)
+	}
+}
+
+type subChannel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	sockets     map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+}
+
+func (sc *subChannel) addChild(id int64, e entry) {
+	if v, ok := e.(*normalSocket); ok {
+		sc.sockets[id] = v.refName
+	} else {
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+	}
+}
+
+func (sc *subChannel) deleteChild(id int64) {
+	delete(sc.sockets, id)
+	sc.deleteSelfIfReady()
+}
+
+func (sc *subChannel) triggerDelete() {
+	sc.closeCalled = true
+	sc.deleteSelfIfReady()
+}
+
+func (sc *subChannel) deleteSelfIfReady() {
+	if !sc.closeCalled || len(sc.sockets) != 0 {
+		return
+	}
+	sc.cm.deleteEntry(sc.id)
+	sc.cm.findEntry(sc.pid).deleteChild(sc.id)
+}
+
+// SocketMetric defines the info channelz provides for a specific Socket, which
+// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
+type SocketMetric struct {
+	// ID is the channelz id of this socket.
+	ID int64
+	// RefName is the human readable reference string of this socket.
+	RefName string
+	// SocketData contains socket internal metric reported by the socket through
+	// ChannelzMetric().
+	SocketData *SocketInternalMetric
+}
+
+// SocketInternalMetric defines the struct that the implementor of Socket interface
+// should return from ChannelzMetric().
+type SocketInternalMetric struct {
+	// The number of streams that have been started.
+	StreamsStarted int64
+	// The number of streams that have ended successfully:
+	// On client side, receiving frame with eos bit set.
+	// On server side, sending frame with eos bit set.
+	StreamsSucceeded int64
+	// The number of streams that have ended unsuccessfully:
+	// On client side, termination without receiving frame with eos bit set.
+	// On server side, termination without sending frame with eos bit set.
+	StreamsFailed int64
+	// The number of messages successfully sent on this socket.
+	MessagesSent     int64
+	MessagesReceived int64
+	// The number of keep alives sent.  This is typically implemented with HTTP/2
+	// ping messages.
+	KeepAlivesSent int64
+	// The last time a stream was created by this endpoint.  Usually unset for
+	// servers.
+	LastLocalStreamCreatedTimestamp time.Time
+	// The last time a stream was created by the remote endpoint.  Usually unset
+	// for clients.
+	LastRemoteStreamCreatedTimestamp time.Time
+	// The last time a message was sent by this endpoint.
+	LastMessageSentTimestamp time.Time
+	// The last time a message was received by this endpoint.
+	LastMessageReceivedTimestamp time.Time
+	// The amount of window, granted to the local endpoint by the remote endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	LocalFlowControlWindow int64
+	// The amount of window, granted to the remote endpoint by the local endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	RemoteFlowControlWindow int64
+	// The locally bound address.
+	LocalAddr net.Addr
+	// The remote bound address.  May be absent.
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different than
+	// the original target name.
+	RemoteName string
+	//TODO: socket options
+	//TODO: Security
+}
+
+// Socket is the interface that should be satisfied in order to be tracked by
+// channelz as Socket.
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+	//TODO: trace
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface {
+	ChannelzMetric() *ServerInternalMetric
+}
+
+type server struct {
+	refName       string
+	s             Server
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	id            int64
+	cm            *channelMap
+}
+
+func (s *server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *normalSocket:
+		s.sockets[id] = v.refName
+	case *listenSocket:
+		s.listenSockets[id] = v.refName
+	default:
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.id)
+}

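To make the interfaces above concrete, anything that should appear in channelz implements ChannelzMetric() and registers itself through the funcs.go API. The toy channel below is illustrative only; its metric values and the "fake-channel" reference name are invented:

    package main

    import (
    	"time"

    	"google.golang.org/grpc/channelz"
    	"google.golang.org/grpc/connectivity"
    )

    // fakeChannel satisfies channelz.Channel by reporting a metric snapshot.
    type fakeChannel struct{ calls int64 }

    func (f *fakeChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
    	return &channelz.ChannelInternalMetric{
    		State:                    connectivity.Ready,
    		Target:                   "example.invalid:443", // placeholder target
    		CallsStarted:             f.calls,
    		CallsSucceeded:           f.calls,
    		LastCallStartedTimestamp: time.Now(),
    	}
    }

    func main() {
    	channelz.TurnOn()
    	// pid == 0 registers it as a top-level channel; RemoveEntry undoes it.
    	id := channelz.RegisterChannel(&fakeChannel{calls: 1}, 0, "fake-channel")
    	defer channelz.RemoveEntry(id)
    }
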
+ 202 - 14
vendor/google.golang.org/grpc/clientconn.go

@@ -32,6 +32,7 @@ import (
 	"golang.org/x/net/trace"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer"
 	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
 	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials"
@@ -107,8 +108,10 @@ type dialOptions struct {
 	// balancer, and also by WithBalancerName dial option.
 	// balancer, and also by WithBalancerName dial option.
 	balancerBuilder balancer.Builder
 	balancerBuilder balancer.Builder
 	// This is to support grpclb.
 	// This is to support grpclb.
-	resolverBuilder  resolver.Builder
-	waitForHandshake bool
+	resolverBuilder      resolver.Builder
+	waitForHandshake     bool
+	channelzParentID     int64
+	disableServiceConfig bool
 }
 }
 
 
 const (
 const (
@@ -116,6 +119,12 @@ const (
 	defaultClientMaxSendMessageSize    = math.MaxInt32
 	defaultClientMaxSendMessageSize    = math.MaxInt32
 )
 )
 
 
+// RegisterChannelz turns on channelz service.
+// This is an EXPERIMENTAL API.
+func RegisterChannelz() {
+	channelz.TurnOn()
+}
+
 // DialOption configures how we set up the connection.
 type DialOption func(*dialOptions)
 
@@ -160,7 +169,9 @@ func WithInitialConnWindowSize(s int32) DialOption {
 	}
 }
 
-// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive.
+//
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
 func WithMaxMsgSize(s int) DialOption {
 	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
 }
@@ -243,7 +254,8 @@ func withResolverBuilder(b resolver.Builder) DialOption {
 }
 
 // WithServiceConfig returns a DialOption which has a channel to read the service configuration.
-// DEPRECATED: service config should be received through name resolver, as specified here.
+//
+// Deprecated: service config should be received through name resolver, as specified here.
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
 func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	return func(o *dialOptions) {
@@ -314,6 +326,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
 
 // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
 // initially. This is valid if and only if WithBlock() is present.
+//
 // Deprecated: use DialContext and context.WithTimeout instead.
 func WithTimeout(d time.Duration) DialOption {
 	return func(o *dialOptions) {
@@ -396,15 +409,40 @@ func WithAuthority(a string) DialOption {
 	}
 }
 
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's
+// parent. This function is used in nested channel creation (e.g. grpclb dial).
+func WithChannelzParentID(id int64) DialOption {
+	return func(o *dialOptions) {
+		o.channelzParentID = id
+	}
+}
+
+// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+func WithDisableServiceConfig() DialOption {
+	return func(o *dialOptions) {
+		o.disableServiceConfig = true
+	}
+}
+
 // Dial creates a client connection to the given target.
 func Dial(target string, opts ...DialOption) (*ClientConn, error) {
 	return DialContext(context.Background(), target, opts...)
 }
 
-// DialContext creates a client connection to the given target. ctx can be used to
-// cancel or expire the pending connection. Once this function returns, the
-// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
-// to terminate all the pending operations after this function returns.
+// DialContext creates a client connection to the given target. By default, it's
+// a non-blocking dial (the function won't wait for connections to be
+// established, and connecting happens in the background). To make it a blocking
+// dial, use WithBlock() dial option.
+//
+// In the non-blocking case, the ctx does not act against the connection. It
+// only controls the setup steps.
+//
+// In the blocking case, ctx can be used to cancel or expire the pending
+// connection. Once this function returns, the cancellation and expiration of
+// ctx will be noop. Users should call ClientConn.Close to terminate all the
+// pending operations after this function returns.
 //
 // The target name syntax is defined in
 // https://github.com/grpc/grpc/blob/master/doc/naming.md.
@@ -423,6 +461,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		opt(&cc.dopts)
 	}
 
+	if channelz.IsOn() {
+		if cc.dopts.channelzParentID != 0 {
+			cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target)
+		} else {
+			cc.channelzID = channelz.RegisterChannel(cc, 0, target)
+		}
+	}
+
 	if !cc.dopts.insecure {
 		if cc.dopts.copts.TransportCredentials == nil {
 			return nil, errNoTransportSecurity
@@ -538,8 +584,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		credsClone = creds.Clone()
 	}
 	cc.balancerBuildOpts = balancer.BuildOptions{
-		DialCreds: credsClone,
-		Dialer:    cc.dopts.copts.Dialer,
+		DialCreds:        credsClone,
+		Dialer:           cc.dopts.copts.Dialer,
+		ChannelzParentID: cc.channelzID,
 	}
 
 	// Build the resolver.
@@ -641,6 +688,13 @@ type ClientConn struct {
 	preBalancerName string // previous balancer name.
 	curAddresses    []resolver.Address
 	balancerWrapper *ccBalancerWrapper
+
+	channelzID          int64 // channelz unique identification number
+	czmu                sync.RWMutex
+	callsStarted        int64
+	callsSucceeded      int64
+	callsFailed         int64
+	lastCallStartedTime time.Time
 }
 }
 
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
@@ -765,6 +819,8 @@ func (cc *ClientConn) switchBalancer(name string) {
 	if cc.balancerWrapper != nil {
 		cc.balancerWrapper.close()
 	}
+	// Clear all stickiness state.
+	cc.blockingpicker.clearStickinessState()
 
 	builder := balancer.Get(name)
 	if builder == nil {
@@ -804,6 +860,9 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
 		cc.mu.Unlock()
 		return nil, ErrClientConnClosing
 	}
+	if channelz.IsOn() {
+		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+	}
 	cc.conns[ac] = struct{}{}
 	cc.mu.Unlock()
 	return ac, nil
@@ -822,6 +881,42 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
 	ac.tearDown(err)
 }
 
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	state := cc.GetState()
+	cc.czmu.RLock()
+	defer cc.czmu.RUnlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    state,
+		Target:                   cc.target,
+		CallsStarted:             cc.callsStarted,
+		CallsSucceeded:           cc.callsSucceeded,
+		CallsFailed:              cc.callsFailed,
+		LastCallStartedTimestamp: cc.lastCallStartedTime,
+	}
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+	cc.czmu.Lock()
+	cc.callsStarted++
+	// TODO(yuxuanli): will make this a time.Time pointer improve performance?
+	cc.lastCallStartedTime = time.Now()
+	cc.czmu.Unlock()
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+	cc.czmu.Lock()
+	cc.callsSucceeded++
+	cc.czmu.Unlock()
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+	cc.czmu.Lock()
+	cc.callsFailed++
+	cc.czmu.Unlock()
+}
+
 // connect starts to creating transport and also starts the transport monitor
 // goroutine for this ac.
 // It does nothing if the ac is not IDLE.
@@ -901,7 +996,7 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	m, ok := cc.sc.Methods[method]
 	if !ok {
 		i := strings.LastIndex(method, "/")
-		m, _ = cc.sc.Methods[method[:i+1]]
+		m = cc.sc.Methods[method[:i+1]]
 	}
 	return m
 }
@@ -917,6 +1012,9 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transpor
 // handleServiceConfig parses the service config string in JSON format to Go native
 // struct ServiceConfig, and store both the struct and the JSON string in ClientConn.
 func (cc *ClientConn) handleServiceConfig(js string) error {
+	if cc.dopts.disableServiceConfig {
+		return nil
+	}
 	sc, err := parseServiceConfig(js)
 	if err != nil {
 		return err
@@ -937,6 +1035,18 @@ func (cc *ClientConn) handleServiceConfig(js string) error {
 			cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
 		}
 	}
+
+	if envConfigStickinessOn {
+		var newStickinessMDKey string
+		if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" {
+			newStickinessMDKey = *sc.stickinessMetadataKey
+		}
+		// newStickinessMDKey is "" if one of the following happens:
+		// - stickinessMetadataKey is set to ""
+		// - stickinessMetadataKey field doesn't exist in service config
+		cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey))
+	}
+
 	cc.mu.Unlock()
 	return nil
 }
@@ -969,16 +1079,22 @@ func (cc *ClientConn) Close() error {
 	bWrapper := cc.balancerWrapper
 	cc.balancerWrapper = nil
 	cc.mu.Unlock()
+
 	cc.blockingpicker.close()
+
 	if rWrapper != nil {
 		rWrapper.close()
 	}
 	if bWrapper != nil {
 		bWrapper.close()
 	}
+
 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
 	}
+	if channelz.IsOn() {
+		channelz.RemoveEntry(cc.channelzID)
+	}
 	return nil
 }
 
@@ -1012,6 +1128,13 @@ type addrConn struct {
 	// connectDeadline is the time by which all connection
 	// negotiations must complete.
 	connectDeadline time.Time
+
+	channelzID          int64 // channelz unique identification number
+	czmu                sync.RWMutex
+	callsStarted        int64
+	callsSucceeded      int64
+	callsFailed         int64
+	lastCallStartedTime time.Time
 }
 }
 
 // adjustParams updates parameters used to create transports upon
@@ -1047,7 +1170,7 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
 // resetTransport recreates a transport to the address for ac.  The old
 // transport will close itself on error or when the clientconn is closed.
 // The created transport must receive initial settings frame from the server.
-// In case that doesnt happen, transportMonitor will kill the newly created
+// In case that doesn't happen, transportMonitor will kill the newly created
 // transport after connectDeadline has expired.
 // In case there was an error on the transport before the settings frame was
 // received, resetTransport resumes connecting to backends after the one that
@@ -1092,7 +1215,7 @@ func (ac *addrConn) resetTransport() error {
 			connectDeadline = start.Add(dialDuration)
 			ridx = 0 // Start connecting from the beginning.
 		} else {
-			// Continue trying to conect with the same deadlines.
+			// Continue trying to connect with the same deadlines.
 			connectRetryNum = ac.connectRetryNum
 			backoffDeadline = ac.backoffDeadline
 			connectDeadline = ac.connectDeadline
@@ -1153,6 +1276,9 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline,
 		// Do not cancel in the success path because of
 		// this issue in Go1.6: https://github.com/golang/go/issues/15078.
 		connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+		if channelz.IsOn() {
+			copts.ChannelzParentID = ac.channelzID
+		}
 		newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
 		if err != nil {
 			cancel()
@@ -1208,6 +1334,10 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline,
 		return true, nil
 	}
 	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return false, errConnClosing
+	}
 	ac.state = connectivity.TransientFailure
 	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
 	ac.cc.resolveNow(resolver.ResolveNowOption{})
@@ -1242,7 +1372,20 @@ func (ac *addrConn) transportMonitor() {
 		// Block until we receive a goaway or an error occurs.
 		select {
 		case <-t.GoAway():
+			cleanup := t.Close
+			// Since this transport will be orphaned (it won't have a transportMonitor),
+			// launch a goroutine to watch for clientConn.Close(), which might otherwise
+			// go unnoticed by any other goroutine for a while.
+			go func() {
+				<-done
+				cleanup()
+			}()
 		case <-t.Error():
 		case <-t.Error():
+			// In case this is triggered because clientConn.Close()
+			// was called, we want to immediately close the transport
+			// since no other goroutine might notice it for a while.
+			t.Close()
 		case <-cdeadline:
 		case <-cdeadline:
 			ac.mu.Lock()
 			ac.mu.Lock()
 			// This implies that client received server preface.
 			// This implies that client received server preface.
@@ -1386,7 +1529,9 @@ func (ac *addrConn) tearDown(err error) {
 		close(ac.ready)
 		close(ac.ready)
 		ac.ready = nil
 		ac.ready = nil
 	}
 	}
-	return
+	if channelz.IsOn() {
+		channelz.RemoveEntry(ac.channelzID)
+	}
 }
 }
 
 
 func (ac *addrConn) getState() connectivity.State {
 func (ac *addrConn) getState() connectivity.State {
@@ -1395,6 +1540,49 @@ func (ac *addrConn) getState() connectivity.State {
 	return ac.state
 	return ac.state
 }
 }
 
 
+func (ac *addrConn) getCurAddr() (ret resolver.Address) {
+	ac.mu.Lock()
+	ret = ac.curAddr
+	ac.mu.Unlock()
+	return
+}
+
+func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	ac.mu.Lock()
+	addr := ac.curAddr.Addr
+	ac.mu.Unlock()
+	state := ac.getState()
+	ac.czmu.RLock()
+	defer ac.czmu.RUnlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    state,
+		Target:                   addr,
+		CallsStarted:             ac.callsStarted,
+		CallsSucceeded:           ac.callsSucceeded,
+		CallsFailed:              ac.callsFailed,
+		LastCallStartedTimestamp: ac.lastCallStartedTime,
+	}
+}
+
+func (ac *addrConn) incrCallsStarted() {
+	ac.czmu.Lock()
+	ac.callsStarted++
+	ac.lastCallStartedTime = time.Now()
+	ac.czmu.Unlock()
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+	ac.czmu.Lock()
+	ac.callsSucceeded++
+	ac.czmu.Unlock()
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	ac.czmu.Lock()
+	ac.callsFailed++
+	ac.czmu.Unlock()
+}
+
 // ErrClientConnTimeout indicates that the ClientConn cannot establish the
 // ErrClientConnTimeout indicates that the ClientConn cannot establish the
 // underlying connections within the specified timeout.
 // underlying connections within the specified timeout.
 //
 //
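
For reference, the counters added to addrConn above follow a plain RWMutex pattern: the incr* methods take the write lock, while the ChannelzMetric snapshot only needs the read lock. A minimal, self-contained sketch of the same idea, using illustrative names rather than the vendored types:

package main

import (
	"fmt"
	"sync"
	"time"
)

// callStats mirrors the shape of the new addrConn fields: a dedicated
// RWMutex guarding a few counters and a timestamp.
type callStats struct {
	mu                  sync.RWMutex
	started, succeeded  int64
	failed              int64
	lastCallStartedTime time.Time
}

func (s *callStats) incrCallsStarted() {
	s.mu.Lock()
	s.started++
	s.lastCallStartedTime = time.Now()
	s.mu.Unlock()
}

// snapshot is the read-side analogue of ChannelzMetric: shared lock only.
func (s *callStats) snapshot() (started, succeeded, failed int64) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.started, s.succeeded, s.failed
}

func main() {
	var s callStats
	s.incrCallsStarted()
	st, su, fa := s.snapshot()
	fmt.Println(st, su, fa) // 1 0 0
}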

+ 1 - 1
vendor/google.golang.org/grpc/encoding/encoding.go

@@ -82,7 +82,7 @@ type Codec interface {
 	Name() string
 	Name() string
 }
 }
 
 
-var registeredCodecs = make(map[string]Codec, 0)
+var registeredCodecs = make(map[string]Codec)
 
 
 // RegisterCodec registers the provided Codec for use with all gRPC clients and
 // RegisterCodec registers the provided Codec for use with all gRPC clients and
 // servers.
 // servers.

+ 37 - 0
vendor/google.golang.org/grpc/envconfig.go

@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"os"
+	"strings"
+)
+
+const (
+	envConfigPrefix        = "GRPC_GO_"
+	envConfigStickinessStr = envConfigPrefix + "STICKINESS"
+)
+
+var (
+	envConfigStickinessOn bool
+)
+
+func init() {
+	envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on")
+}
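
For context, this stickiness switch is read once at package init, so it only takes effect when the environment variable is set before the process starts. A small illustration of the same lookup outside the grpc package (the variable name comes from the constants above; everything else is illustrative):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Same check as the vendored init(): a case-insensitive comparison with "on".
	// Run the program with GRPC_GO_STICKINESS=on to flip it.
	on := strings.EqualFold(os.Getenv("GRPC_GO_STICKINESS"), "on")
	fmt.Println("stickiness enabled:", on)
}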

+ 4 - 5
vendor/google.golang.org/grpc/grpclb.go

@@ -58,7 +58,7 @@ func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption
 		ServerStreams: true,
 		ServerStreams: true,
 		ClientStreams: true,
 		ClientStreams: true,
 	}
 	}
-	stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
+	stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -127,7 +127,7 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
 	}
 	}
 
 
 	lb := &lbBalancer{
 	lb := &lbBalancer{
-		cc:              cc,
+		cc:              newLBCacheClientConn(cc),
 		target:          target,
 		target:          target,
 		opt:             opt,
 		opt:             opt,
 		fallbackTimeout: b.fallbackTimeout,
 		fallbackTimeout: b.fallbackTimeout,
@@ -145,7 +145,7 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
 }
 }
 
 
 type lbBalancer struct {
 type lbBalancer struct {
-	cc              balancer.ClientConn
+	cc              *lbCacheClientConn
 	target          string
 	target          string
 	opt             balancer.BuildOptions
 	opt             balancer.BuildOptions
 	fallbackTimeout time.Duration
 	fallbackTimeout time.Duration
@@ -220,7 +220,6 @@ func (lb *lbBalancer) regeneratePicker() {
 		subConns:   readySCs,
 		subConns:   readySCs,
 		stats:      lb.clientStats,
 		stats:      lb.clientStats,
 	}
 	}
-	return
 }
 }
 
 
 func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
 func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
@@ -257,7 +256,6 @@ func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivi
 	}
 	}
 
 
 	lb.cc.UpdateBalancerState(lb.state, lb.picker)
 	lb.cc.UpdateBalancerState(lb.state, lb.picker)
-	return
 }
 }
 
 
 // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
 // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
@@ -339,4 +337,5 @@ func (lb *lbBalancer) Close() {
 	if lb.ccRemoteLB != nil {
 	if lb.ccRemoteLB != nil {
 		lb.ccRemoteLB.Close()
 		lb.ccRemoteLB.Close()
 	}
 	}
+	lb.cc.close()
 }
 }

+ 297 - 113
vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go

@@ -1,24 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: grpc_lb_v1/messages/messages.proto
 // source: grpc_lb_v1/messages/messages.proto
 
 
-/*
-Package messages is a generated protocol buffer package.
-
-It is generated from these files:
-	grpc_lb_v1/messages/messages.proto
-
-It has these top-level messages:
-	Duration
-	Timestamp
-	LoadBalanceRequest
-	InitialLoadBalanceRequest
-	ClientStats
-	LoadBalanceResponse
-	InitialLoadBalanceResponse
-	ServerList
-	Server
-*/
-package messages
+package messages // import "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 
 
 import proto "github.com/golang/protobuf/proto"
 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import fmt "fmt"
@@ -45,13 +28,35 @@ type Duration struct {
 	// of one second or more, a non-zero value for the `nanos` field must be
 	// of one second or more, a non-zero value for the `nanos` field must be
 	// of the same sign as the `seconds` field. Must be from -999,999,999
 	// of the same sign as the `seconds` field. Must be from -999,999,999
 	// to +999,999,999 inclusive.
 	// to +999,999,999 inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 }
 
 
-func (m *Duration) Reset()                    { *m = Duration{} }
-func (m *Duration) String() string            { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage()               {}
-func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *Duration) Reset()         { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()    {}
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{0}
+}
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (dst *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(dst, src)
+}
+func (m *Duration) XXX_Size() int {
+	return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
 
 
 func (m *Duration) GetSeconds() int64 {
 func (m *Duration) GetSeconds() int64 {
 	if m != nil {
 	if m != nil {
@@ -76,13 +81,35 @@ type Timestamp struct {
 	// second values with fractions must still have non-negative nanos values
 	// second values with fractions must still have non-negative nanos values
 	// that count forward in time. Must be from 0 to 999,999,999
 	// that count forward in time. Must be from 0 to 999,999,999
 	// inclusive.
 	// inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Timestamp) Reset()         { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage()    {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{1}
+}
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (dst *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(dst, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
 }
 }
 
 
-func (m *Timestamp) Reset()                    { *m = Timestamp{} }
-func (m *Timestamp) String() string            { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage()               {}
-func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
 
 
 func (m *Timestamp) GetSeconds() int64 {
 func (m *Timestamp) GetSeconds() int64 {
 	if m != nil {
 	if m != nil {
@@ -103,12 +130,34 @@ type LoadBalanceRequest struct {
 	//	*LoadBalanceRequest_InitialRequest
 	//	*LoadBalanceRequest_InitialRequest
 	//	*LoadBalanceRequest_ClientStats
 	//	*LoadBalanceRequest_ClientStats
 	LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
 	LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
+	XXX_NoUnkeyedLiteral   struct{}                                    `json:"-"`
+	XXX_unrecognized       []byte                                      `json:"-"`
+	XXX_sizecache          int32                                       `json:"-"`
+}
+
+func (m *LoadBalanceRequest) Reset()         { *m = LoadBalanceRequest{} }
+func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadBalanceRequest) ProtoMessage()    {}
+func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{2}
+}
+func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b)
+}
+func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic)
+}
+func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LoadBalanceRequest.Merge(dst, src)
+}
+func (m *LoadBalanceRequest) XXX_Size() int {
+	return xxx_messageInfo_LoadBalanceRequest.Size(m)
+}
+func (m *LoadBalanceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m)
 }
 }
 
 
-func (m *LoadBalanceRequest) Reset()                    { *m = LoadBalanceRequest{} }
-func (m *LoadBalanceRequest) String() string            { return proto.CompactTextString(m) }
-func (*LoadBalanceRequest) ProtoMessage()               {}
-func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo
 
 
 type isLoadBalanceRequest_LoadBalanceRequestType interface {
 type isLoadBalanceRequest_LoadBalanceRequestType interface {
 	isLoadBalanceRequest_LoadBalanceRequestType()
 	isLoadBalanceRequest_LoadBalanceRequestType()
@@ -204,12 +253,12 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
 	switch x := m.LoadBalanceRequestType.(type) {
 	switch x := m.LoadBalanceRequestType.(type) {
 	case *LoadBalanceRequest_InitialRequest:
 	case *LoadBalanceRequest_InitialRequest:
 		s := proto.Size(x.InitialRequest)
 		s := proto.Size(x.InitialRequest)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += proto.SizeVarint(uint64(s))
 		n += s
 		n += s
 	case *LoadBalanceRequest_ClientStats:
 	case *LoadBalanceRequest_ClientStats:
 		s := proto.Size(x.ClientStats)
 		s := proto.Size(x.ClientStats)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += proto.SizeVarint(uint64(s))
 		n += s
 		n += s
 	case nil:
 	case nil:
@@ -222,13 +271,35 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
 type InitialLoadBalanceRequest struct {
 type InitialLoadBalanceRequest struct {
 	// Name of load balanced service (IE, balancer.service.com)
 	// Name of load balanced service (IE, balancer.service.com)
 	// length should be less than 256 bytes.
 	// length should be less than 256 bytes.
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name                 string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InitialLoadBalanceRequest) Reset()         { *m = InitialLoadBalanceRequest{} }
+func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
+func (*InitialLoadBalanceRequest) ProtoMessage()    {}
+func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{3}
+}
+func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b)
+}
+func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic)
+}
+func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src)
+}
+func (m *InitialLoadBalanceRequest) XXX_Size() int {
+	return xxx_messageInfo_InitialLoadBalanceRequest.Size(m)
+}
+func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m)
 }
 }
 
 
-func (m *InitialLoadBalanceRequest) Reset()                    { *m = InitialLoadBalanceRequest{} }
-func (m *InitialLoadBalanceRequest) String() string            { return proto.CompactTextString(m) }
-func (*InitialLoadBalanceRequest) ProtoMessage()               {}
-func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo
 
 
 func (m *InitialLoadBalanceRequest) GetName() string {
 func (m *InitialLoadBalanceRequest) GetName() string {
 	if m != nil {
 	if m != nil {
@@ -256,13 +327,35 @@ type ClientStats struct {
 	NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
 	NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
 	// The total number of RPCs that finished and are known to have been received
 	// The total number of RPCs that finished and are known to have been received
 	// by a server.
 	// by a server.
-	NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
+	NumCallsFinishedKnownReceived int64    `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
+	XXX_NoUnkeyedLiteral          struct{} `json:"-"`
+	XXX_unrecognized              []byte   `json:"-"`
+	XXX_sizecache                 int32    `json:"-"`
+}
+
+func (m *ClientStats) Reset()         { *m = ClientStats{} }
+func (m *ClientStats) String() string { return proto.CompactTextString(m) }
+func (*ClientStats) ProtoMessage()    {}
+func (*ClientStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{4}
+}
+func (m *ClientStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ClientStats.Unmarshal(m, b)
+}
+func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
+}
+func (dst *ClientStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClientStats.Merge(dst, src)
+}
+func (m *ClientStats) XXX_Size() int {
+	return xxx_messageInfo_ClientStats.Size(m)
+}
+func (m *ClientStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClientStats.DiscardUnknown(m)
 }
 }
 
 
-func (m *ClientStats) Reset()                    { *m = ClientStats{} }
-func (m *ClientStats) String() string            { return proto.CompactTextString(m) }
-func (*ClientStats) ProtoMessage()               {}
-func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+var xxx_messageInfo_ClientStats proto.InternalMessageInfo
 
 
 func (m *ClientStats) GetTimestamp() *Timestamp {
 func (m *ClientStats) GetTimestamp() *Timestamp {
 	if m != nil {
 	if m != nil {
@@ -318,12 +411,34 @@ type LoadBalanceResponse struct {
 	//	*LoadBalanceResponse_InitialResponse
 	//	*LoadBalanceResponse_InitialResponse
 	//	*LoadBalanceResponse_ServerList
 	//	*LoadBalanceResponse_ServerList
 	LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
 	LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
+	XXX_NoUnkeyedLiteral    struct{}                                      `json:"-"`
+	XXX_unrecognized        []byte                                        `json:"-"`
+	XXX_sizecache           int32                                         `json:"-"`
+}
+
+func (m *LoadBalanceResponse) Reset()         { *m = LoadBalanceResponse{} }
+func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
+func (*LoadBalanceResponse) ProtoMessage()    {}
+func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{5}
+}
+func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b)
+}
+func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic)
+}
+func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LoadBalanceResponse.Merge(dst, src)
+}
+func (m *LoadBalanceResponse) XXX_Size() int {
+	return xxx_messageInfo_LoadBalanceResponse.Size(m)
+}
+func (m *LoadBalanceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m)
 }
 }
 
 
-func (m *LoadBalanceResponse) Reset()                    { *m = LoadBalanceResponse{} }
-func (m *LoadBalanceResponse) String() string            { return proto.CompactTextString(m) }
-func (*LoadBalanceResponse) ProtoMessage()               {}
-func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo
 
 
 type isLoadBalanceResponse_LoadBalanceResponseType interface {
 type isLoadBalanceResponse_LoadBalanceResponseType interface {
 	isLoadBalanceResponse_LoadBalanceResponseType()
 	isLoadBalanceResponse_LoadBalanceResponseType()
@@ -419,12 +534,12 @@ func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
 	switch x := m.LoadBalanceResponseType.(type) {
 	switch x := m.LoadBalanceResponseType.(type) {
 	case *LoadBalanceResponse_InitialResponse:
 	case *LoadBalanceResponse_InitialResponse:
 		s := proto.Size(x.InitialResponse)
 		s := proto.Size(x.InitialResponse)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += proto.SizeVarint(uint64(s))
 		n += s
 		n += s
 	case *LoadBalanceResponse_ServerList:
 	case *LoadBalanceResponse_ServerList:
 		s := proto.Size(x.ServerList)
 		s := proto.Size(x.ServerList)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += 1 // tag and wire
 		n += proto.SizeVarint(uint64(s))
 		n += proto.SizeVarint(uint64(s))
 		n += s
 		n += s
 	case nil:
 	case nil:
@@ -445,12 +560,34 @@ type InitialLoadBalanceResponse struct {
 	// to the load balancer. Stats should only be reported when the duration is
 	// to the load balancer. Stats should only be reported when the duration is
 	// positive.
 	// positive.
 	ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
 	ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
+	XXX_NoUnkeyedLiteral      struct{}  `json:"-"`
+	XXX_unrecognized          []byte    `json:"-"`
+	XXX_sizecache             int32     `json:"-"`
+}
+
+func (m *InitialLoadBalanceResponse) Reset()         { *m = InitialLoadBalanceResponse{} }
+func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
+func (*InitialLoadBalanceResponse) ProtoMessage()    {}
+func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{6}
+}
+func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b)
+}
+func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic)
+}
+func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src)
+}
+func (m *InitialLoadBalanceResponse) XXX_Size() int {
+	return xxx_messageInfo_InitialLoadBalanceResponse.Size(m)
+}
+func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m)
 }
 }
 
 
-func (m *InitialLoadBalanceResponse) Reset()                    { *m = InitialLoadBalanceResponse{} }
-func (m *InitialLoadBalanceResponse) String() string            { return proto.CompactTextString(m) }
-func (*InitialLoadBalanceResponse) ProtoMessage()               {}
-func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo
 
 
 func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
 func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
 	if m != nil {
 	if m != nil {
@@ -471,13 +608,35 @@ type ServerList struct {
 	// be updated when server resolutions change or as needed to balance load
 	// be updated when server resolutions change or as needed to balance load
 	// across more servers. The client should consume the server list in order
 	// across more servers. The client should consume the server list in order
 	// unless instructed otherwise via the client_config.
 	// unless instructed otherwise via the client_config.
-	Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
+	Servers              []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *ServerList) Reset()         { *m = ServerList{} }
+func (m *ServerList) String() string { return proto.CompactTextString(m) }
+func (*ServerList) ProtoMessage()    {}
+func (*ServerList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{7}
+}
+func (m *ServerList) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServerList.Unmarshal(m, b)
+}
+func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServerList.Marshal(b, m, deterministic)
+}
+func (dst *ServerList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerList.Merge(dst, src)
+}
+func (m *ServerList) XXX_Size() int {
+	return xxx_messageInfo_ServerList.Size(m)
+}
+func (m *ServerList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerList.DiscardUnknown(m)
 }
 }
 
 
-func (m *ServerList) Reset()                    { *m = ServerList{} }
-func (m *ServerList) String() string            { return proto.CompactTextString(m) }
-func (*ServerList) ProtoMessage()               {}
-func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+var xxx_messageInfo_ServerList proto.InternalMessageInfo
 
 
 func (m *ServerList) GetServers() []*Server {
 func (m *ServerList) GetServers() []*Server {
 	if m != nil {
 	if m != nil {
@@ -508,13 +667,35 @@ type Server struct {
 	DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
 	DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
 	// Indicates whether this particular request should be dropped by the client
 	// Indicates whether this particular request should be dropped by the client
 	// for load balancing.
 	// for load balancing.
-	DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
+	DropForLoadBalancing bool     `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 }
 
 
-func (m *Server) Reset()                    { *m = Server{} }
-func (m *Server) String() string            { return proto.CompactTextString(m) }
-func (*Server) ProtoMessage()               {}
-func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (m *Server) Reset()         { *m = Server{} }
+func (m *Server) String() string { return proto.CompactTextString(m) }
+func (*Server) ProtoMessage()    {}
+func (*Server) Descriptor() ([]byte, []int) {
+	return fileDescriptor_messages_b81c731f0e83edbd, []int{8}
+}
+func (m *Server) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Server.Unmarshal(m, b)
+}
+func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Server.Marshal(b, m, deterministic)
+}
+func (dst *Server) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Server.Merge(dst, src)
+}
+func (m *Server) XXX_Size() int {
+	return xxx_messageInfo_Server.Size(m)
+}
+func (m *Server) XXX_DiscardUnknown() {
+	xxx_messageInfo_Server.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Server proto.InternalMessageInfo
 
 
 func (m *Server) GetIpAddress() []byte {
 func (m *Server) GetIpAddress() []byte {
 	if m != nil {
 	if m != nil {
@@ -563,53 +744,56 @@ func init() {
 	proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
 	proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
 }
 }
 
 
-func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-	// 709 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b,
-	0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69,
-	0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55,
-	0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28,
-	0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f,
-	0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb,
-	0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56,
-	0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3,
-	0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a,
-	0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18,
-	0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8,
-	0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a,
-	0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc,
-	0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d,
-	0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f,
-	0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42,
-	0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b,
-	0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf,
-	0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60,
-	0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3,
-	0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29,
-	0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9,
-	0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1,
-	0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e,
-	0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd,
-	0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a,
-	0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa,
-	0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31,
-	0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a,
-	0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79,
-	0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8,
-	0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89,
-	0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f,
-	0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7,
-	0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a,
-	0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62,
-	0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d,
-	0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77,
-	0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc,
-	0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76,
-	0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b,
-	0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06,
-	0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd,
-	0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86,
-	0xa6, 0x4a, 0x06, 0x00, 0x00,
+func init() {
+	proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b81c731f0e83edbd)
+}
+
+var fileDescriptor_messages_b81c731f0e83edbd = []byte{
+	// 731 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
+	0x14, 0x26, 0x9b, 0x00, 0xc9, 0x09, 0x5a, 0xb2, 0x26, 0x0b, 0x81, 0x05, 0x89, 0x1d, 0x69, 0xd9,
+	0x68, 0xc5, 0x4e, 0x04, 0xd9, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43,
+	0x55, 0xa9, 0x52, 0x65, 0x39, 0x19, 0x33, 0x58, 0x38, 0xf6, 0xd4, 0x76, 0x82, 0xfa, 0x08, 0x7d,
+	0x94, 0x3e, 0x46, 0xd5, 0x67, 0xe8, 0xfb, 0x54, 0xe3, 0x99, 0xc9, 0x0c, 0x10, 0x40, 0xbd, 0x89,
+	0xec, 0xe3, 0xef, 0x7c, 0xdf, 0xf1, 0x89, 0xbf, 0x33, 0xe0, 0x85, 0x3a, 0x1a, 0x11, 0x31, 0x24,
+	0xd3, 0x83, 0xce, 0x98, 0x19, 0x43, 0x43, 0x66, 0x66, 0x0b, 0x3f, 0xd2, 0xca, 0x2a, 0x04, 0x31,
+	0xc6, 0x17, 0x43, 0x7f, 0x7a, 0xe0, 0x3d, 0x85, 0xea, 0xf1, 0x44, 0x53, 0xcb, 0x95, 0x44, 0x2d,
+	0x58, 0x36, 0x6c, 0xa4, 0x64, 0x60, 0x5a, 0xa5, 0xdd, 0x52, 0xbb, 0x8c, 0xb3, 0x2d, 0x6a, 0xc2,
+	0xa2, 0xa4, 0x52, 0x99, 0xd6, 0x2f, 0xbb, 0xa5, 0xf6, 0x22, 0x4e, 0x36, 0xde, 0x33, 0xa8, 0x9d,
+	0xf3, 0x31, 0x33, 0x96, 0x8e, 0xa3, 0x9f, 0x4e, 0xfe, 0x5a, 0x02, 0x74, 0xa6, 0x68, 0xd0, 0xa3,
+	0x82, 0xca, 0x11, 0xc3, 0xec, 0xe3, 0x84, 0x19, 0x8b, 0xde, 0xc0, 0x2a, 0x97, 0xdc, 0x72, 0x2a,
+	0x88, 0x4e, 0x42, 0x8e, 0xae, 0x7e, 0xf8, 0x97, 0x9f, 0x57, 0xed, 0x9f, 0x26, 0x90, 0xbb, 0xf9,
+	0xfd, 0x05, 0xfc, 0x6b, 0x9a, 0x9f, 0x31, 0x3e, 0x87, 0x95, 0x91, 0xe0, 0x4c, 0x5a, 0x62, 0x2c,
+	0xb5, 0x49, 0x15, 0xf5, 0xc3, 0x8d, 0x22, 0xdd, 0x91, 0x3b, 0x1f, 0xc4, 0xc7, 0xfd, 0x05, 0x5c,
+	0x1f, 0xe5, 0xdb, 0xde, 0x1f, 0xb0, 0x29, 0x14, 0x0d, 0xc8, 0x30, 0x91, 0xc9, 0x8a, 0x22, 0xf6,
+	0x53, 0xc4, 0xbc, 0x0e, 0x6c, 0xde, 0x5b, 0x09, 0x42, 0x50, 0x91, 0x74, 0xcc, 0x5c, 0xf9, 0x35,
+	0xec, 0xd6, 0xde, 0xe7, 0x0a, 0xd4, 0x0b, 0x62, 0xa8, 0x0b, 0x35, 0x9b, 0x75, 0x30, 0xbd, 0xe7,
+	0xef, 0xc5, 0xc2, 0x66, 0xed, 0xc5, 0x39, 0x0e, 0xfd, 0x03, 0xbf, 0xc9, 0xc9, 0x98, 0x8c, 0xa8,
+	0x10, 0x26, 0xbe, 0x93, 0xb6, 0x2c, 0x70, 0xb7, 0x2a, 0xe3, 0x55, 0x39, 0x19, 0x1f, 0xc5, 0xf1,
+	0x41, 0x12, 0x46, 0xfb, 0x80, 0x72, 0xec, 0x05, 0x97, 0xdc, 0x5c, 0xb2, 0xa0, 0x55, 0x76, 0xe0,
+	0x46, 0x06, 0x3e, 0x49, 0xe3, 0x88, 0x80, 0x7f, 0x17, 0x4d, 0xae, 0xb9, 0xbd, 0x24, 0x81, 0x56,
+	0x11, 0xb9, 0x50, 0x9a, 0x68, 0x6a, 0x19, 0x11, 0x7c, 0xcc, 0x2d, 0x97, 0x61, 0xab, 0xe2, 0x98,
+	0xfe, 0xbe, 0xcd, 0xf4, 0x8e, 0xdb, 0xcb, 0x63, 0xad, 0xa2, 0x13, 0xa5, 0x31, 0xb5, 0xec, 0x2c,
+	0x85, 0x23, 0x0a, 0x9d, 0x47, 0x05, 0x0a, 0xed, 0x8e, 0x15, 0x16, 0x9d, 0x42, 0xfb, 0x01, 0x85,
+	0xbc, 0xf7, 0xb1, 0xc4, 0x07, 0xf8, 0xf7, 0x3e, 0x89, 0xf4, 0x19, 0x5c, 0x50, 0x2e, 0x58, 0x40,
+	0xac, 0x22, 0x86, 0xc9, 0xa0, 0xb5, 0xe4, 0x04, 0xf6, 0xe6, 0x09, 0x24, 0x7f, 0xd5, 0x89, 0xc3,
+	0x9f, 0xab, 0x01, 0x93, 0x01, 0xea, 0xc3, 0x9f, 0x73, 0xe8, 0xaf, 0xa4, 0xba, 0x96, 0x44, 0xb3,
+	0x11, 0xe3, 0x53, 0x16, 0xb4, 0x96, 0x1d, 0xe5, 0xce, 0x6d, 0xca, 0xd7, 0x31, 0x0a, 0xa7, 0x20,
+	0xef, 0x5b, 0x09, 0xd6, 0x6e, 0x3c, 0x1b, 0x13, 0x29, 0x69, 0x18, 0x1a, 0x40, 0x23, 0x77, 0x40,
+	0x12, 0x4b, 0x9f, 0xc6, 0xde, 0x63, 0x16, 0x48, 0xd0, 0xfd, 0x05, 0xbc, 0x3a, 0xf3, 0x40, 0x4a,
+	0xfa, 0x04, 0xea, 0x86, 0xe9, 0x29, 0xd3, 0x44, 0x70, 0x63, 0x53, 0x0f, 0xac, 0x17, 0xf9, 0x06,
+	0xee, 0xf8, 0x8c, 0x3b, 0x0f, 0x81, 0x99, 0xed, 0x7a, 0xdb, 0xb0, 0x75, 0xcb, 0x01, 0x09, 0x67,
+	0x62, 0x81, 0x2f, 0x25, 0xd8, 0xba, 0xbf, 0x14, 0xf4, 0x1f, 0xac, 0x17, 0x93, 0x35, 0x09, 0x98,
+	0x60, 0x21, 0xb5, 0x99, 0x2d, 0x9a, 0x22, 0x4f, 0xd2, 0xc7, 0xe9, 0x19, 0x7a, 0x0b, 0xdb, 0x45,
+	0xcb, 0x12, 0xcd, 0x22, 0xa5, 0x2d, 0xe1, 0xd2, 0x32, 0x3d, 0xa5, 0x22, 0x2d, 0xbf, 0x59, 0x2c,
+	0x3f, 0x1b, 0x62, 0x78, 0xb3, 0xe0, 0x5e, 0xec, 0xf2, 0x4e, 0xd3, 0x34, 0xef, 0x05, 0x40, 0x7e,
+	0x4b, 0xb4, 0x1f, 0x0f, 0xac, 0x78, 0x17, 0x0f, 0xac, 0x72, 0xbb, 0x7e, 0x88, 0xee, 0xb6, 0x03,
+	0x67, 0x90, 0x57, 0x95, 0x6a, 0xb9, 0x51, 0xf1, 0xbe, 0x97, 0x60, 0x29, 0x39, 0x41, 0x3b, 0x00,
+	0x3c, 0x22, 0x34, 0x08, 0x34, 0x33, 0xc9, 0xc8, 0x5b, 0xc1, 0x35, 0x1e, 0xbd, 0x4c, 0x02, 0xb1,
+	0xfb, 0x63, 0xed, 0x74, 0xe6, 0xb9, 0x75, 0x6c, 0xc6, 0x1b, 0x9d, 0xb4, 0xea, 0x8a, 0x49, 0x67,
+	0xc6, 0x1a, 0x6e, 0x14, 0x1a, 0x71, 0x1e, 0xc7, 0x51, 0x17, 0xd6, 0x1f, 0x30, 0x5d, 0x15, 0xaf,
+	0x05, 0x73, 0x0c, 0xf6, 0x3f, 0x6c, 0x3c, 0x64, 0xa4, 0x2a, 0x6e, 0x06, 0x73, 0x4c, 0xd3, 0xeb,
+	0xbe, 0x3f, 0x08, 0x95, 0x0a, 0x05, 0xf3, 0x43, 0x25, 0xa8, 0x0c, 0x7d, 0xa5, 0xc3, 0x4e, 0xdc,
+	0x0d, 0xf7, 0x23, 0x86, 0x9d, 0x39, 0x5f, 0x95, 0xe1, 0x92, 0xfb, 0x9a, 0x74, 0x7f, 0x04, 0x00,
+	0x00, 0xff, 0xff, 0x8e, 0xd0, 0x70, 0xb7, 0x73, 0x06, 0x00, 0x00,
 }
 }

+ 24 - 12
vendor/google.golang.org/grpc/grpclb_remote_balancer.go

@@ -26,6 +26,8 @@ import (
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/channelz"
+
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/connectivity"
 	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/grpclog"
@@ -74,15 +76,16 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
 	}
 	}
 
 
 	// Call refreshSubConns to create/remove SubConns.
 	// Call refreshSubConns to create/remove SubConns.
-	backendsUpdated := lb.refreshSubConns(backendAddrs)
-	// If no backend was updated, no SubConn will be newed/removed. But since
-	// the full serverList was different, there might be updates in drops or
-	// pick weights(different number of duplicates). We need to update picker
-	// with the fulllist.
-	if !backendsUpdated {
-		lb.regeneratePicker()
-		lb.cc.UpdateBalancerState(lb.state, lb.picker)
-	}
+	lb.refreshSubConns(backendAddrs)
+	// Regenerate and update the picker regardless of whether any backend changed
+	// (i.e. whether a SubConn was created or removed). Since the full serverList
+	// was different, drops or pick weights (the number of duplicates) may have
+	// changed, so the picker must be rebuilt from the full list.
+	//
+	// Now with the cache, even if a SubConn was created or removed, there might
+	// be no state change.
+	lb.regeneratePicker()
+	lb.cc.UpdateBalancerState(lb.state, lb.picker)
 }
 }
 
 
 // refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
 // refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
@@ -112,7 +115,11 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
 				continue
 				continue
 			}
 			}
 			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
 			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
-			lb.scStates[sc] = connectivity.Idle
+			if _, ok := lb.scStates[sc]; !ok {
+				// Only set state of new sc to IDLE. The state could already be
+				// READY for cached SubConns.
+				lb.scStates[sc] = connectivity.Idle
+			}
 			sc.Connect()
 			sc.Connect()
 		}
 		}
 	}
 	}
@@ -168,6 +175,7 @@ func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.D
 		}
 		}
 	}
 	}
 }
 }
+
 func (lb *lbBalancer) callRemoteBalancer() error {
 func (lb *lbBalancer) callRemoteBalancer() error {
 	lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
 	lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
 	ctx, cancel := context.WithCancel(context.Background())
 	ctx, cancel := context.WithCancel(context.Background())
@@ -243,9 +251,13 @@ func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
 	// Explicitly set pickfirst as the balancer.
 	// Explicitly set pickfirst as the balancer.
 	dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
 	dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
 	dopts = append(dopts, withResolverBuilder(lb.manualResolver))
 	dopts = append(dopts, withResolverBuilder(lb.manualResolver))
-	// Dial using manualResolver.Scheme, which is a random scheme generated
+	if channelz.IsOn() {
+		dopts = append(dopts, WithChannelzParentID(lb.opt.ChannelzParentID))
+	}
+
+	// DialContext using manualResolver.Scheme, which is a random scheme generated
 	// when init grpclb. The target name is not important.
 	// when init grpclb. The target name is not important.
-	cc, err := Dial("grpclb:///grpclb.server", dopts...)
+	cc, err := DialContext(context.Background(), "grpclb:///grpclb.server", dopts...)
 	if err != nil {
 	if err != nil {
 		grpclog.Fatalf("failed to dial: %v", err)
 		grpclog.Fatalf("failed to dial: %v", err)
 	}
 	}
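
For reference, DialContext with context.Background() behaves exactly like the old Dial call; the context only bounds the dialing step. A minimal user-level sketch of the same API (the target and options here are illustrative, not taken from this change):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Bound the dial attempt to five seconds; WithBlock makes DialContext wait
	// until the connection is up (or the context expires).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, "localhost:50051", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()
}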

+ 124 - 0
vendor/google.golang.org/grpc/grpclb_util.go

@@ -19,7 +19,12 @@
 package grpc
 package grpc
 
 
 import (
 import (
+	"fmt"
+	"sync"
+	"time"
+
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/resolver"
 )
 )
 
 
@@ -88,3 +93,122 @@ func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
 func (r *lbManualResolver) NewServiceConfig(sc string) {
 func (r *lbManualResolver) NewServiceConfig(sc string) {
 	r.ccr.NewServiceConfig(sc)
 	r.ccr.NewServiceConfig(sc)
 }
 }
+
+const subConnCacheTime = time.Second * 10
+
+// lbCacheClientConn is a wrapper around balancer.ClientConn with a SubConn cache.
+// SubConns are kept in the cache for subConnCacheTime before being removed.
+//
+// Its NewSubConn and RemoveSubConn methods consult the cache first.
+type lbCacheClientConn struct {
+	cc      balancer.ClientConn
+	timeout time.Duration
+
+	mu sync.Mutex
+	// subConnCache only keeps subConns that are being deleted.
+	subConnCache  map[resolver.Address]*subConnCacheEntry
+	subConnToAddr map[balancer.SubConn]resolver.Address
+}
+
+type subConnCacheEntry struct {
+	sc balancer.SubConn
+
+	cancel        func()
+	abortDeleting bool
+}
+
+func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn {
+	return &lbCacheClientConn{
+		cc:            cc,
+		timeout:       subConnCacheTime,
+		subConnCache:  make(map[resolver.Address]*subConnCacheEntry),
+		subConnToAddr: make(map[balancer.SubConn]resolver.Address),
+	}
+}
+
+func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) != 1 {
+		return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs))
+	}
+	addrWithoutMD := addrs[0]
+	addrWithoutMD.Metadata = nil
+
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
+		// If entry is in subConnCache, the SubConn was being deleted.
+		// cancel function will never be nil.
+		entry.cancel()
+		delete(ccc.subConnCache, addrWithoutMD)
+		return entry.sc, nil
+	}
+
+	scNew, err := ccc.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	ccc.subConnToAddr[scNew] = addrWithoutMD
+	return scNew, nil
+}
+
+func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) {
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	addr, ok := ccc.subConnToAddr[sc]
+	if !ok {
+		return
+	}
+
+	if entry, ok := ccc.subConnCache[addr]; ok {
+		if entry.sc != sc {
+			// This could happen if NewSubConn was called multiple times for the
+			// same address, and those SubConns are all removed. We remove sc
+			// immediately here.
+			delete(ccc.subConnToAddr, sc)
+			ccc.cc.RemoveSubConn(sc)
+		}
+		return
+	}
+
+	entry := &subConnCacheEntry{
+		sc: sc,
+	}
+	ccc.subConnCache[addr] = entry
+
+	timer := time.AfterFunc(ccc.timeout, func() {
+		ccc.mu.Lock()
+		if entry.abortDeleting {
+			return
+		}
+		ccc.cc.RemoveSubConn(sc)
+		delete(ccc.subConnToAddr, sc)
+		delete(ccc.subConnCache, addr)
+		ccc.mu.Unlock()
+	})
+	entry.cancel = func() {
+		if !timer.Stop() {
+			// If stop was not successful, the timer has fired (this can only
+			// happen in a race). But the deleting function is blocked on ccc.mu
+			// because the mutex was held by the caller of this function.
+			//
+			// Set abortDeleting to true to abort the deleting function. When
+			// the lock is released, the deleting function will acquire the
+			// lock, check the value of abortDeleting and return.
+			entry.abortDeleting = true
+		}
+	}
+}
+
+func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
+	ccc.cc.UpdateBalancerState(s, p)
+}
+
+func (ccc *lbCacheClientConn) close() {
+	ccc.mu.Lock()
+	// Only cancel all existing timers. There's no need to remove SubConns.
+	for _, entry := range ccc.subConnCache {
+		entry.cancel()
+	}
+	ccc.mu.Unlock()
+}
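
The cache above reduces to a delayed-delete map: RemoveSubConn arms a timer instead of deleting immediately, and a NewSubConn for the same address cancels that timer and reuses the entry. A stripped-down sketch of that pattern with illustrative names (not the vendored type, just the underlying idea):

package main

import (
	"fmt"
	"sync"
	"time"
)

type delayedSet struct {
	mu     sync.Mutex
	timers map[string]*time.Timer
}

// Remove schedules key for deletion after d, unless it is re-added first.
func (s *delayedSet) Remove(key string, d time.Duration, onExpire func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.timers[key] = time.AfterFunc(d, onExpire)
}

// Add cancels a pending deletion and reports whether the entry was reused,
// mirroring the cache-hit path in NewSubConn.
func (s *delayedSet) Add(key string) (reused bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if t, ok := s.timers[key]; ok {
		t.Stop()
		delete(s.timers, key)
		return true
	}
	return false
}

func main() {
	s := &delayedSet{timers: make(map[string]*time.Timer)}
	s.Remove("10.0.0.1:443", 50*time.Millisecond, func() { fmt.Println("really removed") })
	fmt.Println("reused:", s.Add("10.0.0.1:443")) // prints: reused: true
	time.Sleep(100 * time.Millisecond)            // nothing else prints; the timer was stopped
}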

+ 3 - 0
vendor/google.golang.org/grpc/grpclog/grpclog.go

@@ -105,18 +105,21 @@ func Fatalln(args ...interface{}) {
 }
 }
 
 
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
 // Deprecated: use Info.
 // Deprecated: use Info.
 func Print(args ...interface{}) {
 func Print(args ...interface{}) {
 	logger.Info(args...)
 	logger.Info(args...)
 }
 }
 
 
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
 // Deprecated: use Infof.
 // Deprecated: use Infof.
 func Printf(format string, args ...interface{}) {
 func Printf(format string, args ...interface{}) {
 	logger.Infof(format, args...)
 	logger.Infof(format, args...)
 }
 }
 
 
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
 // Deprecated: use Infoln.
 // Deprecated: use Infoln.
 func Println(args ...interface{}) {
 func Println(args ...interface{}) {
 	logger.Infoln(args...)
 	logger.Infoln(args...)

+ 2 - 0
vendor/google.golang.org/grpc/grpclog/logger.go

@@ -19,6 +19,7 @@
 package grpclog
 package grpclog
 
 
 // Logger mimics golang's standard Logger as an interface.
 // Logger mimics golang's standard Logger as an interface.
+//
 // Deprecated: use LoggerV2.
 // Deprecated: use LoggerV2.
 type Logger interface {
 type Logger interface {
 	Fatal(args ...interface{})
 	Fatal(args ...interface{})
@@ -31,6 +32,7 @@ type Logger interface {
 
 
 // SetLogger sets the logger that is used in grpc. Call only from
 // SetLogger sets the logger that is used in grpc. Call only from
 // init() functions.
 // init() functions.
+//
 // Deprecated: use SetLoggerV2.
 // Deprecated: use SetLoggerV2.
 func SetLogger(l Logger) {
 func SetLogger(l Logger) {
 	logger = &loggerWrapper{Logger: l}
 	logger = &loggerWrapper{Logger: l}

+ 64 - 27
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go

@@ -1,17 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: grpc_health_v1/health.proto
 // source: grpc_health_v1/health.proto
 
 
-/*
-Package grpc_health_v1 is a generated protocol buffer package.
-
-It is generated from these files:
-	grpc_health_v1/health.proto
-
-It has these top-level messages:
-	HealthCheckRequest
-	HealthCheckResponse
-*/
-package grpc_health_v1
+package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
 
 
 import proto "github.com/golang/protobuf/proto"
 import proto "github.com/golang/protobuf/proto"
 import fmt "fmt"
 import fmt "fmt"
@@ -56,17 +46,39 @@ func (x HealthCheckResponse_ServingStatus) String() string {
 	return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
 	return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
 }
 }
 func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
 func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{1, 0}
+	return fileDescriptor_health_8e5b8a3074428511, []int{1, 0}
 }
 }
 
 
 type HealthCheckRequest struct {
 type HealthCheckRequest struct {
-	Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+	Service              string   `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HealthCheckRequest) Reset()         { *m = HealthCheckRequest{} }
+func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
+func (*HealthCheckRequest) ProtoMessage()    {}
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_health_8e5b8a3074428511, []int{0}
+}
+func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
+}
+func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
+}
+func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
+}
+func (m *HealthCheckRequest) XXX_Size() int {
+	return xxx_messageInfo_HealthCheckRequest.Size(m)
+}
+func (m *HealthCheckRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
 }
 }
 
 
-func (m *HealthCheckRequest) Reset()                    { *m = HealthCheckRequest{} }
-func (m *HealthCheckRequest) String() string            { return proto.CompactTextString(m) }
-func (*HealthCheckRequest) ProtoMessage()               {}
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
 
 
 func (m *HealthCheckRequest) GetService() string {
 func (m *HealthCheckRequest) GetService() string {
 	if m != nil {
 	if m != nil {
@@ -76,13 +88,35 @@ func (m *HealthCheckRequest) GetService() string {
 }
 }
 
 
 type HealthCheckResponse struct {
 type HealthCheckResponse struct {
-	Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+	Status               HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                          `json:"-"`
+	XXX_unrecognized     []byte                            `json:"-"`
+	XXX_sizecache        int32                             `json:"-"`
+}
+
+func (m *HealthCheckResponse) Reset()         { *m = HealthCheckResponse{} }
+func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
+func (*HealthCheckResponse) ProtoMessage()    {}
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_health_8e5b8a3074428511, []int{1}
+}
+func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
+}
+func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
+}
+func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
+}
+func (m *HealthCheckResponse) XXX_Size() int {
+	return xxx_messageInfo_HealthCheckResponse.Size(m)
+}
+func (m *HealthCheckResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
 }
 }
 
 
-func (m *HealthCheckResponse) Reset()                    { *m = HealthCheckResponse{} }
-func (m *HealthCheckResponse) String() string            { return proto.CompactTextString(m) }
-func (*HealthCheckResponse) ProtoMessage()               {}
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
 
 
 func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
 func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
 	if m != nil {
 	if m != nil {
@@ -169,10 +203,10 @@ var _Health_serviceDesc = grpc.ServiceDesc{
 	Metadata: "grpc_health_v1/health.proto",
 	Metadata: "grpc_health_v1/health.proto",
 }
 }
 
 
-func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor_health_8e5b8a3074428511) }
 
 
-var fileDescriptor0 = []byte{
-	// 213 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_health_8e5b8a3074428511 = []byte{
+	// 269 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
 	0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a,
 	0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a,
 	0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21,
 	0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21,
@@ -185,6 +219,9 @@ var fileDescriptor0 = []byte{
 	0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85,
 	0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85,
 	0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b,
 	0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b,
 	0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44,
 	0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44,
-	0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65,
-	0x20, 0x60, 0x01, 0x00, 0x00,
+	0xb8, 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28,
+	0x70, 0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12,
+	0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x41, 0x1a, 0xa0, 0x71, 0xa0, 0x8f, 0x1a, 0x33, 0xab,
+	0x98, 0xf8, 0xdc, 0x41, 0xa6, 0x41, 0x8c, 0xd0, 0x0b, 0x33, 0x4c, 0x62, 0x03, 0x47, 0x92, 0x31,
+	0x20, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x70, 0xc4, 0xa7, 0xc3, 0x01, 0x00, 0x00,
 }
 }

+ 1 - 1
vendor/google.golang.org/grpc/health/health.go

@@ -16,7 +16,7 @@
  *
  *
  */
  */
 
 
-//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto
+//go:generate protoc --go_out=plugins=grpc,paths=source_relative:. grpc_health_v1/health.proto
 
 
 // Package health provides some utility functions to health-check a server. The implementation
 // Package health provides some utility functions to health-check a server. The implementation
 // is based on protobuf. Users need to write their own implementations if other IDLs are used.
 // is based on protobuf. Users need to write their own implementations if other IDLs are used.

+ 27 - 1
vendor/google.golang.org/grpc/metadata/metadata.go

@@ -28,7 +28,9 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
-// DecodeKeyValue returns k, v, nil.  It is deprecated and should not be used.
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
 func DecodeKeyValue(k, v string) (string, string, error) {
 func DecodeKeyValue(k, v string) (string, string, error) {
 	return k, v, nil
 	return k, v, nil
 }
 }
@@ -95,6 +97,30 @@ func (md MD) Copy() MD {
 	return Join(md)
 	return Join(md)
 }
 }
 
 
+// Get obtains the values for a given key.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at that key.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
 // Join joins any number of mds into a single MD.
 // Join joins any number of mds into a single MD.
 // The order of values for each key is determined by the order in which
 // The order of values for each key is determined by the order in which
 // the mds containing those values are presented to Join.
 // the mds containing those values are presented to Join.
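
The new MD.Get, MD.Set and MD.Append helpers above all lower-case the key before touching the map, so lookups stay case-insensitive and Append extends rather than overwrites. A minimal usage sketch against the vendored metadata package:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("user-agent", "demo/1.0")

	// Keys are normalized to lowercase, so lookups are case-insensitive.
	md.Set("Request-ID", "42")
	md.Append("request-id", "43") // appends instead of overwriting

	fmt.Println(md.Get("REQUEST-ID")) // [42 43]
	fmt.Println(md.Get("user-agent")) // [demo/1.0]
}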

+ 3 - 3
vendor/google.golang.org/grpc/naming/dns_resolver.go

@@ -153,10 +153,10 @@ type ipWatcher struct {
 	updateChan chan *Update
 	updateChan chan *Update
 }
 }
 
 
-// Next returns the adrress resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unncessary. Therefore,
+// Next returns the address resolution Update for the target. For IP address,
+// the resolution is itself, thus polling name server is unnecessary. Therefore,
 // Next() will return an Update the first time it is called, and will be blocked
 // Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exisits until watcher is closed.
+// for all following calls as no Update exists until watcher is closed.
 func (i *ipWatcher) Next() ([]*Update, error) {
 func (i *ipWatcher) Next() ([]*Update, error) {
 	u, ok := <-i.updateChan
 	u, ok := <-i.updateChan
 	if !ok {
 	if !ok {

+ 11 - 1
vendor/google.golang.org/grpc/naming/naming.go

@@ -18,20 +18,26 @@
 
 
 // Package naming defines the naming API and related data structures for gRPC.
 // Package naming defines the naming API and related data structures for gRPC.
 // The interface is EXPERIMENTAL and may be subject to change.
 // The interface is EXPERIMENTAL and may be subject to change.
+//
+// Deprecated: please use package resolver.
 package naming
 package naming
 
 
 // Operation defines the corresponding operations for a name resolution change.
 // Operation defines the corresponding operations for a name resolution change.
+//
+// Deprecated: please use package resolver.
 type Operation uint8
 type Operation uint8
 
 
 const (
 const (
 	// Add indicates a new address is added.
 	// Add indicates a new address is added.
 	Add Operation = iota
 	Add Operation = iota
-	// Delete indicates an exisiting address is deleted.
+	// Delete indicates an existing address is deleted.
 	Delete
 	Delete
 )
 )
 
 
 // Update defines a name resolution update. Notice that it is not valid having both
 // Update defines a name resolution update. Notice that it is not valid having both
 // empty string Addr and nil Metadata in an Update.
 // empty string Addr and nil Metadata in an Update.
+//
+// Deprecated: please use package resolver.
 type Update struct {
 type Update struct {
 	// Op indicates the operation of the update.
 	// Op indicates the operation of the update.
 	Op Operation
 	Op Operation
@@ -43,12 +49,16 @@ type Update struct {
 }
 }
 
 
 // Resolver creates a Watcher for a target to track its resolution changes.
 // Resolver creates a Watcher for a target to track its resolution changes.
+//
+// Deprecated: please use package resolver.
 type Resolver interface {
 type Resolver interface {
 	// Resolve creates a Watcher for target.
 	// Resolve creates a Watcher for target.
 	Resolve(target string) (Watcher, error)
 	Resolve(target string) (Watcher, error)
 }
 }
 
 
 // Watcher watches for the updates on the specified target.
 // Watcher watches for the updates on the specified target.
+//
+// Deprecated: please use package resolver.
 type Watcher interface {
 type Watcher interface {
 	// Next blocks until an update or error happens. It may return one or more
 	// Next blocks until an update or error happens. It may return one or more
 	// updates. The first call should get the full set of the results. It should
 	// updates. The first call should get the full set of the results. It should

+ 174 - 1
vendor/google.golang.org/grpc/picker_wrapper.go

@@ -19,12 +19,17 @@
 package grpc
 package grpc
 
 
 import (
 import (
+	"io"
 	"sync"
 	"sync"
+	"sync/atomic"
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/transport"
 	"google.golang.org/grpc/transport"
 )
 )
@@ -40,10 +45,16 @@ type pickerWrapper struct {
 	// The latest connection happened.
 	// The latest connection happened.
 	connErrMu sync.Mutex
 	connErrMu sync.Mutex
 	connErr   error
 	connErr   error
+
+	stickinessMDKey atomic.Value
+	stickiness      *stickyStore
 }
 }
 
 
 func newPickerWrapper() *pickerWrapper {
 func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{blockingCh: make(chan struct{})}
+	bp := &pickerWrapper{
+		blockingCh: make(chan struct{}),
+		stickiness: newStickyStore(),
+	}
 	return bp
 	return bp
 }
 }
 
 
@@ -60,6 +71,27 @@ func (bp *pickerWrapper) connectionError() error {
 	return err
 	return err
 }
 }
 
 
+func (bp *pickerWrapper) updateStickinessMDKey(newKey string) {
+	// No need to check ok because mdKey == "" if ok == false.
+	if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey {
+		bp.stickinessMDKey.Store(newKey)
+		bp.stickiness.reset(newKey)
+	}
+}
+
+func (bp *pickerWrapper) getStickinessMDKey() string {
+	// No need to check ok because mdKey == "" if ok == false.
+	mdKey, _ := bp.stickinessMDKey.Load().(string)
+	return mdKey
+}
+
+func (bp *pickerWrapper) clearStickinessState() {
+	if oldKey := bp.getStickinessMDKey(); oldKey != "" {
+		// There's no need to reset store if mdKey was "".
+		bp.stickiness.reset(oldKey)
+	}
+}
+
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Lock()
 	bp.mu.Lock()
@@ -74,6 +106,23 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Unlock()
 	bp.mu.Unlock()
 }
 }
 
 
+func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
+	acw.mu.Lock()
+	ac := acw.ac
+	acw.mu.Unlock()
+	ac.incrCallsStarted()
+	return func(b balancer.DoneInfo) {
+		if b.Err != nil && b.Err != io.EOF {
+			ac.incrCallsFailed()
+		} else {
+			ac.incrCallsSucceeded()
+		}
+		if done != nil {
+			done(b)
+		}
+	}
+}
+
 // pick returns the transport that will be used for the RPC.
 // pick returns the transport that will be used for the RPC.
 // It may block in the following cases:
 // It may block in the following cases:
 // - there's no picker
 // - there's no picker
@@ -82,6 +131,27 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 // - the subConn returned by the current picker is not READY
 // - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
 // When one of these situations happens, pick blocks until the picker gets updated.
 func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
 func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+
+	mdKey := bp.getStickinessMDKey()
+	stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey)
+
+	// Potential race here: if stickinessMDKey is updated after the above two
+	// lines, and this pick is a sticky pick, the following put could add an
+	// entry to sticky store with an outdated sticky key.
+	//
+	// The solution: keep the current md key in sticky store, and at the
+	// beginning of each get/put, check the mdkey against store.curMDKey.
+	//  - Cons: one more string comparing for each get/put.
+	//  - Cons: one more string comparison for each get/put.
+	//  non-sticky RPCs will be minimal.
+
+	if isSticky {
+		if t, ok := bp.stickiness.get(mdKey, stickyKey); ok {
+			// Done function returned is always nil.
+			return t, nil, nil
+		}
+	}
+
 	var (
 	var (
 		p  balancer.Picker
 		p  balancer.Picker
 		ch chan struct{}
 		ch chan struct{}
@@ -137,6 +207,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 			continue
 			continue
 		}
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+			if isSticky {
+				bp.stickiness.put(mdKey, stickyKey, acw)
+			}
+			if channelz.IsOn() {
+				return t, doneChannelzWrapper(acw, done), nil
+			}
 			return t, done, nil
 			return t, done, nil
 		}
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
@@ -156,3 +232,100 @@ func (bp *pickerWrapper) close() {
 	bp.done = true
 	bp.done = true
 	close(bp.blockingCh)
 	close(bp.blockingCh)
 }
 }
+
+type stickyStoreEntry struct {
+	acw  *acBalancerWrapper
+	addr resolver.Address
+}
+
+type stickyStore struct {
+	mu sync.Mutex
+	// curMDKey is checked before every get/put to avoid races. The operation will
+	// abort immediately when the given mdKey is different from the curMDKey.
+	curMDKey string
+	store    map[string]*stickyStoreEntry
+}
+
+func newStickyStore() *stickyStore {
+	return &stickyStore{
+		store: make(map[string]*stickyStoreEntry),
+	}
+}
+
+// reset clears the map in stickyStore, and sets the curMDKey to newMDKey.
+func (ss *stickyStore) reset(newMDKey string) {
+	ss.mu.Lock()
+	ss.curMDKey = newMDKey
+	ss.store = make(map[string]*stickyStoreEntry)
+	ss.mu.Unlock()
+}
+
+// stickyKey is the key to look up in store. mdKey will be checked against
+// curMDKey to avoid races.
+func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) {
+	ss.mu.Lock()
+	defer ss.mu.Unlock()
+	if mdKey != ss.curMDKey {
+		return
+	}
+	// TODO(stickiness): limit the total number of entries.
+	ss.store[stickyKey] = &stickyStoreEntry{
+		acw:  acw,
+		addr: acw.getAddrConn().getCurAddr(),
+	}
+}
+
+// stickyKey is the key to look up in store. mdKey will be checked against
+// curMDKey to avoid races.
+func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) {
+	ss.mu.Lock()
+	defer ss.mu.Unlock()
+	if mdKey != ss.curMDKey {
+		return nil, false
+	}
+	entry, ok := ss.store[stickyKey]
+	if !ok {
+		return nil, false
+	}
+	ac := entry.acw.getAddrConn()
+	if ac.getCurAddr() != entry.addr {
+		delete(ss.store, stickyKey)
+		return nil, false
+	}
+	t, ok := ac.getReadyTransport()
+	if !ok {
+		delete(ss.store, stickyKey)
+		return nil, false
+	}
+	return t, true
+}
+
+// Get one value from metadata in ctx with key stickinessMDKey.
+//
+// It returns "", false if stickinessMDKey is an empty string.
+func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) {
+	if stickinessMDKey == "" {
+		return "", false
+	}
+
+	md, added, ok := metadata.FromOutgoingContextRaw(ctx)
+	if !ok {
+		return "", false
+	}
+
+	if vv, ok := md[stickinessMDKey]; ok {
+		if len(vv) > 0 {
+			return vv[0], true
+		}
+	}
+
+	for _, ss := range added {
+		for i := 0; i < len(ss)-1; i += 2 {
+			if ss[i] == stickinessMDKey {
+				return ss[i+1], true
+			}
+		}
+	}
+
+	return "", false
+}
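
stickyKeyFromContext above pulls the sticky key out of the RPC's outgoing metadata, either from the attached MD or from pairs appended via AppendToOutgoingContext. A hedged sketch of how a client could supply such a key; "session-id" is only an illustrative name, since the effective key is whatever stickinessMetadataKey the service config advertises (see the service_config.go hunk later in this diff):

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// "session-id" is illustrative; the effective key is the
	// stickinessMetadataKey advertised by the service config.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "session-id", "abc123")

	// stickyKeyFromContext (above) would see "abc123" for RPCs issued on
	// this ctx, so repeated picks stay pinned to the same ready address.
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		fmt.Println(md.Get("session-id")) // [abc123]
	}
}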

+ 23 - 21
vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go

@@ -50,7 +50,10 @@ const (
 	txtAttribute = "grpc_config="
 	txtAttribute = "grpc_config="
 )
 )
 
 
-var errMissingAddr = errors.New("missing address")
+var (
+	errMissingAddr = errors.New("missing address")
+	randomGen      = rand.New(rand.NewSource(time.Now().UnixNano()))
+)
 
 
 // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
 // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
 func NewBuilder() resolver.Builder {
 func NewBuilder() resolver.Builder {
@@ -87,14 +90,15 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 	// DNS address (non-IP).
 	// DNS address (non-IP).
 	ctx, cancel := context.WithCancel(context.Background())
 	ctx, cancel := context.WithCancel(context.Background())
 	d := &dnsResolver{
 	d := &dnsResolver{
-		freq:   b.freq,
-		host:   host,
-		port:   port,
-		ctx:    ctx,
-		cancel: cancel,
-		cc:     cc,
-		t:      time.NewTimer(0),
-		rn:     make(chan struct{}, 1),
+		freq:                 b.freq,
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		t:                    time.NewTimer(0),
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
 	}
 	}
 
 
 	d.wg.Add(1)
 	d.wg.Add(1)
@@ -157,7 +161,8 @@ type dnsResolver struct {
 	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
 	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
 	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
 	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
 	// has data race with replaceNetFunc (WRITE the lookup function pointers).
 	// has data race with replaceNetFunc (WRITE the lookup function pointers).
-	wg sync.WaitGroup
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
 }
 }
 
 
 // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
 // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
@@ -187,7 +192,7 @@ func (d *dnsResolver) watcher() {
 		result, sc := d.lookup()
 		result, sc := d.lookup()
 		// Next lookup should happen after an interval defined by d.freq.
 		// Next lookup should happen after an interval defined by d.freq.
 		d.t.Reset(d.freq)
 		d.t.Reset(d.freq)
-		d.cc.NewServiceConfig(string(sc))
+		d.cc.NewServiceConfig(sc)
 		d.cc.NewAddress(result)
 		d.cc.NewAddress(result)
 	}
 	}
 }
 }
@@ -202,7 +207,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
 	for _, s := range srvs {
 	for _, s := range srvs {
 		lbAddrs, err := lookupHost(d.ctx, s.Target)
 		lbAddrs, err := lookupHost(d.ctx, s.Target)
 		if err != nil {
 		if err != nil {
-			grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
+			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
 			continue
 			continue
 		}
 		}
 		for _, a := range lbAddrs {
 		for _, a := range lbAddrs {
@@ -221,7 +226,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
 func (d *dnsResolver) lookupTXT() string {
 func (d *dnsResolver) lookupTXT() string {
 	ss, err := lookupTXT(d.ctx, d.host)
 	ss, err := lookupTXT(d.ctx, d.host)
 	if err != nil {
 	if err != nil {
-		grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
+		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
 		return ""
 		return ""
 	}
 	}
 	var res string
 	var res string
@@ -257,10 +262,12 @@ func (d *dnsResolver) lookupHost() []resolver.Address {
 }
 }
 
 
 func (d *dnsResolver) lookup() ([]resolver.Address, string) {
 func (d *dnsResolver) lookup() ([]resolver.Address, string) {
-	var newAddrs []resolver.Address
-	newAddrs = d.lookupSRV()
+	newAddrs := d.lookupSRV()
 	// Support fallback to non-balancer address.
 	// Support fallback to non-balancer address.
 	newAddrs = append(newAddrs, d.lookupHost()...)
 	newAddrs = append(newAddrs, d.lookupHost()...)
+	if d.disableServiceConfig {
+		return newAddrs, ""
+	}
 	sc := d.lookupTXT()
 	sc := d.lookupTXT()
 	return newAddrs, canaryingSC(sc)
 	return newAddrs, canaryingSC(sc)
 }
 }
@@ -339,12 +346,7 @@ func chosenByPercentage(a *int) bool {
 	if a == nil {
 	if a == nil {
 		return true
 		return true
 	}
 	}
-	s := rand.NewSource(time.Now().UnixNano())
-	r := rand.New(s)
-	if r.Intn(100)+1 > *a {
-		return false
-	}
-	return true
+	return randomGen.Intn(100)+1 <= *a
 }
 }
 
 
 func canaryingSC(js string) string {
 func canaryingSC(js string) string {
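
chosenByPercentage now draws from a single package-level generator seeded once, instead of constructing a fresh rand.Source on every call; the decision itself is still a uniform draw in 1..100 compared against the canary percentage. The same pattern in isolation:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Seed once and reuse the generator, which is what the diff above
// switches the DNS resolver to.
var randomGen = rand.New(rand.NewSource(time.Now().UnixNano()))

// chosenByPercentage reports whether this client falls inside the
// canary percentage a (nil means "no restriction").
func chosenByPercentage(a *int) bool {
	if a == nil {
		return true
	}
	return randomGen.Intn(100)+1 <= *a
}

func main() {
	p := 25
	fmt.Println(chosenByPercentage(&p)) // true roughly 25% of the time
}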

+ 8 - 2
vendor/google.golang.org/grpc/resolver/resolver.go

@@ -29,8 +29,12 @@ var (
 
 
 // TODO(bar) install dns resolver in init(){}.
 // TODO(bar) install dns resolver in init(){}.
 
 
-// Register registers the resolver builder to the resolver map.
-// b.Scheme will be used as the scheme registered with this builder.
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
 func Register(b Builder) {
 func Register(b Builder) {
 	m[b.Scheme()] = b
 	m[b.Scheme()] = b
 }
 }
@@ -86,6 +90,8 @@ type Address struct {
 // BuildOption includes additional information for the builder to create
 // BuildOption includes additional information for the builder to create
 // the resolver.
 // the resolver.
 type BuildOption struct {
 type BuildOption struct {
+	// DisableServiceConfig indicates whether resolver should fetch service config data.
+	DisableServiceConfig bool
 }
 }
 
 
 // ClientConn contains the callbacks for resolver to notify any updates
 // ClientConn contains the callbacks for resolver to notify any updates
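
With Register restricted to init() time and BuildOption gaining DisableServiceConfig (plumbed from cc.dopts.disableServiceConfig in the next hunk, exposed as a WithDisableServiceConfig dial option in this release), a custom resolver now looks roughly like the sketch below. The scheme name and address are illustrative only:

package exampleresolver

import "google.golang.org/grpc/resolver"

type exampleBuilder struct{}

func (*exampleBuilder) Scheme() string { return "example" }

func (*exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	// opts.DisableServiceConfig tells the resolver to skip fetching
	// service config (e.g. the TXT lookup in the DNS resolver above).
	r := &exampleResolver{cc: cc, fetchSC: !opts.DisableServiceConfig}
	r.start()
	return r, nil
}

type exampleResolver struct {
	cc resolver.ClientConn
	// fetchSC would gate a service-config lookup; kept only to show the new knob.
	fetchSC bool
}

func (r *exampleResolver) start() {
	r.cc.NewAddress([]resolver.Address{{Addr: "127.0.0.1:50051"}})
}

func (*exampleResolver) ResolveNow(resolver.ResolveNowOption) {}
func (*exampleResolver) Close()                               {}

func init() {
	// Registration is only safe from init(), per the note above.
	resolver.Register(&exampleBuilder{})
}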

+ 2 - 2
vendor/google.golang.org/grpc/resolver_conn_wrapper.go

@@ -84,7 +84,7 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
 	}
 	}
 
 
 	var err error
 	var err error
-	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{})
+	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -95,7 +95,7 @@ func (ccr *ccResolverWrapper) start() {
 	go ccr.watcher()
 	go ccr.watcher()
 }
 }
 
 
-// watcher processes address updates and service config updates sequencially.
+// watcher processes address updates and service config updates sequentially.
 // Otherwise, we need to resolve possible races between address and service
 // Otherwise, we need to resolve possible races between address and service
 // config (e.g. they specify different balancer types).
 // config (e.g. they specify different balancer types).
 func (ccr *ccResolverWrapper) watcher() {
 func (ccr *ccResolverWrapper) watcher() {

+ 21 - 11
vendor/google.golang.org/grpc/rpc_util.go

@@ -44,6 +44,8 @@ import (
 )
 )
 
 
 // Compressor defines the interface gRPC uses to compress a message.
 // Compressor defines the interface gRPC uses to compress a message.
+//
+// Deprecated: use package encoding.
 type Compressor interface {
 type Compressor interface {
 	// Do compresses p into w.
 	// Do compresses p into w.
 	Do(w io.Writer, p []byte) error
 	Do(w io.Writer, p []byte) error
@@ -56,6 +58,8 @@ type gzipCompressor struct {
 }
 }
 
 
 // NewGZIPCompressor creates a Compressor based on GZIP.
 // NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
 func NewGZIPCompressor() Compressor {
 func NewGZIPCompressor() Compressor {
 	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
 	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
 	return c
 	return c
@@ -65,6 +69,8 @@ func NewGZIPCompressor() Compressor {
 // of assuming DefaultCompression.
 // of assuming DefaultCompression.
 //
 //
 // The error returned will be nil if the level is valid.
 // The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
 func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
 func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
 	if level < gzip.DefaultCompression || level > gzip.BestCompression {
 	if level < gzip.DefaultCompression || level > gzip.BestCompression {
 		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
 		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
@@ -97,6 +103,8 @@ func (c *gzipCompressor) Type() string {
 }
 }
 
 
 // Decompressor defines the interface gRPC uses to decompress a message.
 // Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
 type Decompressor interface {
 type Decompressor interface {
 	// Do reads the data from r and uncompress them.
 	// Do reads the data from r and uncompress them.
 	Do(r io.Reader) ([]byte, error)
 	Do(r io.Reader) ([]byte, error)
@@ -109,6 +117,8 @@ type gzipDecompressor struct {
 }
 }
 
 
 // NewGZIPDecompressor creates a Decompressor based on GZIP.
 // NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
 func NewGZIPDecompressor() Decompressor {
 func NewGZIPDecompressor() Decompressor {
 	return &gzipDecompressor{}
 	return &gzipDecompressor{}
 }
 }
@@ -218,8 +228,8 @@ func (o TrailerCallOption) after(c *callInfo) {
 	}
 	}
 }
 }
 
 
-// Peer returns a CallOption that retrieves peer information for a
-// unary RPC.
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
 func Peer(p *peer.Peer) CallOption {
 func Peer(p *peer.Peer) CallOption {
 	return PeerCallOption{PeerAddr: p}
 	return PeerCallOption{PeerAddr: p}
 }
 }
@@ -265,7 +275,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
 	c.failFast = o.FailFast
 	c.failFast = o.FailFast
 	return nil
 	return nil
 }
 }
-func (o FailFastCallOption) after(c *callInfo) { return }
+func (o FailFastCallOption) after(c *callInfo) {}
 
 
 // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
 // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
 func MaxCallRecvMsgSize(s int) CallOption {
 func MaxCallRecvMsgSize(s int) CallOption {
@@ -283,7 +293,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
 	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
 	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
 	return nil
 	return nil
 }
 }
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) { return }
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
 
 
 // MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
 // MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
 func MaxCallSendMsgSize(s int) CallOption {
 func MaxCallSendMsgSize(s int) CallOption {
@@ -301,7 +311,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
 	c.maxSendMessageSize = &o.MaxSendMsgSize
 	c.maxSendMessageSize = &o.MaxSendMsgSize
 	return nil
 	return nil
 }
 }
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) { return }
+func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
 
 
 // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
 // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
 // for a call.
 // for a call.
@@ -320,7 +330,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
 	c.creds = o.Creds
 	c.creds = o.Creds
 	return nil
 	return nil
 }
 }
-func (o PerRPCCredsCallOption) after(c *callInfo) { return }
+func (o PerRPCCredsCallOption) after(c *callInfo) {}
 
 
 // UseCompressor returns a CallOption which sets the compressor used when
 // UseCompressor returns a CallOption which sets the compressor used when
 // sending the request.  If WithCompressor is also set, UseCompressor has
 // sending the request.  If WithCompressor is also set, UseCompressor has
@@ -341,7 +351,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
 	c.compressorType = o.CompressorType
 	c.compressorType = o.CompressorType
 	return nil
 	return nil
 }
 }
-func (o CompressorCallOption) after(c *callInfo) { return }
+func (o CompressorCallOption) after(c *callInfo) {}
 
 
 // CallContentSubtype returns a CallOption that will set the content-subtype
 // CallContentSubtype returns a CallOption that will set the content-subtype
 // for a call. For example, if content-subtype is "json", the Content-Type over
 // for a call. For example, if content-subtype is "json", the Content-Type over
@@ -352,7 +362,7 @@ func (o CompressorCallOption) after(c *callInfo) { return }
 //
 //
 // If CallCustomCodec is not also used, the content-subtype will be used to
 // If CallCustomCodec is not also used, the content-subtype will be used to
 // look up the Codec to use in the registry controlled by RegisterCodec. See
 // look up the Codec to use in the registry controlled by RegisterCodec. See
-// the documention on RegisterCodec for details on registration. The lookup
+// the documentation on RegisterCodec for details on registration. The lookup
 // of content-subtype is case-insensitive. If no such Codec is found, the call
 // of content-subtype is case-insensitive. If no such Codec is found, the call
 // will result in an error with code codes.Internal.
 // will result in an error with code codes.Internal.
 //
 //
@@ -374,7 +384,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
 	c.contentSubtype = o.ContentSubtype
 	c.contentSubtype = o.ContentSubtype
 	return nil
 	return nil
 }
 }
-func (o ContentSubtypeCallOption) after(c *callInfo) { return }
+func (o ContentSubtypeCallOption) after(c *callInfo) {}
 
 
 // CallCustomCodec returns a CallOption that will set the given Codec to be
 // CallCustomCodec returns a CallOption that will set the given Codec to be
 // used for all request and response messages for a call. The result of calling
 // used for all request and response messages for a call. The result of calling
@@ -403,7 +413,7 @@ func (o CustomCodecCallOption) before(c *callInfo) error {
 	c.codec = o.Codec
 	c.codec = o.Codec
 	return nil
 	return nil
 }
 }
-func (o CustomCodecCallOption) after(c *callInfo) { return }
+func (o CustomCodecCallOption) after(c *callInfo) {}
 
 
 // The format of the payload: compressed or not?
 // The format of the payload: compressed or not?
 type payloadFormat uint8
 type payloadFormat uint8
@@ -712,6 +722,6 @@ const (
 )
 )
 
 
 // Version is the current grpc version.
 // Version is the current grpc version.
-const Version = "1.11.3"
+const Version = "1.12.2"
 
 
 const grpcUA = "grpc-go/" + Version
 const grpcUA = "grpc-go/" + Version
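
With the gzip Compressor/Decompressor constructors deprecated, the replacement is the registered "gzip" codec from encoding/gzip, selected per call (or channel-wide) through UseCompressor. A sketch of the new wiring; the target address is a placeholder:

package main

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		// Old style (deprecated above): grpc.WithCompressor(grpc.NewGZIPCompressor())
		// New style: pick the registered compressor per call.
		grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}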

+ 133 - 18
vendor/google.golang.org/grpc/server.go

@@ -37,6 +37,8 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/trace"
 	"golang.org/x/net/trace"
+
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding"
@@ -97,11 +99,19 @@ type Server struct {
 	m      map[string]*service // service name -> service info
 	m      map[string]*service // service name -> service info
 	events trace.EventLog
 	events trace.EventLog
 
 
-	quit     chan struct{}
-	done     chan struct{}
-	quitOnce sync.Once
-	doneOnce sync.Once
-	serveWG  sync.WaitGroup // counts active Serve goroutines for GracefulStop
+	quit               chan struct{}
+	done               chan struct{}
+	quitOnce           sync.Once
+	doneOnce           sync.Once
+	channelzRemoveOnce sync.Once
+	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
+
+	channelzID          int64 // channelz unique identification number
+	czmu                sync.RWMutex
+	callsStarted        int64
+	callsFailed         int64
+	callsSucceeded      int64
+	lastCallStartedTime time.Time
 }
 }
 
 
 type options struct {
 type options struct {
@@ -216,7 +226,9 @@ func RPCDecompressor(dc Decompressor) ServerOption {
 }
 }
 
 
 // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
 // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
-// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
+// If this is not set, gRPC uses the default limit.
+//
+// Deprecated: use MaxRecvMsgSize instead.
 func MaxMsgSize(m int) ServerOption {
 func MaxMsgSize(m int) ServerOption {
 	return MaxRecvMsgSize(m)
 	return MaxRecvMsgSize(m)
 }
 }
@@ -343,6 +355,10 @@ func NewServer(opt ...ServerOption) *Server {
 		_, file, line, _ := runtime.Caller(1)
 		_, file, line, _ := runtime.Caller(1)
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
 	}
 	}
+
+	if channelz.IsOn() {
+		s.channelzID = channelz.RegisterServer(s, "")
+	}
 	return s
 	return s
 }
 }
 
 
@@ -458,6 +474,25 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
 	return s.opts.creds.ServerHandshake(rawConn)
 	return s.opts.creds.ServerHandshake(rawConn)
 }
 }
 
 
+type listenSocket struct {
+	net.Listener
+	channelzID int64
+}
+
+func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
+	return &channelz.SocketInternalMetric{
+		LocalAddr: l.Listener.Addr(),
+	}
+}
+
+func (l *listenSocket) Close() error {
+	err := l.Listener.Close()
+	if channelz.IsOn() {
+		channelz.RemoveEntry(l.channelzID)
+	}
+	return err
+}
+
 // Serve accepts incoming connections on the listener lis, creating a new
 // Serve accepts incoming connections on the listener lis, creating a new
 // ServerTransport and service goroutine for each. The service goroutines
 // ServerTransport and service goroutine for each. The service goroutines
 // read gRPC requests and then call the registered handlers to reply to them.
 // read gRPC requests and then call the registered handlers to reply to them.
@@ -486,13 +521,19 @@ func (s *Server) Serve(lis net.Listener) error {
 		}
 		}
 	}()
 	}()
 
 
-	s.lis[lis] = true
+	ls := &listenSocket{Listener: lis}
+	s.lis[ls] = true
+
+	if channelz.IsOn() {
+		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
+	}
 	s.mu.Unlock()
 	s.mu.Unlock()
+
 	defer func() {
 	defer func() {
 		s.mu.Lock()
 		s.mu.Lock()
-		if s.lis != nil && s.lis[lis] {
-			lis.Close()
-			delete(s.lis, lis)
+		if s.lis != nil && s.lis[ls] {
+			ls.Close()
+			delete(s.lis, ls)
 		}
 		}
 		s.mu.Unlock()
 		s.mu.Unlock()
 	}()
 	}()
@@ -614,6 +655,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		InitialConnWindowSize: s.opts.initialConnWindowSize,
 		InitialConnWindowSize: s.opts.initialConnWindowSize,
 		WriteBufferSize:       s.opts.writeBufferSize,
 		WriteBufferSize:       s.opts.writeBufferSize,
 		ReadBufferSize:        s.opts.readBufferSize,
 		ReadBufferSize:        s.opts.readBufferSize,
+		ChannelzParentID:      s.channelzID,
 	}
 	}
 	st, err := transport.NewServerTransport("http2", c, config)
 	st, err := transport.NewServerTransport("http2", c, config)
 	if err != nil {
 	if err != nil {
@@ -624,6 +666,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
 		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
 		return nil
 		return nil
 	}
 	}
+
 	return st
 	return st
 }
 }
 
 
@@ -751,6 +794,38 @@ func (s *Server) removeConn(c io.Closer) {
 	}
 	}
 }
 }
 
 
+// ChannelzMetric returns ServerInternalMetric of current server.
+// This is an EXPERIMENTAL API.
+func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric {
+	s.czmu.RLock()
+	defer s.czmu.RUnlock()
+	return &channelz.ServerInternalMetric{
+		CallsStarted:             s.callsStarted,
+		CallsSucceeded:           s.callsSucceeded,
+		CallsFailed:              s.callsFailed,
+		LastCallStartedTimestamp: s.lastCallStartedTime,
+	}
+}
+
+func (s *Server) incrCallsStarted() {
+	s.czmu.Lock()
+	s.callsStarted++
+	s.lastCallStartedTime = time.Now()
+	s.czmu.Unlock()
+}
+
+func (s *Server) incrCallsSucceeded() {
+	s.czmu.Lock()
+	s.callsSucceeded++
+	s.czmu.Unlock()
+}
+
+func (s *Server) incrCallsFailed() {
+	s.czmu.Lock()
+	s.callsFailed++
+	s.czmu.Unlock()
+}
+
 func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	var (
 	var (
 		outPayload *stats.OutPayload
 		outPayload *stats.OutPayload
@@ -775,6 +850,16 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
 }
 }
 
 
 func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
 func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+	if channelz.IsOn() {
+		s.incrCallsStarted()
+		defer func() {
+			if err != nil && err != io.EOF {
+				s.incrCallsFailed()
+			} else {
+				s.incrCallsSucceeded()
+			}
+		}()
+	}
 	sh := s.opts.statsHandler
 	sh := s.opts.statsHandler
 	if sh != nil {
 	if sh != nil {
 		beginTime := time.Now()
 		beginTime := time.Now()
@@ -869,6 +954,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		}
 		return err
 		return err
 	}
 	}
+	if channelz.IsOn() {
+		t.IncrMsgRecv()
+	}
 	if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
 	if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
 		if e := t.WriteStatus(stream, st); e != nil {
 		if e := t.WriteStatus(stream, st); e != nil {
 			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
 			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
@@ -968,6 +1056,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		}
 		return err
 		return err
 	}
 	}
+	if channelz.IsOn() {
+		t.IncrMsgSent()
+	}
 	if trInfo != nil {
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
 		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
 	}
 	}
@@ -978,6 +1069,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 }
 }
 
 
 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+	if channelz.IsOn() {
+		s.incrCallsStarted()
+		defer func() {
+			if err != nil && err != io.EOF {
+				s.incrCallsFailed()
+			} else {
+				s.incrCallsSucceeded()
+			}
+		}()
+	}
 	sh := s.opts.statsHandler
 	sh := s.opts.statsHandler
 	if sh != nil {
 	if sh != nil {
 		beginTime := time.Now()
 		beginTime := time.Now()
@@ -1199,10 +1300,12 @@ type ServerTransportStream interface {
 	SetTrailer(md metadata.MD) error
 	SetTrailer(md metadata.MD) error
 }
 }
 
 
-// serverStreamFromContext returns the server stream saved in ctx. Returns
-// nil if the given context has no stream associated with it (which implies
-// it is not an RPC invocation context).
-func serverTransportStreamFromContext(ctx context.Context) ServerTransportStream {
+// ServerTransportStreamFromContext returns the ServerTransportStream saved in
+// ctx. Returns nil if the given context has no stream associated with it
+// (which implies it is not an RPC invocation context).
+//
+// This API is EXPERIMENTAL.
+func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
 	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
 	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
 	return s
 	return s
 }
 }
@@ -1224,6 +1327,12 @@ func (s *Server) Stop() {
 		})
 		})
 	}()
 	}()
 
 
+	s.channelzRemoveOnce.Do(func() {
+		if channelz.IsOn() {
+			channelz.RemoveEntry(s.channelzID)
+		}
+	})
+
 	s.mu.Lock()
 	s.mu.Lock()
 	listeners := s.lis
 	listeners := s.lis
 	s.lis = nil
 	s.lis = nil
@@ -1262,11 +1371,17 @@ func (s *Server) GracefulStop() {
 		})
 		})
 	}()
 	}()
 
 
+	s.channelzRemoveOnce.Do(func() {
+		if channelz.IsOn() {
+			channelz.RemoveEntry(s.channelzID)
+		}
+	})
 	s.mu.Lock()
 	s.mu.Lock()
 	if s.conns == nil {
 	if s.conns == nil {
 		s.mu.Unlock()
 		s.mu.Unlock()
 		return
 		return
 	}
 	}
+
 	for lis := range s.lis {
 	for lis := range s.lis {
 		lis.Close()
 		lis.Close()
 	}
 	}
@@ -1327,7 +1442,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 	if md.Len() == 0 {
 		return nil
 		return nil
 	}
 	}
-	stream := serverTransportStreamFromContext(ctx)
+	stream := ServerTransportStreamFromContext(ctx)
 	if stream == nil {
 	if stream == nil {
 		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	}
@@ -1337,7 +1452,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 // SendHeader sends header metadata. It may be called at most once.
 // SendHeader sends header metadata. It may be called at most once.
 // The provided md and headers set by SetHeader() will be sent.
 // The provided md and headers set by SetHeader() will be sent.
 func SendHeader(ctx context.Context, md metadata.MD) error {
 func SendHeader(ctx context.Context, md metadata.MD) error {
-	stream := serverTransportStreamFromContext(ctx)
+	stream := ServerTransportStreamFromContext(ctx)
 	if stream == nil {
 	if stream == nil {
 		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	}
@@ -1353,7 +1468,7 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 	if md.Len() == 0 {
 		return nil
 		return nil
 	}
 	}
-	stream := serverTransportStreamFromContext(ctx)
+	stream := ServerTransportStreamFromContext(ctx)
 	if stream == nil {
 	if stream == nil {
 		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	}
@@ -1363,7 +1478,7 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
 // Method returns the method string for the server context.  The returned
 // Method returns the method string for the server context.  The returned
 // string is in the format of "/service/method".
 // string is in the format of "/service/method".
 func Method(ctx context.Context) (string, bool) {
 func Method(ctx context.Context) (string, bool) {
-	s := serverTransportStreamFromContext(ctx)
+	s := ServerTransportStreamFromContext(ctx)
 	if s == nil {
 	if s == nil {
 		return "", false
 		return "", false
 	}
 	}
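
ServerTransportStreamFromContext is now exported (and EXPERIMENTAL), and SetHeader/SendHeader/SetTrailer/Method keep using it under the hood. A sketch of code meant to run inside a registered handler, using the public helpers and, optionally, the raw stream; the metadata keys here are illustrative:

package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate must be called with an RPC invocation context, i.e. from
// inside a handler; otherwise the stream lookup returns nil.
func annotate(ctx context.Context) error {
	if name, ok := grpc.Method(ctx); ok {
		// Echo the full "/service/method" back as a response header.
		if err := grpc.SetHeader(ctx, metadata.Pairs("invoked-method", name)); err != nil {
			return err
		}
	}
	// Reaching the stream directly is possible too, but EXPERIMENTAL.
	if s := grpc.ServerTransportStreamFromContext(ctx); s != nil {
		_ = s.SetTrailer(metadata.Pairs("handled-by", "example"))
	}
	return nil
}

func main() {}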

+ 11 - 4
vendor/google.golang.org/grpc/service_config.go

@@ -32,7 +32,8 @@ const maxInt = int(^uint(0) >> 1)
 
 
 // MethodConfig defines the configuration recommended by the service providers for a
 // MethodConfig defines the configuration recommended by the service providers for a
 // particular method.
 // particular method.
-// DEPRECATED: Users should not use this struct. Service config should be received
+//
+// Deprecated: Users should not use this struct. Service config should be received
 // through name resolver, as specified here
 // through name resolver, as specified here
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
 type MethodConfig struct {
 type MethodConfig struct {
@@ -59,7 +60,8 @@ type MethodConfig struct {
 
 
 // ServiceConfig is provided by the service provider and contains parameters for how
 // ServiceConfig is provided by the service provider and contains parameters for how
 // clients that connect to the service should behave.
 // clients that connect to the service should behave.
-// DEPRECATED: Users should not use this struct. Service config should be received
+//
+// Deprecated: Users should not use this struct. Service config should be received
 // through name resolver, as specified here
 // through name resolver, as specified here
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
 type ServiceConfig struct {
 type ServiceConfig struct {
@@ -71,6 +73,8 @@ type ServiceConfig struct {
 	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
 	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
 	// Otherwise, the method has no MethodConfig to use.
 	// Otherwise, the method has no MethodConfig to use.
 	Methods map[string]MethodConfig
 	Methods map[string]MethodConfig
+
+	stickinessMetadataKey *string
 }
 }
 
 
 func parseDuration(s *string) (*time.Duration, error) {
 func parseDuration(s *string) (*time.Duration, error) {
@@ -144,8 +148,9 @@ type jsonMC struct {
 
 
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonSC struct {
 type jsonSC struct {
-	LoadBalancingPolicy *string
-	MethodConfig        *[]jsonMC
+	LoadBalancingPolicy   *string
+	StickinessMetadataKey *string
+	MethodConfig          *[]jsonMC
 }
 }
 
 
 func parseServiceConfig(js string) (ServiceConfig, error) {
 func parseServiceConfig(js string) (ServiceConfig, error) {
@@ -158,6 +163,8 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
 	sc := ServiceConfig{
 	sc := ServiceConfig{
 		LB:      rsc.LoadBalancingPolicy,
 		LB:      rsc.LoadBalancingPolicy,
 		Methods: make(map[string]MethodConfig),
 		Methods: make(map[string]MethodConfig),
+
+		stickinessMetadataKey: rsc.StickinessMetadataKey,
 	}
 	}
 	if rsc.MethodConfig == nil {
 	if rsc.MethodConfig == nil {
 		return sc, nil
 		return sc, nil
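
parseServiceConfig now also lifts a stickinessMetadataKey out of the JSON service config; encoding/json matches the jsonSC field name case-insensitively. A hedged sketch of what a resolver-delivered config carrying it might look like; the load-balancing policy and the key name are illustrative:

package main

import "fmt"

// exampleServiceConfig is the JSON a resolver might hand to
// ClientConn.NewServiceConfig. "stickinessMetadataKey" feeds the sticky
// store in picker_wrapper.go; "session-id" is only an example key.
const exampleServiceConfig = `{
  "loadBalancingPolicy": "round_robin",
  "stickinessMetadataKey": "session-id"
}`

func main() {
	fmt.Println(exampleServiceConfig)
}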

+ 30 - 1
vendor/google.golang.org/grpc/stream.go

@@ -27,6 +27,7 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 	"golang.org/x/net/trace"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/metadata"
@@ -121,6 +122,14 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 }
 }
 
 
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	if channelz.IsOn() {
+		cc.incrCallsStarted()
+		defer func() {
+			if err != nil {
+				cc.incrCallsFailed()
+			}
+		}()
+	}
 	c := defaultCallInfo()
 	c := defaultCallInfo()
 	mc := cc.GetMethodConfig(method)
 	mc := cc.GetMethodConfig(method)
 	if mc.WaitForReady != nil {
 	if mc.WaitForReady != nil {
@@ -272,6 +281,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	cs := &clientStream{
 	cs := &clientStream{
 		opts:   opts,
 		opts:   opts,
 		c:      c,
 		c:      c,
+		cc:     cc,
 		desc:   desc,
 		desc:   desc,
 		codec:  c.codec,
 		codec:  c.codec,
 		cp:     cp,
 		cp:     cp,
@@ -313,6 +323,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 type clientStream struct {
 type clientStream struct {
 	opts []CallOption
 	opts []CallOption
 	c    *callInfo
 	c    *callInfo
+	cc   *ClientConn
 	desc *StreamDesc
 	desc *StreamDesc
 
 
 	codec baseCodec
 	codec baseCodec
@@ -401,6 +412,13 @@ func (cs *clientStream) finish(err error) {
 	}
 	}
 	cs.finished = true
 	cs.finished = true
 	cs.mu.Unlock()
 	cs.mu.Unlock()
+	if channelz.IsOn() {
+		if err != nil {
+			cs.cc.incrCallsFailed()
+		} else {
+			cs.cc.incrCallsSucceeded()
+		}
+	}
 	// TODO(retry): commit current attempt if necessary.
 	// TODO(retry): commit current attempt if necessary.
 	cs.attempt.finish(err)
 	cs.attempt.finish(err)
 	for _, o := range cs.opts {
 	for _, o := range cs.opts {
@@ -470,6 +488,9 @@ func (a *csAttempt) sendMsg(m interface{}) (err error) {
 			outPayload.SentTime = time.Now()
 			outPayload.SentTime = time.Now()
 			a.statsHandler.HandleRPC(a.ctx, outPayload)
 			a.statsHandler.HandleRPC(a.ctx, outPayload)
 		}
 		}
+		if channelz.IsOn() {
+			a.t.IncrMsgSent()
+		}
 		return nil
 		return nil
 	}
 	}
 	return io.EOF
 	return io.EOF
@@ -525,6 +546,9 @@ func (a *csAttempt) recvMsg(m interface{}) (err error) {
 	if inPayload != nil {
 	if inPayload != nil {
 		a.statsHandler.HandleRPC(a.ctx, inPayload)
 		a.statsHandler.HandleRPC(a.ctx, inPayload)
 	}
 	}
+	if channelz.IsOn() {
+		a.t.IncrMsgRecv()
+	}
 	if cs.desc.ServerStreams {
 	if cs.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
 		return nil
@@ -648,7 +672,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
 		return
 		return
 	}
 	}
 	ss.s.SetTrailer(md)
 	ss.s.SetTrailer(md)
-	return
 }
 }
 
 
 func (ss *serverStream) SendMsg(m interface{}) (err error) {
 func (ss *serverStream) SendMsg(m interface{}) (err error) {
@@ -669,6 +692,9 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 			st, _ := status.FromError(toRPCErr(err))
 			st, _ := status.FromError(toRPCErr(err))
 			ss.t.WriteStatus(ss.s, st)
 			ss.t.WriteStatus(ss.s, st)
 		}
 		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgSent()
+		}
 	}()
 	}()
 	var outPayload *stats.OutPayload
 	var outPayload *stats.OutPayload
 	if ss.statsHandler != nil {
 	if ss.statsHandler != nil {
@@ -709,6 +735,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 			st, _ := status.FromError(toRPCErr(err))
 			st, _ := status.FromError(toRPCErr(err))
 			ss.t.WriteStatus(ss.s, st)
 			ss.t.WriteStatus(ss.s, st)
 		}
 		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgRecv()
+		}
 	}()
 	}()
 	var inPayload *stats.InPayload
 	var inPayload *stats.InPayload
 	if ss.statsHandler != nil {
 	if ss.statsHandler != nil {
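
All of the call and message counters above are guarded by channelz.IsOn(), so the bookkeeping costs nothing while channelz is disabled. A hedged sketch of reading the server-side counters: TurnOn is assumed to be the switch exposed by the channelz package added in this bump, while ChannelzMetric and its fields are the ones shown in server.go above.

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/channelz"
)

func main() {
	// Assumed API from the vendored channelz package: TurnOn flips the
	// global switch that IsOn() checks in the hot paths above.
	channelz.TurnOn()

	s := grpc.NewServer() // registers itself with channelz when IsOn()
	defer s.Stop()

	m := s.ChannelzMetric() // EXPERIMENTAL, per server.go above
	fmt.Println(m.CallsStarted, m.CallsSucceeded, m.CallsFailed)
}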

+ 769 - 0
vendor/google.golang.org/grpc/transport/controlbuf.go

@@ -0,0 +1,769 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sync"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+)
+
+type itemNode struct {
+	it   interface{}
+	next *itemNode
+}
+
+type itemList struct {
+	head *itemNode
+	tail *itemNode
+}
+
+func (il *itemList) enqueue(i interface{}) {
+	n := &itemNode{it: i}
+	if il.tail == nil {
+		il.head, il.tail = n, n
+		return
+	}
+	il.tail.next = n
+	il.tail = n
+}
+
+// peek returns the first item in the list without removing it from the
+// list.
+func (il *itemList) peek() interface{} {
+	return il.head.it
+}
+
+func (il *itemList) dequeue() interface{} {
+	if il.head == nil {
+		return nil
+	}
+	i := il.head.it
+	il.head = il.head.next
+	if il.head == nil {
+		il.tail = nil
+	}
+	return i
+}
+
+func (il *itemList) dequeueAll() *itemNode {
+	h := il.head
+	il.head, il.tail = nil, nil
+	return h
+}
+
+func (il *itemList) isEmpty() bool {
+	return il.head == nil
+}
+
+// The following defines various control items which could flow through
+// the control buffer of transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, streaming resetting, etc.
+
+type headerFrame struct {
+	streamID   uint32
+	hf         []hpack.HeaderField
+	endStream  bool                       // Valid on server side.
+	initStream func(uint32) (bool, error) // Used only on the client side.
+	onWrite    func()
+	wq         *writeQuota    // write quota for the stream created.
+	cleanup    *cleanupStream // Valid on the server side.
+	onOrphaned func(error)    // Valid on client-side
+}
+
+type cleanupStream struct {
+	streamID uint32
+	idPtr    *uint32
+	rst      bool
+	rstCode  http2.ErrCode
+	onWrite  func()
+}
+
+type dataFrame struct {
+	streamID  uint32
+	endStream bool
+	h         []byte
+	d         []byte
+	// onEachWrite is called every time
+	// a part of d is written out.
+	onEachWrite func()
+}
+
+type incomingWindowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+type outgoingWindowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+type incomingSettings struct {
+	ss []http2.Setting
+}
+
+type outgoingSettings struct {
+	ss []http2.Setting
+}
+
+type settingsAck struct {
+}
+
+type incomingGoAway struct {
+}
+
+type goAway struct {
+	code      http2.ErrCode
+	debugData []byte
+	headsUp   bool
+	closeConn bool
+}
+
+type ping struct {
+	ack  bool
+	data [8]byte
+}
+
+type outFlowControlSizeRequest struct {
+	resp chan uint32
+}
+
+type outStreamState int
+
+const (
+	active outStreamState = iota
+	empty
+	waitingOnStreamQuota
+)
+
+type outStream struct {
+	id               uint32
+	state            outStreamState
+	itl              *itemList
+	bytesOutStanding int
+	wq               *writeQuota
+
+	next *outStream
+	prev *outStream
+}
+
+func (s *outStream) deleteSelf() {
+	if s.prev != nil {
+		s.prev.next = s.next
+	}
+	if s.next != nil {
+		s.next.prev = s.prev
+	}
+	s.next, s.prev = nil, nil
+}
+
+type outStreamList struct {
+	// Following are sentinel objects that mark the
+	// beginning and end of the list. They do not
+	// contain any item lists. All valid objects are
+	// inserted in between them.
+	// This is needed so that an outStream object can
+	// deleteSelf() in O(1) time without knowing which
+	// list it belongs to.
+	head *outStream
+	tail *outStream
+}
+
+func newOutStreamList() *outStreamList {
+	head, tail := new(outStream), new(outStream)
+	head.next = tail
+	tail.prev = head
+	return &outStreamList{
+		head: head,
+		tail: tail,
+	}
+}
+
+func (l *outStreamList) enqueue(s *outStream) {
+	e := l.tail.prev
+	e.next = s
+	s.prev = e
+	s.next = l.tail
+	l.tail.prev = s
+}
+
+// remove from the beginning of the list.
+func (l *outStreamList) dequeue() *outStream {
+	b := l.head.next
+	if b == l.tail {
+		return nil
+	}
+	b.deleteSelf()
+	return b
+}
+
+type controlBuffer struct {
+	ch              chan struct{}
+	done            <-chan struct{}
+	mu              sync.Mutex
+	consumerWaiting bool
+	list            *itemList
+	err             error
+}
+
+func newControlBuffer(done <-chan struct{}) *controlBuffer {
+	return &controlBuffer{
+		ch:   make(chan struct{}, 1),
+		list: &itemList{},
+		done: done,
+	}
+}
+
+func (c *controlBuffer) put(it interface{}) error {
+	_, err := c.executeAndPut(nil, it)
+	return err
+}
+
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
+	var wakeUp bool
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if f != nil {
+		if !f(it) { // f wasn't successful
+			c.mu.Unlock()
+			return false, nil
+		}
+	}
+	if c.consumerWaiting {
+		wakeUp = true
+		c.consumerWaiting = false
+	}
+	c.list.enqueue(it)
+	c.mu.Unlock()
+	if wakeUp {
+		select {
+		case c.ch <- struct{}{}:
+		default:
+		}
+	}
+	return true, nil
+}
+
+func (c *controlBuffer) get(block bool) (interface{}, error) {
+	for {
+		c.mu.Lock()
+		if c.err != nil {
+			c.mu.Unlock()
+			return nil, c.err
+		}
+		if !c.list.isEmpty() {
+			h := c.list.dequeue()
+			c.mu.Unlock()
+			return h, nil
+		}
+		if !block {
+			c.mu.Unlock()
+			return nil, nil
+		}
+		c.consumerWaiting = true
+		c.mu.Unlock()
+		select {
+		case <-c.ch:
+		case <-c.done:
+			c.finish()
+			return nil, ErrConnClosing
+		}
+	}
+}
+
+func (c *controlBuffer) finish() {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return
+	}
+	c.err = ErrConnClosing
+	// There may be headers for streams in the control buffer.
+	// These streams need to be cleaned out since the transport
+	// is still not aware of these yet.
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		hdr, ok := head.it.(*headerFrame)
+		if !ok {
+			continue
+		}
+		if hdr.onOrphaned != nil { // It will be nil on the server-side.
+			hdr.onOrphaned(ErrConnClosing)
+		}
+	}
+	c.mu.Unlock()
+}
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+type loopyWriter struct {
+	side          side
+	cbuf          *controlBuffer
+	sendQuota     uint32
+	oiws          uint32                // outbound initial window size.
+	estdStreams   map[uint32]*outStream // Established streams.
+	activeStreams *outStreamList        // Streams that are sending data.
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+func (l *loopyWriter) run() {
+	var (
+		it      interface{}
+		err     error
+		isEmpty bool
+	)
+	defer func() {
+		errorf("transport: loopyWriter.run returning. Err: %v", err)
+	}()
+	for {
+		it, err = l.cbuf.get(true)
+		if err != nil {
+			return
+		}
+		if err = l.handle(it); err != nil {
+			return
+		}
+		if _, err = l.processData(); err != nil {
+			return
+		}
+		gosched := true
+	hasdata:
+		for {
+			it, err = l.cbuf.get(false)
+			if err != nil {
+				return
+			}
+			if it != nil {
+				if err = l.handle(it); err != nil {
+					return
+				}
+				if _, err = l.processData(); err != nil {
+					return
+				}
+				continue hasdata
+			}
+			if isEmpty, err = l.processData(); err != nil {
+				return
+			}
+			if !isEmpty {
+				continue hasdata
+			}
+			if gosched {
+				gosched = false
+				if l.framer.writer.offset < minBatchSize {
+					runtime.Gosched()
+					continue hasdata
+				}
+			}
+			l.framer.writer.Flush()
+			break hasdata
+
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
+	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
+}
+
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
+	// A window update for stream ID 0 is connection-level; just update the transport's send quota.
+	if w.streamID == 0 {
+		l.sendQuota += w.increment
+		return nil
+	}
+	// Find the stream and update it.
+	if str, ok := l.estdStreams[w.streamID]; ok {
+		str.bytesOutStanding -= int(w.increment)
+		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
+			str.state = active
+			l.activeStreams.enqueue(str)
+			return nil
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
+	return l.framer.fr.WriteSettings(s.ss...)
+}
+
+func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
+	if err := l.applySettings(s.ss); err != nil {
+		return err
+	}
+	return l.framer.fr.WriteSettingsAck()
+}
+
+func (l *loopyWriter) headerHandler(h *headerFrame) error {
+	if l.side == serverSide {
+		if h.endStream { // Case 1.A: Server wants to close stream.
+			// Make sure it's not a trailers only response.
+			if str, ok := l.estdStreams[h.streamID]; ok {
+				if str.state != empty { // either active or waiting on stream quota.
+					// add it to str's list of items.
+					str.itl.enqueue(h)
+					return nil
+				}
+			}
+			if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+				return err
+			}
+			return l.cleanupStreamHandler(h.cleanup)
+		}
+		// Case 1.B: Server is responding back with headers.
+		str := &outStream{
+			state: empty,
+			itl:   &itemList{},
+			wq:    h.wq,
+		}
+		l.estdStreams[h.streamID] = str
+		return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+	}
+	// Case 2: Client wants to originate stream.
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	str.itl.enqueue(h)
+	return l.originateStream(str)
+}
+
+func (l *loopyWriter) originateStream(str *outStream) error {
+	hdr := str.itl.dequeue().(*headerFrame)
+	sendPing, err := hdr.initStream(str.id)
+	if err != nil {
+		if err == ErrConnClosing {
+			return err
+		}
+		// Other errors (e.g. errStreamDrain) need not close the transport.
+		return nil
+	}
+	if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+		return err
+	}
+	l.estdStreams[str.id] = str
+	if sendPing {
+		return l.pingHandler(&ping{data: [8]byte{}})
+	}
+	return nil
+}
+
+func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
+	if onWrite != nil {
+		onWrite()
+	}
+	l.hBuf.Reset()
+	for _, f := range hf {
+		if err := l.hEnc.WriteField(f); err != nil {
+			warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+		}
+	}
+	var (
+		err               error
+		endHeaders, first bool
+	)
+	first = true
+	for !endHeaders {
+		size := l.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			first = false
+			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: l.hBuf.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = l.framer.fr.WriteContinuation(
+				streamID,
+				endHeaders,
+				l.hBuf.Next(size),
+			)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) preprocessData(df *dataFrame) error {
+	str, ok := l.estdStreams[df.streamID]
+	if !ok {
+		return nil
+	}
+	// If we got data for a stream it means that the stream was already
+	// originated and its headers were sent out.
+	str.itl.enqueue(df)
+	if str.state == empty {
+		str.state = active
+		l.activeStreams.enqueue(str)
+	}
+	return nil
+}
+
+func (l *loopyWriter) pingHandler(p *ping) error {
+	if !p.ack {
+		l.bdpEst.timesnap(p.data)
+	}
+	return l.framer.fr.WritePing(p.ack, p.data)
+
+}
+
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
+	o.resp <- l.sendQuota
+	return nil
+}
+
+func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
+	c.onWrite()
+	if str, ok := l.estdStreams[c.streamID]; ok {
+		// On the server side it could be a trailers-only response or
+	// a RST_STREAM before stream initialization, so the stream might
+		// not be established yet.
+		delete(l.estdStreams, c.streamID)
+		str.deleteSelf()
+	}
+	if c.rst { // If RST_STREAM needs to be sent.
+		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
+			return err
+		}
+	}
+	if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
+		return ErrConnClosing
+	}
+	return nil
+}
+
+func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
+	if l.side == clientSide {
+		l.draining = true
+		if len(l.estdStreams) == 0 {
+			return ErrConnClosing
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) goAwayHandler(g *goAway) error {
+	// Handling of outgoing GoAway is very specific to side.
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) handle(i interface{}) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		return l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		return l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		return l.outFlowControlSizeRequestHandler(i)
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue()
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame)
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 {
+		// Client sends out empty data frame with endStream = true
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue()
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var (
+		idx int
+		buf []byte
+	)
+	if len(dataItem.h) != 0 { // data header has not been written out yet.
+		buf = dataItem.h
+	} else {
+		idx = 1
+		buf = dataItem.d
+	}
+	size := http2MaxFrameLen
+	if len(buf) < size {
+		size = len(buf)
+	}
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 {
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if strQuota < size {
+		size = strQuota
+	}
+
+	if l.sendQuota < uint32(size) {
+		size = int(l.sendQuota)
+	}
+	// Now that outgoing flow control is accounted for, we can replenish str's write quota.
+	str.wq.replenish(size)
+	var endStream bool
+	// If this is the last data message on this stream and all of it
+	// can be written out in this pass, mark the frame with endStream.
+	if dataItem.endStream && size == len(buf) {
+		// buf is either the message data itself, or it is the header of a
+		// message whose data is empty.
+		if idx == 1 || len(dataItem.d) == 0 {
+			endStream = true
+		}
+	}
+	if dataItem.onEachWrite != nil {
+		dataItem.onEachWrite()
+	}
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+		return false, err
+	}
+	buf = buf[size:]
+	str.bytesOutStanding += size
+	l.sendQuota -= uint32(size)
+	if idx == 0 {
+		dataItem.h = buf
+	} else {
+		dataItem.d = buf
+	}
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+		str.itl.dequeue()
+	}
+	if str.itl.isEmpty() {
+		str.state = empty
+	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+			return false, err
+		}
+		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+			return false, err
+		}
+	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+		str.state = waitingOnStreamQuota
+	} else { // Otherwise add it back to the list of active streams.
+		l.activeStreams.enqueue(str)
+	}
+	return false, nil
+}
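The per-frame size chosen by processData above is effectively the minimum of the HTTP/2 max frame length, the bytes still buffered for the stream, the remaining stream-level quota (oiws - bytesOutStanding), and the connection-level sendQuota. Below is a standalone sketch of that arithmetic with made-up numbers; the helper name sendSize is hypothetical and the snippet is an illustration, not part of the vendored diff.

// sendSize mirrors the size computation in processData.
func sendSize(maxFrameLen, buffered, streamWindow, bytesOutstanding, connQuota int) int {
	size := maxFrameLen // HTTP/2 frame limit (16384 by default)
	if buffered < size {
		size = buffered // never write more than is queued for the stream
	}
	strQuota := streamWindow - bytesOutstanding
	if strQuota <= 0 {
		return 0 // stream would move to waitingOnStreamQuota
	}
	if strQuota < size {
		size = strQuota
	}
	if connQuota < size {
		size = connQuota // connection-level window caps the write as well
	}
	return size
}

// Example: sendSize(16384, 32768, 65535, 60000, 8192) == 5535 —
// the stream window (65535-60000) is the tightest constraint here.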

+ 87 - 185
vendor/google.golang.org/grpc/transport/control.go → vendor/google.golang.org/grpc/transport/flowcontrol.go

@@ -20,13 +20,10 @@ package transport
 
 
 import (
 import (
 	"fmt"
 	"fmt"
-	"io"
 	"math"
 	"math"
 	"sync"
 	"sync"
+	"sync/atomic"
 	"time"
 	"time"
-
-	"golang.org/x/net/http2"
-	"golang.org/x/net/http2/hpack"
 )
 )
 
 
 const (
 const (
@@ -36,202 +33,109 @@ const (
 	initialWindowSize             = defaultWindowSize // for an RPC
 	initialWindowSize             = defaultWindowSize // for an RPC
 	infinity                      = time.Duration(math.MaxInt64)
 	infinity                      = time.Duration(math.MaxInt64)
 	defaultClientKeepaliveTime    = infinity
 	defaultClientKeepaliveTime    = infinity
-	defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
+	defaultClientKeepaliveTimeout = 20 * time.Second
 	defaultMaxStreamsClient       = 100
 	defaultMaxStreamsClient       = 100
 	defaultMaxConnectionIdle      = infinity
 	defaultMaxConnectionIdle      = infinity
 	defaultMaxConnectionAge       = infinity
 	defaultMaxConnectionAge       = infinity
 	defaultMaxConnectionAgeGrace  = infinity
 	defaultMaxConnectionAgeGrace  = infinity
-	defaultServerKeepaliveTime    = time.Duration(2 * time.Hour)
-	defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
-	defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
 	// max window limit set by HTTP2 Specs.
 	// max window limit set by HTTP2 Specs.
 	maxWindowSize = math.MaxInt32
 	maxWindowSize = math.MaxInt32
-	// defaultLocalSendQuota sets is default value for number of data
+	// defaultWriteQuota is the default value for number of data
 	// bytes that each stream can schedule before some of it being
 	// bytes that each stream can schedule before some of it being
 	// flushed out.
 	// flushed out.
-	defaultLocalSendQuota = 128 * 1024
+	defaultWriteQuota = 64 * 1024
 )
 )
 
 
-// The following defines various control items which could flow through
-// the control buffer of transport. They represent different aspects of
-// control tasks, e.g., flow control, settings, streaming resetting, etc.
-
-type headerFrame struct {
-	streamID  uint32
-	hf        []hpack.HeaderField
-	endStream bool
-}
-
-func (*headerFrame) item() {}
-
-type continuationFrame struct {
-	streamID            uint32
-	endHeaders          bool
-	headerBlockFragment []byte
-}
-
-type dataFrame struct {
-	streamID  uint32
-	endStream bool
-	d         []byte
-	f         func()
-}
-
-func (*dataFrame) item() {}
-
-func (*continuationFrame) item() {}
-
-type windowUpdate struct {
-	streamID  uint32
-	increment uint32
-}
-
-func (*windowUpdate) item() {}
-
-type settings struct {
-	ss []http2.Setting
-}
-
-func (*settings) item() {}
-
-type settingsAck struct {
-}
-
-func (*settingsAck) item() {}
-
-type resetStream struct {
-	streamID uint32
-	code     http2.ErrCode
-}
-
-func (*resetStream) item() {}
-
-type goAway struct {
-	code      http2.ErrCode
-	debugData []byte
-	headsUp   bool
-	closeConn bool
-}
-
-func (*goAway) item() {}
-
-type flushIO struct {
-	closeTr bool
-}
-
-func (*flushIO) item() {}
-
-type ping struct {
-	ack  bool
-	data [8]byte
-}
-
-func (*ping) item() {}
-
-// quotaPool is a pool which accumulates the quota and sends it to acquire()
-// when it is available.
-type quotaPool struct {
-	mu      sync.Mutex
-	c       chan struct{}
-	version uint32
-	quota   int
-}
-
-// newQuotaPool creates a quotaPool which has quota q available to consume.
-func newQuotaPool(q int) *quotaPool {
-	qb := &quotaPool{
-		quota: q,
-		c:     make(chan struct{}, 1),
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get blocks on reads from ch when quota drops to zero or below.
+	// replenish sends on ch when the quota becomes positive again.
+	ch chan struct{}
+	// done is triggered in error case.
+	done <-chan struct{}
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	return &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
 	}
 	}
-	return qb
 }
 }
 
 
-// add cancels the pending quota sent on acquired, incremented by v and sends
-// it back on acquire.
-func (qb *quotaPool) add(v int) {
-	qb.mu.Lock()
-	defer qb.mu.Unlock()
-	qb.lockedAdd(v)
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
 }
 }
 
 
-func (qb *quotaPool) lockedAdd(v int) {
-	var wakeUp bool
-	if qb.quota <= 0 {
-		wakeUp = true // Wake up potential waiters.
-	}
-	qb.quota += v
-	if wakeUp && qb.quota > 0 {
+func (w *writeQuota) replenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
 		select {
 		select {
-		case qb.c <- struct{}{}:
+		case w.ch <- struct{}{}:
 		default:
 		default:
 		}
 		}
 	}
 	}
 }
 }
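The writeQuota above replaces the removed quotaPool as the per-stream scheduling limit: get blocks only while the quota is at or below zero, and replenish wakes a parked writer once the quota crosses back above zero. A minimal sketch of the intended call pattern follows; the helper name writeQuotaSketch is made up, and the snippet assumes it runs inside the transport package (illustration only, not part of the vendored diff).

func writeQuotaSketch() {
	done := make(chan struct{})
	wq := newWriteQuota(defaultWriteQuota, done) // 64KB of schedulable bytes

	// Writer side: reserve bytes before handing a frame to the control buffer.
	// This blocks while the quota is <= 0 and fails with errStreamDone once
	// done is closed.
	if err := wq.get(16 * 1024); err != nil {
		return
	}

	// Loopy side: once the bytes clear outbound flow control, give them back,
	// which may wake a writer parked in get.
	wq.replenish(16 * 1024)
}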
 
 
-func (qb *quotaPool) addAndUpdate(v int) {
-	qb.mu.Lock()
-	qb.lockedAdd(v)
-	qb.version++
-	qb.mu.Unlock()
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
 }
 }
 
 
-func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
-	qb.mu.Lock()
-	if qb.quota > 0 {
-		if v > qb.quota {
-			v = qb.quota
-		}
-		qb.quota -= v
-		ver := qb.version
-		qb.mu.Unlock()
-		return v, ver, nil
-	}
-	qb.mu.Unlock()
-	for {
-		select {
-		case <-wc.ctx.Done():
-			return 0, 0, ContextErr(wc.ctx.Err())
-		case <-wc.tctx.Done():
-			return 0, 0, ErrConnClosing
-		case <-wc.done:
-			return 0, 0, io.EOF
-		case <-wc.goAway:
-			return 0, 0, errStreamDrain
-		case <-qb.c:
-			qb.mu.Lock()
-			if qb.quota > 0 {
-				if v > qb.quota {
-					v = qb.quota
-				}
-				qb.quota -= v
-				ver := qb.version
-				if qb.quota > 0 {
-					select {
-					case qb.c <- struct{}{}:
-					default:
-					}
-				}
-				qb.mu.Unlock()
-				return v, ver, nil
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
 
 
-			}
-			qb.mu.Unlock()
-		}
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
 	}
 	}
+	f.updateEffectiveWindowSize()
+	return 0
 }
 }
 
 
-func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
-	qb.mu.Lock()
-	if version == qb.version {
-		success()
-		qb.mu.Unlock()
-		return true
-	}
-	failure()
-	qb.mu.Unlock()
-	return false
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
 }
 }
 
 
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
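trInFlow is the new lock-free accounting for the transport-level receive window: onData accumulates unacknowledged bytes and only reports a WINDOW_UPDATE once at least a quarter of the window has been consumed. A small worked example with illustrative numbers follows; the helper name trInFlowSketch is made up, and the snippet assumes it runs inside the transport package (not part of the vendored diff).

func trInFlowSketch() {
	f := &trInFlow{}
	f.newLimit(64 * 1024)    // limit = 65536, so the update threshold is limit/4 = 16384
	w := f.onData(10 * 1024) // w == 0: 10240 unacked bytes, still below the threshold
	w = f.onData(10 * 1024)  // w == 20480: threshold crossed, send WINDOW_UPDATE(20480)
	_ = w
}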
+// TODO(mmukhi): Simplify this code.
 // inFlow deals with inbound flow control
 // inFlow deals with inbound flow control
 type inFlow struct {
 type inFlow struct {
 	mu sync.Mutex
 	mu sync.Mutex
@@ -252,9 +156,9 @@ type inFlow struct {
 // It assumes that n is always greater than the old limit.
 // It assumes that n is always greater than the old limit.
 func (f *inFlow) newLimit(n uint32) uint32 {
 func (f *inFlow) newLimit(n uint32) uint32 {
 	f.mu.Lock()
 	f.mu.Lock()
-	defer f.mu.Unlock()
 	d := n - f.limit
 	d := n - f.limit
 	f.limit = n
 	f.limit = n
+	f.mu.Unlock()
 	return d
 	return d
 }
 }
 
 
@@ -263,7 +167,6 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 		n = uint32(math.MaxInt32)
 		n = uint32(math.MaxInt32)
 	}
 	}
 	f.mu.Lock()
 	f.mu.Lock()
-	defer f.mu.Unlock()
 	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
 	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
 	// can send without a window update.
 	// can send without a window update.
 	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
 	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@@ -275,7 +178,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 	// for this message. Therefore we must send an update over the limit since there's an active read
 	// for this message. Therefore we must send an update over the limit since there's an active read
 	// request from the application.
 	// request from the application.
 	if estUntransmittedData > estSenderQuota {
 	if estUntransmittedData > estSenderQuota {
-		// Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec.
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
 		if f.limit+n > maxWindowSize {
 		if f.limit+n > maxWindowSize {
 			f.delta = maxWindowSize - f.limit
 			f.delta = maxWindowSize - f.limit
 		} else {
 		} else {
@@ -284,19 +187,24 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
 			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
 			f.delta = n
 			f.delta = n
 		}
 		}
+		f.mu.Unlock()
 		return f.delta
 		return f.delta
 	}
 	}
+	f.mu.Unlock()
 	return 0
 	return 0
 }
 }
 
 
 // onData is invoked when some data frame is received. It updates pendingData.
 // onData is invoked when some data frame is received. It updates pendingData.
 func (f *inFlow) onData(n uint32) error {
 func (f *inFlow) onData(n uint32) error {
 	f.mu.Lock()
 	f.mu.Lock()
-	defer f.mu.Unlock()
 	f.pendingData += n
 	f.pendingData += n
 	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
 	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
-		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
+		limit := f.limit
+		rcvd := f.pendingData + f.pendingUpdate
+		f.mu.Unlock()
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
 	}
 	}
+	f.mu.Unlock()
 	return nil
 	return nil
 }
 }
 
 
@@ -304,8 +212,8 @@ func (f *inFlow) onData(n uint32) error {
 // to be sent to the peer.
 // to be sent to the peer.
 func (f *inFlow) onRead(n uint32) uint32 {
 func (f *inFlow) onRead(n uint32) uint32 {
 	f.mu.Lock()
 	f.mu.Lock()
-	defer f.mu.Unlock()
 	if f.pendingData == 0 {
 	if f.pendingData == 0 {
+		f.mu.Unlock()
 		return 0
 		return 0
 	}
 	}
 	f.pendingData -= n
 	f.pendingData -= n
@@ -320,15 +228,9 @@ func (f *inFlow) onRead(n uint32) uint32 {
 	if f.pendingUpdate >= f.limit/4 {
 	if f.pendingUpdate >= f.limit/4 {
 		wu := f.pendingUpdate
 		wu := f.pendingUpdate
 		f.pendingUpdate = 0
 		f.pendingUpdate = 0
+		f.mu.Unlock()
 		return wu
 		return wu
 	}
 	}
+	f.mu.Unlock()
 	return 0
 	return 0
 }
 }
-
-func (f *inFlow) resetPendingUpdate() uint32 {
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	n := f.pendingUpdate
-	f.pendingUpdate = 0
-	return n
-}

+ 6 - 2
vendor/google.golang.org/grpc/transport/handler_server.go

@@ -92,7 +92,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
 	}
 	}
 	for k, vv := range r.Header {
 	for k, vv := range r.Header {
 		k = strings.ToLower(k)
 		k = strings.ToLower(k)
-		if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
+		if isReservedHeader(k) && !isWhitelistedHeader(k) {
 			continue
 			continue
 		}
 		}
 		for _, v := range vv {
 		for _, v := range vv {
@@ -365,7 +365,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
 		ht.stats.HandleRPC(s.ctx, inHeader)
 		ht.stats.HandleRPC(s.ctx, inHeader)
 	}
 	}
 	s.trReader = &transportReader{
 	s.trReader = &transportReader{
-		reader:        &recvBufferReader{ctx: s.ctx, recv: s.buf},
+		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
 		windowHandler: func(int) {},
 		windowHandler: func(int) {},
 	}
 	}
 
 
@@ -420,6 +420,10 @@ func (ht *serverHandlerTransport) runStream() {
 	}
 	}
 }
 }
 
 
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
 func (ht *serverHandlerTransport) Drain() {
 func (ht *serverHandlerTransport) Drain() {
 	panic("Drain() is not implemented")
 	panic("Drain() is not implemented")
 }
 }

File diff suppressed because it is too large
+ 403 - 431
vendor/google.golang.org/grpc/transport/http2_client.go


File diff suppressed because it is too large
+ 235 - 418
vendor/google.golang.org/grpc/transport/http2_server.go


+ 64 - 16
vendor/google.golang.org/grpc/transport/http_util.go

@@ -23,7 +23,6 @@ import (
 	"bytes"
 	"bytes"
 	"encoding/base64"
 	"encoding/base64"
 	"fmt"
 	"fmt"
-	"io"
 	"net"
 	"net"
 	"net/http"
 	"net/http"
 	"strconv"
 	"strconv"
@@ -132,6 +131,7 @@ func isReservedHeader(hdr string) bool {
 	}
 	}
 	switch hdr {
 	switch hdr {
 	case "content-type",
 	case "content-type",
+		"user-agent",
 		"grpc-message-type",
 		"grpc-message-type",
 		"grpc-encoding",
 		"grpc-encoding",
 		"grpc-message",
 		"grpc-message",
@@ -145,11 +145,11 @@ func isReservedHeader(hdr string) bool {
 	}
 	}
 }
 }
 
 
-// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
-// that should be propagated into metadata visible to users.
-func isWhitelistedPseudoHeader(hdr string) bool {
+// isWhitelistedHeader checks whether hdr should be propagated
+// into metadata visible to users.
+func isWhitelistedHeader(hdr string) bool {
 	switch hdr {
 	switch hdr {
-	case ":authority":
+	case ":authority", "user-agent":
 		return true
 		return true
 	default:
 	default:
 		return false
 		return false
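With "user-agent" whitelisted next to ":authority", the client's User-Agent header is no longer stripped as a reserved header and can reach the metadata visible to applications. A hedged sketch of what a server handler might now observe follows; the function name logUserAgent is made up and the exact value depends on the client (illustration only, not part of the vendored diff).

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

func logUserAgent(ctx context.Context) {
	// Incoming metadata on the server side now carries the user-agent value
	// instead of it being filtered out by isReservedHeader.
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		if ua := md["user-agent"]; len(ua) > 0 {
			_ = ua[0] // e.g. "grpc-go/1.12.2"
		}
	}
}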
@@ -262,9 +262,9 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
 	// gRPC status doesn't exist and http status is OK.
 	// gRPC status doesn't exist and http status is OK.
 	// Set rawStatusCode to be unknown and return nil error.
 	// Set rawStatusCode to be unknown and return nil error.
 	// So that, if the stream has ended this Unknown status
 	// So that, if the stream has ended this Unknown status
-	// will be propogated to the user.
+	// will be propagated to the user.
 	// Otherwise, it will be ignored. In which case, status from
 	// Otherwise, it will be ignored. In which case, status from
-	// a later trailer, that has StreamEnded flag set, is propogated.
+	// a later trailer, that has StreamEnded flag set, is propagated.
 	code := int(codes.Unknown)
 	code := int(codes.Unknown)
 	d.rawStatusCode = &code
 	d.rawStatusCode = &code
 	return nil
 	return nil
@@ -340,7 +340,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
 		d.statsTrace = v
 		d.statsTrace = v
 		d.addMetadata(f.Name, string(v))
 		d.addMetadata(f.Name, string(v))
 	default:
 	default:
-		if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) {
+		if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
 			break
 			break
 		}
 		}
 		v, err := decodeMetadataHeader(f.Name, f.Value)
 		v, err := decodeMetadataHeader(f.Name, f.Value)
@@ -348,7 +348,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
 			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
 			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
 			return nil
 			return nil
 		}
 		}
-		d.addMetadata(f.Name, string(v))
+		d.addMetadata(f.Name, v)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -509,19 +509,67 @@ func decodeGrpcMessageUnchecked(msg string) string {
 	return buf.String()
 	return buf.String()
 }
 }
 
 
+type bufWriter struct {
+	buf       []byte
+	offset    int
+	batchSize int
+	conn      net.Conn
+	err       error
+
+	onFlush func()
+}
+
+func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
+	return &bufWriter{
+		buf:       make([]byte, batchSize*2),
+		batchSize: batchSize,
+		conn:      conn,
+	}
+}
+
+func (w *bufWriter) Write(b []byte) (n int, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(b) > 0 {
+		nn := copy(w.buf[w.offset:], b)
+		b = b[nn:]
+		w.offset += nn
+		n += nn
+		if w.offset >= w.batchSize {
+			err = w.Flush()
+		}
+	}
+	return n, err
+}
+
+func (w *bufWriter) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if w.offset == 0 {
+		return nil
+	}
+	if w.onFlush != nil {
+		w.onFlush()
+	}
+	_, w.err = w.conn.Write(w.buf[:w.offset])
+	w.offset = 0
+	return w.err
+}
+
 type framer struct {
 type framer struct {
-	numWriters int32
-	reader     io.Reader
-	writer     *bufio.Writer
-	fr         *http2.Framer
+	writer *bufWriter
+	fr     *http2.Framer
 }
 }
 
 
 func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
 func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
+	r := bufio.NewReaderSize(conn, readBufferSize)
+	w := newBufWriter(conn, writeBufferSize)
 	f := &framer{
 	f := &framer{
-		reader: bufio.NewReaderSize(conn, readBufferSize),
-		writer: bufio.NewWriterSize(conn, writeBufferSize),
+		writer: w,
+		fr:     http2.NewFramer(w, r),
 	}
 	}
-	f.fr = http2.NewFramer(f.writer, f.reader)
 	// Opt-in to Frame reuse API on framer to reduce garbage.
 	// Opt-in to Frame reuse API on framer to reduce garbage.
 	// Frames aren't safe to read from after a subsequent call to ReadFrame.
 	// Frames aren't safe to read from after a subsequent call to ReadFrame.
 	f.fr.SetReuseFrames()
 	f.fr.SetReuseFrames()
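The framer now writes through the new bufWriter instead of a bufio.Writer: frames accumulate in a buffer sized at twice the batch size and are only pushed to the connection once batchSize bytes are pending or Flush is called (which loopyWriter does when its control buffer goes idle). A minimal sketch of that write path follows; the helper name framerSketch is made up, and the snippet assumes it runs inside the transport package (illustration only, not part of the vendored diff).

func framerSketch(conn net.Conn) {
	fr := newFramer(conn, 32*1024, 32*1024) // 32KB write batch, 32KB read buffer

	// These frames land in the bufWriter; nothing is written to conn until
	// 32KB accumulates or Flush is called explicitly.
	_ = fr.fr.WriteSettings()
	_ = fr.fr.WritePing(false, [8]byte{})

	// loopyWriter issues this flush at the end of a write batch.
	_ = fr.writer.Flush()
}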

+ 101 - 164
vendor/google.golang.org/grpc/transport/transport.go

@@ -19,16 +19,17 @@
 // Package transport defines and implements message oriented communication
 // Package transport defines and implements message oriented communication
 // channel to complete various transactions (e.g., an RPC).  It is meant for
 // channel to complete various transactions (e.g., an RPC).  It is meant for
 // grpc-internal usage and is not intended to be imported directly by users.
 // grpc-internal usage and is not intended to be imported directly by users.
-package transport // import "google.golang.org/grpc/transport"
+package transport // externally used as import "google.golang.org/grpc/transport"
 
 
 import (
 import (
+	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"net"
 	"net"
 	"sync"
 	"sync"
+	"sync/atomic"
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
-	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/keepalive"
@@ -57,6 +58,7 @@ type recvBuffer struct {
 	c       chan recvMsg
 	c       chan recvMsg
 	mu      sync.Mutex
 	mu      sync.Mutex
 	backlog []recvMsg
 	backlog []recvMsg
+	err     error
 }
 }
 
 
 func newRecvBuffer() *recvBuffer {
 func newRecvBuffer() *recvBuffer {
@@ -68,6 +70,13 @@ func newRecvBuffer() *recvBuffer {
 
 
 func (b *recvBuffer) put(r recvMsg) {
 func (b *recvBuffer) put(r recvMsg) {
 	b.mu.Lock()
 	b.mu.Lock()
+	if b.err != nil {
+		b.mu.Unlock()
+		// An error occurred earlier; don't accept more
+		// data or errors.
+		return
+	}
+	b.err = r.err
 	if len(b.backlog) == 0 {
 	if len(b.backlog) == 0 {
 		select {
 		select {
 		case b.c <- r:
 		case b.c <- r:
@@ -101,14 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg {
 	return b.c
 	return b.c
 }
 }
 
 
+//
 // recvBufferReader implements io.Reader interface to read the data from
 // recvBufferReader implements io.Reader interface to read the data from
 // recvBuffer.
 // recvBuffer.
 type recvBufferReader struct {
 type recvBufferReader struct {
-	ctx    context.Context
-	goAway chan struct{}
-	recv   *recvBuffer
-	last   []byte // Stores the remaining data in the previous calls.
-	err    error
+	ctx     context.Context
+	ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+	recv    *recvBuffer
+	last    []byte // Stores the remaining data in the previous calls.
+	err     error
 }
 }
 
 
 // Read reads the next len(p) bytes from last. If last is drained, it tries to
 // Read reads the next len(p) bytes from last. If last is drained, it tries to
@@ -130,10 +140,8 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
 		return copied, nil
 		return copied, nil
 	}
 	}
 	select {
 	select {
-	case <-r.ctx.Done():
+	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
 		return 0, ContextErr(r.ctx.Err())
-	case <-r.goAway:
-		return 0, errStreamDrain
 	case m := <-r.recv.get():
 	case m := <-r.recv.get():
 		r.recv.load()
 		r.recv.load()
 		if m.err != nil {
 		if m.err != nil {
@@ -145,61 +153,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
 	}
 	}
 }
 }
 
 
-// All items in an out of a controlBuffer should be the same type.
-type item interface {
-	item()
-}
-
-// controlBuffer is an unbounded channel of item.
-type controlBuffer struct {
-	c       chan item
-	mu      sync.Mutex
-	backlog []item
-}
-
-func newControlBuffer() *controlBuffer {
-	b := &controlBuffer{
-		c: make(chan item, 1),
-	}
-	return b
-}
-
-func (b *controlBuffer) put(r item) {
-	b.mu.Lock()
-	if len(b.backlog) == 0 {
-		select {
-		case b.c <- r:
-			b.mu.Unlock()
-			return
-		default:
-		}
-	}
-	b.backlog = append(b.backlog, r)
-	b.mu.Unlock()
-}
-
-func (b *controlBuffer) load() {
-	b.mu.Lock()
-	if len(b.backlog) > 0 {
-		select {
-		case b.c <- b.backlog[0]:
-			b.backlog[0] = nil
-			b.backlog = b.backlog[1:]
-		default:
-		}
-	}
-	b.mu.Unlock()
-}
-
-// get returns the channel that receives an item in the buffer.
-//
-// Upon receipt of an item, the caller should call load to send another
-// item onto the channel if there is any.
-func (b *controlBuffer) get() <-chan item {
-	return b.c
-}
-
-type streamState uint8
+type streamState uint32
 
 
 const (
 const (
 	streamActive    streamState = iota
 	streamActive    streamState = iota
@@ -214,8 +168,8 @@ type Stream struct {
 	st           ServerTransport    // nil for client side Stream
 	st           ServerTransport    // nil for client side Stream
 	ctx          context.Context    // the associated context of the stream
 	ctx          context.Context    // the associated context of the stream
 	cancel       context.CancelFunc // always nil for client side Stream
 	cancel       context.CancelFunc // always nil for client side Stream
-	done         chan struct{}      // closed when the final status arrives
-	goAway       chan struct{}      // closed when a GOAWAY control message is received
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
 	method       string             // the associated RPC method of the stream
 	method       string             // the associated RPC method of the stream
 	recvCompress string
 	recvCompress string
 	sendCompress string
 	sendCompress string
@@ -223,47 +177,69 @@ type Stream struct {
 	trReader     io.Reader
 	trReader     io.Reader
 	fc           *inFlow
 	fc           *inFlow
 	recvQuota    uint32
 	recvQuota    uint32
-	waiters      waiters
+	wq           *writeQuota
 
 
 	// Callback to state application's intentions to read data. This
 	// Callback to state application's intentions to read data. This
 	// is used to adjust flow control, if needed.
 	// is used to adjust flow control, if needed.
 	requestRead func(int)
 	requestRead func(int)
 
 
-	sendQuotaPool *quotaPool
-	headerChan    chan struct{} // closed to indicate the end of header metadata.
-	headerDone    bool          // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-	header        metadata.MD   // the received header metadata.
-	trailer       metadata.MD   // the key-value map of trailer metadata.
+	headerChan chan struct{} // closed to indicate the end of header metadata.
+	headerDone uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu   sync.Mutex
+	header  metadata.MD // the received header metadata.
+	trailer metadata.MD // the key-value map of trailer metadata.
 
 
-	mu       sync.RWMutex // guard the following
-	headerOk bool         // becomes true from the first header is about to send
-	state    streamState
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
 
 
-	status *status.Status // the status error received from the server
+	state streamState
 
 
-	rstStream bool          // indicates whether a RST_STREAM frame needs to be sent
-	rstError  http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
 
 
-	bytesReceived bool // indicates whether any bytes have been received on this stream
-	unprocessed   bool // set if the server sends a refused stream or GOAWAY including this stream
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
 
 
 	// contentSubtype is the content-subtype for requests.
 	// contentSubtype is the content-subtype for requests.
 	// this must be lowercase or the behavior is undefined.
 	// this must be lowercase or the behavior is undefined.
 	contentSubtype string
 	contentSubtype string
 }
 }
 
 
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on the server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
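Stream state is now a uint32 manipulated with atomics rather than being guarded by the removed s.mu. A minimal sketch of how a transport can use these helpers to run end-of-stream cleanup exactly once follows; the helper name closeStreamOnce is made up, and the snippet assumes it runs inside the transport package (illustration only, not part of the vendored diff).

func closeStreamOnce(s *Stream) {
	// Whichever goroutine swaps the state to streamDone first wins and
	// performs the one-time cleanup.
	if s.swapState(streamDone) == streamDone {
		return // already closed by someone else
	}
	// ... transport-specific cleanup: send RST_STREAM, set the status, close s.done.
}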
 func (s *Stream) waitOnHeader() error {
 func (s *Stream) waitOnHeader() error {
 	if s.headerChan == nil {
 	if s.headerChan == nil {
 		// On the server headerChan is always nil since a stream originates
 		// On the server headerChan is always nil since a stream originates
 		// only after having received headers.
 		// only after having received headers.
 		return nil
 		return nil
 	}
 	}
-	wc := s.waiters
 	select {
 	select {
-	case <-wc.ctx.Done():
-		return ContextErr(wc.ctx.Err())
-	case <-wc.goAway:
-		return errStreamDrain
+	case <-s.ctx.Done():
+		return ContextErr(s.ctx.Err())
 	case <-s.headerChan:
 	case <-s.headerChan:
 		return nil
 		return nil
 	}
 	}
@@ -289,12 +265,6 @@ func (s *Stream) Done() <-chan struct{} {
 	return s.done
 	return s.done
 }
 }
 
 
-// GoAway returns a channel which is closed when the server sent GoAways signal
-// before this stream was initiated.
-func (s *Stream) GoAway() <-chan struct{} {
-	return s.goAway
-}
-
 // Header acquires the key-value pairs of header metadata once it
 // Header acquires the key-value pairs of header metadata once it
 // is available. It blocks until i) the metadata is ready or ii) there is no
 // is available. It blocks until i) the metadata is ready or ii) there is no
 // header metadata or iii) the stream is canceled/expired.
 // header metadata or iii) the stream is canceled/expired.
@@ -303,6 +273,9 @@ func (s *Stream) Header() (metadata.MD, error) {
 	// Even if the stream is closed, header is returned if available.
 	// Even if the stream is closed, header is returned if available.
 	select {
 	select {
 	case <-s.headerChan:
 	case <-s.headerChan:
+		if s.header == nil {
+			return nil, nil
+		}
 		return s.header.Copy(), nil
 		return s.header.Copy(), nil
 	default:
 	default:
 	}
 	}
@@ -312,10 +285,10 @@ func (s *Stream) Header() (metadata.MD, error) {
 // Trailer returns the cached trailer metadata. Note that if it is not called
 // Trailer returns the cached trailer metadata. Note that if it is not called
 // after the entire stream is done, it could return an empty MD. Client
 // after the entire stream is done, it could return an empty MD. Client
 // side only.
 // side only.
+// It can be read safely only after the stream has ended, that is, after either
+// read or write has returned io.EOF.
 func (s *Stream) Trailer() metadata.MD {
 func (s *Stream) Trailer() metadata.MD {
-	s.mu.RLock()
 	c := s.trailer.Copy()
 	c := s.trailer.Copy()
-	s.mu.RUnlock()
 	return c
 	return c
 }
 }
 
 
@@ -345,24 +318,25 @@ func (s *Stream) Method() string {
 }
 }
 
 
 // Status returns the status received from the server.
 // Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, read or write has returned io.EOF.
 func (s *Stream) Status() *status.Status {
 func (s *Stream) Status() *status.Status {
 	return s.status
 	return s.status
 }
 }
 
 
 // SetHeader sets the header metadata. This can be called multiple times.
 // SetHeader sets the header metadata. This can be called multiple times.
 // Server side only.
 // Server side only.
+// This should not be called in parallel to other data writes.
 func (s *Stream) SetHeader(md metadata.MD) error {
 func (s *Stream) SetHeader(md metadata.MD) error {
-	s.mu.Lock()
-	if s.headerOk || s.state == streamDone {
-		s.mu.Unlock()
-		return ErrIllegalHeaderWrite
-	}
 	if md.Len() == 0 {
 	if md.Len() == 0 {
-		s.mu.Unlock()
 		return nil
 		return nil
 	}
 	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
 	s.header = metadata.Join(s.header, md)
 	s.header = metadata.Join(s.header, md)
-	s.mu.Unlock()
+	s.hdrMu.Unlock()
 	return nil
 	return nil
 }
 }
 
 
@@ -376,13 +350,17 @@ func (s *Stream) SendHeader(md metadata.MD) error {
 
 
 // SetTrailer sets the trailer metadata which will be sent with the RPC status
 // SetTrailer sets the trailer metadata which will be sent with the RPC status
 // by the server. This can be called multiple times. Server side only.
 // by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
 func (s *Stream) SetTrailer(md metadata.MD) error {
 func (s *Stream) SetTrailer(md metadata.MD) error {
 	if md.Len() == 0 {
 	if md.Len() == 0 {
 		return nil
 		return nil
 	}
 	}
-	s.mu.Lock()
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
 	s.trailer = metadata.Join(s.trailer, md)
 	s.trailer = metadata.Join(s.trailer, md)
-	s.mu.Unlock()
+	s.hdrMu.Unlock()
 	return nil
 	return nil
 }
 }
 
 
@@ -422,29 +400,15 @@ func (t *transportReader) Read(p []byte) (n int, err error) {
 	return
 	return
 }
 }
 
 
-// finish sets the stream's state and status, and closes the done channel.
-// s.mu must be held by the caller.  st must always be non-nil.
-func (s *Stream) finish(st *status.Status) {
-	s.status = st
-	s.state = streamDone
-	close(s.done)
-}
-
 // BytesReceived indicates whether any bytes have been received on this stream.
 // BytesReceived indicates whether any bytes have been received on this stream.
 func (s *Stream) BytesReceived() bool {
 func (s *Stream) BytesReceived() bool {
-	s.mu.Lock()
-	br := s.bytesReceived
-	s.mu.Unlock()
-	return br
+	return atomic.LoadUint32(&s.bytesReceived) == 1
 }
 }
 
 
 // Unprocessed indicates whether the server did not process this stream --
 // Unprocessed indicates whether the server did not process this stream --
 // i.e. it sent a refused stream or GOAWAY including this stream ID.
 // i.e. it sent a refused stream or GOAWAY including this stream ID.
 func (s *Stream) Unprocessed() bool {
 func (s *Stream) Unprocessed() bool {
-	s.mu.Lock()
-	br := s.unprocessed
-	s.mu.Unlock()
-	return br
+	return atomic.LoadUint32(&s.unprocessed) == 1
 }
 }
 
 
 // GoString is implemented by Stream so context.String() won't
 // GoString is implemented by Stream so context.String() won't
@@ -474,6 +438,7 @@ type ServerConfig struct {
 	InitialConnWindowSize int32
 	InitialConnWindowSize int32
 	WriteBufferSize       int
 	WriteBufferSize       int
 	ReadBufferSize        int
 	ReadBufferSize        int
+	ChannelzParentID      int64
 }
 }
 
 
 // NewServerTransport creates a ServerTransport with conn or non-nil error
 // NewServerTransport creates a ServerTransport with conn or non-nil error
@@ -509,6 +474,8 @@ type ConnectOptions struct {
 	WriteBufferSize int
 	WriteBufferSize int
 	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
 	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
 	ReadBufferSize int
 	ReadBufferSize int
+	// ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
+	ChannelzParentID int64
 }
 }
 
 
 // TargetInfo contains the information of the target such as network address and metadata.
 // TargetInfo contains the information of the target such as network address and metadata.
@@ -608,6 +575,12 @@ type ClientTransport interface {
 
 
 	// GetGoAwayReason returns the reason why GoAway frame was received.
 	// GetGoAwayReason returns the reason why GoAway frame was received.
 	GetGoAwayReason() GoAwayReason
 	GetGoAwayReason() GoAwayReason
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
 }
 }
 
 
 // ServerTransport is the common interface for all gRPC server-side transport
 // ServerTransport is the common interface for all gRPC server-side transport
@@ -641,6 +614,12 @@ type ServerTransport interface {
 
 
 	// Drain notifies the client this ServerTransport stops accepting new RPCs.
 	// Drain notifies the client this ServerTransport stops accepting new RPCs.
 	Drain()
 	Drain()
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
 }
 }
 
 
 // streamErrorf creates an StreamError with the specified error code and description.
 // streamErrorf creates an StreamError with the specified error code and description.
@@ -694,6 +673,9 @@ var (
 	// connection is draining. This could be caused by goaway or balancer
 	// connection is draining. This could be caused by goaway or balancer
 	// removing the address.
 	// removing the address.
 	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
 	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate
+	// an error to the application layer.
+	errStreamDone = errors.New("the stream is done")
 	// StatusGoAway indicates that the server sent a GOAWAY that included this
 	// StatusGoAway indicates that the server sent a GOAWAY that included this
 	// stream's ID in unprocessed RPCs.
 	// stream's ID in unprocessed RPCs.
 	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
 	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
@@ -711,15 +693,6 @@ func (e StreamError) Error() string {
 	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
 	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
 }
 }
 
 
-// waiters are passed to quotaPool get methods to
-// wait on in addition to waiting on quota.
-type waiters struct {
-	ctx    context.Context
-	tctx   context.Context
-	done   chan struct{}
-	goAway chan struct{}
-}
-
 // GoAwayReason contains the reason for the GoAway frame received.
 // GoAwayReason contains the reason for the GoAway frame received.
 type GoAwayReason uint8
 type GoAwayReason uint8
 
 
@@ -733,39 +706,3 @@ const (
 	// "too_many_pings".
 	// "too_many_pings".
 	GoAwayTooManyPings GoAwayReason = 2
 	GoAwayTooManyPings GoAwayReason = 2
 )
 )
-
-// loopyWriter is run in a separate go routine. It is the single code path that will
-// write data on wire.
-func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) {
-	for {
-		select {
-		case i := <-cbuf.get():
-			cbuf.load()
-			if err := handler(i); err != nil {
-				errorf("transport: Error while handling item. Err: %v", err)
-				return
-			}
-		case <-ctx.Done():
-			return
-		}
-	hasData:
-		for {
-			select {
-			case i := <-cbuf.get():
-				cbuf.load()
-				if err := handler(i); err != nil {
-					errorf("transport: Error while handling item. Err: %v", err)
-					return
-				}
-			case <-ctx.Done():
-				return
-			default:
-				if err := handler(&flushIO{}); err != nil {
-					errorf("transport: Error while flushing. Err: %v", err)
-					return
-				}
-				break hasData
-			}
-		}
-	}
-}

Some files were not shown because too many files changed in this diff