vendor: upgrade "grpc/grpc-go" to v1.11.1

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
commit 6e2bf40015
52 changed files with 4774 additions and 2316 deletions:

  +9    -3    Gopkg.lock
  +6    -8    vendor/google.golang.org/grpc/backoff.go
  +2    -1    vendor/google.golang.org/grpc/balancer.go
  +26   -9    vendor/google.golang.org/grpc/balancer/balancer.go
  +209  -0    vendor/google.golang.org/grpc/balancer/base/balancer.go
  +52   -0    vendor/google.golang.org/grpc/balancer/base/base.go
  +79   -0    vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
  +57   -9    vendor/google.golang.org/grpc/balancer_conn_wrappers.go
  +25   -17   vendor/google.golang.org/grpc/balancer_v1_wrapper.go
  +47   -261  vendor/google.golang.org/grpc/call.go
  +485  -239  vendor/google.golang.org/grpc/clientconn.go
  +17   -71   vendor/google.golang.org/grpc/codec.go
  +56   -10   vendor/google.golang.org/grpc/codes/code_string.go
  +53   -13   vendor/google.golang.org/grpc/codes/codes.go
  +12   -15   vendor/google.golang.org/grpc/credentials/credentials.go
  +118  -0    vendor/google.golang.org/grpc/encoding/encoding.go
  +110  -0    vendor/google.golang.org/grpc/encoding/proto/proto.go
  +70   -0    vendor/google.golang.org/grpc/go16.go
  +71   -0    vendor/google.golang.org/grpc/go17.go
  +229  -591  vendor/google.golang.org/grpc/grpclb.go
  +159  -0    vendor/google.golang.org/grpc/grpclb_picker.go
  +254  -0    vendor/google.golang.org/grpc/grpclb_remote_balancer.go
  +90   -0    vendor/google.golang.org/grpc/grpclb_util.go
  +2    -2    vendor/google.golang.org/grpc/health/health.go
  +3    -1    vendor/google.golang.org/grpc/interceptor.go
  +0    -7    vendor/google.golang.org/grpc/internal/internal.go
  +54   -7    vendor/google.golang.org/grpc/metadata/metadata.go
  +1    -1    vendor/google.golang.org/grpc/naming/go17.go
  +20   -3    vendor/google.golang.org/grpc/picker_wrapper.go
  +15   -2    vendor/google.golang.org/grpc/pickfirst.go
  +1    -2    vendor/google.golang.org/grpc/proxy.go
  +377  -0    vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
  +35   -0    vendor/google.golang.org/grpc/resolver/dns/go17.go
  +11   -3    vendor/google.golang.org/grpc/resolver/dns/go18.go
  +57   -0    vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
  +21   -16   vendor/google.golang.org/grpc/resolver/resolver.go
  +42   -23   vendor/google.golang.org/grpc/resolver_conn_wrapper.go
  +317  -207  vendor/google.golang.org/grpc/rpc_util.go
  +277  -110  vendor/google.golang.org/grpc/server.go
  +226  -0    vendor/google.golang.org/grpc/service_config.go
  +2    -0    vendor/google.golang.org/grpc/stats/stats.go
  +26   -5    vendor/google.golang.org/grpc/status/status.go
  +349  -266  vendor/google.golang.org/grpc/stream.go
  +3    -6    vendor/google.golang.org/grpc/transport/bdp_estimator.go
  +68   -45   vendor/google.golang.org/grpc/transport/control.go
  +51   -0    vendor/google.golang.org/grpc/transport/go16.go
  +52   -0    vendor/google.golang.org/grpc/transport/go17.go
  +56   -22   vendor/google.golang.org/grpc/transport/handler_server.go
  +189  -125  vendor/google.golang.org/grpc/transport/http2_client.go
  +111  -79   vendor/google.golang.org/grpc/transport/http2_server.go
  +56   -15   vendor/google.golang.org/grpc/transport/http_util.go
  +116  -122  vendor/google.golang.org/grpc/transport/transport.go

+ 9 - 3
Gopkg.lock

@@ -335,9 +335,13 @@
   packages = [
     ".",
     "balancer",
+    "balancer/base",
+    "balancer/roundrobin",
     "codes",
     "connectivity",
     "credentials",
+    "encoding",
+    "encoding/proto",
     "grpclb/grpc_lb_v1/messages",
     "grpclog",
     "health",
@@ -348,13 +352,15 @@
     "naming",
     "peer",
     "resolver",
+    "resolver/dns",
+    "resolver/passthrough",
     "stats",
     "status",
     "tap",
     "transport"
   ]
-  revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
-  version = "v1.7.5"
+  revision = "1e2570b1b19ade82d8dbb31bba4e65e9f9ef5b34"
+  version = "v1.11.1"
 
 [[projects]]
   name = "gopkg.in/cheggaaa/pb.v1"
@@ -371,6 +377,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "943bf7648c0129f59546321f569622e933f24a103b5d68525b82d5e47d52733f"
+  inputs-digest = "b747b3fd3120687183829e5d2d5b2d10bba1719402c9bcc7c955d27ab5f884a0"
   solver-name = "gps-cdcl"
   solver-version = 1

+ 6 - 8
vendor/google.golang.org/grpc/backoff.go

@@ -25,14 +25,12 @@ import (
 
 // DefaultBackoffConfig uses values specified for backoff in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-var (
-	DefaultBackoffConfig = BackoffConfig{
-		MaxDelay:  120 * time.Second,
-		baseDelay: 1.0 * time.Second,
-		factor:    1.6,
-		jitter:    0.2,
-	}
-)
+var DefaultBackoffConfig = BackoffConfig{
+	MaxDelay:  120 * time.Second,
+	baseDelay: 1.0 * time.Second,
+	factor:    1.6,
+	jitter:    0.2,
+}
 
 // backoffStrategy defines the methodology for backing off after a grpc
 // connection failure.
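
For reference, the parameters above combine as described in the
connection-backoff spec: the base delay grows by the factor per retry, gets
proportional jitter, and is capped at MaxDelay. A minimal standalone sketch
(the function and its loop shape are illustrative, not the exact library code):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // delay approximates what DefaultBackoffConfig implies for retry n:
    // baseDelay * factor^n, +/-20% jitter, capped at MaxDelay.
    func delay(retries int) time.Duration {
    	const (
    		baseDelay = float64(time.Second)
    		factor    = 1.6
    		jitter    = 0.2
    		maxDelay  = float64(120 * time.Second)
    	)
    	backoff := baseDelay
    	for i := 0; i < retries && backoff < maxDelay; i++ {
    		backoff *= factor
    	}
    	if backoff > maxDelay {
    		backoff = maxDelay
    	}
    	// Randomize by +/-jitter so retries from many clients don't synchronize.
    	backoff *= 1 + jitter*(rand.Float64()*2-1)
    	return time.Duration(backoff)
    }

    func main() {
    	for i := 0; i < 4; i++ {
    		fmt.Println(i, delay(i))
    	}
    }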

+ 2 - 1
vendor/google.golang.org/grpc/balancer.go

@@ -28,6 +28,7 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/status"
 )
 
 // Address represents a server the client connects to.
@@ -310,7 +311,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
 	if !opts.BlockingWait {
 		if len(rr.addrs) == 0 {
 			rr.mu.Unlock()
-			err = Errorf(codes.Unavailable, "there is no address available")
+			err = status.Errorf(codes.Unavailable, "there is no address available")
 			return
 		}
 		// Returns the next addr on rr.addrs for failfast RPCs.

+ 26 - 9
vendor/google.golang.org/grpc/balancer/balancer.go

@@ -23,6 +23,7 @@ package balancer
 import (
 	"errors"
 	"net"
+	"strings"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/connectivity"
@@ -33,24 +34,23 @@ import (
 var (
 	// m is a map from name to balancer builder.
 	m = make(map[string]Builder)
-	// defaultBuilder is the default balancer to use.
-	defaultBuilder Builder // TODO(bar) install pickfirst as default.
 )
 
 // Register registers the balancer builder to the balancer map.
-// b.Name will be used as the name registered with this builder.
+// b.Name (lowercased) will be used as the name registered with
+// this builder.
 func Register(b Builder) {
-	m[b.Name()] = b
+	m[strings.ToLower(b.Name())] = b
 }
 
 // Get returns the balancer builder registered with the given name.
-// If no builder is register with the name, the default pickfirst will
-// be used.
+// Note that the comparison is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
 func Get(name string) Builder {
-	if b, ok := m[name]; ok {
+	if b, ok := m[strings.ToLower(name)]; ok {
 		return b
 	}
-	return defaultBuilder
+	return nil
 }
 
 // SubConn represents a gRPC sub connection.
@@ -66,6 +66,11 @@ func Get(name string) Builder {
 // When the connection encounters an error, it will reconnect immediately.
 // When the connection becomes IDLE, it will not reconnect unless Connect is
 // called.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. In situations like
+// testing, a new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
 type SubConn interface {
 	// UpdateAddresses updates the addresses used in this SubConn.
 	// gRPC checks if currently-connected address is still in the new list.
@@ -83,6 +88,11 @@ type SubConn interface {
 type NewSubConnOptions struct{}
 
 // ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. In situations like
+// testing, a new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
 type ClientConn interface {
 	// NewSubConn is called by balancer to create a new SubConn.
 	// It doesn't block and wait for the connections to be established.
@@ -99,6 +109,9 @@ type ClientConn interface {
 	// on the new picker to pick new SubConn.
 	UpdateBalancerState(s connectivity.State, p Picker)
 
+	// ResolveNow is called by the balancer to notify gRPC to do name resolution.
+	ResolveNow(resolver.ResolveNowOption)
+
 	// Target returns the dial target for this ClientConn.
 	Target() string
 }
@@ -131,6 +144,10 @@ type PickOptions struct{}
 type DoneInfo struct {
 	// Err is the rpc error the RPC finished with. It could be nil.
 	Err error
+	// BytesSent indicates if any bytes have been sent to the server.
+	BytesSent bool
+	// BytesReceived indicates if any byte has been received from the server.
+	BytesReceived bool
 }
 
 var (
@@ -161,7 +178,7 @@ type Picker interface {
 	// If a SubConn is returned:
 	// - If it is READY, gRPC will send the RPC on it;
 	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
-	//   this call until a new picker is updated and will call pick on the new picker.
+	//   until UpdateBalancerState() is called and will call pick on the new picker.
 	//
 	// If the returned error is not nil:
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
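
A usage sketch of the new registry semantics (case-insensitive names, nil on a
miss); the blank import is only there so round_robin's init() runs:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/balancer"
    	_ "google.golang.org/grpc/balancer/roundrobin" // init() registers "round_robin"
    )

    func main() {
    	// Register lowercases the builder's name and Get lowercases the query,
    	// so the lookup is case-insensitive.
    	if b := balancer.Get("Round_Robin"); b != nil {
    		fmt.Println("found builder:", b.Name()) // found builder: round_robin
    	}
    	// Unlike v1.7.x, a miss now returns nil explicitly rather than falling
    	// back to a default builder.
    	fmt.Println(balancer.Get("no-such-balancer") == nil) // true
    }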

+ 209 - 0
vendor/google.golang.org/grpc/balancer/base/balancer.go

@@ -0,0 +1,209 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package base
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+type baseBuilder struct {
+	name          string
+	pickerBuilder PickerBuilder
+}
+
+func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	return &baseBalancer{
+		cc:            cc,
+		pickerBuilder: bb.pickerBuilder,
+
+		subConns: make(map[resolver.Address]balancer.SubConn),
+		scStates: make(map[balancer.SubConn]connectivity.State),
+		csEvltr:  &connectivityStateEvaluator{},
+		// Initialize picker to a picker that always returns
+		// ErrNoSubConnAvailable, because when the state of a SubConn changes,
+		// we may call UpdateBalancerState with this picker.
+		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
+	}
+}
+
+func (bb *baseBuilder) Name() string {
+	return bb.name
+}
+
+type baseBalancer struct {
+	cc            balancer.ClientConn
+	pickerBuilder PickerBuilder
+
+	csEvltr *connectivityStateEvaluator
+	state   connectivity.State
+
+	subConns map[resolver.Address]balancer.SubConn
+	scStates map[balancer.SubConn]connectivity.State
+	picker   balancer.Picker
+}
+
+func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	if err != nil {
+		grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
+		return
+	}
+	grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
+	// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
+	addrsSet := make(map[resolver.Address]struct{})
+	for _, a := range addrs {
+		addrsSet[a] = struct{}{}
+		if _, ok := b.subConns[a]; !ok {
+			// a is a new address (not existing in b.subConns).
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+			if err != nil {
+				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				continue
+			}
+			b.subConns[a] = sc
+			b.scStates[sc] = connectivity.Idle
+			sc.Connect()
+		}
+	}
+	for a, sc := range b.subConns {
+		// a was removed by resolver.
+		if _, ok := addrsSet[a]; !ok {
+			b.cc.RemoveSubConn(sc)
+			delete(b.subConns, a)
+			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+			// The entry will be deleted in HandleSubConnStateChange.
+		}
+	}
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker is
+//  - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
+//  - built by the pickerBuilder with all READY SubConns otherwise.
+func (b *baseBalancer) regeneratePicker() {
+	if b.state == connectivity.TransientFailure {
+		b.picker = NewErrPicker(balancer.ErrTransientFailure)
+		return
+	}
+	readySCs := make(map[resolver.Address]balancer.SubConn)
+
+	// Filter out all ready SCs from full subConn map.
+	for addr, sc := range b.subConns {
+		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+			readySCs[addr] = sc
+		}
+	}
+	b.picker = b.pickerBuilder.Build(readySCs)
+}
+
+func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	oldS, ok := b.scStates[sc]
+	if !ok {
+		grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		return
+	}
+	b.scStates[sc] = s
+	switch s {
+	case connectivity.Idle:
+		sc.Connect()
+	case connectivity.Shutdown:
+		// When an address was removed by resolver, b called RemoveSubConn but
+		// kept the sc's state in scStates. Remove state for this sc here.
+		delete(b.scStates, sc)
+	}
+
+	oldAggrState := b.state
+	b.state = b.csEvltr.recordTransition(oldS, s)
+
+	// Regenerate picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
+		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+		b.regeneratePicker()
+	}
+
+	b.cc.UpdateBalancerState(b.state, b.picker)
+	return
+}
+
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (b *baseBalancer) Close() {
+}
+
+// NewErrPicker returns a picker that always returns err on Pick().
+func NewErrPicker(err error) balancer.Picker {
+	return &errPicker{err: err}
+}
+
+type errPicker struct {
+	err error // Pick() always returns this err.
+}
+
+func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	return nil, nil, p.err
+}
+
+// connectivityStateEvaluator gets updated by addrConns when their
+// states transition, based on which it evaluates the state of
+// ClientConn.
+type connectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// recordTransition records the state change of a subConn and, based on that,
+// evaluates what the aggregated state should be.
+// The aggregated state can only be Ready, Connecting or TransientFailure. The
+// other states, Idle and Shutdown, are entered only by ClientConn: at the
+// beginning of the connection, before any subConn is created, ClientConn is in
+// the Idle state, and at the end, when ClientConn closes, it is in the
+// Shutdown state.
+//
+// recordTransition should only be called synchronously from the same goroutine.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		}
+	}
+
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	return connectivity.TransientFailure
+}
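
One subtlety in recordTransition above: updateVal is a uint64, so for idx == 0
the expression 2*uint64(idx) - 1 wraps around to MaxUint64, which under modular
arithmetic behaves exactly like -1 and decrements the counter for the old
state. A standalone demonstration:

    package main

    import "fmt"

    func main() {
    	var numConnecting uint64 = 1
    	idx := 0                       // position of oldState in the loop
    	updateVal := 2*uint64(idx) - 1 // wraps to 1<<64 - 1
    	numConnecting += updateVal     // modular arithmetic: acts as -1
    	fmt.Println(numConnecting)     // 0
    }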

+ 52 - 0
vendor/google.golang.org/grpc/balancer/base/base.go

@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package base defines a balancer base that can be used to build balancers with
+// different picking algorithms.
+//
+// The base balancer creates a new SubConn for each resolved address. The
+// provided picker will only be notified about READY SubConns.
+//
+// This package is the base of the round_robin balancer; its purpose is to be
+// used to build round_robin-like balancers with complex picking algorithms.
+// Balancers with more complicated logic should implement a balancer builder
+// from scratch.
+//
+// All APIs in this package are experimental.
+package base
+
+import (
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// PickerBuilder creates balancer.Picker.
+type PickerBuilder interface {
+	// Build takes a slice of ready SubConns, and returns a picker that will be
+	// used by gRPC to pick a SubConn.
+	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
+}
+
+// NewBalancerBuilder returns a balancer builder. The balancers
+// built by this builder will use the picker builder to build pickers.
+func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
+	return &baseBuilder{
+		name:          name,
+		pickerBuilder: pb,
+	}
+}
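
To make the intended use concrete, here is a hedged sketch of a custom balancer
built on NewBalancerBuilder: a picker that always selects one ready SubConn.
The package name "firstready" and the builder name "first_ready" are invented
for this example; only the base and balancer APIs come from this diff:

    package firstready

    import (
    	"golang.org/x/net/context"

    	"google.golang.org/grpc/balancer"
    	"google.golang.org/grpc/balancer/base"
    	"google.golang.org/grpc/resolver"
    )

    func init() {
    	// base handles SubConn bookkeeping; we only supply the picking logic.
    	balancer.Register(base.NewBalancerBuilder("first_ready", &pickerBuilder{}))
    }

    type pickerBuilder struct{}

    func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
    	if len(readySCs) == 0 {
    		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
    	}
    	var sc balancer.SubConn
    	for _, s := range readySCs {
    		sc = s // map iteration order is unspecified; any ready SubConn will do
    		break
    	}
    	return &picker{sc: sc}
    }

    type picker struct{ sc balancer.SubConn }

    func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
    	return p.sc, nil, nil
    }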

+ 79 - 0
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go

@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package roundrobin defines a round-robin balancer. The round-robin balancer
+// is installed as one of the default balancers in gRPC; users don't need to
+// install this balancer explicitly.
+package roundrobin
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
+}
+
+func init() {
+	balancer.Register(newBuilder())
+}
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
+	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
+	var scs []balancer.SubConn
+	for _, sc := range readySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
+	}
+}
+
+type rrPicker struct {
+	// subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Pick() will do a round-robin
+	// selection from it and return the selected SubConn.
+	subConns []balancer.SubConn
+
+	mu   sync.Mutex
+	next int
+}
+
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	if len(p.subConns) <= 0 {
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+
+	p.mu.Lock()
+	sc := p.subConns[p.next]
+	p.next = (p.next + 1) % len(p.subConns)
+	p.mu.Unlock()
+	return sc, nil, nil
+}
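
Stripped of the gRPC plumbing, the picking logic is just an immutable snapshot
plus a mutex-guarded cursor; a self-contained sketch:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // rr mirrors rrPicker: a fixed snapshot of ready conns and a cursor.
    type rr struct {
    	conns []string
    	mu    sync.Mutex
    	next  int
    }

    func (p *rr) pick() string {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	c := p.conns[p.next]
    	p.next = (p.next + 1) % len(p.conns)
    	return c
    }

    func main() {
    	p := &rr{conns: []string{"a:2379", "b:2379", "c:2379"}}
    	for i := 0; i < 5; i++ {
    		fmt.Println(p.pick()) // a:2379 b:2379 c:2379 a:2379 b:2379
    	}
    }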

+ 57 - 9
vendor/google.golang.org/grpc/balancer_conn_wrappers.go

@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"fmt"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
@@ -73,7 +74,7 @@ func (b *scStateUpdateBuffer) load() {
 	}
 }
 
-// get returns the channel that receives a recvMsg in the buffer.
+// get returns the channel that the scStateUpdate will be sent to.
 //
 // Upon receiving, the caller should call load to send another
 // scStateChangeTuple onto the channel if there is any.
@@ -96,6 +97,9 @@ type ccBalancerWrapper struct {
 	stateChangeQueue *scStateUpdateBuffer
 	resolverUpdateCh chan *resolverUpdate
 	done             chan struct{}
+
+	mu       sync.Mutex
+	subConns map[*acBalancerWrapper]struct{}
 }
 
 func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
@@ -104,6 +108,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
 		stateChangeQueue: newSCStateUpdateBuffer(),
 		resolverUpdateCh: make(chan *resolverUpdate, 1),
 		done:             make(chan struct{}),
+		subConns:         make(map[*acBalancerWrapper]struct{}),
 	}
 	go ccb.watcher()
 	ccb.balancer = b.Build(ccb, bopts)
@@ -117,8 +122,20 @@ func (ccb *ccBalancerWrapper) watcher() {
 		select {
 		case t := <-ccb.stateChangeQueue.get():
 			ccb.stateChangeQueue.load()
+			select {
+			case <-ccb.done:
+				ccb.balancer.Close()
+				return
+			default:
+			}
 			ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
 		case t := <-ccb.resolverUpdateCh:
+			select {
+			case <-ccb.done:
+				ccb.balancer.Close()
+				return
+			default:
+			}
 			ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
 		case <-ccb.done:
 		}
@@ -126,6 +143,13 @@ func (ccb *ccBalancerWrapper) watcher() {
 		select {
 		case <-ccb.done:
 			ccb.balancer.Close()
+			ccb.mu.Lock()
+			scs := ccb.subConns
+			ccb.subConns = nil
+			ccb.mu.Unlock()
+			for acbw := range scs {
+				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+			}
 			return
 		default:
 		}
@@ -165,33 +189,54 @@ func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs)
+	if len(addrs) <= 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
+	}
 	ac, err := ccb.cc.newAddrConn(addrs)
 	if err != nil {
 		return nil, err
 	}
 	acbw := &acBalancerWrapper{ac: ac}
-	ac.mu.Lock()
+	acbw.ac.mu.Lock()
 	ac.acbw = acbw
-	ac.mu.Unlock()
+	acbw.ac.mu.Unlock()
+	ccb.subConns[acbw] = struct{}{}
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	grpclog.Infof("ccBalancerWrapper: removing subconn")
 	acbw, ok := sc.(*acBalancerWrapper)
 	if !ok {
 		return
 	}
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
+	delete(ccb.subConns, acbw)
 	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 }
 
 func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-	grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p)
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
 	ccb.cc.csMgr.updateState(s)
 	ccb.cc.blockingpicker.updatePicker(p)
 }
 
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
+	ccb.cc.resolveNow(o)
+}
+
 func (ccb *ccBalancerWrapper) Target() string {
 	return ccb.cc.target
 }
@@ -204,9 +249,12 @@ type acBalancerWrapper struct {
 }
 
 func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
-	grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs)
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
+	if len(addrs) <= 0 {
+		acbw.ac.tearDown(errConnDrain)
+		return
+	}
 	if !acbw.ac.tryUpdateAddrs(addrs) {
 		cc := acbw.ac.cc
 		acbw.ac.mu.Lock()
@@ -234,7 +282,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 		ac.acbw = acbw
 		ac.mu.Unlock()
 		if acState != connectivity.Idle {
-			ac.connect(false)
+			ac.connect()
 		}
 	}
 }
@@ -242,7 +290,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 func (acbw *acBalancerWrapper) Connect() {
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
-	acbw.ac.connect(false)
+	acbw.ac.connect()
 }
 
 func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
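
The watcher change above re-checks ccb.done before dispatching each dequeued
update, so a racing Close wins over buffered state changes. The same pattern in
isolation (a generic sketch, not gRPC code):

    package main

    import "fmt"

    func main() {
    	updates := make(chan int, 1)
    	done := make(chan struct{})
    	updates <- 42
    	close(done) // Close() raced ahead of a buffered update.

    	select {
    	case u := <-updates:
    		// An update was dequeued, but prefer done: re-check it before
    		// handling, exactly as the watcher does.
    		select {
    		case <-done:
    			fmt.Println("closed; dropping update", u)
    			return
    		default:
    		}
    		fmt.Println("handling update", u)
    	case <-done:
    		fmt.Println("closed")
    	}
    }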

+ 25 - 17
vendor/google.golang.org/grpc/balancer_v1_wrapper.go

@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"strings"
 	"sync"
 
 	"golang.org/x/net/context"
@@ -27,6 +28,7 @@ import (
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
 type balancerWrapperBuilder struct {
@@ -34,20 +36,27 @@ type balancerWrapperBuilder struct {
 }
 
 func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	bwb.b.Start(cc.Target(), BalancerConfig{
+	targetAddr := cc.Target()
+	targetSplitted := strings.Split(targetAddr, ":///")
+	if len(targetSplitted) >= 2 {
+		targetAddr = targetSplitted[1]
+	}
+
+	bwb.b.Start(targetAddr, BalancerConfig{
 		DialCreds: opts.DialCreds,
 		Dialer:    opts.Dialer,
 	})
 	_, pickfirst := bwb.b.(*pickFirst)
 	bw := &balancerWrapper{
-		balancer:  bwb.b,
-		pickfirst: pickfirst,
-		cc:        cc,
-		startCh:   make(chan struct{}),
-		conns:     make(map[resolver.Address]balancer.SubConn),
-		connSt:    make(map[balancer.SubConn]*scState),
-		csEvltr:   &connectivityStateEvaluator{},
-		state:     connectivity.Idle,
+		balancer:   bwb.b,
+		pickfirst:  pickfirst,
+		cc:         cc,
+		targetAddr: targetAddr,
+		startCh:    make(chan struct{}),
+		conns:      make(map[resolver.Address]balancer.SubConn),
+		connSt:     make(map[balancer.SubConn]*scState),
+		csEvltr:    &connectivityStateEvaluator{},
+		state:      connectivity.Idle,
 	}
 	cc.UpdateBalancerState(connectivity.Idle, bw)
 	go bw.lbWatcher()
@@ -68,7 +77,8 @@ type balancerWrapper struct {
 	balancer  Balancer // The v1 balancer.
 	pickfirst bool
 
-	cc balancer.ClientConn
+	cc         balancer.ClientConn
+	targetAddr string // Target without the scheme.
 
 	// To aggregate the connectivity state.
 	csEvltr *connectivityStateEvaluator
@@ -88,12 +98,11 @@ type balancerWrapper struct {
 // connections accordingly.
 func (bw *balancerWrapper) lbWatcher() {
 	<-bw.startCh
-	grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst)
 	notifyCh := bw.balancer.Notify()
 	if notifyCh == nil {
 		// There's no resolver in the balancer. Connect directly.
 		a := resolver.Address{
-			Addr: bw.cc.Target(),
+			Addr: bw.targetAddr,
 			Type: resolver.Backend,
 		}
 		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
@@ -103,7 +112,7 @@ func (bw *balancerWrapper) lbWatcher() {
 			bw.mu.Lock()
 			bw.conns[a] = sc
 			bw.connSt[sc] = &scState{
-				addr: Address{Addr: bw.cc.Target()},
+				addr: Address{Addr: bw.targetAddr},
 				s:    connectivity.Idle,
 			}
 			bw.mu.Unlock()
@@ -165,10 +174,10 @@ func (bw *balancerWrapper) lbWatcher() {
 					sc.Connect()
 				}
 			} else {
-				oldSC.UpdateAddresses(newAddrs)
 				bw.mu.Lock()
 				bw.connSt[oldSC].addr = addrs[0]
 				bw.mu.Unlock()
+				oldSC.UpdateAddresses(newAddrs)
 			}
 		} else {
 			var (
@@ -221,7 +230,6 @@ func (bw *balancerWrapper) lbWatcher() {
 }
 
 func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s)
 	bw.mu.Lock()
 	defer bw.mu.Unlock()
 	scSt, ok := bw.connSt[sc]
@@ -310,12 +318,12 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
 			Metadata:   a.Metadata,
 		}]
 		if !ok && failfast {
-			return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
+			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
 		}
 		if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
 			// If the returned sc is not ready and RPC is failfast,
 			// return error, and this RPC will fail.
-			return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
+			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
 		}
 	}
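
The Build change above strips a URI-style scheme from the dial target before
handing it to a v1 balancer; in isolation:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	targetAddr := "dns:///example.com:2379"
    	if parts := strings.Split(targetAddr, ":///"); len(parts) >= 2 {
    		targetAddr = parts[1]
    	}
    	fmt.Println(targetAddr) // example.com:2379
    }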
 

+ 47 - 261
vendor/google.golang.org/grpc/call.go

@@ -19,289 +19,75 @@
 package grpc
 
 import (
-	"bytes"
-	"io"
-	"time"
-
 	"golang.org/x/net/context"
-	"golang.org/x/net/trace"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/peer"
-	"google.golang.org/grpc/stats"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
 )
 
-// recvResponse receives and parses an RPC response.
-// On error, it returns the error and indicates whether the call should be retried.
+// Invoke sends the RPC request on the wire and returns after the response is
+// received.  This is typically called by generated code.
 //
-// TODO(zhaoq): Check whether the received message sequence is valid.
-// TODO ctx is used for stats collection and processing. It is the context passed from the application.
-func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
-	// Try to acquire header metadata from the server if there is any.
-	defer func() {
-		if err != nil {
-			if _, ok := err.(transport.ConnectionError); !ok {
-				t.CloseStream(stream, err)
-			}
-		}
-	}()
-	c.headerMD, err = stream.Header()
-	if err != nil {
-		return
-	}
-	p := &parser{r: stream}
-	var inPayload *stats.InPayload
-	if dopts.copts.StatsHandler != nil {
-		inPayload = &stats.InPayload{
-			Client: true,
-		}
-	}
-	for {
-		if c.maxReceiveMessageSize == nil {
-			return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
-		}
-		if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
-			if err == io.EOF {
-				break
-			}
-			return
-		}
-	}
-	if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
-		// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
-		// Fix the order if necessary.
-		dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
-	}
-	c.trailerMD = stream.Trailer()
-	return nil
-}
+// All errors returned by Invoke are compatible with the status package.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+	// Allow the interceptor to see all applicable call options, which means
+	// those configured as defaults from dial options as well as per-call options.
+	opts = combine(cc.dopts.callOptions, opts)
 
-// sendRequest writes out various information of an RPC such as Context and Message.
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
-	defer func() {
-		if err != nil {
-			// If err is connection error, t will be closed, no need to close stream here.
-			if _, ok := err.(transport.ConnectionError); !ok {
-				t.CloseStream(stream, err)
-			}
-		}
-	}()
-	var (
-		cbuf       *bytes.Buffer
-		outPayload *stats.OutPayload
-	)
-	if compressor != nil {
-		cbuf = new(bytes.Buffer)
-	}
-	if dopts.copts.StatsHandler != nil {
-		outPayload = &stats.OutPayload{
-			Client: true,
-		}
-	}
-	hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
-	if err != nil {
-		return err
-	}
-	if c.maxSendMessageSize == nil {
-		return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
-	}
-	if len(data) > *c.maxSendMessageSize {
-		return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
-	}
-	err = t.Write(stream, hdr, data, opts)
-	if err == nil && outPayload != nil {
-		outPayload.SentTime = time.Now()
-		dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
-	}
-	// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
-	// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
-	// recvResponse to get the final status.
-	if err != nil && err != io.EOF {
-		return err
-	}
-	// Sent successfully.
-	return nil
-}
-
-// Invoke sends the RPC request on the wire and returns after response is received.
-// Invoke is called by generated code. Also users can call Invoke directly when it
-// is really needed in their use cases.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
 	if cc.dopts.unaryInt != nil {
 		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
 	}
 	return invoke(ctx, method, args, reply, cc, opts...)
 }
 
-func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
-	c := defaultCallInfo()
-	mc := cc.GetMethodConfig(method)
-	if mc.WaitForReady != nil {
-		c.failFast = !*mc.WaitForReady
-	}
-
-	if mc.Timeout != nil && *mc.Timeout >= 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
-		defer cancel()
-	}
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+	// we don't use append because o1 could have extra capacity whose
+	// elements would be overwritten, which could cause inadvertent
+	// sharing (and race conditions) between concurrent calls
+	if len(o1) == 0 {
+		return o2
+	} else if len(o2) == 0 {
+		return o1
+	}
+	ret := make([]CallOption, len(o1)+len(o2))
+	copy(ret, o1)
+	copy(ret[len(o1):], o2)
+	return ret
+}
 
-	opts = append(cc.dopts.callOptions, opts...)
-	for _, o := range opts {
-		if err := o.before(c); err != nil {
-			return toRPCErr(err)
-		}
-	}
-	defer func() {
-		for _, o := range opts {
-			o.after(c)
-		}
-	}()
+// Invoke sends the RPC request on the wire and returns after the response is
+// received.  This is typically called by generated code.
+//
+// DEPRECATED: Use ClientConn.Invoke instead.
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return cc.Invoke(ctx, method, args, reply, opts...)
+}
 
-	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
-	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
 
-	if EnableTracing {
-		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
-		defer c.traceInfo.tr.Finish()
-		c.traceInfo.firstLine.client = true
-		if deadline, ok := ctx.Deadline(); ok {
-			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
-		}
-		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
-		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
-		defer func() {
-			if e != nil {
-				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
-				c.traceInfo.tr.SetError()
-			}
-		}()
-	}
-	ctx = newContextWithRPCInfo(ctx, c.failFast)
-	sh := cc.dopts.copts.StatsHandler
-	if sh != nil {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
-		begin := &stats.Begin{
-			Client:    true,
-			BeginTime: time.Now(),
-			FailFast:  c.failFast,
-		}
-		sh.HandleRPC(ctx, begin)
-		defer func() {
-			end := &stats.End{
-				Client:  true,
-				EndTime: time.Now(),
-				Error:   e,
-			}
-			sh.HandleRPC(ctx, end)
-		}()
-	}
-	topts := &transport.Options{
-		Last:  true,
-		Delay: false,
-	}
+func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	// TODO: implement retries in clientStream and make this simply
+	// newClientStream, SendMsg, RecvMsg.
+	firstAttempt := true
 	for {
-		var (
-			err    error
-			t      transport.ClientTransport
-			stream *transport.Stream
-			// Record the done handler from Balancer.Get(...). It is called once the
-			// RPC has completed or failed.
-			done func(balancer.DoneInfo)
-		)
-		// TODO(zhaoq): Need a formal spec of fail-fast.
-		callHdr := &transport.CallHdr{
-			Host:   cc.authority,
-			Method: method,
-		}
-		if cc.dopts.cp != nil {
-			callHdr.SendCompress = cc.dopts.cp.Type()
-		}
-		if c.creds != nil {
-			callHdr.Creds = c.creds
-		}
-
-		t, done, err = cc.getTransport(ctx, c.failFast)
+		csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
 		if err != nil {
-			// TODO(zhaoq): Probably revisit the error handling.
-			if _, ok := status.FromError(err); ok {
-				return err
-			}
-			if err == errConnClosing || err == errConnUnavailable {
-				if c.failFast {
-					return Errorf(codes.Unavailable, "%v", err)
-				}
-				continue
-			}
-			// All the other errors are treated as Internal errors.
-			return Errorf(codes.Internal, "%v", err)
-		}
-		if c.traceInfo.tr != nil {
-			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
-		}
-		stream, err = t.NewStream(ctx, callHdr)
-		if err != nil {
-			if done != nil {
-				if _, ok := err.(transport.ConnectionError); ok {
-					// If error is connection error, transport was sending data on wire,
-					// and we are not sure if anything has been sent on wire.
-					// If error is not connection error, we are sure nothing has been sent.
-					updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
-				}
-				done(balancer.DoneInfo{Err: err})
-			}
-			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
-				continue
-			}
-			return toRPCErr(err)
-		}
-		if peer, ok := peer.FromContext(stream.Context()); ok {
-			c.peer = peer
+			return err
 		}
-		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
-		if err != nil {
-			if done != nil {
-				updateRPCInfoInContext(ctx, rpcInfo{
-					bytesSent:     stream.BytesSent(),
-					bytesReceived: stream.BytesReceived(),
-				})
-				done(balancer.DoneInfo{Err: err})
-			}
-			// Retry a non-failfast RPC when
-			// i) there is a connection error; or
-			// ii) the server started to drain before this RPC was initiated.
-			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+		cs := csInt.(*clientStream)
+		if err := cs.SendMsg(req); err != nil {
+			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
+				// TODO: Add a field to header for grpc-transparent-retry-attempts
+				firstAttempt = false
 				continue
 			}
-			return toRPCErr(err)
+			return err
 		}
-		err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
-		if err != nil {
-			if done != nil {
-				updateRPCInfoInContext(ctx, rpcInfo{
-					bytesSent:     stream.BytesSent(),
-					bytesReceived: stream.BytesReceived(),
-				})
-				done(balancer.DoneInfo{Err: err})
-			}
-			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+		if err := cs.RecvMsg(reply); err != nil {
+			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
+				// TODO: Add a field to header for grpc-transparent-retry-attempts
+				firstAttempt = false
 				continue
 			}
-			return toRPCErr(err)
-		}
-		if c.traceInfo.tr != nil {
-			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
-		}
-		t.CloseStream(stream, nil)
-		if done != nil {
-			updateRPCInfoInContext(ctx, rpcInfo{
-				bytesSent:     stream.BytesSent(),
-				bytesReceived: stream.BytesReceived(),
-			})
-			done(balancer.DoneInfo{Err: err})
+			return err
 		}
-		return stream.Status().Err()
+		return nil
 	}
 }
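
The reason combine avoids append is worth spelling out: if o1 has spare
capacity, append(o1, o2...) writes into o1's backing array, so two concurrent
calls combining different per-call options could race on the same memory. A
standalone demonstration of the aliasing (not gRPC code):

    package main

    import "fmt"

    func main() {
    	base := make([]int, 2, 4) // len 2 with spare capacity
    	base[0], base[1] = 1, 2

    	a := append(base, 10)   // reuses base's backing array
    	b := append(base, 20)   // overwrites the same slot
    	fmt.Println(a[2], b[2]) // 20 20 -- a[2] was clobbered

    	// Copying into a fresh slice, as combine does, avoids the sharing.
    	c := make([]int, len(base)+1)
    	copy(c, base)
    	c[len(base)] = 10
    	fmt.Println(c) // [1 2 10]
    }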

File diff suppressed because it is too large
+ 485 - 239
vendor/google.golang.org/grpc/clientconn.go


+ 17 - 71
vendor/google.golang.org/grpc/codec.go

@@ -19,86 +19,32 @@
 package grpc
 
 import (
-	"math"
-	"sync"
-
-	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
 )
 
+// baseCodec contains the functionality of both Codec and encoding.Codec, but
+// omits the name/string, which vary between the two and are not needed for
+// anything besides the registry in the encoding package.
+type baseCodec interface {
+	Marshal(v interface{}) ([]byte, error)
+	Unmarshal(data []byte, v interface{}) error
+}
+
+var _ baseCodec = Codec(nil)
+var _ baseCodec = encoding.Codec(nil)
+
 // Codec defines the interface gRPC uses to encode and decode messages.
 // Note that implementations of this interface must be thread safe;
 // a Codec's methods can be called from concurrent goroutines.
+//
+// Deprecated: use encoding.Codec instead.
 type Codec interface {
 	// Marshal returns the wire format of v.
 	Marshal(v interface{}) ([]byte, error)
 	// Unmarshal parses the wire format into v.
 	Unmarshal(data []byte, v interface{}) error
-	// String returns the name of the Codec implementation. The returned
-	// string will be used as part of content type in transmission.
+	// String returns the name of the Codec implementation.  This is unused by
+	// gRPC.
 	String() string
 }
-
-// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type protoCodec struct {
-}
-
-type cachedProtoBuffer struct {
-	lastMarshaledSize uint32
-	proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
-	if val > math.MaxInt32 {
-		return uint32(math.MaxInt32)
-	}
-	return uint32(val)
-}
-
-func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
-	protoMsg := v.(proto.Message)
-	newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
-	cb.SetBuf(newSlice)
-	cb.Reset()
-	if err := cb.Marshal(protoMsg); err != nil {
-		return nil, err
-	}
-	out := cb.Bytes()
-	cb.lastMarshaledSize = capToMaxInt32(len(out))
-	return out, nil
-}
-
-func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	out, err := p.marshal(v, cb)
-
-	// put back buffer and lose the ref to the slice
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return out, err
-}
-
-func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	cb.SetBuf(data)
-	v.(proto.Message).Reset()
-	err := cb.Unmarshal(v.(proto.Message))
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return err
-}
-
-func (protoCodec) String() string {
-	return "proto"
-}
-
-var (
-	protoBufferPool = &sync.Pool{
-		New: func() interface{} {
-			return &cachedProtoBuffer{
-				Buffer:            proto.Buffer{},
-				lastMarshaledSize: 16,
-			}
-		},
-	}
-)

+ 56 - 10
vendor/google.golang.org/grpc/codes/code_string.go

@@ -1,16 +1,62 @@
-// Code generated by "stringer -type=Code"; DO NOT EDIT.
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
 
 package codes
 
-import "fmt"
+import "strconv"
 
-const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
-
-var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
-
-func (i Code) String() string {
-	if i >= Code(len(_Code_index)-1) {
-		return fmt.Sprintf("Code(%d)", i)
+func (c Code) String() string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "Canceled"
+	case Unknown:
+		return "Unknown"
+	case InvalidArgument:
+		return "InvalidArgument"
+	case DeadlineExceeded:
+		return "DeadlineExceeded"
+	case NotFound:
+		return "NotFound"
+	case AlreadyExists:
+		return "AlreadyExists"
+	case PermissionDenied:
+		return "PermissionDenied"
+	case ResourceExhausted:
+		return "ResourceExhausted"
+	case FailedPrecondition:
+		return "FailedPrecondition"
+	case Aborted:
+		return "Aborted"
+	case OutOfRange:
+		return "OutOfRange"
+	case Unimplemented:
+		return "Unimplemented"
+	case Internal:
+		return "Internal"
+	case Unavailable:
+		return "Unavailable"
+	case DataLoss:
+		return "DataLoss"
+	case Unauthenticated:
+		return "Unauthenticated"
+	default:
+		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
 	}
-	return _Code_name[_Code_index[i]:_Code_index[i+1]]
 }

+ 53 - 13
vendor/google.golang.org/grpc/codes/codes.go

@@ -20,11 +20,13 @@
 // consistent across various languages.
 package codes // import "google.golang.org/grpc/codes"
 
+import (
+	"fmt"
+)
+
 // A Code is an unsigned 32-bit error code as defined in the gRPC spec.
 type Code uint32
 
-//go:generate stringer -type=Code
-
 const (
 	// OK is returned on success.
 	OK Code = 0
@@ -32,9 +34,9 @@ const (
 	// Canceled indicates the operation was canceled (typically by the caller).
 	Canceled Code = 1
 
-	// Unknown error.  An example of where this error may be returned is
+	// Unknown error. An example of where this error may be returned is
 	// if a Status value received from another address space belongs to
-	// an error-space that is not known in this address space.  Also
+	// an error-space that is not known in this address space. Also
 	// errors raised by APIs that do not return enough error information
 	// may be converted to this error.
 	Unknown Code = 2
@@ -63,15 +65,11 @@ const (
 	// PermissionDenied indicates the caller does not have permission to
 	// execute the specified operation. It must not be used for rejections
 	// caused by exhausting some resource (use ResourceExhausted
-	// instead for those errors).  It must not be
+	// instead for those errors). It must not be
 	// used if the caller cannot be identified (use Unauthenticated
 	// instead for those errors).
 	PermissionDenied Code = 7
 
-	// Unauthenticated indicates the request does not have valid
-	// authentication credentials for the operation.
-	Unauthenticated Code = 16
-
 	// ResourceExhausted indicates some resource has been exhausted, perhaps
 	// a per-user quota, or perhaps the entire file system is out of space.
 	ResourceExhausted Code = 8
@@ -87,7 +85,7 @@ const (
 	//  (b) Use Aborted if the client should retry at a higher-level
 	//      (e.g., restarting a read-modify-write sequence).
 	//  (c) Use FailedPrecondition if the client should not retry until
-	//      the system state has been explicitly fixed.  E.g., if an "rmdir"
+	//      the system state has been explicitly fixed. E.g., if an "rmdir"
 	//      fails because the directory is non-empty, FailedPrecondition
 	//      should be returned since the client should not retry unless
 	//      they have first fixed up the directory by deleting files from it.
@@ -116,7 +114,7 @@ const (
 	// file size.
 	//
 	// There is a fair bit of overlap between FailedPrecondition and
-	// OutOfRange.  We recommend using OutOfRange (the more specific
+	// OutOfRange. We recommend using OutOfRange (the more specific
 	// error) when it applies so that callers who are iterating through
 	// a space can easily look for an OutOfRange error to detect when
 	// they are done.
@@ -126,8 +124,8 @@ const (
 	// supported/enabled in this service.
 	Unimplemented Code = 12
 
-	// Internal errors.  Means some invariants expected by underlying
-	// system has been broken.  If you see one of these errors,
+	// Internal errors. Means some invariants expected by underlying
+	// system has been broken. If you see one of these errors,
 	// something is very broken.
 	Internal Code = 13
 
@@ -141,4 +139,46 @@ const (
 
 	// DataLoss indicates unrecoverable data loss or corruption.
 	DataLoss Code = 15
+
+	// Unauthenticated indicates the request does not have valid
+	// authentication credentials for the operation.
+	Unauthenticated Code = 16
 )
+
+var strToCode = map[string]Code{
+	`"OK"`: OK,
+	`"CANCELLED"`:/* [sic] */ Canceled,
+	`"UNKNOWN"`:             Unknown,
+	`"INVALID_ARGUMENT"`:    InvalidArgument,
+	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
+	`"NOT_FOUND"`:           NotFound,
+	`"ALREADY_EXISTS"`:      AlreadyExists,
+	`"PERMISSION_DENIED"`:   PermissionDenied,
+	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
+	`"FAILED_PRECONDITION"`: FailedPrecondition,
+	`"ABORTED"`:             Aborted,
+	`"OUT_OF_RANGE"`:        OutOfRange,
+	`"UNIMPLEMENTED"`:       Unimplemented,
+	`"INTERNAL"`:            Internal,
+	`"UNAVAILABLE"`:         Unavailable,
+	`"DATA_LOSS"`:           DataLoss,
+	`"UNAUTHENTICATED"`:     Unauthenticated,
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+	if jc, ok := strToCode[string(b)]; ok {
+		*c = jc
+		return nil
+	}
+	return fmt.Errorf("invalid code: %q", string(b))
+}
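
The keys of strToCode include the surrounding quotes because UnmarshalJSON
receives the raw JSON token. Decoding a code from JSON therefore works like
this:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"google.golang.org/grpc/codes"
    )

    func main() {
    	var c codes.Code
    	// The raw token `"UNAVAILABLE"`, quotes included, is looked up in strToCode.
    	if err := json.Unmarshal([]byte(`"UNAVAILABLE"`), &c); err != nil {
    		panic(err)
    	}
    	fmt.Println(c) // Unavailable
    }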

+ 12 - 15
vendor/google.golang.org/grpc/credentials/credentials.go

@@ -34,10 +34,8 @@ import (
 	"golang.org/x/net/context"
 )
 
-var (
-	// alpnProtoStr are the specified application level protocols for gRPC.
-	alpnProtoStr = []string{"h2"}
-)
+// alpnProtoStr are the specified application level protocols for gRPC.
+var alpnProtoStr = []string{"h2"}
 
 // PerRPCCredentials defines the common interface for the credentials which need to
 // attach security information to every RPC (e.g., oauth2).
@@ -45,8 +43,9 @@ type PerRPCCredentials interface {
 	// GetRequestMetadata gets the current request metadata, refreshing
 	// tokens if required. This should be called by the transport layer on
 	// each request, and the data should be populated in headers or other
-	// context. uri is the URI of the entry point for the request. When
-	// supported by the underlying implementation, ctx can be used for
+	// context. If a status code is returned, it will be used as the status
+	// for the RPC. uri is the URI of the entry point for the request.
+	// When supported by the underlying implementation, ctx can be used for
 	// timeout and cancellation.
 	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
 	// it as an arbitrary string.
@@ -74,11 +73,9 @@ type AuthInfo interface {
 	AuthType() string
 }
 
-var (
-	// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
-	// and the caller should not close rawConn.
-	ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
-)
+// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+// and the caller should not close rawConn.
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
 
 // TransportCredentials defines the common interface for all the live gRPC wire
 // protocols and supported transport security protocols (e.g., TLS, SSL).
@@ -135,15 +132,15 @@ func (c tlsCreds) Info() ProtocolInfo {
 	}
 }
 
-func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
 	// use local cfg to avoid clobbering ServerName if using multiple endpoints
 	cfg := cloneTLSConfig(c.config)
 	if cfg.ServerName == "" {
-		colonPos := strings.LastIndex(addr, ":")
+		colonPos := strings.LastIndex(authority, ":")
 		if colonPos == -1 {
-			colonPos = len(addr)
+			colonPos = len(authority)
 		}
-		cfg.ServerName = addr[:colonPos]
+		cfg.ServerName = authority[:colonPos]
 	}
 	conn := tls.Client(rawConn, cfg)
 	errChannel := make(chan error, 1)
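
The renamed authority parameter feeds the same host extraction as before:
everything up to the final colon becomes the TLS ServerName when none is
configured. In isolation:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	authority := "etcd.example.com:2379"
    	colonPos := strings.LastIndex(authority, ":")
    	if colonPos == -1 {
    		colonPos = len(authority)
    	}
    	fmt.Println(authority[:colonPos]) // etcd.example.com
    }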

+ 118 - 0
vendor/google.golang.org/grpc/encoding/encoding.go

@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package encoding defines the interface for the compressor and codec, and
+// functions to register and retrieve compressors and codecs.
+//
+// This package is EXPERIMENTAL.
+package encoding
+
+import (
+	"io"
+	"strings"
+)
+
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
+
+// Compressor is used for compressing and decompressing when sending or
+// receiving messages.
+type Compressor interface {
+	// Compress writes the data written to wc to w after compressing it.  If an
+	// error occurs while initializing the compressor, that error is returned
+	// instead.
+	Compress(w io.Writer) (io.WriteCloser, error)
+	// Decompress reads data from r, decompresses it, and provides the
+	// uncompressed data via the returned io.Reader.  If an error occurs while
+	// initializing the decompressor, that error is returned instead.
+	Decompress(r io.Reader) (io.Reader, error)
+	// Name is the name of the compression codec and is used to set the content
+	// coding header.  The result must be static; the result cannot change
+	// between calls.
+	Name() string
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name.  It can
+// be activated when sending an RPC via grpc.UseCompressor().  It will be
+// automatically accessed when receiving a message based on the content coding
+// header.  Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+	registeredCompressor[c.Name()] = c
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+	return registeredCompressor[name]
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages.  Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission.  The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+var registeredCodecs = make(map[string]Codec)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec.  This
+// is case-insensitive, and is stored and looked up as lowercase.  If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodec(codec Codec) {
+	if codec == nil {
+		panic("cannot register a nil Codec")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	if contentSubtype == "" {
+		panic("cannot register Codec with empty string result for String()")
+	}
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodec(contentSubtype string) Codec {
+	return registeredCodecs[contentSubtype]
+}
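
As a usage sketch for the registry above: a custom Codec registered from an
init() function, as the NOTE requires. The JSON marshaling choice and the
"json" name are illustrative assumptions, not part of this package:

    package jsoncodec

    import (
        "encoding/json"

        "google.golang.org/grpc/encoding"
    )

    // jsonCodec is a toy Codec. Marshal/Unmarshal must be safe for
    // concurrent use; encoding/json satisfies that.
    type jsonCodec struct{}

    func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                               { return "json" }

    func init() {
        // After this, encoding.GetCodec("json") returns the instance.
        encoding.RegisterCodec(jsonCodec{})
    }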

+ 110 - 0
vendor/google.golang.org/grpc/encoding/proto/proto.go

@@ -0,0 +1,110 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"math"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodec(codec{})
+}
+
+// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type codec struct{}
+
+type cachedProtoBuffer struct {
+	lastMarshaledSize uint32
+	proto.Buffer
+}
+
+func capToMaxInt32(val int) uint32 {
+	if val > math.MaxInt32 {
+		return uint32(math.MaxInt32)
+	}
+	return uint32(val)
+}
+
+func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
+	protoMsg := v.(proto.Message)
+	newSlice := make([]byte, 0, cb.lastMarshaledSize)
+
+	cb.SetBuf(newSlice)
+	cb.Reset()
+	if err := cb.Marshal(protoMsg); err != nil {
+		return nil, err
+	}
+	out := cb.Bytes()
+	cb.lastMarshaledSize = capToMaxInt32(len(out))
+	return out, nil
+}
+
+func (codec) Marshal(v interface{}) ([]byte, error) {
+	if pm, ok := v.(proto.Marshaler); ok {
+		// object can marshal itself, no need for buffer
+		return pm.Marshal()
+	}
+
+	cb := protoBufferPool.Get().(*cachedProtoBuffer)
+	out, err := marshal(v, cb)
+
+	// put back buffer and lose the ref to the slice
+	cb.SetBuf(nil)
+	protoBufferPool.Put(cb)
+	return out, err
+}
+
+func (codec) Unmarshal(data []byte, v interface{}) error {
+	protoMsg := v.(proto.Message)
+	protoMsg.Reset()
+
+	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
+		// object can unmarshal itself, no need for buffer
+		return pu.Unmarshal(data)
+	}
+
+	cb := protoBufferPool.Get().(*cachedProtoBuffer)
+	cb.SetBuf(data)
+	err := cb.Unmarshal(protoMsg)
+	cb.SetBuf(nil)
+	protoBufferPool.Put(cb)
+	return err
+}
+
+func (codec) Name() string {
+	return Name
+}
+
+var protoBufferPool = &sync.Pool{
+	New: func() interface{} {
+		return &cachedProtoBuffer{
+			Buffer:            proto.Buffer{},
+			lastMarshaledSize: 16,
+		}
+	},
+}
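
The codec recycles proto.Buffer values through a sync.Pool and seeds each
marshal buffer with lastMarshaledSize, so steady-state messages avoid
reallocation. A standalone sketch of the same pooling idea using bytes.Buffer
(not the codec itself):

    package main

    import (
        "bytes"
        "fmt"
        "sync"
    )

    // pool mirrors protoBufferPool: encoders are reused across calls
    // instead of being allocated per message.
    var pool = &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

    func encode(s string) []byte {
        buf := pool.Get().(*bytes.Buffer)
        buf.Reset()
        buf.WriteString(s)
        out := append([]byte(nil), buf.Bytes()...) // copy before releasing
        pool.Put(buf)
        return out
    }

    func main() {
        fmt.Printf("%s\n", encode("hello"))
    }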

+ 70 - 0
vendor/google.golang.org/grpc/go16.go

@@ -0,0 +1,70 @@
+// +build go1.6,!go1.7
+
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/transport"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req.Cancel = ctx.Done()
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.StreamError:
+		return status.Error(e.Code, e.Desc)
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}

+ 71 - 0
vendor/google.golang.org/grpc/go17.go

@@ -0,0 +1,71 @@
+// +build go1.7
+
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+
+	netctx "golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/transport"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return (&net.Dialer{}).DialContext(ctx, network, address)
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.StreamError:
+		return status.Error(e.Code, e.Desc)
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded, netctx.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled, netctx.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}
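
toRPCErr is unexported, but its mapping is what callers observe: every RPC
error can be unpacked with the status package. A small sketch of the caller
side (the error value here is constructed for illustration):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // classify inspects an RPC error the way a client can, relying on the
    // codes toRPCErr assigns (DeadlineExceeded, Unavailable, ...).
    func classify(err error) string {
        st, ok := status.FromError(err)
        if !ok {
            return "not a status error"
        }
        switch st.Code() {
        case codes.DeadlineExceeded:
            return "deadline exceeded"
        case codes.Unavailable:
            return "transport unavailable"
        default:
            return fmt.Sprintf("code %v: %s", st.Code(), st.Message())
        }
    }

    func main() {
        fmt.Println(classify(status.Error(codes.Unavailable, "connection error")))
    }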

+ 229 - 591
vendor/google.golang.org/grpc/grpclb.go

@@ -19,21 +19,32 @@
 package grpc
 
 import (
-	"errors"
-	"fmt"
-	"math/rand"
-	"net"
+	"strconv"
+	"strings"
 	"sync"
 	"time"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/resolver"
 )
 
+const (
+	lbTokeyKey             = "lb-token"
+	defaultFallbackTimeout = 10 * time.Second
+	grpclbName             = "grpclb"
+)
+
+func convertDuration(d *lbpb.Duration) time.Duration {
+	if d == nil {
+		return 0
+	}
+	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
+
 // Client API for LoadBalancer service.
 // Mostly copied from generated pb.go file.
 // To avoid circular dependency.
@@ -59,646 +70,273 @@ type balanceLoadClientStream struct {
 	ClientStream
 }
 
-func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error {
+func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
 	return x.ClientStream.SendMsg(m)
 }
 
-func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
-	m := new(lbmpb.LoadBalanceResponse)
+func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
+	m := new(lbpb.LoadBalanceResponse)
 	if err := x.ClientStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
 	return m, nil
 }
 
-// NewGRPCLBBalancer creates a grpclb load balancer.
-func NewGRPCLBBalancer(r naming.Resolver) Balancer {
-	return &grpclbBalancer{
-		r: r,
-	}
+func init() {
+	balancer.Register(newLBBuilder())
 }
 
-type remoteBalancerInfo struct {
-	addr string
-	// the server name used for authentication with the remote LB server.
-	name string
+// newLBBuilder creates a builder for grpclb.
+func newLBBuilder() balancer.Builder {
+	return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
 }
 
-// grpclbAddrInfo consists of the information of a backend server.
-type grpclbAddrInfo struct {
-	addr      Address
-	connected bool
-	// dropForRateLimiting indicates whether this particular request should be
-	// dropped by the client for rate limiting.
-	dropForRateLimiting bool
-	// dropForLoadBalancing indicates whether this particular request should be
-	// dropped by the client for load balancing.
-	dropForLoadBalancing bool
+// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
+// fallbackTimeout. If no response is received from the remote balancer within
+// fallbackTimeout, the backend addresses from the resolved address list will be
+// used.
+//
+// Only call this function when a non-default fallback timeout is needed.
+func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+	return &lbBuilder{
+		fallbackTimeout: fallbackTimeout,
+	}
 }
 
-type grpclbBalancer struct {
-	r      naming.Resolver
-	target string
-	mu     sync.Mutex
-	seq    int // a sequence number to make sure addrCh does not get stale addresses.
-	w      naming.Watcher
-	addrCh chan []Address
-	rbs    []remoteBalancerInfo
-	addrs  []*grpclbAddrInfo
-	next   int
-	waitCh chan struct{}
-	done   bool
-	rand   *rand.Rand
-
-	clientStats lbmpb.ClientStats
+type lbBuilder struct {
+	fallbackTimeout time.Duration
 }
 
-func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
-	updates, err := w.Next()
-	if err != nil {
-		grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
-		return err
-	}
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if b.done {
-		return ErrClientConnClosing
-	}
-	for _, update := range updates {
-		switch update.Op {
-		case naming.Add:
-			var exist bool
-			for _, v := range b.rbs {
-				// TODO: Is the same addr with different server name a different balancer?
-				if update.Addr == v.addr {
-					exist = true
-					break
-				}
-			}
-			if exist {
-				continue
-			}
-			md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
-			if !ok {
-				// TODO: Revisit the handling here and may introduce some fallback mechanism.
-				grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
-				continue
-			}
-			switch md.AddrType {
-			case naming.Backend:
-				// TODO: Revisit the handling here and may introduce some fallback mechanism.
-				grpclog.Errorf("The name resolution does not give grpclb addresses")
-				continue
-			case naming.GRPCLB:
-				b.rbs = append(b.rbs, remoteBalancerInfo{
-					addr: update.Addr,
-					name: md.ServerName,
-				})
-			default:
-				grpclog.Errorf("Received unknow address type %d", md.AddrType)
-				continue
-			}
-		case naming.Delete:
-			for i, v := range b.rbs {
-				if update.Addr == v.addr {
-					copy(b.rbs[i:], b.rbs[i+1:])
-					b.rbs = b.rbs[:len(b.rbs)-1]
-					break
-				}
-			}
-		default:
-			grpclog.Errorf("Unknown update.Op %v", update.Op)
-		}
+func (b *lbBuilder) Name() string {
+	return grpclbName
+}
+
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	// This generates a manual resolver builder with a random scheme. This
+	// scheme will be used to dial the remote LB, so we can send filtered
+	// address updates to the remote LB ClientConn using this manual resolver.
+	scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
+	r := &lbManualResolver{scheme: scheme, ccb: cc}
+
+	var target string
+	targetSplitted := strings.Split(cc.Target(), ":///")
+	if len(targetSplitted) < 2 {
+		target = cc.Target()
+	} else {
+		target = targetSplitted[1]
 	}
-	// TODO: Fall back to the basic round-robin load balancing if the resulting address is
-	// not a load balancer.
-	select {
-	case <-ch:
-	default:
+
+	lb := &lbBalancer{
+		cc:              cc,
+		target:          target,
+		opt:             opt,
+		fallbackTimeout: b.fallbackTimeout,
+		doneCh:          make(chan struct{}),
+
+		manualResolver: r,
+		csEvltr:        &connectivityStateEvaluator{},
+		subConns:       make(map[resolver.Address]balancer.SubConn),
+		scStates:       make(map[balancer.SubConn]connectivity.State),
+		picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
+		clientStats:    &rpcStats{},
 	}
-	ch <- b.rbs
-	return nil
+
+	return lb
 }
 
-func convertDuration(d *lbmpb.Duration) time.Duration {
-	if d == nil {
-		return 0
-	}
-	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+type lbBalancer struct {
+	cc              balancer.ClientConn
+	target          string
+	opt             balancer.BuildOptions
+	fallbackTimeout time.Duration
+	doneCh          chan struct{}
+
+	// manualResolver is used in the remote LB ClientConn inside grpclb. When
+	// resolved address updates are received by grpclb, filtered updates will
+	// be sent to the remote LB ClientConn through this resolver.
+	manualResolver *lbManualResolver
+	// The ClientConn to talk to the remote balancer.
+	ccRemoteLB *ClientConn
+
+	// Support client side load reporting. Each picker gets a reference to this,
+	// and will update its content.
+	clientStats *rpcStats
+
+	mu sync.Mutex // guards everything following.
+	// The full server list including drops, used to check if the newly received
+	// serverList contains anything new. Each generated picker will also have
+	// a reference to this list to do the first layer pick.
+	fullServerList []*lbpb.Server
+	// All backends addresses, with metadata set to nil. This list contains all
+	// backend addresses in the same order and with the same duplicates as in
+	// serverlist. When generating the picker, a SubConn slice with the same
+	// order but with only READY SCs will be generated.
+	backendAddrs []resolver.Address
+	// Roundrobin functionalities.
+	csEvltr  *connectivityStateEvaluator
+	state    connectivity.State
+	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
+	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+	picker   balancer.Picker
+	// Support fallback to resolved backend addresses if there's no response
+	// from remote balancer within fallbackTimeout.
+	fallbackTimerExpired bool
+	serverListReceived   bool
+	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+	// when resolved address updates are received, and read in the goroutine
+	// handling fallback.
+	resolvedBackendAddrs []resolver.Address
 }
 
-func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
-	if l == nil {
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
+//  - does a two-layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker() {
+	if lb.state == connectivity.TransientFailure {
+		lb.picker = &errPicker{err: balancer.ErrTransientFailure}
 		return
 	}
-	servers := l.GetServers()
-	var (
-		sl    []*grpclbAddrInfo
-		addrs []Address
-	)
-	for _, s := range servers {
-		md := metadata.Pairs("lb-token", s.LoadBalanceToken)
-		ip := net.IP(s.IpAddress)
-		ipStr := ip.String()
-		if ip.To4() == nil {
-			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
-			// net.SplitHostPort() will return too many colons error.
-			ipStr = fmt.Sprintf("[%s]", ipStr)
-		}
-		addr := Address{
-			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
-			Metadata: &md,
+	var readySCs []balancer.SubConn
+	for _, a := range lb.backendAddrs {
+		if sc, ok := lb.subConns[a]; ok {
+			if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+				readySCs = append(readySCs, sc)
+			}
 		}
-		sl = append(sl, &grpclbAddrInfo{
-			addr:                 addr,
-			dropForRateLimiting:  s.DropForRateLimiting,
-			dropForLoadBalancing: s.DropForLoadBalancing,
-		})
-		addrs = append(addrs, addr)
 	}
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if b.done || seq < b.seq {
-		return
-	}
-	if len(sl) > 0 {
-		// reset b.next to 0 when replacing the server list.
-		b.next = 0
-		b.addrs = sl
-		b.addrCh <- addrs
-	}
-	return
-}
 
-func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
-	ticker := time.NewTicker(interval)
-	defer ticker.Stop()
-	for {
-		select {
-		case <-ticker.C:
-		case <-done:
-			return
-		}
-		b.mu.Lock()
-		stats := b.clientStats
-		b.clientStats = lbmpb.ClientStats{} // Clear the stats.
-		b.mu.Unlock()
-		t := time.Now()
-		stats.Timestamp = &lbmpb.Timestamp{
-			Seconds: t.Unix(),
-			Nanos:   int32(t.Nanosecond()),
-		}
-		if err := s.Send(&lbmpb.LoadBalanceRequest{
-			LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{
-				ClientStats: &stats,
-			},
-		}); err != nil {
-			grpclog.Errorf("grpclb: failed to send load report: %v", err)
+	if len(lb.fullServerList) <= 0 {
+		if len(readySCs) <= 0 {
+			lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
 			return
 		}
-	}
-}
-
-func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	stream, err := lbc.BalanceLoad(ctx)
-	if err != nil {
-		grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+		lb.picker = &rrPicker{subConns: readySCs}
 		return
 	}
-	b.mu.Lock()
-	if b.done {
-		b.mu.Unlock()
-		return
-	}
-	b.mu.Unlock()
-	initReq := &lbmpb.LoadBalanceRequest{
-		LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{
-			InitialRequest: &lbmpb.InitialLoadBalanceRequest{
-				Name: b.target,
-			},
-		},
+	lb.picker = &lbPicker{
+		serverList: lb.fullServerList,
+		subConns:   readySCs,
+		stats:      lb.clientStats,
 	}
-	if err := stream.Send(initReq); err != nil {
-		grpclog.Errorf("grpclb: failed to send init request: %v", err)
-		// TODO: backoff on retry?
-		return true
-	}
-	reply, err := stream.Recv()
-	if err != nil {
-		grpclog.Errorf("grpclb: failed to recv init response: %v", err)
-		// TODO: backoff on retry?
-		return true
-	}
-	initResp := reply.GetInitialResponse()
-	if initResp == nil {
-		grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
-		return
-	}
-	// TODO: Support delegation.
-	if initResp.LoadBalancerDelegate != "" {
-		// delegation
-		grpclog.Errorf("TODO: Delegation is not supported yet.")
-		return
-	}
-	streamDone := make(chan struct{})
-	defer close(streamDone)
-	b.mu.Lock()
-	b.clientStats = lbmpb.ClientStats{} // Clear client stats.
-	b.mu.Unlock()
-	if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
-		go b.sendLoadReport(stream, d, streamDone)
-	}
-	// Retrieve the server list.
-	for {
-		reply, err := stream.Recv()
-		if err != nil {
-			grpclog.Errorf("grpclb: failed to recv server list: %v", err)
-			break
-		}
-		b.mu.Lock()
-		if b.done || seq < b.seq {
-			b.mu.Unlock()
-			return
-		}
-		b.seq++ // tick when receiving a new list of servers.
-		seq = b.seq
-		b.mu.Unlock()
-		if serverList := reply.GetServerList(); serverList != nil {
-			b.processServerList(serverList, seq)
-		}
-	}
-	return true
+	return
 }
 
-func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
-	b.rand = rand.New(rand.NewSource(time.Now().Unix()))
-	// TODO: Fall back to the basic direct connection if there is no name resolver.
-	if b.r == nil {
-		return errors.New("there is no name resolver installed")
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+	lb.mu.Lock()
+	defer lb.mu.Unlock()
+
+	oldS, ok := lb.scStates[sc]
+	if !ok {
+		grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		return
 	}
-	b.target = target
-	b.mu.Lock()
-	if b.done {
-		b.mu.Unlock()
-		return ErrClientConnClosing
+	lb.scStates[sc] = s
+	switch s {
+	case connectivity.Idle:
+		sc.Connect()
+	case connectivity.Shutdown:
+		// When an address was removed by the resolver, the balancer called
+		// RemoveSubConn but kept the sc's state in scStates. Remove the state
+		// for this sc here.
+		delete(lb.scStates, sc)
 	}
-	b.addrCh = make(chan []Address)
-	w, err := b.r.Resolve(target)
-	if err != nil {
-		b.mu.Unlock()
-		grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
-		return err
-	}
-	b.w = w
-	b.mu.Unlock()
-	balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
-	// Spawn a goroutine to monitor the name resolution of remote load balancer.
-	go func() {
-		for {
-			if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
-				grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
-				close(balancerAddrsCh)
-				return
-			}
-		}
-	}()
-	// Spawn a goroutine to talk to the remote load balancer.
-	go func() {
-		var (
-			cc *ClientConn
-			// ccError is closed when there is an error in the current cc.
-			// A new rb should be picked from rbs and connected.
-			ccError chan struct{}
-			rb      *remoteBalancerInfo
-			rbs     []remoteBalancerInfo
-			rbIdx   int
-		)
-
-		defer func() {
-			if ccError != nil {
-				select {
-				case <-ccError:
-				default:
-					close(ccError)
-				}
-			}
-			if cc != nil {
-				cc.Close()
-			}
-		}()
-
-		for {
-			var ok bool
-			select {
-			case rbs, ok = <-balancerAddrsCh:
-				if !ok {
-					return
-				}
-				foundIdx := -1
-				if rb != nil {
-					for i, trb := range rbs {
-						if trb == *rb {
-							foundIdx = i
-							break
-						}
-					}
-				}
-				if foundIdx >= 0 {
-					if foundIdx >= 1 {
-						// Move the address in use to the beginning of the list.
-						b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
-						rbIdx = 0
-					}
-					continue // If found, don't dial new cc.
-				} else if len(rbs) > 0 {
-					// Pick a random one from the list, instead of always using the first one.
-					if l := len(rbs); l > 1 && rb != nil {
-						tmpIdx := b.rand.Intn(l - 1)
-						b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
-					}
-					rbIdx = 0
-					rb = &rbs[0]
-				} else {
-					// foundIdx < 0 && len(rbs) <= 0.
-					rb = nil
-				}
-			case <-ccError:
-				ccError = nil
-				if rbIdx < len(rbs)-1 {
-					rbIdx++
-					rb = &rbs[rbIdx]
-				} else {
-					rb = nil
-				}
-			}
-
-			if rb == nil {
-				continue
-			}
 
-			if cc != nil {
-				cc.Close()
-			}
-			// Talk to the remote load balancer to get the server list.
-			var (
-				err   error
-				dopts []DialOption
-			)
-			if creds := config.DialCreds; creds != nil {
-				if rb.name != "" {
-					if err := creds.OverrideServerName(rb.name); err != nil {
-						grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
-						continue
-					}
-				}
-				dopts = append(dopts, WithTransportCredentials(creds))
-			} else {
-				dopts = append(dopts, WithInsecure())
-			}
-			if dialer := config.Dialer; dialer != nil {
-				// WithDialer takes a different type of function, so we instead use a special DialOption here.
-				dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
-			}
-			dopts = append(dopts, WithBlock())
-			ccError = make(chan struct{})
-			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-			cc, err = DialContext(ctx, rb.addr, dopts...)
-			cancel()
-			if err != nil {
-				grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
-				close(ccError)
-				continue
-			}
-			b.mu.Lock()
-			b.seq++ // tick when getting a new balancer address
-			seq := b.seq
-			b.next = 0
-			b.mu.Unlock()
-			go func(cc *ClientConn, ccError chan struct{}) {
-				lbc := &loadBalancerClient{cc}
-				b.callRemoteBalancer(lbc, seq)
-				cc.Close()
-				select {
-				case <-ccError:
-				default:
-					close(ccError)
-				}
-			}(cc, ccError)
-		}
-	}()
-	return nil
-}
+	oldAggrState := lb.state
+	lb.state = lb.csEvltr.recordTransition(oldS, s)
 
-func (b *grpclbBalancer) down(addr Address, err error) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	for _, a := range b.addrs {
-		if addr == a.addr {
-			a.connected = false
-			break
-		}
+	// Regenerate picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
+		(lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+		lb.regeneratePicker()
 	}
+
+	lb.cc.UpdateBalancerState(lb.state, lb.picker)
+	return
 }
 
-func (b *grpclbBalancer) Up(addr Address) func(error) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if b.done {
-		return nil
-	}
-	var cnt int
-	for _, a := range b.addrs {
-		if a.addr == addr {
-			if a.connected {
-				return nil
-			}
-			a.connected = true
-		}
-		if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
-			cnt++
-		}
-	}
-	// addr is the only one which is connected. Notify the Get() callers who are blocking.
-	if cnt == 1 && b.waitCh != nil {
-		close(b.waitCh)
-		b.waitCh = nil
+// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
+// resolved backends (backends received from resolver, not from remote balancer)
+// if no connection to remote balancers was successful.
+func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
+	timer := time.NewTimer(fallbackTimeout)
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+	case <-lb.doneCh:
+		return
 	}
-	return func(err error) {
-		b.down(addr, err)
+	lb.mu.Lock()
+	if lb.serverListReceived {
+		lb.mu.Unlock()
+		return
 	}
+	lb.fallbackTimerExpired = true
+	lb.refreshSubConns(lb.resolvedBackendAddrs)
+	lb.mu.Unlock()
 }
 
-func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-	var ch chan struct{}
-	b.mu.Lock()
-	if b.done {
-		b.mu.Unlock()
-		err = ErrClientConnClosing
+// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
+// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
+// connections.
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+	if len(addrs) <= 0 {
 		return
 	}
-	seq := b.seq
 
-	defer func() {
-		if err != nil {
-			return
-		}
-		put = func() {
-			s, ok := rpcInfoFromContext(ctx)
-			if !ok {
-				return
-			}
-			b.mu.Lock()
-			defer b.mu.Unlock()
-			if b.done || seq < b.seq {
-				return
-			}
-			b.clientStats.NumCallsFinished++
-			if !s.bytesSent {
-				b.clientStats.NumCallsFinishedWithClientFailedToSend++
-			} else if s.bytesReceived {
-				b.clientStats.NumCallsFinishedKnownReceived++
-			}
+	var remoteBalancerAddrs, backendAddrs []resolver.Address
+	for _, a := range addrs {
+		if a.Type == resolver.GRPCLB {
+			remoteBalancerAddrs = append(remoteBalancerAddrs, a)
+		} else {
+			backendAddrs = append(backendAddrs, a)
 		}
-	}()
-
-	b.clientStats.NumCallsStarted++
-	if len(b.addrs) > 0 {
-		if b.next >= len(b.addrs) {
-			b.next = 0
-		}
-		next := b.next
-		for {
-			a := b.addrs[next]
-			next = (next + 1) % len(b.addrs)
-			if a.connected {
-				if !a.dropForRateLimiting && !a.dropForLoadBalancing {
-					addr = a.addr
-					b.next = next
-					b.mu.Unlock()
-					return
-				}
-				if !opts.BlockingWait {
-					b.next = next
-					if a.dropForLoadBalancing {
-						b.clientStats.NumCallsFinished++
-						b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
-					} else if a.dropForRateLimiting {
-						b.clientStats.NumCallsFinished++
-						b.clientStats.NumCallsFinishedWithDropForRateLimiting++
-					}
-					b.mu.Unlock()
-					err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
-					return
-				}
-			}
-			if next == b.next {
-				// Has iterated all the possible address but none is connected.
-				break
-			}
-		}
-	}
-	if !opts.BlockingWait {
-		b.clientStats.NumCallsFinished++
-		b.clientStats.NumCallsFinishedWithClientFailedToSend++
-		b.mu.Unlock()
-		err = Errorf(codes.Unavailable, "there is no address available")
-		return
 	}
-	// Wait on b.waitCh for non-failfast RPCs.
-	if b.waitCh == nil {
-		ch = make(chan struct{})
-		b.waitCh = ch
-	} else {
-		ch = b.waitCh
-	}
-	b.mu.Unlock()
-	for {
-		select {
-		case <-ctx.Done():
-			b.mu.Lock()
-			b.clientStats.NumCallsFinished++
-			b.clientStats.NumCallsFinishedWithClientFailedToSend++
-			b.mu.Unlock()
-			err = ctx.Err()
-			return
-		case <-ch:
-			b.mu.Lock()
-			if b.done {
-				b.clientStats.NumCallsFinished++
-				b.clientStats.NumCallsFinishedWithClientFailedToSend++
-				b.mu.Unlock()
-				err = ErrClientConnClosing
-				return
-			}
 
-			if len(b.addrs) > 0 {
-				if b.next >= len(b.addrs) {
-					b.next = 0
-				}
-				next := b.next
-				for {
-					a := b.addrs[next]
-					next = (next + 1) % len(b.addrs)
-					if a.connected {
-						if !a.dropForRateLimiting && !a.dropForLoadBalancing {
-							addr = a.addr
-							b.next = next
-							b.mu.Unlock()
-							return
-						}
-						if !opts.BlockingWait {
-							b.next = next
-							if a.dropForLoadBalancing {
-								b.clientStats.NumCallsFinished++
-								b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
-							} else if a.dropForRateLimiting {
-								b.clientStats.NumCallsFinished++
-								b.clientStats.NumCallsFinishedWithDropForRateLimiting++
-							}
-							b.mu.Unlock()
-							err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
-							return
-						}
-					}
-					if next == b.next {
-						// Has iterated all the possible address but none is connected.
-						break
-					}
-				}
-			}
-			// The newly added addr got removed by Down() again.
-			if b.waitCh == nil {
-				ch = make(chan struct{})
-				b.waitCh = ch
-			} else {
-				ch = b.waitCh
-			}
-			b.mu.Unlock()
+	if lb.ccRemoteLB == nil {
+		if len(remoteBalancerAddrs) <= 0 {
+			grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
+			return
 		}
+		// First time receiving resolved addresses, create a cc to remote
+		// balancers.
+		lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
+		// Start the fallback goroutine.
+		go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
 	}
-}
 
-func (b *grpclbBalancer) Notify() <-chan []Address {
-	return b.addrCh
+	// cc to remote balancers uses lb.manualResolver. Send the updated remote
+	// balancer addresses to it through manualResolver.
+	lb.manualResolver.NewAddress(remoteBalancerAddrs)
+
+	lb.mu.Lock()
+	lb.resolvedBackendAddrs = backendAddrs
+	// If serverListReceived is true, connection to remote balancer was
+	// successful and there's no need to do fallback anymore.
+	// If fallbackTimerExpired is false, fallback hasn't happened yet.
+	if !lb.serverListReceived && lb.fallbackTimerExpired {
+		// This means we received a new list of resolved backends, and we are
+		// still in fallback mode. Need to update the list of backends we are
+		// using to the new list of backends.
+		lb.refreshSubConns(lb.resolvedBackendAddrs)
+	}
+	lb.mu.Unlock()
 }
 
-func (b *grpclbBalancer) Close() error {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if b.done {
-		return errBalancerClosed
-	}
-	b.done = true
-	if b.waitCh != nil {
-		close(b.waitCh)
-	}
-	if b.addrCh != nil {
-		close(b.addrCh)
+func (lb *lbBalancer) Close() {
+	select {
+	case <-lb.doneCh:
+		return
+	default:
 	}
-	if b.w != nil {
-		b.w.Close()
+	close(lb.doneCh)
+	if lb.ccRemoteLB != nil {
+		lb.ccRemoteLB.Close()
 	}
-	return nil
 }
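
As the NewLBBuilderWithFallbackTimeout doc above notes, a non-default fallback
timeout is wired in by registering a replacement builder; the balancer
registry keeps the last builder registered under a given name, and imported
packages' init() functions run before the importer's. A sketch, with the 20s
value purely illustrative:

    package main

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/balancer"
    )

    func init() {
        // Replaces the default grpclb builder registered by the grpc
        // package's own init(); last registration under a name wins.
        balancer.Register(grpc.NewLBBuilderWithFallbackTimeout(20 * time.Second))
    }

    func main() {}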

+ 159 - 0
vendor/google.golang.org/grpc/grpclb_picker.go

@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+	"google.golang.org/grpc/status"
+)
+
+type rpcStats struct {
+	NumCallsStarted                          int64
+	NumCallsFinished                         int64
+	NumCallsFinishedWithDropForRateLimiting  int64
+	NumCallsFinishedWithDropForLoadBalancing int64
+	NumCallsFinishedWithClientFailedToSend   int64
+	NumCallsFinishedKnownReceived            int64
+}
+
+// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
+func (s *rpcStats) toClientStats() *lbpb.ClientStats {
+	stats := &lbpb.ClientStats{
+		NumCallsStarted:                          atomic.SwapInt64(&s.NumCallsStarted, 0),
+		NumCallsFinished:                         atomic.SwapInt64(&s.NumCallsFinished, 0),
+		NumCallsFinishedWithDropForRateLimiting:  atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
+		NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
+		NumCallsFinishedWithClientFailedToSend:   atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
+		NumCallsFinishedKnownReceived:            atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
+	}
+	return stats
+}
+
+func (s *rpcStats) dropForRateLimiting() {
+	atomic.AddInt64(&s.NumCallsStarted, 1)
+	atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
+	atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) dropForLoadBalancing() {
+	atomic.AddInt64(&s.NumCallsStarted, 1)
+	atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
+	atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) failedToSend() {
+	atomic.AddInt64(&s.NumCallsStarted, 1)
+	atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
+	atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) knownReceived() {
+	atomic.AddInt64(&s.NumCallsStarted, 1)
+	atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
+	atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+type errPicker struct {
+	// Pick always returns this err.
+	err error
+}
+
+func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	return nil, nil, p.err
+}
+
+// rrPicker does roundrobin on subConns. It's typically used when there's no
+// response from remote balancer, and grpclb falls back to the resolved
+// backends.
+//
+// It is guaranteed that len(subConns) > 0.
+type rrPicker struct {
+	mu           sync.Mutex
+	subConns     []balancer.SubConn // The subConns that were READY when taking the snapshot.
+	subConnsNext int
+}
+
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	sc := p.subConns[p.subConnsNext]
+	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+	return sc, nil, nil
+}
+
+// lbPicker does two layers of picks:
+//
+// First layer: roundrobin on all servers in serverList, including drops and backends.
+// - If it picks a drop, the RPC will fail as being dropped.
+// - If it picks a backend, do a second layer pick to pick the real backend.
+//
+// Second layer: roundrobin on all READY backends.
+//
+// It's guaranteed that len(serverList) > 0.
+type lbPicker struct {
+	mu             sync.Mutex
+	serverList     []*lbpb.Server
+	serverListNext int
+	subConns       []balancer.SubConn // The subConns that were READY when taking the snapshot.
+	subConnsNext   int
+
+	stats *rpcStats
+}
+
+func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	// Layer one roundrobin on serverList.
+	s := p.serverList[p.serverListNext]
+	p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
+
+	// If it's a drop, return an error and fail the RPC.
+	if s.DropForRateLimiting {
+		p.stats.dropForRateLimiting()
+		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
+	}
+	if s.DropForLoadBalancing {
+		p.stats.dropForLoadBalancing()
+		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
+	}
+
+	// If not a drop but there's no ready subConns.
+	if len(p.subConns) <= 0 {
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+
+	// Return the next ready subConn in the list, also collect rpc stats.
+	sc := p.subConns[p.subConnsNext]
+	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+	done := func(info balancer.DoneInfo) {
+		if !info.BytesSent {
+			p.stats.failedToSend()
+		} else if info.BytesReceived {
+			p.stats.knownReceived()
+		}
+	}
+	return sc, done, nil
+}
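
The two-layer pick can be read as two independent round-robin counters: one
over the full server list (drops included), one over READY backends only. A
standalone, single-goroutine sketch of that control flow, detached from the
balancer types (no locking, unlike lbPicker):

    package main

    import (
        "errors"
        "fmt"
    )

    type server struct {
        name string
        drop bool // a drop entry fails the pick outright
    }

    var errDropped = errors.New("request dropped")

    type twoLayerPicker struct {
        servers  []server // layer one: full list, drops included
        backends []string // layer two: READY backends only
        sNext    int
        bNext    int
    }

    func (p *twoLayerPicker) pick() (string, error) {
        s := p.servers[p.sNext]
        p.sNext = (p.sNext + 1) % len(p.servers)
        if s.drop {
            return "", errDropped
        }
        b := p.backends[p.bNext]
        p.bNext = (p.bNext + 1) % len(p.backends)
        return b, nil
    }

    func main() {
        p := &twoLayerPicker{
            servers:  []server{{name: "a"}, {name: "x", drop: true}, {name: "b"}},
            backends: []string{"a:443", "b:443"},
        }
        for i := 0; i < 4; i++ {
            fmt.Println(p.pick())
        }
    }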

+ 254 - 0
vendor/google.golang.org/grpc/grpclb_remote_balancer.go

@@ -0,0 +1,254 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+// processServerList updates the balancer's internal state, creates/removes
+// SubConns, and regenerates the picker using the received serverList.
+func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
+	grpclog.Infof("lbBalancer: processing server list: %+v", l)
+	lb.mu.Lock()
+	defer lb.mu.Unlock()
+
+	// Set serverListReceived to true so fallback will not take effect if it has
+	// not hit timeout.
+	lb.serverListReceived = true
+
+	// If the new server list == old server list, do nothing.
+	if reflect.DeepEqual(lb.fullServerList, l.Servers) {
+		grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
+		return
+	}
+	lb.fullServerList = l.Servers
+
+	var backendAddrs []resolver.Address
+	for _, s := range l.Servers {
+		if s.DropForLoadBalancing || s.DropForRateLimiting {
+			continue
+		}
+
+		md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
+		ip := net.IP(s.IpAddress)
+		ipStr := ip.String()
+		if ip.To4() == nil {
+			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
+			// net.SplitHostPort() will return too many colons error.
+			ipStr = fmt.Sprintf("[%s]", ipStr)
+		}
+		addr := resolver.Address{
+			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
+			Metadata: &md,
+		}
+
+		backendAddrs = append(backendAddrs, addr)
+	}
+
+	// Call refreshSubConns to create/remove SubConns.
+	backendsUpdated := lb.refreshSubConns(backendAddrs)
+	// If no backend was updated, no SubConn will be created/removed. But since
+	// the full serverList was different, there might be updates in drops or
+	// pick weights (different number of duplicates). We need to update the
+	// picker with the full list.
+	if !backendsUpdated {
+		lb.regeneratePicker()
+		lb.cc.UpdateBalancerState(lb.state, lb.picker)
+	}
+}
+
+// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
+// indicating whether the backendAddrs are different from the cached
+// backendAddrs (whether any SubConn was newed/removed).
+// Caller must hold lb.mu.
+func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
+	lb.backendAddrs = nil
+	var backendsUpdated bool
+	// addrsSet is the set converted from backendAddrs; it's used for quick
+	// lookup of an address.
+	addrsSet := make(map[resolver.Address]struct{})
+	// Create new SubConns.
+	for _, addr := range backendAddrs {
+		addrWithoutMD := addr
+		addrWithoutMD.Metadata = nil
+		addrsSet[addrWithoutMD] = struct{}{}
+		lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
+
+		if _, ok := lb.subConns[addrWithoutMD]; !ok {
+			backendsUpdated = true
+
+			// Use addrWithMD to create the SubConn.
+			sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+			if err != nil {
+				grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
+				continue
+			}
+			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
+			lb.scStates[sc] = connectivity.Idle
+			sc.Connect()
+		}
+	}
+
+	for a, sc := range lb.subConns {
+		// a was removed by resolver.
+		if _, ok := addrsSet[a]; !ok {
+			backendsUpdated = true
+
+			lb.cc.RemoveSubConn(sc)
+			delete(lb.subConns, a)
+			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+			// The entry will be deleted in HandleSubConnStateChange.
+		}
+	}
+
+	return backendsUpdated
+}
+
+func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
+	for {
+		reply, err := s.Recv()
+		if err != nil {
+			return fmt.Errorf("grpclb: failed to recv server list: %v", err)
+		}
+		if serverList := reply.GetServerList(); serverList != nil {
+			lb.processServerList(serverList)
+		}
+	}
+}
+
+func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+		case <-s.Context().Done():
+			return
+		}
+		stats := lb.clientStats.toClientStats()
+		t := time.Now()
+		stats.Timestamp = &lbpb.Timestamp{
+			Seconds: t.Unix(),
+			Nanos:   int32(t.Nanosecond()),
+		}
+		if err := s.Send(&lbpb.LoadBalanceRequest{
+			LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
+				ClientStats: stats,
+			},
+		}); err != nil {
+			return
+		}
+	}
+}
+
+func (lb *lbBalancer) callRemoteBalancer() error {
+	lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
+	if err != nil {
+		return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+	}
+
+	// grpclb handshake on the stream.
+	initReq := &lbpb.LoadBalanceRequest{
+		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
+			InitialRequest: &lbpb.InitialLoadBalanceRequest{
+				Name: lb.target,
+			},
+		},
+	}
+	if err := stream.Send(initReq); err != nil {
+		return fmt.Errorf("grpclb: failed to send init request: %v", err)
+	}
+	reply, err := stream.Recv()
+	if err != nil {
+		return fmt.Errorf("grpclb: failed to recv init response: %v", err)
+	}
+	initResp := reply.GetInitialResponse()
+	if initResp == nil {
+		return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
+	}
+	if initResp.LoadBalancerDelegate != "" {
+		return fmt.Errorf("grpclb: Delegation is not supported")
+	}
+
+	go func() {
+		if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
+			lb.sendLoadReport(stream, d)
+		}
+	}()
+	return lb.readServerList(stream)
+}
+
+func (lb *lbBalancer) watchRemoteBalancer() {
+	for {
+		err := lb.callRemoteBalancer()
+		select {
+		case <-lb.doneCh:
+			return
+		default:
+			if err != nil {
+				grpclog.Error(err)
+			}
+		}
+	}
+}
+
+func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
+	var dopts []DialOption
+	if creds := lb.opt.DialCreds; creds != nil {
+		if err := creds.OverrideServerName(remoteLBName); err == nil {
+			dopts = append(dopts, WithTransportCredentials(creds))
+		} else {
+			grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
+			dopts = append(dopts, WithInsecure())
+		}
+	} else {
+		dopts = append(dopts, WithInsecure())
+	}
+	if lb.opt.Dialer != nil {
+		// WithDialer takes a different type of function, so we instead use a
+		// special DialOption here.
+		dopts = append(dopts, withContextDialer(lb.opt.Dialer))
+	}
+	// Explicitly set pickfirst as the balancer.
+	dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
+	dopts = append(dopts, withResolverBuilder(lb.manualResolver))
+	// Dial using manualResolver.Scheme, which is a random scheme generated
+	// when grpclb is initialized. The target name is not important.
+	cc, err := Dial("grpclb:///grpclb.server", dopts...)
+	if err != nil {
+		grpclog.Fatalf("failed to dial: %v", err)
+	}
+	lb.ccRemoteLB = cc
+	go lb.watchRemoteBalancer()
+}

+ 90 - 0
vendor/google.golang.org/grpc/grpclb_util.go

@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// The parent ClientConn should re-resolve when grpclb loses connection to the
+// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
+// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
+// ResolveNow, and eventually results in re-resolve happening in parent
+// ClientConn's resolver (DNS for example).
+//
+//                          parent
+//                          ClientConn
+//  +-----------------------------------------------------------------+
+//  |             parent          +---------------------------------+ |
+//  | DNS         ClientConn      |  grpclb                         | |
+//  | resolver    balancerWrapper |                                 | |
+//  | +              +            |    grpclb          grpclb       | |
+//  | |              |            |    ManualResolver  ClientConn   | |
+//  | |              |            |     +              +            | |
+//  | |              |            |     |              | Transient  | |
+//  | |              |            |     |              | Failure    | |
+//  | |              |            |     |  <---------  |            | |
+//  | |              | <--------------- |  ResolveNow  |            | |
+//  | |  <---------  | ResolveNow |     |              |            | |
+//  | |  ResolveNow  |            |     |              |            | |
+//  | |              |            |     |              |            | |
+//  | +              +            |     +              +            | |
+//  |                             +---------------------------------+ |
+//  +-----------------------------------------------------------------+
+
+// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
+// resolver with a special ResolveNow() function.
+//
+// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
+// so when the grpclb client loses contact with remote balancers, the parent
+// ClientConn's resolver will re-resolve.
+type lbManualResolver struct {
+	scheme string
+	ccr    resolver.ClientConn
+
+	ccb balancer.ClientConn
+}
+
+func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
+	r.ccr = cc
+	return r, nil
+}
+
+func (r *lbManualResolver) Scheme() string {
+	return r.scheme
+}
+
+// ResolveNow calls resolveNow on the parent ClientConn.
+func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
+	r.ccb.ResolveNow(o)
+}
+
+// Close is a noop for Resolver.
+func (*lbManualResolver) Close() {}
+
+// NewAddress calls cc.NewAddress.
+func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
+	r.ccr.NewAddress(addrs)
+}
+
+// NewServiceConfig calls cc.NewServiceConfig.
+func (r *lbManualResolver) NewServiceConfig(sc string) {
+	r.ccr.NewServiceConfig(sc)
+}

+ 2 - 2
vendor/google.golang.org/grpc/health/health.go

@@ -26,9 +26,9 @@ import (
 	"sync"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
 )
 
 // Server implements `service Health`.
@@ -60,7 +60,7 @@ func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*h
 			Status: status,
 		}, nil
 	}
-	return nil, grpc.Errorf(codes.NotFound, "unknown service")
+	return nil, status.Error(codes.NotFound, "unknown service")
 }
 
 // SetServingStatus is called when need to reset the serving status of a service

+ 3 - 1
vendor/google.golang.org/grpc/interceptor.go

@@ -48,7 +48,9 @@ type UnaryServerInfo struct {
 }
 
 // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC.
+// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
+// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
+// the status message of the RPC.
 type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
 
 // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
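
Following the UnaryHandler note above, handlers should return status-package
errors so the client sees a meaningful code. A sketch with hypothetical
request/reply types standing in for protoc-generated ones:

    package main

    import (
        "golang.org/x/net/context"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // Hypothetical types standing in for generated messages.
    type GetThingRequest struct{ Id string }
    type GetThingReply struct{ Thing string }

    type server struct{ things map[string]string }

    // GetThing returns a status error so the client observes
    // codes.NotFound; a plain error would surface as codes.Unknown with
    // err.Error() as the message, as the doc above warns.
    func (s *server) GetThing(ctx context.Context, req *GetThingRequest) (*GetThingReply, error) {
        thing, ok := s.things[req.Id]
        if !ok {
            return nil, status.Errorf(codes.NotFound, "thing %q not found", req.Id)
        }
        return &GetThingReply{Thing: thing}, nil
    }

    func main() {}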

+ 0 - 7
vendor/google.golang.org/grpc/internal/internal.go

@@ -19,13 +19,6 @@
 // the godoc of the top-level grpc package.
 package internal
 
-// TestingCloseConns closes all existing transports but keeps
-// grpcServer.lis accepting new connections.
-//
-// The provided grpcServer must be of type *grpc.Server. It is untyped
-// for circular dependency reasons.
-var TestingCloseConns func(grpcServer interface{})
-
 // TestingUseHandlerImpl enables the http.Handler-based server implementation.
 // It must be called before Serve and requires TLS credentials.
 //

+ 54 - 7
vendor/google.golang.org/grpc/metadata/metadata.go

@@ -17,7 +17,8 @@
  */
 
 // Package metadata defines the structure of the metadata supported by the gRPC library.
-// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
 package metadata // import "google.golang.org/grpc/metadata"
 
 import (
@@ -115,9 +116,26 @@ func NewIncomingContext(ctx context.Context, md MD) context.Context {
 	return context.WithValue(ctx, mdIncomingKey{}, md)
 }
 
-// NewOutgoingContext creates a new context with outgoing md attached.
+// NewOutgoingContext creates a new context with outgoing md attached. If used
+// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
+// overwrite any previously-appended metadata.
 func NewOutgoingContext(ctx context.Context, md MD) context.Context {
-	return context.WithValue(ctx, mdOutgoingKey{}, md)
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
+}
+
+// AppendToOutgoingContext returns a new context with the provided kv merged
+// with any existing metadata in the context. Please refer to the
+// documentation of Pairs for a description of kv.
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
+	added := make([][]string, len(md.added)+1)
+	copy(added, md.added)
+	added[len(added)-1] = make([]string, len(kv))
+	copy(added[len(added)-1], kv)
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
 }
 
 // FromIncomingContext returns the incoming metadata in ctx if it exists.  The
@@ -128,10 +146,39 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
 	return
 }
 
+// FromOutgoingContextRaw returns the un-merged, intermediary contents
+// of rawMD. Remember to perform strings.ToLower on the keys. The returned
+// MD should not be modified. Writing to it may cause races. Modification
+// should be made to copies of the returned MD.
+//
+// This is intended for gRPC-internal use ONLY.
+func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, nil, false
+	}
+
+	return raw.md, raw.added, true
+}
+
 // FromOutgoingContext returns the outgoing metadata in ctx if it exists.  The
 // returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to the copies of the returned MD.
-func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
-	md, ok = ctx.Value(mdOutgoingKey{}).(MD)
-	return
+// Modification should be made to copies of the returned MD.
+func FromOutgoingContext(ctx context.Context) (MD, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, false
+	}
+
+	mds := make([]MD, 0, len(raw.added)+1)
+	mds = append(mds, raw.md)
+	for _, vv := range raw.added {
+		mds = append(mds, Pairs(vv...))
+	}
+	return Join(mds...), ok
+}
+
+type rawMD struct {
+	md    MD
+	added [][]string
 }
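
The practical difference between the two entry points: AppendToOutgoingContext
accumulates pairs into rawMD.added and FromOutgoingContext merges them lazily,
while NewOutgoingContext replaces everything appended so far. A minimal
sketch:

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
        "google.golang.org/grpc/metadata"
    )

    func main() {
        ctx := context.Background()
        ctx = metadata.AppendToOutgoingContext(ctx, "k1", "v1")
        ctx = metadata.AppendToOutgoingContext(ctx, "k2", "v2") // accumulates

        md, _ := metadata.FromOutgoingContext(ctx) // merged view
        fmt.Println(md) // map[k1:[v1] k2:[v2]]

        // NewOutgoingContext overwrites anything appended before it.
        ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("k3", "v3"))
        md, _ = metadata.FromOutgoingContext(ctx)
        fmt.Println(md) // map[k3:[v3]]
    }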

+ 1 - 1
vendor/google.golang.org/grpc/naming/go17.go

@@ -1,4 +1,4 @@
-// +build go1.7, !go1.8
+// +build go1.6,!go1.8
 
 /*
  *

+ 20 - 3
vendor/google.golang.org/grpc/picker_wrapper.go

@@ -36,6 +36,10 @@ type pickerWrapper struct {
 	done       bool
 	blockingCh chan struct{}
 	picker     balancer.Picker
+
+	// The latest connection error that occurred.
+	connErrMu sync.Mutex
+	connErr   error
 }
 
 func newPickerWrapper() *pickerWrapper {
@@ -43,6 +47,19 @@ func newPickerWrapper() *pickerWrapper {
 	return bp
 }
 
+func (bp *pickerWrapper) updateConnectionError(err error) {
+	bp.connErrMu.Lock()
+	bp.connErr = err
+	bp.connErrMu.Unlock()
+}
+
+func (bp *pickerWrapper) connectionError() error {
+	bp.connErrMu.Lock()
+	err := bp.connErr
+	bp.connErrMu.Unlock()
+	return err
+}
+
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Lock()
@@ -97,7 +114,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 		p = bp.picker
 		bp.mu.Unlock()
 
-		subConn, put, err := p.Pick(ctx, opts)
+		subConn, done, err := p.Pick(ctx, opts)
 
 		if err != nil {
 			switch err {
@@ -107,7 +124,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 				if !failfast {
 					continue
 				}
-				return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
+				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
 			default:
 				// err is some other error.
 				return nil, nil, toRPCErr(err)
@@ -120,7 +137,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
-			return t, put, nil
+			return t, done, nil
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
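
The practical effect for callers is that a fail-fast RPC which finds no ready connection now carries the most recent transport error inside its Unavailable status. A sketch of inspecting such an error (the helper itself is hypothetical):

package example

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// logFailFast reports the enriched message produced by the picker wrapper; per
// the diff above, the status message is suffixed with
// ", latest connection error: <err>".
func logFailFast(err error) {
	if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable {
		log.Printf("fail-fast RPC failed: %s", st.Message())
	}
}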

+ 15 - 2
vendor/google.golang.org/grpc/pickfirst.go

@@ -26,6 +26,9 @@ import (
 	"google.golang.org/grpc/resolver"
 )
 
+// PickFirstBalancerName is the name of the pick_first balancer.
+const PickFirstBalancerName = "pick_first"
+
 func newPickfirstBuilder() balancer.Builder {
 	return &pickfirstBuilder{}
 }
@@ -37,7 +40,7 @@ func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions
 }
 
 func (*pickfirstBuilder) Name() string {
-	return "pickfirst"
+	return PickFirstBalancerName
 }
 
 type pickfirstBalancer struct {
@@ -57,14 +60,20 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
 			return
 		}
 		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
+		b.sc.Connect()
 	} else {
 		b.sc.UpdateAddresses(addrs)
+		b.sc.Connect()
 	}
 }
 
 func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
 	grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
-	if b.sc != sc || s == connectivity.Shutdown {
+	if b.sc != sc {
+		grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+		return
+	}
+	if s == connectivity.Shutdown {
 		b.sc = nil
 		return
 	}
@@ -93,3 +102,7 @@ func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.
 	}
 	return p.sc, nil, nil
 }
+
+func init() {
+	balancer.Register(newPickfirstBuilder())
+}
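
Since the builder is now self-registering under an exported constant, callers and tests can retrieve it from the global registry. A small sketch, assuming only the stock grpc and balancer packages:

package example

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

func checkPickFirst() {
	// balancer.Get returns the Builder registered in pickfirst's init().
	if b := balancer.Get(grpc.PickFirstBalancerName); b != nil {
		fmt.Println("found builder:", b.Name()) // pick_first
	}
}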

+ 1 - 2
vendor/google.golang.org/grpc/proxy.go

@@ -82,8 +82,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
 		Header: map[string][]string{"User-Agent": {grpcUA}},
 	})
 
-	req = req.WithContext(ctx)
-	if err := req.Write(conn); err != nil {
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
 		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
 	}
 

+ 377 - 0
vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go

@@ -0,0 +1,377 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+func init() {
+	resolver.Register(NewBuilder())
+}
+
+const (
+	defaultPort = "443"
+	defaultFreq = time.Minute * 30
+	golang      = "GO"
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
+
+var errMissingAddr = errors.New("missing address")
+
+// NewBuilder creates a dnsBuilder, which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{freq: defaultFreq}
+}
+
+type dnsBuilder struct {
+	// frequency of polling the DNS server.
+	freq time.Duration
+}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if net.ParseIP(host) != nil {
+		host, _ = formatIP(host)
+		addr := []resolver.Address{{Addr: host + ":" + port}}
+		i := &ipResolver{
+			cc: cc,
+			ip: addr,
+			rn: make(chan struct{}, 1),
+			q:  make(chan struct{}),
+		}
+		cc.NewAddress(addr)
+		go i.watcher()
+		return i, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		freq:   b.freq,
+		host:   host,
+		port:   port,
+		ctx:    ctx,
+		cancel: cancel,
+		cc:     cc,
+		t:      time.NewTimer(0),
+		rn:     make(chan struct{}, 1),
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+// ipResolver watches for the name resolution update for an IP address.
+type ipResolver struct {
+	cc resolver.ClientConn
+	ip []resolver.Address
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	q  chan struct{}
+}
+
+// ResolveNow resends the address it stores; no resolution is needed.
+func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
+	select {
+	case i.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the ipResolver.
+func (i *ipResolver) Close() {
+	close(i.q)
+}
+
+func (i *ipResolver) watcher() {
+	for {
+		select {
+		case <-i.rn:
+			i.cc.NewAddress(i.ip)
+		case <-i.q:
+			return
+		}
+	}
+}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	freq   time.Duration
+	host   string
+	port   string
+	ctx    context.Context
+	cancel context.CancelFunc
+	cc     resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	t  *time.Timer
+	// wg is used to ensure Close() returns only after the watcher() goroutine has finished.
+	// Otherwise, a data race is possible. Race example: in dns_resolver_test we
+	// replace the real lookup functions with mocked ones to facilitate testing.
+	// If Close() didn't wait for the watcher() goroutine to finish, the race
+	// detector would sometimes report that the lookups (reads of the lookup
+	// function pointers) inside watcher() race with replaceNetFunc (which
+	// writes those pointers).
+	wg sync.WaitGroup
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+	d.t.Stop()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	for {
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-d.t.C:
+		case <-d.rn:
+		}
+		result, sc := d.lookup()
+		// Next lookup should happen after an interval defined by d.freq.
+		d.t.Reset(d.freq)
+		d.cc.NewServiceConfig(string(sc))
+		d.cc.NewAddress(result)
+	}
+}
+
+func (d *dnsResolver) lookupSRV() []resolver.Address {
+	var newAddrs []resolver.Address
+	_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, s := range srvs {
+		lbAddrs, err := lookupHost(d.ctx, s.Target)
+		if err != nil {
+			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
+			continue
+		}
+		for _, a := range lbAddrs {
+			a, ok := formatIP(a)
+			if !ok {
+				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+				continue
+			}
+			addr := a + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+		}
+	}
+	return newAddrs
+}
+
+func (d *dnsResolver) lookupTXT() string {
+	ss, err := lookupTXT(d.ctx, d.host)
+	if err != nil {
+		grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
+		return ""
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	if !strings.HasPrefix(res, txtAttribute) {
+		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
+		return ""
+	}
+	return strings.TrimPrefix(res, txtAttribute)
+}
+
+func (d *dnsResolver) lookupHost() []resolver.Address {
+	var newAddrs []resolver.Address
+	addrs, err := lookupHost(d.ctx, d.host)
+	if err != nil {
+		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, a := range addrs {
+		a, ok := formatIP(a)
+		if !ok {
+			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+			continue
+		}
+		addr := a + ":" + d.port
+		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+	}
+	return newAddrs
+}
+
+func (d *dnsResolver) lookup() ([]resolver.Address, string) {
+	var newAddrs []resolver.Address
+	newAddrs = d.lookupSRV()
+	// Support fallback to non-balancer address.
+	newAddrs = append(newAddrs, d.lookupHost()...)
+	sc := d.lookupTXT()
+	return newAddrs, canaryingSC(sc)
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user-input target string and returns the formatted host and port info.
+// If target doesn't specify a port, the port is set to defaultPort.
+// If target is in IPv6 format and the host is enclosed in square brackets, the brackets
+// are stripped when setting the host.
+// examples:
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+// target: ":" returns host: "localhost", port: "443"
+func parseTarget(target string) (host, port string, err error) {
+	if target == "" {
+		return "", "", errMissingAddr
+	}
+	if ip := net.ParseIP(target); ip != nil {
+		// target is an IPv4 or IPv6 (without brackets) address
+		return target, defaultPort, nil
+	}
+	if host, port, err = net.SplitHostPort(target); err == nil {
+		// target has a port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
+		if host == "" {
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			host = "localhost"
+		}
+		if port == "" {
+			// If the port field is empty (target ends with a colon), e.g. "[::1]:", defaultPort is used.
+			port = defaultPort
+		}
+		return host, port, nil
+	}
+	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
+		// target doesn't have port
+		return host, port, nil
+	}
+	return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
+}
+
+type rawChoice struct {
+	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
+	Percentage     *int             `json:"percentage,omitempty"`
+	ClientHostName *[]string        `json:"clientHostName,omitempty"`
+	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
+}
+
+func containsString(a *[]string, b string) bool {
+	if a == nil {
+		return true
+	}
+	for _, c := range *a {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+func chosenByPercentage(a *int) bool {
+	if a == nil {
+		return true
+	}
+	s := rand.NewSource(time.Now().UnixNano())
+	r := rand.New(s)
+	if r.Intn(100)+1 > *a {
+		return false
+	}
+	return true
+}
+
+func canaryingSC(js string) string {
+	if js == "" {
+		return ""
+	}
+	var rcs []rawChoice
+	err := json.Unmarshal([]byte(js), &rcs)
+	if err != nil {
+		grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+		return ""
+	}
+	cliHostname, err := os.Hostname()
+	if err != nil {
+		grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+		return ""
+	}
+	var sc string
+	for _, c := range rcs {
+		if !containsString(c.ClientLanguage, golang) ||
+			!chosenByPercentage(c.Percentage) ||
+			!containsString(c.ClientHostName, cliHostname) ||
+			c.ServiceConfig == nil {
+			continue
+		}
+		sc = string(*c.ServiceConfig)
+		break
+	}
+	return sc
+}
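
With the builder registered under the "dns" scheme in init, clients opt in by dialing a dns target; the resolver then polls A, SRV ("grpclb"), and TXT records every defaultFreq. Per the rawChoice fields above, a service config could be delivered via a TXT record such as grpc_config=[{"clientLanguage":["GO"],"percentage":100,"serviceConfig":{...}}]. A hedged sketch of the client side (host and port are hypothetical):

package example

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/resolver/dns" // registers the "dns" scheme; grpc itself also imports this
)

func dialViaDNS() (*grpc.ClientConn, error) {
	// parseTarget applies here too: a missing port would default to 443.
	return grpc.Dial("dns:///greeter.example.com:50051", grpc.WithInsecure())
}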

+ 35 - 0
vendor/google.golang.org/grpc/resolver/dns/go17.go

@@ -0,0 +1,35 @@
+// +build go1.6,!go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import (
+	"net"
+
+	"golang.org/x/net/context"
+)
+
+var (
+	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
+	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+		return net.LookupSRV(service, proto, name)
+	}
+	lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
+)
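
Defining the lookups as package-level function variables also provides the test seam that the wg comment in dns_resolver.go refers to. A sketch of how a test file inside package dns might stub them (the canned values are illustrative, and tests must restore the originals afterwards):

package dns

import "golang.org/x/net/context"

// stubLookups swaps the lookup function variables for canned results.
func stubLookups() {
	lookupHost = func(ctx context.Context, host string) ([]string, error) {
		return []string{"10.0.0.1"}, nil // canned A record
	}
	lookupTXT = func(ctx context.Context, name string) ([]string, error) {
		return nil, nil // no TXT-based service config
	}
}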

+ 11 - 3
vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go → vendor/google.golang.org/grpc/resolver/dns/go18.go

@@ -1,3 +1,5 @@
+// +build go1.8
+
 /*
  *
  * Copyright 2017 gRPC authors.
@@ -16,6 +18,12 @@
  *
  */
 
-// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer
-// message and service protobuf definitions.
-package grpc_lb_v1
+package dns
+
+import "net"
+
+var (
+	lookupHost = net.DefaultResolver.LookupHost
+	lookupSRV  = net.DefaultResolver.LookupSRV
+	lookupTXT  = net.DefaultResolver.LookupTXT
+)

+ 57 - 0
vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go

@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package passthrough implements a pass-through resolver. It sends the target
+// name, without the scheme, back to gRPC as the resolved address.
+package passthrough
+
+import "google.golang.org/grpc/resolver"
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	r := &passthroughResolver{
+		target: target,
+		cc:     cc,
+	}
+	r.start()
+	return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+	return scheme
+}
+
+type passthroughResolver struct {
+	target resolver.Target
+	cc     resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+	r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
+}
+
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+	resolver.Register(&passthroughBuilder{})
+}
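
Because passthrough becomes the default scheme (see resolver.go below), a plain host:port target is simply handed back as the one resolved address, preserving the old direct-dial behavior. A minimal sketch (the address is hypothetical):

package example

import "google.golang.org/grpc"

func dialDirect() (*grpc.ClientConn, error) {
	// No scheme given, so the default "passthrough" resolver returns
	// the target verbatim as a single resolver.Address.
	return grpc.Dial("localhost:50051", grpc.WithInsecure())
}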

+ 21 - 16
vendor/google.golang.org/grpc/resolver/resolver.go

@@ -24,7 +24,7 @@ var (
 	// m is a map from scheme to resolver builder.
 	m = make(map[string]Builder)
 	// defaultScheme is the default scheme to use.
-	defaultScheme string
+	defaultScheme = "passthrough"
 )
 
 // TODO(bar) install dns resolver in init(){}.
@@ -36,30 +36,26 @@ func Register(b Builder) {
 }
 
 // Get returns the resolver builder registered with the given scheme.
-// If no builder is register with the scheme, the default scheme will
-// be used.
-// If the default scheme is not modified, "dns" will be the default
-// scheme, and the preinstalled dns resolver will be used.
-// If the default scheme is modified, and a resolver is registered with
-// the scheme, that resolver will be returned.
-// If the default scheme is modified, and no resolver is registered with
-// the scheme, nil will be returned.
+//
+// If no builder is registered with the scheme, nil will be returned.
 func Get(scheme string) Builder {
 	if b, ok := m[scheme]; ok {
 		return b
 	}
-	if b, ok := m[defaultScheme]; ok {
-		return b
-	}
 	return nil
 }
 
 // SetDefaultScheme sets the default scheme that will be used.
-// The default default scheme is "dns".
+// The initial default scheme is "passthrough".
 func SetDefaultScheme(scheme string) {
 	defaultScheme = scheme
 }
 
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
 // AddressType indicates the address type returned by name resolution.
 type AddressType uint8
 
@@ -78,7 +74,9 @@ type Address struct {
 	// Type is the type of this address.
 	Type AddressType
 	// ServerName is the name of this address.
-	// It's the name of the grpc load balancer, which will be used for authentication.
+	//
+	// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
+	// balancer, not the name of the backend.
 	ServerName string
 	// Metadata is the information associated with Addr, which may be used
 	// to make load balancing decision.
@@ -92,6 +90,11 @@ type BuildOption struct {
 
 // ClientConn contains the callbacks for resolver to notify any updates
 // to the gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
 type ClientConn interface {
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
@@ -128,8 +131,10 @@ type ResolveNowOption struct{}
 // Resolver watches for the updates on the specified target.
 // Updates include address updates and service config updates.
 type Resolver interface {
-	// ResolveNow will be called by gRPC to try to resolve the target name again.
-	// It's just a hint, resolver can ignore this if it's not necessary.
+	// ResolveNow will be called by gRPC to try to resolve the target name
+	// again. It's just a hint; the resolver can ignore it if it's not necessary.
+	//
+	// It could be called multiple times concurrently.
 	ResolveNow(ResolveNowOption)
 	// Close closes the resolver.
 	Close()
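
The Builder/Resolver/ClientConn contract above is small enough to sketch with a static resolver that pushes a fixed address list once; the scheme name and addresses are illustrative:

package example

import "google.golang.org/grpc/resolver"

type staticBuilder struct{ addrs []string }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	addrs := make([]resolver.Address, 0, len(b.addrs))
	for _, a := range b.addrs {
		addrs = append(addrs, resolver.Address{Addr: a})
	}
	cc.NewAddress(addrs) // notify gRPC of the fixed list once
	return &staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct{}

// ResolveNow may be called multiple times concurrently; there is nothing to refresh here.
func (*staticResolver) ResolveNow(resolver.ResolveNowOption) {}

func (*staticResolver) Close() {}

func init() {
	resolver.Register(&staticBuilder{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}})
}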

+ 42 - 23
vendor/google.golang.org/grpc/resolver_conn_wrapper.go

@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"fmt"
 	"strings"
 
 	"google.golang.org/grpc/grpclog"
@@ -36,20 +37,30 @@ type ccResolverWrapper struct {
 }
 
 // split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns "", s instead.
-func split2(s, sep string) (string, string) {
+// If sep is not found, it returns ("", "", false) instead.
+func split2(s, sep string) (string, string, bool) {
 	spl := strings.SplitN(s, sep, 2)
 	if len(spl) < 2 {
-		return "", s
+		return "", "", false
 	}
-	return spl[0], spl[1]
+	return spl[0], spl[1], true
 }
 
 // parseTarget splits target into a struct containing scheme, authority and
 // endpoint.
+//
+// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
+// target}.
 func parseTarget(target string) (ret resolver.Target) {
-	ret.Scheme, ret.Endpoint = split2(target, "://")
-	ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/")
+	var ok bool
+	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
+	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
 	return ret
 }
 
@@ -57,18 +68,12 @@ func parseTarget(target string) (ret resolver.Target) {
 // builder for this scheme. It then builds the resolver and starts the
 // monitoring goroutine for it.
 //
-// This function could return nil, nil, in tests for old behaviors.
-// TODO(bar) never return nil, nil when DNS becomes the default resolver.
+// If the withResolverBuilder dial option is set, the specified resolver will be
+// used instead.
 func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
-	target := parseTarget(cc.target)
-	grpclog.Infof("dialing to target with scheme: %q", target.Scheme)
-
-	rb := resolver.Get(target.Scheme)
+	rb := cc.dopts.resolverBuilder
 	if rb == nil {
-		// TODO(bar) return error when DNS becomes the default (implemented and
-		// registered by DNS package).
-		grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
-		return nil, nil
+		return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
 	}
 
 	ccr := &ccResolverWrapper{
@@ -79,14 +84,17 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
 	}
 
 	var err error
-	ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
+	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{})
 	if err != nil {
 		return nil, err
 	}
-	go ccr.watcher()
 	return ccr, nil
 }
 
+func (ccr *ccResolverWrapper) start() {
+	go ccr.watcher()
+}
+
 // watcher processes address updates and service config updates sequentially.
 // Otherwise, we need to resolve possible races between address and service
 // config (e.g. they specify different balancer types).
@@ -100,20 +108,31 @@ func (ccr *ccResolverWrapper) watcher() {
 
 		select {
 		case addrs := <-ccr.addrCh:
-			grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
-			// TODO(bar switching) this should never be nil. Pickfirst should be default.
-			if ccr.cc.balancerWrapper != nil {
-				// TODO(bar switching) create balancer if it's nil?
-				ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+			select {
+			case <-ccr.done:
+				return
+			default:
 			}
+			grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
+			ccr.cc.handleResolvedAddrs(addrs, nil)
 		case sc := <-ccr.scCh:
+			select {
+			case <-ccr.done:
+				return
+			default:
+			}
 			grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
+			ccr.cc.handleServiceConfig(sc)
 		case <-ccr.done:
 			return
 		}
 	}
 }
 
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
+	ccr.resolver.ResolveNow(o)
+}
+
 func (ccr *ccResolverWrapper) close() {
 	ccr.resolver.Close()
 	close(ccr.done)
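
Concretely, the stricter split2 means parseTarget only reports a scheme when the full scheme://authority/endpoint shape is present; anything else falls back to a bare Endpoint, which the default passthrough scheme then handles. A sketch of the expected results, written as if it were an example test inside package grpc (parseTarget is unexported):

func ExampleParseTarget() {
	fmt.Printf("%+v\n", parseTarget("dns:///example.com:443"))
	fmt.Printf("%+v\n", parseTarget("example.com:443")) // no "://", so not a valid target URI
	// Output:
	// {Scheme:dns Authority: Endpoint:example.com:443}
	// {Scheme: Authority: Endpoint:example.com:443}
}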

+ 317 - 207
vendor/google.golang.org/grpc/rpc_util.go

@@ -21,18 +21,20 @@ package grpc
 import (
 	"bytes"
 	"compress/gzip"
-	stdctx "context"
 	"encoding/binary"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"math"
-	"os"
+	"strings"
 	"sync"
 	"time"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -54,13 +56,29 @@ type gzipCompressor struct {
 
 // NewGZIPCompressor creates a Compressor based on GZIP.
 func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
 	return &gzipCompressor{
 		pool: sync.Pool{
 			New: func() interface{} {
-				return gzip.NewWriter(ioutil.Discard)
+				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
 			},
 		},
-	}
+	}, nil
 }
 
 func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
@@ -124,14 +142,15 @@ func (d *gzipDecompressor) Type() string {
 
 // callInfo contains all related configuration and information about an RPC.
 type callInfo struct {
+	compressorType        string
 	failFast              bool
-	headerMD              metadata.MD
-	trailerMD             metadata.MD
-	peer                  *peer.Peer
+	stream                *clientStream
 	traceInfo             traceInfo // in trace.go
 	maxReceiveMessageSize *int
 	maxSendMessageSize    *int
 	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
 }
 
 func defaultCallInfo() *callInfo {
@@ -158,80 +177,232 @@ type EmptyCallOption struct{}
 func (EmptyCallOption) before(*callInfo) error { return nil }
 func (EmptyCallOption) after(*callInfo)        {}
 
-type beforeCall func(c *callInfo) error
-
-func (o beforeCall) before(c *callInfo) error { return o(c) }
-func (o beforeCall) after(c *callInfo)        {}
-
-type afterCall func(c *callInfo)
-
-func (o afterCall) before(c *callInfo) error { return nil }
-func (o afterCall) after(c *callInfo)        { o(c) }
-
 // Header returns a CallOption that retrieves the header metadata
 // for a unary RPC.
 func Header(md *metadata.MD) CallOption {
-	return afterCall(func(c *callInfo) {
-		*md = c.headerMD
-	})
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(c *callInfo) error { return nil }
+func (o HeaderCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		*o.HeaderAddr, _ = c.stream.Header()
+	}
 }
 
 // Trailer returns a CallOption that retrieves the trailer metadata
 // for a unary RPC.
 func Trailer(md *metadata.MD) CallOption {
-	return afterCall(func(c *callInfo) {
-		*md = c.trailerMD
-	})
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(c *callInfo) error { return nil }
+func (o TrailerCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		*o.TrailerAddr = c.stream.Trailer()
+	}
 }
 
 // Peer returns a CallOption that retrieves peer information for a
 // unary RPC.
-func Peer(peer *peer.Peer) CallOption {
-	return afterCall(func(c *callInfo) {
-		if c.peer != nil {
-			*peer = *c.peer
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(c *callInfo) error { return nil }
+func (o PeerCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		if x, ok := peer.FromContext(c.stream.Context()); ok {
+			*o.PeerAddr = *x
 		}
-	})
+	}
 }
 
 // FailFast configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If failfast is true, the RPC will fail
+// connections or unreachable servers.  If failFast is true, the RPC will fail
 // immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will retry
-// the call if it fails due to a transient error. Please refer to
+// connection is available (or the call is canceled or times out) and will
+// retry the call if it fails due to a transient error.  gRPC will not retry if
+// data was written to the wire unless the server indicates it did not process
+// the data.  Please refer to
 // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
-// Note: failFast is default to true.
+//
+// By default, RPCs are "Fail Fast".
 func FailFast(failFast bool) CallOption {
-	return beforeCall(func(c *callInfo) error {
-		c.failFast = failFast
-		return nil
-	})
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+// This is an EXPERIMENTAL API.
+type FailFastCallOption struct {
+	FailFast bool
+}
+
+func (o FailFastCallOption) before(c *callInfo) error {
+	c.failFast = o.FailFast
+	return nil
 }
+func (o FailFastCallOption) after(c *callInfo) { return }
 
 // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
 func MaxCallRecvMsgSize(s int) CallOption {
-	return beforeCall(func(o *callInfo) error {
-		o.maxReceiveMessageSize = &s
-		return nil
-	})
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
+}
+
+// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
+// size the client can receive.
+// This is an EXPERIMENTAL API.
+type MaxRecvMsgSizeCallOption struct {
+	MaxRecvMsgSize int
 }
 
+func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
+	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
+	return nil
+}
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo) { return }
+
 // MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
 func MaxCallSendMsgSize(s int) CallOption {
-	return beforeCall(func(o *callInfo) error {
-		o.maxSendMessageSize = &s
-		return nil
-	})
+	return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
 }
 
+// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
+// size the client can send.
+// This is an EXPERIMENTAL API.
+type MaxSendMsgSizeCallOption struct {
+	MaxSendMsgSize int
+}
+
+func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
+	c.maxSendMessageSize = &o.MaxSendMsgSize
+	return nil
+}
+func (o MaxSendMsgSizeCallOption) after(c *callInfo) { return }
+
 // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
 // for a call.
 func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
-	return beforeCall(func(c *callInfo) error {
-		c.creds = creds
-		return nil
-	})
+	return PerRPCCredsCallOption{Creds: creds}
+}
+
+// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
+// credentials to use for the call.
+// This is an EXPERIMENTAL API.
+type PerRPCCredsCallOption struct {
+	Creds credentials.PerRPCCredentials
+}
+
+func (o PerRPCCredsCallOption) before(c *callInfo) error {
+	c.creds = o.Creds
+	return nil
 }
+func (o PerRPCCredsCallOption) after(c *callInfo) { return }
+
+// UseCompressor returns a CallOption which sets the compressor used when
+// sending the request.  If WithCompressor is also set, UseCompressor has
+// higher priority.
+//
+// This API is EXPERIMENTAL.
+func UseCompressor(name string) CallOption {
+	return CompressorCallOption{CompressorType: name}
+}
+
+// CompressorCallOption is a CallOption that indicates the compressor to use.
+// This is an EXPERIMENTAL API.
+type CompressorCallOption struct {
+	CompressorType string
+}
+
+func (o CompressorCallOption) before(c *callInfo) error {
+	c.compressorType = o.CompressorType
+	return nil
+}
+func (o CompressorCallOption) after(c *callInfo) { return }
+
+// CallContentSubtype returns a CallOption that will set the content-subtype
+// for a call. For example, if content-subtype is "json", the Content-Type over
+// the wire will be "application/grpc+json". The content-subtype is converted
+// to lowercase before being included in Content-Type. See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If CallCustomCodec is not also used, the content-subtype will be used to
+// look up the Codec to use in the registry controlled by RegisterCodec. See
+// the documentation on RegisterCodec for details on registration. The lookup
+// of content-subtype is case-insensitive. If no such Codec is found, the call
+// will result in an error with code codes.Internal.
+//
+// If CallCustomCodec is also used, that Codec will be used for all request and
+// response messages, with the content-subtype set to the given contentSubtype
+// here for requests.
+func CallContentSubtype(contentSubtype string) CallOption {
+	return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
+}
+
+// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
+// used for marshaling messages.
+// This is an EXPERIMENTAL API.
+type ContentSubtypeCallOption struct {
+	ContentSubtype string
+}
+
+func (o ContentSubtypeCallOption) before(c *callInfo) error {
+	c.contentSubtype = o.ContentSubtype
+	return nil
+}
+func (o ContentSubtypeCallOption) after(c *callInfo) { return }
+
+// CallCustomCodec returns a CallOption that will set the given Codec to be
+// used for all request and response messages for a call. The result of calling
+// String() will be used as the content-subtype in a case-insensitive manner.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+func CallCustomCodec(codec Codec) CallOption {
+	return CustomCodecCallOption{Codec: codec}
+}
+
+// CustomCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+// This is an EXPERIMENTAL API.
+type CustomCodecCallOption struct {
+	Codec Codec
+}
+
+func (o CustomCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o CustomCodecCallOption) after(c *callInfo) { return }
 
 // The format of the payload: compressed or not?
 type payloadFormat uint8
@@ -248,8 +419,8 @@ type parser struct {
 	// error types.
 	r io.Reader
 
-	// The header of a gRPC message. Find more detail
-	// at https://grpc.io/docs/guides/wire.html.
+	// The header of a gRPC message. Find more detail at
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
 	header [5]byte
 }
 
@@ -277,8 +448,11 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 	if length == 0 {
 		return pf, nil, nil
 	}
-	if length > uint32(maxReceiveMessageSize) {
-		return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
+	if int64(length) > int64(maxInt) {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
+	}
+	if int(length) > maxReceiveMessageSize {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
 	}
 	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
 	// of making it for each message:
@@ -294,18 +468,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 
 // encode serializes msg and returns a buffer of message header and a buffer of msg.
 // If msg is nil, it generates the message header and an empty msg buffer.
-func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) {
-	var b []byte
+// TODO(ddyihai): eliminate extra Compressor parameter.
+func encode(c baseCodec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
+	var (
+		b    []byte
+		cbuf *bytes.Buffer
+	)
 	const (
 		payloadLen = 1
 		sizeLen    = 4
 	)
-
 	if msg != nil {
 		var err error
 		b, err = c.Marshal(msg)
 		if err != nil {
-			return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
+			return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
 		}
 		if outPayload != nil {
 			outPayload.Payload = msg
@@ -313,24 +490,35 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
 			outPayload.Data = b
 			outPayload.Length = len(b)
 		}
-		if cp != nil {
-			if err := cp.Do(cbuf, b); err != nil {
-				return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+		if compressor != nil || cp != nil {
+			cbuf = new(bytes.Buffer)
+			// A compressor is in use; check whether it was set by UseCompressor first.
+			if compressor != nil {
+				z, _ := compressor.Compress(cbuf)
+				if _, err := z.Write(b); err != nil {
+					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+				}
+				z.Close()
+			} else {
+				// If no Compressor was set via UseCompressor, use the default Compressor
+				if err := cp.Do(cbuf, b); err != nil {
+					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+				}
 			}
 			b = cbuf.Bytes()
 		}
 	}
-
 	if uint(len(b)) > math.MaxUint32 {
-		return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+		return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
 	}
 
 	bufHeader := make([]byte, payloadLen+sizeLen)
-	if cp == nil {
-		bufHeader[0] = byte(compressionNone)
-	} else {
+	if compressor != nil || cp != nil {
 		bufHeader[0] = byte(compressionMade)
+	} else {
+		bufHeader[0] = byte(compressionNone)
 	}
+
 	// Write length of b into buf
 	binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
 	if outPayload != nil {
@@ -339,20 +527,26 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
 	return bufHeader, b, nil
 }
 
-func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
 	switch pf {
 	case compressionNone:
 	case compressionMade:
-		if dc == nil || recvCompress != dc.Type() {
-			return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+		if recvCompress == "" || recvCompress == encoding.Identity {
+			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
+		}
+		if !haveCompressor {
+			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
 		}
 	default:
-		return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
 	}
 	return nil
 }
 
-func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
+// Of the two compressor parameters (dc and compressor), at most one should be
+// set, but if both are, dc takes precedence over compressor.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
 	pf, d, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
 		return err
@@ -360,22 +554,37 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
 	if inPayload != nil {
 		inPayload.WireLength = len(d)
 	}
-	if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
-		return err
+
+	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+		return st.Err()
 	}
+
 	if pf == compressionMade {
-		d, err = dc.Do(bytes.NewReader(d))
-		if err != nil {
-			return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
+		// use this decompressor as the default.
+		if dc != nil {
+			d, err = dc.Do(bytes.NewReader(d))
+			if err != nil {
+				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			}
+		} else {
+			dcReader, err := compressor.Decompress(bytes.NewReader(d))
+			if err != nil {
+				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			}
+			d, err = ioutil.ReadAll(dcReader)
+			if err != nil {
+				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			}
 		}
 	}
 	if len(d) > maxReceiveMessageSize {
 		// TODO: Revisit the error code. Currently keep it consistent with java
 		// implementation.
-		return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+		return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
 	}
 	if err := c.Unmarshal(d, m); err != nil {
-		return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
 	}
 	if inPayload != nil {
 		inPayload.RecvTime = time.Now()
@@ -388,9 +597,7 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
 }
 
 type rpcInfo struct {
-	failfast      bool
-	bytesSent     bool
-	bytesReceived bool
+	failfast bool
 }
 
 type rpcInfoContextKey struct{}
@@ -404,69 +611,10 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
 	return
 }
 
-func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
-	if ss, ok := rpcInfoFromContext(ctx); ok {
-		ss.bytesReceived = s.bytesReceived
-		ss.bytesSent = s.bytesSent
-	}
-	return
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded, stdctx.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled, stdctx.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled, stdctx.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded, stdctx.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
-
 // Code returns the error code for err if it was produced by the rpc system.
 // Otherwise, it returns codes.Unknown.
 //
-// Deprecated; use status.FromError and Code method instead.
+// Deprecated: use status.FromError and Code method instead.
 func Code(err error) codes.Code {
 	if s, ok := status.FromError(err); ok {
 		return s.Code()
@@ -477,7 +625,7 @@ func Code(err error) codes.Code {
 // ErrorDesc returns the error description of err if it was produced by the rpc system.
 // Otherwise, it returns err.Error() or empty string when err is nil.
 //
-// Deprecated; use status.FromError and Message method instead.
+// Deprecated: use status.FromError and Message method instead.
 func ErrorDesc(err error) string {
 	if s, ok := status.FromError(err); ok {
 		return s.Message()
@@ -488,85 +636,47 @@ func ErrorDesc(err error) string {
 // Errorf returns an error containing an error code and a description;
 // Errorf returns nil if c is OK.
 //
-// Deprecated; use status.Errorf instead.
+// Deprecated: use status.Errorf instead.
 func Errorf(c codes.Code, format string, a ...interface{}) error {
 	return status.Errorf(c, format, a...)
 }
 
-// MethodConfig defines the configuration recommended by the service providers for a
-// particular method.
-// This is EXPERIMENTAL and subject to change.
-type MethodConfig struct {
-	// WaitForReady indicates whether RPCs sent to this method should wait until
-	// the connection is ready by default (!failfast). The value specified via the
-	// gRPC client API will override the value set here.
-	WaitForReady *bool
-	// Timeout is the default timeout for RPCs sent to this method. The actual
-	// deadline used will be the minimum of the value specified here and the value
-	// set by the application via the gRPC client API.  If either one is not set,
-	// then the other will be used.  If neither is set, then the RPC has no deadline.
-	Timeout *time.Duration
-	// MaxReqSize is the maximum allowed payload size for an individual request in a
-	// stream (client->server) in bytes. The size which is measured is the serialized
-	// payload after per-message compression (but before stream compression) in bytes.
-	// The actual value used is the minimum of the value specified here and the value set
-	// by the application via the gRPC client API. If either one is not set, then the other
-	// will be used.  If neither is set, then the built-in default is used.
-	MaxReqSize *int
-	// MaxRespSize is the maximum allowed payload size for an individual response in a
-	// stream (server->client) in bytes.
-	MaxRespSize *int
-}
-
-// ServiceConfig is provided by the service provider and contains parameters for how
-// clients that connect to the service should behave.
-// This is EXPERIMENTAL and subject to change.
-type ServiceConfig struct {
-	// LB is the load balancer the service providers recommends. The balancer specified
-	// via grpc.WithBalancer will override this.
-	LB Balancer
-	// Methods contains a map for the methods in this service.
-	// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
-	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
-	// Otherwise, the method has no MethodConfig to use.
-	Methods map[string]MethodConfig
-}
-
-func min(a, b *int) *int {
-	if *a < *b {
-		return a
+// setCallInfoCodec should only be called after CallOptions have been applied.
+func setCallInfoCodec(c *callInfo) error {
+	if c.codec != nil {
+		// codec was already set by a CallOption; use it.
+		return nil
 	}
-	return b
-}
 
-func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
-	if mcMax == nil && doptMax == nil {
-		return &defaultVal
-	}
-	if mcMax != nil && doptMax != nil {
-		return min(mcMax, doptMax)
+	if c.contentSubtype == "" {
+		// No codec specified in CallOptions; use proto by default.
+		c.codec = encoding.GetCodec(proto.Name)
+		return nil
 	}
-	if mcMax != nil {
-		return mcMax
+
+	// c.contentSubtype is already lowercased in CallContentSubtype
+	c.codec = encoding.GetCodec(c.contentSubtype)
+	if c.codec == nil {
+		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
 	}
-	return doptMax
+	return nil
 }
 
-// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
-// The latest support package version is 4.
-// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the
-// next support package version update.
-const SupportPackageIsVersion3 = true
-
-// SupportPackageIsVersion4 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the grpc package.
+// The SupportPackageIsVersion variables are referenced from generated protocol
+// buffer files to ensure compatibility with the gRPC version used.  The latest
+// support package version is 5.
 //
-// This constant may be renamed in the future if a change in the generated code
-// requires a synchronised update of grpc-go and protoc-gen-go. This constant
-// should not be referenced from any other code.
-const SupportPackageIsVersion4 = true
+// Older versions are kept for compatibility. They may be removed if
+// compatibility cannot be maintained.
+//
+// These constants should not be referenced from any other code.
+const (
+	SupportPackageIsVersion3 = true
+	SupportPackageIsVersion4 = true
+	SupportPackageIsVersion5 = true
+)
 
 // Version is the current grpc version.
-const Version = "1.7.5"
+const Version = "1.11.1"
 
 const grpcUA = "grpc-go/" + Version
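
The CallOption refactor keeps call sites unchanged while exposing each option as a concrete struct that interceptors can type-switch on. A sketch of a typical unary call using several of the options above (the service and method names are hypothetical):

package example

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func callWithOptions(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
	var header, trailer metadata.MD
	return grpc.Invoke(ctx, "/pkg.Service/Method", req, resp, cc,
		grpc.Header(&header),   // now a grpc.HeaderCallOption under the hood
		grpc.Trailer(&trailer), // populated after the RPC completes
		grpc.FailFast(false),   // wait for a ready connection instead of failing
		grpc.MaxCallRecvMsgSize(4*1024*1024),
	)
}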

+ 277 - 110
vendor/google.golang.org/grpc/server.go

@@ -32,11 +32,15 @@ import (
 	"sync"
 	"time"
 
+	"io/ioutil"
+
 	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/keepalive"
@@ -89,18 +93,20 @@ type Server struct {
 	conns  map[io.Closer]bool
 	serve  bool
 	drain  bool
-	ctx    context.Context
-	cancel context.CancelFunc
-	// A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
-	// and all the transport goes away.
-	cv     *sync.Cond
+	cv     *sync.Cond          // signaled when connections close for GracefulStop
 	m      map[string]*service // service name -> service info
 	events trace.EventLog
+
+	quit     chan struct{}
+	done     chan struct{}
+	quitOnce sync.Once
+	doneOnce sync.Once
+	serveWG  sync.WaitGroup // counts active Serve goroutines for GracefulStop
 }
 
 type options struct {
 	creds                 credentials.TransportCredentials
-	codec                 Codec
+	codec                 baseCodec
 	cp                    Compressor
 	dc                    Decompressor
 	unaryInt              UnaryServerInterceptor
@@ -177,20 +183,32 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
 }
 
 // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
 func CustomCodec(codec Codec) ServerOption {
 	return func(o *options) {
 		o.codec = codec
 	}
 }
 
-// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
+// RPCCompressor returns a ServerOption that sets a compressor for outbound
+// messages.  For backward compatibility, all outbound messages will be sent
+// using this compressor, regardless of incoming message compression.  By
+// default, server messages will be sent using the same compressor with which
+// request messages were sent.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
 func RPCCompressor(cp Compressor) ServerOption {
 	return func(o *options) {
 		o.cp = cp
 	}
 }
 
-// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
+// messages.  It has higher priority than decompressors registered via
+// encoding.RegisterCompressor.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
 func RPCDecompressor(dc Decompressor) ServerOption {
 	return func(o *options) {
 		o.dc = dc
@@ -297,6 +315,8 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
 // connection establishment (up to and including HTTP/2 handshaking) for all
 // new connections.  If this is not set, the default is 120 seconds.  A zero or
 // negative value will result in an immediate timeout.
+//
+// This API is EXPERIMENTAL.
 func ConnectionTimeout(d time.Duration) ServerOption {
 	return func(o *options) {
 		o.connectionTimeout = d
@@ -310,18 +330,15 @@ func NewServer(opt ...ServerOption) *Server {
 	for _, o := range opt {
 		o(&opts)
 	}
-	if opts.codec == nil {
-		// Set the default codec.
-		opts.codec = protoCodec{}
-	}
 	s := &Server{
 		lis:   make(map[net.Listener]bool),
 		opts:  opts,
 		conns: make(map[io.Closer]bool),
 		m:     make(map[string]*service),
+		quit:  make(chan struct{}),
+		done:  make(chan struct{}),
 	}
 	s.cv = sync.NewCond(&s.mu)
-	s.ctx, s.cancel = context.WithCancel(context.Background())
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
@@ -430,11 +447,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo {
 	return ret
 }
 
-var (
-	// ErrServerStopped indicates that the operation is now illegal because of
-	// the server being stopped.
-	ErrServerStopped = errors.New("grpc: the server has been stopped")
-)
+// ErrServerStopped indicates that the operation is now illegal because of
+// the server being stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
 
 func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
 	if s.opts.creds == nil {
@@ -448,16 +463,29 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
 // read gRPC requests and then call the registered handlers to reply to them.
 // Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
 // this method returns.
-// Serve always returns non-nil error.
+// Serve will return a non-nil error unless Stop or GracefulStop is called.
 func (s *Server) Serve(lis net.Listener) error {
 	s.mu.Lock()
 	s.printf("serving")
 	s.serve = true
 	if s.lis == nil {
+		// Serve called after Stop or GracefulStop.
 		s.mu.Unlock()
 		lis.Close()
 		return ErrServerStopped
 	}
+
+	s.serveWG.Add(1)
+	defer func() {
+		s.serveWG.Done()
+		select {
+		// Stop or GracefulStop called; block until done and return nil.
+		case <-s.quit:
+			<-s.done
+		default:
+		}
+	}()
+
 	s.lis[lis] = true
 	s.mu.Unlock()
 	defer func() {
@@ -491,25 +519,39 @@ func (s *Server) Serve(lis net.Listener) error {
 				timer := time.NewTimer(tempDelay)
 				select {
 				case <-timer.C:
-				case <-s.ctx.Done():
+				case <-s.quit:
+					timer.Stop()
+					return nil
 				}
-				timer.Stop()
 				continue
 			}
 			s.mu.Lock()
 			s.printf("done serving; Accept = %v", err)
 			s.mu.Unlock()
+
+			select {
+			case <-s.quit:
+				return nil
+			default:
+			}
 			return err
 		}
 		tempDelay = 0
-		// Start a new goroutine to deal with rawConn
-		// so we don't stall this Accept loop goroutine.
-		go s.handleRawConn(rawConn)
+		// Start a new goroutine to deal with rawConn so we don't stall this Accept
+		// loop goroutine.
+		//
+		// Make sure we account for the goroutine so GracefulStop doesn't nil out
+		// s.conns before this conn can be added.
+		s.serveWG.Add(1)
+		go func() {
+			s.handleRawConn(rawConn)
+			s.serveWG.Done()
+		}()
 	}
 }
 
-// handleRawConn is run in its own goroutine and handles a just-accepted
-// connection that has not had any I/O performed on it yet.
+// handleRawConn forks a goroutine to handle a just-accepted connection that
+// has not had any I/O performed on it yet.
 func (s *Server) handleRawConn(rawConn net.Conn) {
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
 	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
@@ -534,17 +576,28 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
 	}
 	s.mu.Unlock()
 
+	var serve func()
+	c := conn.(io.Closer)
 	if s.opts.useHandlerImpl {
-		rawConn.SetDeadline(time.Time{})
-		s.serveUsingHandler(conn)
+		serve = func() { s.serveUsingHandler(conn) }
 	} else {
+		// Finish handshaking (HTTP2)
 		st := s.newHTTP2Transport(conn, authInfo)
 		if st == nil {
 			return
 		}
-		rawConn.SetDeadline(time.Time{})
-		s.serveStreams(st)
+		c = st
+		serve = func() { s.serveStreams(st) }
 	}
+
+	rawConn.SetDeadline(time.Time{})
+	if !s.addConn(c) {
+		return
+	}
+	go func() {
+		serve()
+		s.removeConn(c)
+	}()
 }
 
 // newHTTP2Transport sets up a http/2 transport (using the
@@ -571,15 +624,10 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
 		return nil
 	}
-	if !s.addConn(st) {
-		st.Close()
-		return nil
-	}
 	return st
 }
 
 func (s *Server) serveStreams(st transport.ServerTransport) {
-	defer s.removeConn(st)
 	defer st.Close()
 	var wg sync.WaitGroup
 	st.HandleStreams(func(stream *transport.Stream) {
@@ -613,11 +661,6 @@ var _ http.Handler = (*Server)(nil)
 //
 // conn is the *tls.Conn that's already been authenticated.
 func (s *Server) serveUsingHandler(conn net.Conn) {
-	if !s.addConn(conn) {
-		conn.Close()
-		return
-	}
-	defer s.removeConn(conn)
 	h2s := &http2.Server{
 		MaxConcurrentStreams: s.opts.maxConcurrentStreams,
 	}
@@ -651,13 +694,12 @@ func (s *Server) serveUsingHandler(conn net.Conn) {
 // available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
 // and subject to change.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r)
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 	if !s.addConn(st) {
-		st.Close()
 		return
 	}
 	defer s.removeConn(st)
@@ -687,9 +729,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
 func (s *Server) addConn(c io.Closer) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.conns == nil || s.drain {
+	if s.conns == nil {
+		c.Close()
 		return false
 	}
+	if s.drain {
+		// Transport added after we drained our existing conns: drain it
+		// immediately.
+		c.(transport.ServerTransport).Drain()
+	}
 	s.conns[c] = true
 	return true
 }
@@ -703,18 +751,14 @@ func (s *Server) removeConn(c io.Closer) {
 	}
 }
 
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	var (
-		cbuf       *bytes.Buffer
 		outPayload *stats.OutPayload
 	)
-	if cp != nil {
-		cbuf = new(bytes.Buffer)
-	}
 	if s.opts.statsHandler != nil {
 		outPayload = &stats.OutPayload{}
 	}
-	hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
+	hdr, data, err := encode(s.getCodec(stream.ContentSubtype()), msg, cp, outPayload, comp)
 	if err != nil {
 		grpclog.Errorln("grpc: server failed to encode response: ", err)
 		return err
@@ -733,13 +777,15 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
 func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
 	sh := s.opts.statsHandler
 	if sh != nil {
+		beginTime := time.Now()
 		begin := &stats.Begin{
-			BeginTime: time.Now(),
+			BeginTime: beginTime,
 		}
 		sh.HandleRPC(stream.Context(), begin)
 		defer func() {
 			end := &stats.End{
-				EndTime: time.Now(),
+				BeginTime: beginTime,
+				EndTime:   time.Now(),
 			}
 			if err != nil && err != io.EOF {
 				end.Error = toRPCErr(err)
@@ -758,10 +804,43 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			}
 		}()
 	}
+
+	// comp and cp are used for compression.  decomp and dc are used for
+	// decompression.  If comp and decomp are both set, they are the same;
+	// however they are kept separate to ensure that at most one of the
+	// compressor/decompressor variable pairs are set for use later.
+	var comp, decomp encoding.Compressor
+	var cp Compressor
+	var dc Decompressor
+
+	// If dc is set and matches the stream's compression, use it.  Otherwise, try
+	// to find a matching registered compressor for decomp.
+	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+		dc = s.opts.dc
+	} else if rc != "" && rc != encoding.Identity {
+		decomp = encoding.GetCompressor(rc)
+		if decomp == nil {
+			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+			t.WriteStatus(stream, st)
+			return st.Err()
+		}
+	}
+
+	// If cp is set, use it.  Otherwise, attempt to compress the response using
+	// the incoming message compression method.
+	//
+	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
 	if s.opts.cp != nil {
-		// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
-		stream.SetSendCompress(s.opts.cp.Type())
+		cp = s.opts.cp
+		stream.SetSendCompress(cp.Type())
+	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+		// Legacy compressor not specified; attempt to respond with same encoding.
+		comp = encoding.GetCompressor(rc)
+		if comp != nil {
+			stream.SetSendCompress(rc)
+		}
 	}
+
 	p := &parser{r: stream}
 	pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
 	if err == io.EOF {
@@ -769,7 +848,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		return err
 	}
 	if err == io.ErrUnexpectedEOF {
-		err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
 	}
 	if err != nil {
 		if st, ok := status.FromError(err); ok {
@@ -790,19 +869,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		return err
 	}
-
-	if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
-		if st, ok := status.FromError(err); ok {
-			if e := t.WriteStatus(stream, st); e != nil {
-				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-			}
-			return err
-		}
-		if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
+	if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
+		if e := t.WriteStatus(stream, st); e != nil {
 			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
 		}
-
-		// TODO checkRecvPayload always return RPC error. Add a return here if necessary.
+		return st.Err()
 	}
 	var inPayload *stats.InPayload
 	if sh != nil {
@@ -816,9 +887,17 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		if pf == compressionMade {
 			var err error
-			req, err = s.opts.dc.Do(bytes.NewReader(req))
-			if err != nil {
-				return Errorf(codes.Internal, err.Error())
+			if dc != nil {
+				req, err = dc.Do(bytes.NewReader(req))
+				if err != nil {
+					return status.Errorf(codes.Internal, err.Error())
+				}
+			} else {
+				tmp, _ := decomp.Decompress(bytes.NewReader(req))
+				req, err = ioutil.ReadAll(tmp)
+				if err != nil {
+					return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				}
 			}
 		}
 		if len(req) > s.opts.maxReceiveMessageSize {
@@ -826,7 +905,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			// java implementation.
 			return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
 		}
-		if err := s.opts.codec.Unmarshal(req, v); err != nil {
+		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
 		if inPayload != nil {
@@ -840,12 +919,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		return nil
 	}
-	reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
+	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
 		if !ok {
 			// Convert appErr if it is not a grpc status error.
-			appErr = status.Error(convertCode(appErr), appErr.Error())
+			appErr = status.Error(codes.Unknown, appErr.Error())
 			appStatus, _ = status.FromError(appErr)
 		}
 		if trInfo != nil {
@@ -864,7 +944,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		Last:  true,
 		Delay: false,
 	}
-	if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
+
+	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
 		if err == io.EOF {
 			// The entire stream is done (for unary RPC only).
 			return err
@@ -899,13 +980,15 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	sh := s.opts.statsHandler
 	if sh != nil {
+		beginTime := time.Now()
 		begin := &stats.Begin{
-			BeginTime: time.Now(),
+			BeginTime: beginTime,
 		}
 		sh.HandleRPC(stream.Context(), begin)
 		defer func() {
 			end := &stats.End{
-				EndTime: time.Now(),
+				BeginTime: beginTime,
+				EndTime:   time.Now(),
 			}
 			if err != nil && err != io.EOF {
 				end.Error = toRPCErr(err)
@@ -913,21 +996,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			sh.HandleRPC(stream.Context(), end)
 		}()
 	}
-	if s.opts.cp != nil {
-		stream.SetSendCompress(s.opts.cp.Type())
-	}
+	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
 	ss := &serverStream{
+		ctx:   ctx,
 		t:     t,
 		s:     stream,
 		p:     &parser{r: stream},
-		codec: s.opts.codec,
-		cp:    s.opts.cp,
-		dc:    s.opts.dc,
+		codec: s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
 		trInfo:                trInfo,
 		statsHandler:          sh,
 	}
+
+	// If dc is set and matches the stream's compression, use it.  Otherwise, try
+	// to find a matching registered compressor for decomp.
+	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+		ss.dc = s.opts.dc
+	} else if rc != "" && rc != encoding.Identity {
+		ss.decomp = encoding.GetCompressor(rc)
+		if ss.decomp == nil {
+			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+			t.WriteStatus(ss.s, st)
+			return st.Err()
+		}
+	}
+
+	// If cp is set, use it.  Otherwise, attempt to compress the response using
+	// the incoming message compression method.
+	//
+	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+	if s.opts.cp != nil {
+		ss.cp = s.opts.cp
+		stream.SetSendCompress(s.opts.cp.Type())
+	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+		// Legacy compressor not specified; attempt to respond with same encoding.
+		ss.comp = encoding.GetCompressor(rc)
+		if ss.comp != nil {
+			stream.SetSendCompress(rc)
+		}
+	}
+
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 		defer func() {
@@ -963,7 +1072,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			case transport.StreamError:
 				appStatus = status.New(err.Code, err.Desc)
 			default:
-				appStatus = status.New(convertCode(appErr), appErr.Error())
+				appStatus = status.New(codes.Unknown, appErr.Error())
 			}
 			appErr = appStatus.Err()
 		}
@@ -983,7 +1092,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		ss.mu.Unlock()
 	}
 	return t.WriteStatus(ss.s, status.New(codes.OK, ""))
-
 }
 
 func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -1065,12 +1173,57 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	}
 }
 
+// The key to save ServerTransportStream in the context.
+type streamKey struct{}
+
+// NewContextWithServerTransportStream creates a new context from ctx and
+// attaches stream to it.
+//
+// This API is EXPERIMENTAL.
+func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
+	return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// ServerTransportStream is a minimal interface that a transport stream must
+// implement. This can be used to mock an actual transport stream for tests of
+// handler code that use, for example, grpc.SetHeader (which requires some
+// stream to be in context).
+//
+// See also NewContextWithServerTransportStream.
+//
+// This API is EXPERIMENTAL.
+type ServerTransportStream interface {
+	Method() string
+	SetHeader(md metadata.MD) error
+	SendHeader(md metadata.MD) error
+	SetTrailer(md metadata.MD) error
+}
+
+// serverTransportStreamFromContext returns the ServerTransportStream saved in ctx. Returns
+// nil if the given context has no stream associated with it (which implies
+// it is not an RPC invocation context).
+func serverTransportStreamFromContext(ctx context.Context) ServerTransportStream {
+	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
+	return s
+}
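
A sketch of the mocking use case the interface comment describes; the
mockStream type and test are hypothetical:

import (
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type mockStream struct {
	header, trailer metadata.MD
}

func (m *mockStream) Method() string { return "/pkg.Service/Method" }
func (m *mockStream) SetHeader(md metadata.MD) error {
	m.header = metadata.Join(m.header, md)
	return nil
}
func (m *mockStream) SendHeader(md metadata.MD) error { return m.SetHeader(md) }
func (m *mockStream) SetTrailer(md metadata.MD) error {
	m.trailer = metadata.Join(m.trailer, md)
	return nil
}

func TestHandlerSetsHeader(t *testing.T) {
	ms := &mockStream{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), ms)
	if err := grpc.SetHeader(ctx, metadata.Pairs("k", "v")); err != nil {
		t.Fatal(err)
	}
	if got := ms.header["k"]; len(got) != 1 || got[0] != "v" {
		t.Fatalf("header = %v, want k=v", ms.header)
	}
}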
+
 // Stop stops the gRPC server. It immediately closes all open
 // connections and listeners.
 // It cancels all active RPCs on the server side and the corresponding
 // pending RPCs on the client side will get notified by connection
 // errors.
 func (s *Server) Stop() {
+	s.quitOnce.Do(func() {
+		close(s.quit)
+	})
+
+	defer func() {
+		s.serveWG.Wait()
+		s.doneOnce.Do(func() {
+			close(s.done)
+		})
+	}()
+
 	s.mu.Lock()
 	listeners := s.lis
 	s.lis = nil
@@ -1088,7 +1241,6 @@ func (s *Server) Stop() {
 	}
 
 	s.mu.Lock()
-	s.cancel()
 	if s.events != nil {
 		s.events.Finish()
 		s.events = nil
@@ -1100,22 +1252,38 @@ func (s *Server) Stop() {
 // accepting new connections and RPCs and blocks until all the pending RPCs are
 // finished.
 func (s *Server) GracefulStop() {
+	s.quitOnce.Do(func() {
+		close(s.quit)
+	})
+
+	defer func() {
+		s.doneOnce.Do(func() {
+			close(s.done)
+		})
+	}()
+
 	s.mu.Lock()
-	defer s.mu.Unlock()
 	if s.conns == nil {
+		s.mu.Unlock()
 		return
 	}
 	for lis := range s.lis {
 		lis.Close()
 	}
 	s.lis = nil
-	s.cancel()
 	if !s.drain {
 		for c := range s.conns {
 			c.(transport.ServerTransport).Drain()
 		}
 		s.drain = true
 	}
+
+	// Wait for serving goroutines to be ready to exit.  Only then can we be sure no
+	// new conns will be created.
+	s.mu.Unlock()
+	s.serveWG.Wait()
+	s.mu.Lock()
+
 	for len(s.conns) != 0 {
 		s.cv.Wait()
 	}
@@ -1124,26 +1292,29 @@ func (s *Server) GracefulStop() {
 		s.events.Finish()
 		s.events = nil
 	}
+	s.mu.Unlock()
 }
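
With Serve now returning nil after Stop or GracefulStop, typical shutdown
wiring looks like this sketch (address and signal choice are illustrative):

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGTERM)
		<-sig
		// Drain existing RPCs, then close; Serve below returns nil.
		srv.GracefulStop()
	}()
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}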
 
 func init() {
-	internal.TestingCloseConns = func(arg interface{}) {
-		arg.(*Server).testingCloseConns()
-	}
 	internal.TestingUseHandlerImpl = func(arg interface{}) {
 		arg.(*Server).opts.useHandlerImpl = true
 	}
 }
 
-// testingCloseConns closes all existing transports but keeps s.lis
-// accepting new connections.
-func (s *Server) testingCloseConns() {
-	s.mu.Lock()
-	for c := range s.conns {
-		c.Close()
-		delete(s.conns, c)
+// getCodec returns the codec to use for the given content-subtype, falling
+// back to the proto codec.  contentSubtype must be lowercase; the result is
+// never nil.
+func (s *Server) getCodec(contentSubtype string) baseCodec {
+	if s.opts.codec != nil {
+		return s.opts.codec
 	}
-	s.mu.Unlock()
+	if contentSubtype == "" {
+		return encoding.GetCodec(proto.Name)
+	}
+	codec := encoding.GetCodec(contentSubtype)
+	if codec == nil {
+		return encoding.GetCodec(proto.Name)
+	}
+	return codec
 }
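
getCodec means a registered codec is chosen per request from the
content-type suffix. A sketch of wiring a JSON codec (names are
illustrative; whether encoding/json semantics suit your message types is an
assumption):

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)   { return json.Marshal(v) }
func (jsonCodec) Unmarshal(b []byte, v interface{}) error { return json.Unmarshal(b, v) }

// Name must be lowercase; it selects content-type "application/grpc+json".
func (jsonCodec) Name() string { return "json" }

func init() { encoding.RegisterCodec(jsonCodec{}) }

Clients opt in per call via the CallContentSubtype call option added
alongside this plumbing.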
 
 // SetHeader sets the header metadata.
@@ -1156,9 +1327,9 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
 	}
-	stream, ok := transport.StreamFromContext(ctx)
-	if !ok {
-		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	stream := serverTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	return stream.SetHeader(md)
 }
@@ -1166,15 +1337,11 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
 // SendHeader sends header metadata. It may be called at most once.
 // The provided md and headers set by SetHeader() will be sent.
 func SendHeader(ctx context.Context, md metadata.MD) error {
-	stream, ok := transport.StreamFromContext(ctx)
-	if !ok {
-		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
-	}
-	t := stream.ServerTransport()
-	if t == nil {
-		grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
+	stream := serverTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
-	if err := t.WriteHeader(stream, md); err != nil {
+	if err := stream.SendHeader(md); err != nil {
 		return toRPCErr(err)
 	}
 	return nil
@@ -1186,9 +1353,9 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
 	}
-	stream, ok := transport.StreamFromContext(ctx)
-	if !ok {
-		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	stream := serverTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
 	}
 	return stream.SetTrailer(md)
 }

+ 226 - 0
vendor/google.golang.org/grpc/service_config.go

@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+// DEPRECATED: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API.  If either one is not set,
+	// then the other will be used.  If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used.  If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+// DEPRECATED: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+	// LB is the load balancer the service provider recommends. The balancer specified
+	// via grpc.WithBalancer will override this.
+	LB *string
+	// Methods contains a map for the methods in this service.
+	// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
+	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
+	// Otherwise, the method has no MethodConfig to use.
+	Methods map[string]MethodConfig
+}
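
The lookup precedence documented on Methods can be read as this sketch (the
exported entry point is ClientConn.GetMethodConfig; this helper is
illustrative):

func lookupMethodConfig(sc ServiceConfig, method string) (MethodConfig, bool) {
	// method has the form "/service/method".
	if mc, ok := sc.Methods[method]; ok {
		return mc, true // exact match
	}
	if i := strings.LastIndex(method, "/"); i >= 0 {
		mc, ok := sc.Methods[method[:i+1]] // service default "/service/"
		return mc, ok
	}
	return MethodConfig{}, false
}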
+
+func parseDuration(s *string) (*time.Duration, error) {
+	if s == nil {
+		return nil, nil
+	}
+	if !strings.HasSuffix(*s, "s") {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
+	if len(ss) > 2 {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+	hasDigits := false
+	var d time.Duration
+	if len(ss[0]) > 0 {
+		i, err := strconv.ParseInt(ss[0], 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+		}
+		d = time.Duration(i) * time.Second
+		hasDigits = true
+	}
+	if len(ss) == 2 && len(ss[1]) > 0 {
+		if len(ss[1]) > 9 {
+			return nil, fmt.Errorf("malformed duration %q", *s)
+		}
+		f, err := strconv.ParseInt(ss[1], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+		}
+		for i := 9; i > len(ss[1]); i-- {
+			f *= 10
+		}
+		d += time.Duration(f)
+		hasDigits = true
+	}
+	if !hasDigits {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+
+	return &d, nil
+}
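
Worked cases for the parser above, which accepts the JSON-proto duration
form of decimal seconds with an "s" suffix (the str helper is hypothetical):

func str(s string) *string { return &s }

func exampleDurations() {
	d1, _ := parseDuration(str("1.5s")) // 1*time.Second + 500*time.Millisecond
	d2, _ := parseDuration(str(".25s")) // whole part optional: 250*time.Millisecond
	_, e1 := parseDuration(str("2"))    // error: missing "s" suffix
	_, e2 := parseDuration(str("s"))    // error: digits are required
	fmt.Println(*d1, *d2, e1, e2)
}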
+
+type jsonName struct {
+	Service *string
+	Method  *string
+}
+
+func (j jsonName) generatePath() (string, bool) {
+	if j.Service == nil {
+		return "", false
+	}
+	res := "/" + *j.Service + "/"
+	if j.Method != nil {
+		res += *j.Method
+	}
+	return res, true
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonMC struct {
+	Name                    *[]jsonName
+	WaitForReady            *bool
+	Timeout                 *string
+	MaxRequestMessageBytes  *int64
+	MaxResponseMessageBytes *int64
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonSC struct {
+	LoadBalancingPolicy *string
+	MethodConfig        *[]jsonMC
+}
+
+func parseServiceConfig(js string) (ServiceConfig, error) {
+	var rsc jsonSC
+	err := json.Unmarshal([]byte(js), &rsc)
+	if err != nil {
+		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+		return ServiceConfig{}, err
+	}
+	sc := ServiceConfig{
+		LB:      rsc.LoadBalancingPolicy,
+		Methods: make(map[string]MethodConfig),
+	}
+	if rsc.MethodConfig == nil {
+		return sc, nil
+	}
+
+	for _, m := range *rsc.MethodConfig {
+		if m.Name == nil {
+			continue
+		}
+		d, err := parseDuration(m.Timeout)
+		if err != nil {
+			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+			return ServiceConfig{}, err
+		}
+
+		mc := MethodConfig{
+			WaitForReady: m.WaitForReady,
+			Timeout:      d,
+		}
+		if m.MaxRequestMessageBytes != nil {
+			if *m.MaxRequestMessageBytes > int64(maxInt) {
+				mc.MaxReqSize = newInt(maxInt)
+			} else {
+				mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+			}
+		}
+		if m.MaxResponseMessageBytes != nil {
+			if *m.MaxResponseMessageBytes > int64(maxInt) {
+				mc.MaxRespSize = newInt(maxInt)
+			} else {
+				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+			}
+		}
+		for _, n := range *m.Name {
+			if path, valid := n.generatePath(); valid {
+				sc.Methods[path] = mc
+			}
+		}
+	}
+
+	return sc, nil
+}
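
An illustrative round trip through the parser; the JSON field names come
from the service config spec linked earlier, and the service name is made
up:

func exampleServiceConfig() {
	const js = `{
	  "loadBalancingPolicy": "round_robin",
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
	    "waitForReady": true,
	    "timeout": "1.5s",
	    "maxRequestMessageBytes": 1024
	  }]
	}`
	sc, err := parseServiceConfig(js)
	if err != nil {
		panic(err)
	}
	mc := sc.Methods["/echo.Echo/UnaryEcho"]
	// *sc.LB == "round_robin", *mc.WaitForReady == true,
	// *mc.Timeout == 1500*time.Millisecond, *mc.MaxReqSize == 1024
	_ = mc
}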
+
+func min(a, b *int) *int {
+	if *a < *b {
+		return a
+	}
+	return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+	if mcMax == nil && doptMax == nil {
+		return &defaultVal
+	}
+	if mcMax != nil && doptMax != nil {
+		return min(mcMax, doptMax)
+	}
+	if mcMax != nil {
+		return mcMax
+	}
+	return doptMax
+}
+
+func newInt(b int) *int {
+	return &b
+}

+ 2 - 0
vendor/google.golang.org/grpc/stats/stats.go

@@ -169,6 +169,8 @@ func (s *OutTrailer) isRPCStats() {}
 type End struct {
 	// Client is true if this End is from client side.
 	Client bool
+	// BeginTime is the time when the RPC began.
+	BeginTime time.Time
 	// EndTime is the time when the RPC ends.
 	EndTime time.Time
 	// Error is the error the RPC ended with. It is an error generated from

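Both server and client now populate BeginTime on End (see the
processUnaryRPC, processStreamingRPC, and newClientStream changes above), so
a stats.Handler can report per-RPC latency without correlating Begin events
itself. A sketch:

type latencyHandler struct{}

func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (latencyHandler) HandleConn(context.Context, stats.ConnStats) {}

func (latencyHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
	if end, ok := rs.(*stats.End); ok {
		log.Printf("rpc latency=%v err=%v", end.EndTime.Sub(end.BeginTime), end.Error)
	}
}

Installed with grpc.StatsHandler(latencyHandler{}) on a server or
grpc.WithStatsHandler(latencyHandler{}) when dialing.
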
+ 26 - 5
vendor/google.golang.org/grpc/status/status.go

@@ -46,7 +46,7 @@ func (se *statusError) Error() string {
 	return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
 }
 
-func (se *statusError) status() *Status {
+func (se *statusError) GRPCStatus() *Status {
 	return &Status{s: (*spb.Status)(se)}
 }
 
@@ -120,15 +120,23 @@ func FromProto(s *spb.Status) *Status {
 }
 
 // FromError returns a Status representing err if it was produced from this
-// package, otherwise it returns nil, false.
+// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
+// Status is returned with codes.Unknown and the original error message.
 func FromError(err error) (s *Status, ok bool) {
 	if err == nil {
 		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
 	}
-	if s, ok := err.(*statusError); ok {
-		return s.status(), true
+	if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
+		return se.GRPCStatus(), true
 	}
-	return nil, false
+	return New(codes.Unknown, err.Error()), false
+}
+
+// Convert is a convenience function which removes the need to handle the
+// boolean return value from FromError.
+func Convert(err error) *Status {
+	s, _ := FromError(err)
+	return s
 }
 
 // WithDetails returns a new status with the provided details messages appended to the status.
@@ -166,3 +174,16 @@ func (s *Status) Details() []interface{} {
 	}
 	return details
 }
+
+// Code returns the Code of the error if it is a Status error, codes.OK if err
+// is nil, or codes.Unknown otherwise.
+func Code(err error) codes.Code {
+	// Don't use FromError to avoid allocation of OK status.
+	if err == nil {
+		return codes.OK
+	}
+	if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
+		return se.GRPCStatus().Code()
+	}
+	return codes.Unknown
+}
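
A compact tour of the reworked surface, assuming the behavior documented
above:

func exampleStatus() {
	err := status.Error(codes.NotFound, "no such user")

	st, ok := status.FromError(err)                   // ok == true, st.Code() == codes.NotFound
	st2, ok2 := status.FromError(errors.New("plain")) // ok2 == false, st2.Code() == codes.Unknown
	st3 := status.Convert(err)                        // FromError minus the bool
	c := status.Code(nil)                             // codes.OK, no Status allocated

	fmt.Println(st.Message(), st2.Message(), st3.Code(), c, ok, ok2)
}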

+ 349 - 266
vendor/google.golang.org/grpc/stream.go

@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"bytes"
 	"errors"
 	"io"
 	"sync"
@@ -29,15 +28,18 @@ import (
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/transport"
 )
 
 // StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC.
+// execution of a streaming RPC. If a StreamHandler returns an error, it
+// should be produced by the status package, or else gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message
+// of the RPC.
 type StreamHandler func(srv interface{}, stream ServerStream) error
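
A handler sketch that follows this guidance; the Echo service and pb types
are illustrative, and the usual io/status/codes imports are assumed:

func (s *echoServer) BidiEcho(stream pb.Echo_BidiEchoServer) error {
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil // client finished sending; the RPC ends with codes.OK
		}
		if err != nil {
			return err // already a status error from the transport
		}
		if in.Message == "" {
			// Produced by the status package so the client sees
			// InvalidArgument rather than codes.Unknown.
			return status.Error(codes.InvalidArgument, "message must be non-empty")
		}
		if err := stream.Send(in); err != nil {
			return err
		}
	}
}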
 
 // StreamDesc represents a streaming RPC service's method specification.
@@ -51,6 +53,8 @@ type StreamDesc struct {
 }
 
 // Stream defines the common interface a client or server stream has to satisfy.
+//
+// All errors returned from Stream are compatible with the status package.
 type Stream interface {
 	// Context returns the context for this stream.
 	Context() context.Context
@@ -89,43 +93,57 @@ type ClientStream interface {
 	// Stream.SendMsg() may return a non-nil error when something wrong happens sending
 	// the request. The returned error indicates the status of this sending, not the final
 	// status of the RPC.
-	// Always call Stream.RecvMsg() to get the final status if you care about the status of
-	// the RPC.
+	//
+	// Always call Stream.RecvMsg() to drain the stream and get the final
+	// status, otherwise there could be leaked resources.
 	Stream
 }
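
Concretely, a client must drain even after a successful send loop. A
client-streaming sketch (the generated EchoClient and CloseAndRecv wrapper
are hypothetical names; CloseAndRecv is generated from CloseSend plus
RecvMsg):

func upload(ctx context.Context, client pb.EchoClient, msgs []*pb.Msg) error {
	stream, err := client.UploadEcho(ctx)
	if err != nil {
		return err
	}
	for _, m := range msgs {
		if err := stream.Send(m); err != nil {
			break // the definitive error surfaces from CloseAndRecv below
		}
	}
	// Draining via CloseAndRecv retrieves the final status and releases the
	// stream's resources, per the comment above.
	reply, err := stream.CloseAndRecv()
	if err != nil {
		return err
	}
	_ = reply
	return nil
}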
 
-// NewClientStream creates a new Stream for the client side. This is called
-// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	// allow interceptor to see all applicable call options, which means those
+	// configured as defaults from dial option as well as per-call options
+	opts = combine(cc.dopts.callOptions, opts)
+
 	if cc.dopts.streamInt != nil {
 		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
 	}
 	return newClientStream(ctx, desc, cc, method, opts...)
 }
 
+// NewClientStream creates a new Stream for the client side. This is typically
+// called by generated code.
+//
+// DEPRECATED: Use ClientConn.NewStream instead.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+	return cc.NewStream(ctx, desc, method, opts...)
+}
+
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
-	var (
-		t      transport.ClientTransport
-		s      *transport.Stream
-		done   func(balancer.DoneInfo)
-		cancel context.CancelFunc
-	)
 	c := defaultCallInfo()
 	mc := cc.GetMethodConfig(method)
 	if mc.WaitForReady != nil {
 		c.failFast = !*mc.WaitForReady
 	}
 
-	if mc.Timeout != nil {
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	var cancel context.CancelFunc
+	if mc.Timeout != nil && *mc.Timeout >= 0 {
 		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
-		defer func() {
-			if err != nil {
-				cancel()
-			}
-		}()
+	} else {
+		ctx, cancel = context.WithCancel(ctx)
 	}
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
 
-	opts = append(cc.dopts.callOptions, opts...)
 	for _, o := range opts {
 		if err := o.before(c); err != nil {
 			return nil, toRPCErr(err)
@@ -133,6 +151,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	}
 	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
 	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
 
 	callHdr := &transport.CallHdr{
 		Host:   cc.authority,
@@ -141,10 +162,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 		// so we don't flush the header.
 		// If it's client streaming, the user may never send a request or send it any
 		// time soon, so we ask the transport to flush the header.
-		Flush: desc.ClientStreams,
-	}
-	if cc.dopts.cp != nil {
+		Flush:          desc.ClientStreams,
+		ContentSubtype: c.contentSubtype,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set.  In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if cc.dopts.cp != nil {
 		callHdr.SendCompress = cc.dopts.cp.Type()
+		cp = cc.dopts.cp
 	}
 	if c.creds != nil {
 		callHdr.Creds = c.creds
@@ -170,11 +208,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	}
 	ctx = newContextWithRPCInfo(ctx, c.failFast)
 	sh := cc.dopts.copts.StatsHandler
+	var beginTime time.Time
 	if sh != nil {
 		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
+		beginTime = time.Now()
 		begin := &stats.Begin{
 			Client:    true,
-			BeginTime: time.Now(),
+			BeginTime: beginTime,
 			FailFast:  c.failFast,
 		}
 		sh.HandleRPC(ctx, begin)
@@ -182,341 +222,369 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 			if err != nil {
 				// Only handle end stats if err != nil.
 				end := &stats.End{
-					Client: true,
-					Error:  err,
+					Client:    true,
+					Error:     err,
+					BeginTime: beginTime,
+					EndTime:   time.Now(),
 				}
 				sh.HandleRPC(ctx, end)
 			}
 		}()
 	}
+
+	var (
+		t    transport.ClientTransport
+		s    *transport.Stream
+		done func(balancer.DoneInfo)
+	)
 	for {
+		// Check whether the context has expired.  This will prevent us from
+		// looping forever if an error occurs for wait-for-ready RPCs where no data
+		// is sent on the wire.
+		select {
+		case <-ctx.Done():
+			return nil, toRPCErr(ctx.Err())
+		default:
+		}
+
 		t, done, err = cc.getTransport(ctx, c.failFast)
 		if err != nil {
-			// TODO(zhaoq): Probably revisit the error handling.
-			if _, ok := status.FromError(err); ok {
-				return nil, err
-			}
-			if err == errConnClosing || err == errConnUnavailable {
-				if c.failFast {
-					return nil, Errorf(codes.Unavailable, "%v", err)
-				}
-				continue
-			}
-			// All the other errors are treated as Internal errors.
-			return nil, Errorf(codes.Internal, "%v", err)
+			return nil, err
 		}
 
 		s, err = t.NewStream(ctx, callHdr)
 		if err != nil {
-			if _, ok := err.(transport.ConnectionError); ok && done != nil {
-				// If error is connection error, transport was sending data on wire,
-				// and we are not sure if anything has been sent on wire.
-				// If error is not connection error, we are sure nothing has been sent.
-				updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
-			}
 			if done != nil {
 				done(balancer.DoneInfo{Err: err})
 				done = nil
 			}
-			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+			// In the event of any error from NewStream, we never attempted to write
+			// anything to the wire, so we can retry indefinitely for non-fail-fast
+			// RPCs.
+			if !c.failFast {
 				continue
 			}
 			return nil, toRPCErr(err)
 		}
 		break
 	}
-	// Set callInfo.peer object from stream's context.
-	if peer, ok := peer.FromContext(s.Context()); ok {
-		c.peer = peer
-	}
+
 	cs := &clientStream{
 		opts:   opts,
 		c:      c,
 		desc:   desc,
-		codec:  cc.dopts.codec,
-		cp:     cc.dopts.cp,
-		dc:     cc.dopts.dc,
+		codec:  c.codec,
+		cp:     cp,
+		comp:   comp,
 		cancel: cancel,
-
-		done: done,
-		t:    t,
-		s:    s,
-		p:    &parser{r: s},
-
-		tracing: EnableTracing,
-		trInfo:  trInfo,
-
-		statsCtx:     ctx,
-		statsHandler: cc.dopts.copts.StatsHandler,
+		attempt: &csAttempt{
+			t:            t,
+			s:            s,
+			p:            &parser{r: s},
+			done:         done,
+			dc:           cc.dopts.dc,
+			ctx:          ctx,
+			trInfo:       trInfo,
+			statsHandler: sh,
+			beginTime:    beginTime,
+		},
+	}
+	cs.c.stream = cs
+	cs.attempt.cs = cs
+	if desc != unaryStreamDesc {
+		// Listen on cc and stream contexts to cleanup when the user closes the
+		// ClientConn or cancels the stream context.  In all other cases, an error
+		// should already be injected into the recv buffer by the transport, which
+		// the client will eventually receive, and then we will cancel the stream's
+		// context in clientStream.finish.
+		go func() {
+			select {
+			case <-cc.ctx.Done():
+				cs.finish(ErrClientConnClosing)
+			case <-ctx.Done():
+				cs.finish(toRPCErr(ctx.Err()))
+			}
+		}()
 	}
-	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
-	// when there is no pending I/O operations on this stream.
-	go func() {
-		select {
-		case <-t.Error():
-			// Incur transport error, simply exit.
-		case <-cc.ctx.Done():
-			cs.finish(ErrClientConnClosing)
-			cs.closeTransportStream(ErrClientConnClosing)
-		case <-s.Done():
-			// TODO: The trace of the RPC is terminated here when there is no pending
-			// I/O, which is probably not the optimal solution.
-			cs.finish(s.Status().Err())
-			cs.closeTransportStream(nil)
-		case <-s.GoAway():
-			cs.finish(errConnDrain)
-			cs.closeTransportStream(errConnDrain)
-		case <-s.Context().Done():
-			err := s.Context().Err()
-			cs.finish(err)
-			cs.closeTransportStream(transport.ContextErr(err))
-		}
-	}()
 	return cs, nil
 }
 
 // clientStream implements a client side Stream.
 type clientStream struct {
-	opts   []CallOption
-	c      *callInfo
-	t      transport.ClientTransport
-	s      *transport.Stream
-	p      *parser
-	desc   *StreamDesc
-	codec  Codec
-	cp     Compressor
-	dc     Decompressor
-	cancel context.CancelFunc
+	opts []CallOption
+	c    *callInfo
+	desc *StreamDesc
+
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
 
-	tracing bool // set to EnableTracing when the clientStream is created.
+	cancel context.CancelFunc // cancels all attempts
 
-	mu       sync.Mutex
-	done     func(balancer.DoneInfo)
-	closed   bool
-	finished bool
-	// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
-	// and is set to nil when the clientStream's finish method is called.
+	sentLast bool // sent an end stream
+
+	mu       sync.Mutex // guards finished
+	finished bool       // TODO: replace with atomic cmpxchg or sync.Once?
+
+	attempt *csAttempt // the active client stream attempt
+	// TODO(hedging): hedging will have multiple attempts simultaneously.
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+	cs   *clientStream
+	t    transport.ClientTransport
+	s    *transport.Stream
+	p    *parser
+	done func(balancer.DoneInfo)
+
+	dc        Decompressor
+	decomp    encoding.Compressor
+	decompSet bool
+
+	ctx context.Context // the application's context, wrapped by stats/tracing
+
+	mu sync.Mutex // guards trInfo.tr
+	// trInfo.tr is set when created (if EnableTracing is true),
+	// and cleared when the finish method is called.
 	trInfo traceInfo
 
-	// statsCtx keeps the user context for stats handling.
-	// All stats collection should use the statsCtx (instead of the stream context)
-	// so that all the generated stats for a particular RPC can be associated in the processing phase.
-	statsCtx     context.Context
 	statsHandler stats.Handler
+	beginTime    time.Time
 }
 
 func (cs *clientStream) Context() context.Context {
-	return cs.s.Context()
+	// TODO(retry): commit the current attempt (the context has peer-aware data).
+	return cs.attempt.context()
 }
 
 func (cs *clientStream) Header() (metadata.MD, error) {
-	m, err := cs.s.Header()
+	m, err := cs.attempt.header()
 	if err != nil {
-		if _, ok := err.(transport.ConnectionError); !ok {
-			cs.closeTransportStream(err)
-		}
+		// TODO(retry): maybe retry on error or commit attempt on success.
+		err = toRPCErr(err)
+		cs.finish(err)
 	}
 	return m, err
 }
 
 func (cs *clientStream) Trailer() metadata.MD {
-	return cs.s.Trailer()
+	// TODO(retry): on error, maybe retry (trailers-only).
+	return cs.attempt.trailer()
 }
 
 func (cs *clientStream) SendMsg(m interface{}) (err error) {
-	if cs.tracing {
-		cs.mu.Lock()
-		if cs.trInfo.tr != nil {
-			cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
-		}
+	// TODO(retry): buffer message for replaying if not committed.
+	return cs.attempt.sendMsg(m)
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+	// TODO(retry): maybe retry on error or commit attempt on success.
+	return cs.attempt.recvMsg(m)
+}
+
+func (cs *clientStream) CloseSend() error {
+	cs.attempt.closeSend()
+	return nil
+}
+
+func (cs *clientStream) finish(err error) {
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	cs.mu.Lock()
+	if cs.finished {
 		cs.mu.Unlock()
+		return
 	}
+	cs.finished = true
+	cs.mu.Unlock()
+	// TODO(retry): commit current attempt if necessary.
+	cs.attempt.finish(err)
+	for _, o := range cs.opts {
+		o.after(cs.c)
+	}
+	cs.cancel()
+}
+
+func (a *csAttempt) context() context.Context {
+	return a.s.Context()
+}
+
+func (a *csAttempt) header() (metadata.MD, error) {
+	return a.s.Header()
+}
+
+func (a *csAttempt) trailer() metadata.MD {
+	return a.s.Trailer()
+}
+
+func (a *csAttempt) sendMsg(m interface{}) (err error) {
 	// TODO Investigate how to signal the stats handling party.
 	// generate error stats if err != nil && err != io.EOF?
+	cs := a.cs
 	defer func() {
-		if err != nil {
-			cs.finish(err)
+		// For non-client-streaming RPCs, we return nil instead of EOF on success
+		// because the generated code requires it.  finish is not called; RecvMsg()
+		// will call it with the stream's status independently.
+		if err == io.EOF && !cs.desc.ClientStreams {
+			err = nil
 		}
-		if err == nil {
-			return
-		}
-		if err == io.EOF {
-			// Specialize the process for server streaming. SendMsg is only called
-			// once when creating the stream object. io.EOF needs to be skipped when
-			// the rpc is early finished (before the stream object is created.).
-			// TODO: It is probably better to move this into the generated code.
-			if !cs.desc.ClientStreams && cs.desc.ServerStreams {
-				err = nil
-			}
-			return
-		}
-		if _, ok := err.(transport.ConnectionError); !ok {
-			cs.closeTransportStream(err)
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error below; the real error will be
+			// returned from RecvMsg eventually in that case, or be retried.)
+			cs.finish(err)
 		}
-		err = toRPCErr(err)
 	}()
+	// TODO: Check cs.sentLast and error if we already ended the stream.
+	if EnableTracing {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
 	var outPayload *stats.OutPayload
-	if cs.statsHandler != nil {
+	if a.statsHandler != nil {
 		outPayload = &stats.OutPayload{
 			Client: true,
 		}
 	}
-	hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
+	hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
 	if err != nil {
 		return err
 	}
-	if cs.c.maxSendMessageSize == nil {
-		return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
-	}
 	if len(data) > *cs.c.maxSendMessageSize {
-		return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
 	}
-	err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
-	if err == nil && outPayload != nil {
-		outPayload.SentTime = time.Now()
-		cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
+	if !cs.desc.ClientStreams {
+		cs.sentLast = true
 	}
-	return err
+	err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams})
+	if err == nil {
+		if outPayload != nil {
+			outPayload.SentTime = time.Now()
+			a.statsHandler.HandleRPC(a.ctx, outPayload)
+		}
+		return nil
+	}
+	return io.EOF
 }
 
-func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+func (a *csAttempt) recvMsg(m interface{}) (err error) {
+	cs := a.cs
+	defer func() {
+		if err != nil || !cs.desc.ServerStreams {
+			// err != nil or non-server-streaming indicates end of stream.
+			cs.finish(err)
+		}
+	}()
 	var inPayload *stats.InPayload
-	if cs.statsHandler != nil {
+	if a.statsHandler != nil {
 		inPayload = &stats.InPayload{
 			Client: true,
 		}
 	}
-	if cs.c.maxReceiveMessageSize == nil {
-		return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
-	}
-	err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
-	defer func() {
-		// err != nil indicates the termination of the stream.
-		if err != nil {
-			cs.finish(err)
-		}
-	}()
-	if err == nil {
-		if cs.tracing {
-			cs.mu.Lock()
-			if cs.trInfo.tr != nil {
-				cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+	if !a.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if a.dc == nil || a.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				a.dc = nil
+				a.decomp = encoding.GetCompressor(ct)
 			}
-			cs.mu.Unlock()
-		}
-		if inPayload != nil {
-			cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
-		}
-		if !cs.desc.ClientStreams || cs.desc.ServerStreams {
-			return
-		}
-		// Special handling for client streaming rpc.
-		// This recv expects EOF or errors, so we don't collect inPayload.
-		if cs.c.maxReceiveMessageSize == nil {
-			return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
-		}
-		err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
-		cs.closeTransportStream(err)
-		if err == nil {
-			return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+		} else {
+			// No compression is used; disable our decompressor.
+			a.dc = nil
 		}
+		// Only initialize this state once per stream.
+		a.decompSet = true
+	}
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
+	if err != nil {
 		if err == io.EOF {
-			if se := cs.s.Status().Err(); se != nil {
-				return se
+			if statusErr := a.s.Status().Err(); statusErr != nil {
+				return statusErr
 			}
-			cs.finish(err)
-			return nil
+			return io.EOF // indicates successful end of stream.
 		}
 		return toRPCErr(err)
 	}
-	if _, ok := err.(transport.ConnectionError); !ok {
-		cs.closeTransportStream(err)
-	}
-	if err == io.EOF {
-		if statusErr := cs.s.Status().Err(); statusErr != nil {
-			return statusErr
+	if EnableTracing {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
 		}
-		// Returns io.EOF to indicate the end of the stream.
-		return
+		a.mu.Unlock()
 	}
-	return toRPCErr(err)
-}
-
-func (cs *clientStream) CloseSend() (err error) {
-	err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
-	defer func() {
-		if err != nil {
-			cs.finish(err)
-		}
-	}()
-	if err == nil || err == io.EOF {
+	if inPayload != nil {
+		a.statsHandler.HandleRPC(a.ctx, inPayload)
+	}
+	if cs.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
 	}
-	if _, ok := err.(transport.ConnectionError); !ok {
-		cs.closeTransportStream(err)
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
 	}
-	err = toRPCErr(err)
-	return
+	if err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
 }
 
-func (cs *clientStream) closeTransportStream(err error) {
-	cs.mu.Lock()
-	if cs.closed {
-		cs.mu.Unlock()
+func (a *csAttempt) closeSend() {
+	cs := a.cs
+	if cs.sentLast {
 		return
 	}
-	cs.closed = true
-	cs.mu.Unlock()
-	cs.t.CloseStream(cs.s, err)
+	cs.sentLast = true
+	cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
+	// We ignore errors from Write.  Any error it would return would also be
+	// returned by a subsequent RecvMsg call, and the user is supposed to always
+	// finish the stream by calling RecvMsg until it returns err != nil.
 }
 
-func (cs *clientStream) finish(err error) {
-	cs.mu.Lock()
-	defer cs.mu.Unlock()
-	if cs.finished {
-		return
-	}
-	cs.finished = true
-	defer func() {
-		if cs.cancel != nil {
-			cs.cancel()
-		}
-	}()
-	for _, o := range cs.opts {
-		o.after(cs.c)
-	}
-	if cs.done != nil {
-		updateRPCInfoInContext(cs.s.Context(), rpcInfo{
-			bytesSent:     cs.s.BytesSent(),
-			bytesReceived: cs.s.BytesReceived(),
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	a.t.CloseStream(a.s, err)
+
+	if a.done != nil {
+		a.done(balancer.DoneInfo{
+			Err:           err,
+			BytesSent:     true,
+			BytesReceived: a.s.BytesReceived(),
 		})
-		cs.done(balancer.DoneInfo{Err: err})
-		cs.done = nil
 	}
-	if cs.statsHandler != nil {
+	if a.statsHandler != nil {
 		end := &stats.End{
-			Client:  true,
-			EndTime: time.Now(),
-		}
-		if err != io.EOF {
-			// end.Error is nil if the RPC finished successfully.
-			end.Error = toRPCErr(err)
+			Client:    true,
+			BeginTime: a.beginTime,
+			EndTime:   time.Now(),
+			Error:     err,
 		}
-		cs.statsHandler.HandleRPC(cs.statsCtx, end)
+		a.statsHandler.HandleRPC(a.ctx, end)
 	}
-	if !cs.tracing {
-		return
-	}
-	if cs.trInfo.tr != nil {
-		if err == nil || err == io.EOF {
-			cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+	if a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
 		} else {
-			cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
-			cs.trInfo.tr.SetError()
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
 		}
-		cs.trInfo.tr.Finish()
-		cs.trInfo.tr = nil
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
 	}
+	a.mu.Unlock()
 }
 
 // ServerStream defines the interface a server stream has to satisfy.
@@ -540,12 +608,17 @@ type ServerStream interface {
 
 // serverStream implements a server side Stream.
 type serverStream struct {
-	t                     transport.ServerTransport
-	s                     *transport.Stream
-	p                     *parser
-	codec                 Codec
-	cp                    Compressor
-	dc                    Decompressor
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
 	trInfo                *traceInfo
@@ -556,7 +629,7 @@ type serverStream struct {
 }
 
 func (ss *serverStream) Context() context.Context {
-	return ss.s.Context()
+	return ss.ctx
 }
 
 func (ss *serverStream) SetHeader(md metadata.MD) error {
@@ -601,12 +674,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 	if ss.statsHandler != nil {
 		outPayload = &stats.OutPayload{}
 	}
-	hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
+	hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
 	if err != nil {
 		return err
 	}
 	if len(data) > ss.maxSendMessageSize {
-		return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
 	}
 	if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
 		return toRPCErr(err)
@@ -641,12 +714,12 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 	if ss.statsHandler != nil {
 		inPayload = &stats.InPayload{}
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
 		if err == io.EOF {
 			return err
 		}
 		if err == io.ErrUnexpectedEOF {
-			err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
 		}
 		return toRPCErr(err)
 	}
@@ -655,3 +728,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 	}
 	return nil
 }
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+	s := serverTransportStreamFromContext(stream.Context())
+	if s == nil {
+		return "", false
+	}
+	return s.Method(), true
+}
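
MethodFromServerStream is most useful inside an UnknownServiceHandler, where
no StreamServerInfo is available. A proxy-style sketch:

func unknownHandler(srv interface{}, stream grpc.ServerStream) error {
	method, ok := grpc.MethodFromServerStream(stream)
	if !ok {
		return status.Error(codes.Internal, "no method in stream context")
	}
	// A real proxy would forward the raw frames to a backend here.
	return status.Errorf(codes.Unimplemented, "no backend registered for %s", method)
}

Wired up with grpc.NewServer(grpc.UnknownServiceHandler(unknownHandler)).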

+ 3 - 6
vendor/google.golang.org/grpc/transport/bdp_estimator.go

@@ -41,12 +41,9 @@ const (
 	gamma = 2
 )
 
-var (
-	// Adding arbitrary data to ping so that its ack can be
-	// identified.
-	// Easter-egg: what does the ping message say?
-	bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
-)
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
 
 type bdpEstimator struct {
 	// sentAt is the time when the ping was sent.

+ 68 - 45
vendor/google.golang.org/grpc/transport/control.go

@@ -20,9 +20,9 @@ package transport
 
 import (
 	"fmt"
+	"io"
 	"math"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"golang.org/x/net/http2"
@@ -49,7 +49,7 @@ const (
 	// defaultLocalSendQuota is the default value for the number of data
 	// bytes that each stream can schedule before some of it is
 	// flushed out.
-	defaultLocalSendQuota = 64 * 1024
+	defaultLocalSendQuota = 128 * 1024
 )
 
 // The following defines various control items which could flow through
@@ -89,12 +89,16 @@ type windowUpdate struct {
 func (*windowUpdate) item() {}
 
 type settings struct {
-	ack bool
-	ss  []http2.Setting
+	ss []http2.Setting
 }
 
 func (*settings) item() {}
 
+type settingsAck struct {
+}
+
+func (*settingsAck) item() {}
+
 type resetStream struct {
 	streamID uint32
 	code     http2.ErrCode
@@ -112,6 +116,7 @@ type goAway struct {
 func (*goAway) item() {}
 
 type flushIO struct {
+	closeTr bool
 }
 
 func (*flushIO) item() {}
@@ -126,9 +131,8 @@ func (*ping) item() {}
 // quotaPool is a pool which accumulates the quota and sends it to acquire()
 // when it is available.
 type quotaPool struct {
-	c chan int
-
 	mu      sync.Mutex
+	c       chan struct{}
 	version uint32
 	quota   int
 }
@@ -136,12 +140,8 @@ type quotaPool struct {
 // newQuotaPool creates a quotaPool which has quota q available to consume.
 func newQuotaPool(q int) *quotaPool {
 	qb := &quotaPool{
-		c: make(chan int, 1),
-	}
-	if q > 0 {
-		qb.c <- q
-	} else {
-		qb.quota = q
+		quota: q,
+		c:     make(chan struct{}, 1),
 	}
 	return qb
 }
@@ -155,60 +155,83 @@ func (qb *quotaPool) add(v int) {
 }
 
 func (qb *quotaPool) lockedAdd(v int) {
-	select {
-	case n := <-qb.c:
-		qb.quota += n
-	default:
-	}
-	qb.quota += v
+	var wakeUp bool
 	if qb.quota <= 0 {
-		return
+		wakeUp = true // Wake up potential waiters.
 	}
-	// After the pool has been created, this is the only place that sends on
-	// the channel. Since mu is held at this point and any quota that was sent
-	// on the channel has been retrieved, we know that this code will always
-	// place any positive quota value on the channel.
-	select {
-	case qb.c <- qb.quota:
-		qb.quota = 0
-	default:
+	qb.quota += v
+	if wakeUp && qb.quota > 0 {
+		select {
+		case qb.c <- struct{}{}:
+		default:
+		}
 	}
 }
 
 func (qb *quotaPool) addAndUpdate(v int) {
 	qb.mu.Lock()
-	defer qb.mu.Unlock()
 	qb.lockedAdd(v)
-	// Update the version only after having added to the quota
-	// so that if acquireWithVesrion sees the new vesrion it is
-	// guaranteed to have seen the updated quota.
-	// Also, still keep this inside of the lock, so that when
-	// compareAndExecute is processing, this function doesn't
-	// get executed partially (quota gets updated but the version
-	// doesn't).
-	atomic.AddUint32(&(qb.version), 1)
+	qb.version++
+	qb.mu.Unlock()
 }
 
-func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
-	return qb.c, atomic.LoadUint32(&(qb.version))
+func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
+	qb.mu.Lock()
+	if qb.quota > 0 {
+		if v > qb.quota {
+			v = qb.quota
+		}
+		qb.quota -= v
+		ver := qb.version
+		qb.mu.Unlock()
+		return v, ver, nil
+	}
+	qb.mu.Unlock()
+	for {
+		select {
+		case <-wc.ctx.Done():
+			return 0, 0, ContextErr(wc.ctx.Err())
+		case <-wc.tctx.Done():
+			return 0, 0, ErrConnClosing
+		case <-wc.done:
+			return 0, 0, io.EOF
+		case <-wc.goAway:
+			return 0, 0, errStreamDrain
+		case <-qb.c:
+			qb.mu.Lock()
+			if qb.quota > 0 {
+				if v > qb.quota {
+					v = qb.quota
+				}
+				qb.quota -= v
+				ver := qb.version
+				if qb.quota > 0 {
+					select {
+					case qb.c <- struct{}{}:
+					default:
+					}
+				}
+				qb.mu.Unlock()
+				return v, ver, nil
+
+			}
+			qb.mu.Unlock()
+		}
+	}
 }
 
 func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
 	qb.mu.Lock()
-	defer qb.mu.Unlock()
-	if version == atomic.LoadUint32(&(qb.version)) {
+	if version == qb.version {
 		success()
+		qb.mu.Unlock()
 		return true
 	}
 	failure()
+	qb.mu.Unlock()
 	return false
 }
 
-// acquire returns the channel on which available quota amounts are sent.
-func (qb *quotaPool) acquire() <-chan int {
-	return qb.c
-}
-
 // inFlow deals with inbound flow control
 type inFlow struct {
 	mu sync.Mutex
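
The reworked quotaPool keeps the quota strictly under the mutex and demotes the channel to a capacity-1 wake-up signal, instead of shuttling the quota value itself through the channel. A stripped-down sketch of the pattern, leaving out the version counter and the waiters plumbing:

package main

import (
	"fmt"
	"sync"
)

// pool mirrors the new quotaPool shape: quota lives under mu, and c is
// purely a wake-up signal for blocked getters.
type pool struct {
	mu    sync.Mutex
	c     chan struct{}
	quota int
}

func newPool(q int) *pool { return &pool{quota: q, c: make(chan struct{}, 1)} }

func (p *pool) add(v int) {
	p.mu.Lock()
	wakeUp := p.quota <= 0 // only then can anyone be parked on p.c
	p.quota += v
	if wakeUp && p.quota > 0 {
		select {
		case p.c <- struct{}{}: // wake one waiter
		default: // a signal is already pending; one is enough
		}
	}
	p.mu.Unlock()
}

func (p *pool) get(v int) int {
	for {
		p.mu.Lock()
		if p.quota > 0 {
			if v > p.quota {
				v = p.quota
			}
			p.quota -= v
			if p.quota > 0 {
				select { // re-arm the signal for the next waiter
				case p.c <- struct{}{}:
				default:
				}
			}
			p.mu.Unlock()
			return v
		}
		p.mu.Unlock()
		<-p.c // park until add() signals available quota
	}
}

func main() {
	p := newPool(10)
	fmt.Println(p.get(4)) // 4
	fmt.Println(p.get(9)) // 6 -- capped at what's left
	go p.add(5)
	fmt.Println(p.get(3)) // 3, after blocking until the add lands
}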

+ 51 - 0
vendor/google.golang.org/grpc/transport/go16.go

@@ -0,0 +1,51 @@
+// +build go1.6,!go1.7
+
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"net"
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+
+	"golang.org/x/net/context"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
+}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+	switch err {
+	case context.DeadlineExceeded:
+		return streamErrorf(codes.DeadlineExceeded, "%v", err)
+	case context.Canceled:
+		return streamErrorf(codes.Canceled, "%v", err)
+	}
+	return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
+
+// contextFromRequest returns a background context.
+func contextFromRequest(r *http.Request) context.Context {
+	return context.Background()
+}

+ 52 - 0
vendor/google.golang.org/grpc/transport/go17.go

@@ -0,0 +1,52 @@
+// +build go1.7
+
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"net"
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+
+	netctx "golang.org/x/net/context"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return (&net.Dialer{}).DialContext(ctx, network, address)
+}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+	switch err {
+	case context.DeadlineExceeded, netctx.DeadlineExceeded:
+		return streamErrorf(codes.DeadlineExceeded, "%v", err)
+	case context.Canceled, netctx.Canceled:
+		return streamErrorf(codes.Canceled, "%v", err)
+	}
+	return streamErrorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
+
+// contextFromRequest returns a context from the HTTP Request.
+func contextFromRequest(r *http.Request) context.Context {
+	return r.Context()
+}
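
go16.go and go17.go are build-tag shims: on Go 1.6 cancellation rides on net.Dialer.Cancel, on Go 1.7+ on net.Dialer.DialContext and http.Request.Context. Callers are oblivious to the split; a sketch of in-package usage (the address is a placeholder):

package transport // illustrative only: dialContext is unexported

import (
	"time"

	"golang.org/x/net/context"
)

func dialWithDeadline() error {
	// The deadline is honored by either shim.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn, err := dialContext(ctx, "tcp", "localhost:50051")
	if err != nil {
		return err
	}
	return conn.Close()
}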

+ 56 - 22
vendor/google.golang.org/grpc/transport/handler_server.go

@@ -40,20 +40,24 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 )
 
 // NewServerHandlerTransport returns a ServerTransport handling gRPC
 // from inside an http.Handler. It requires that the http Server
 // supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
 	if r.ProtoMajor != 2 {
 		return nil, errors.New("gRPC requires HTTP/2")
 	}
 	if r.Method != "POST" {
 		return nil, errors.New("invalid gRPC request method")
 	}
-	if !validContentType(r.Header.Get("Content-Type")) {
+	contentType := r.Header.Get("Content-Type")
+	// TODO: do we assume contentType is lowercase? we did before
+	contentSubtype, validContentType := contentSubtype(contentType)
+	if !validContentType {
 		return nil, errors.New("invalid gRPC request content-type")
 	}
 	if _, ok := w.(http.Flusher); !ok {
@@ -64,10 +68,13 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
 	}
 
 	st := &serverHandlerTransport{
-		rw:       w,
-		req:      r,
-		closedCh: make(chan struct{}),
-		writes:   make(chan func()),
+		rw:             w,
+		req:            r,
+		closedCh:       make(chan struct{}),
+		writes:         make(chan func()),
+		contentType:    contentType,
+		contentSubtype: contentSubtype,
+		stats:          stats,
 	}
 
 	if v := r.Header.Get("grpc-timeout"); v != "" {
@@ -79,7 +86,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
 		st.timeout = to
 	}
 
-	var metakv []string
+	metakv := []string{"content-type", contentType}
 	if r.Host != "" {
 		metakv = append(metakv, ":authority", r.Host)
 	}
@@ -91,7 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
 		for _, v := range vv {
 			v, err := decodeMetadataHeader(k, v)
 			if err != nil {
-				return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
+				return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err)
 			}
 			metakv = append(metakv, k, v)
 		}
@@ -126,6 +133,14 @@ type serverHandlerTransport struct {
 	// block concurrent WriteStatus calls
 	// e.g. grpc/(*serverStream).SendMsg/RecvMsg
 	writeStatusMu sync.Mutex
+
+	// we just mirror the request content-type
+	contentType string
+	// we store both contentType and contentSubtype so we don't keep recreating them
+	// TODO make sure this is consistent across handler_server and http2_server
+	contentSubtype string
+
+	stats stats.Handler
 }
 
 func (ht *serverHandlerTransport) Close() error {
@@ -219,6 +234,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 	})
 
 	if err == nil { // transport has not been closed
+		if ht.stats != nil {
+			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+		}
 		ht.Close()
 		close(ht.writes)
 	}
@@ -235,7 +253,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 
 	h := ht.rw.Header()
 	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
-	h.Set("Content-Type", "application/grpc")
+	h.Set("Content-Type", ht.contentType)
 
 	// Predeclare trailers we'll set later in WriteStatus (after the body).
 	// This is a SHOULD in the HTTP RFC, and the way you add (known)
@@ -263,7 +281,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
 }
 
 func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
-	return ht.do(func() {
+	err := ht.do(func() {
 		ht.writeCommonHeaders(s)
 		h := ht.rw.Header()
 		for k, vv := range md {
@@ -279,17 +297,24 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 		ht.rw.WriteHeader(200)
 		ht.rw.(http.Flusher).Flush()
 	})
+
+	if err == nil {
+		if ht.stats != nil {
+			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+		}
+	}
+	return err
 }
 
 func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 
-	var ctx context.Context
+	ctx := contextFromRequest(ht.req)
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
-		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
+		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
 	} else {
-		ctx, cancel = context.WithCancel(context.Background())
+		ctx, cancel = context.WithCancel(ctx)
 	}
 
 	// requestOver is closed when either the request's context is done
@@ -313,13 +338,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
 	req := ht.req
 
 	s := &Stream{
-		id:           0, // irrelevant
-		requestRead:  func(int) {},
-		cancel:       cancel,
-		buf:          newRecvBuffer(),
-		st:           ht,
-		method:       req.URL.Path,
-		recvCompress: req.Header.Get("grpc-encoding"),
+		id:             0, // irrelevant
+		requestRead:    func(int) {},
+		cancel:         cancel,
+		buf:            newRecvBuffer(),
+		st:             ht,
+		method:         req.URL.Path,
+		recvCompress:   req.Header.Get("grpc-encoding"),
+		contentSubtype: ht.contentSubtype,
 	}
 	pr := &peer.Peer{
 		Addr: ht.RemoteAddr(),
@@ -328,8 +354,16 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
 		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
 	}
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
-	ctx = peer.NewContext(ctx, pr)
-	s.ctx = newContextWithStream(ctx, s)
+	s.ctx = peer.NewContext(ctx, pr)
+	if ht.stats != nil {
+		s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+		inHeader := &stats.InHeader{
+			FullMethod:  s.method,
+			RemoteAddr:  ht.RemoteAddr(),
+			Compression: s.recvCompress,
+		}
+		ht.stats.HandleRPC(s.ctx, inHeader)
+	}
 	s.trReader = &transportReader{
 		reader:        &recvBufferReader{ctx: s.ctx, recv: s.buf},
 		windowHandler: func(int) {},
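
This transport backs grpc.Server's http.Handler mode. A minimal sketch of wiring it up (certificate paths are placeholders); the stock net/http server must negotiate HTTP/2, e.g. via TLS ALPN:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// Register services on gs here.

	// grpc.Server implements http.Handler; gRPC requests arriving at the
	// net/http server are served through the handler-based transport.
	log.Fatal(http.ListenAndServeTLS(":8443", "server.crt", "server.key", gs))
}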

+ 189 - 125
vendor/google.golang.org/grpc/transport/http2_client.go

@@ -20,6 +20,7 @@ package transport
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"math"
 	"net"
@@ -44,7 +45,6 @@ import (
 type http2Client struct {
 	ctx        context.Context
 	cancel     context.CancelFunc
-	target     string // server name/addr
 	userAgent  string
 	md         interface{}
 	conn       net.Conn // underlying communication channel
@@ -69,6 +69,9 @@ type http2Client struct {
 	fc         *inFlow
 	// sendQuotaPool provides flow control to outbound message.
 	sendQuotaPool *quotaPool
+	// localSendQuota limits the amount of data that can be scheduled
+	// for writing before it is actually written out.
+	localSendQuota *quotaPool
 	// streamsQuota limits the max number of concurrent streams.
 	streamsQuota *quotaPool
 
@@ -91,6 +94,11 @@ type http2Client struct {
 	bdpEst          *bdpEstimator
 	outQuotaVersion uint32
 
+	// onSuccess is a callback that client transport calls upon
+	// receiving the server preface to signal that a successful HTTP2
+	// connection was established.
+	onSuccess func()
+
 	mu            sync.Mutex     // guard the following variables
 	state         transportState // the state of underlying connection
 	activeStreams map[uint32]*Stream
@@ -109,22 +117,10 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
 	if fn != nil {
 		return fn(ctx, addr)
 	}
-	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+	return dialContext(ctx, "tcp", addr)
 }
 
 func isTemporary(err error) bool {
-	switch err {
-	case io.EOF:
-		// Connection closures may be resolved upon retry, and are thus
-		// treated as temporary.
-		return true
-	case context.DeadlineExceeded:
-		// In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
-		// special case is not needed. Until then, we need to keep this
-		// clause.
-		return true
-	}
-
 	switch err := err.(type) {
 	case interface {
 		Temporary() bool
@@ -137,18 +133,16 @@ func isTemporary(err error) bool {
 		// temporary.
 		return err.Timeout()
 	}
-	return false
+	return true
 }
 
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
-	connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
 	defer func() {
-		connectCancel()
 		if err != nil {
 			cancel()
 		}
@@ -173,12 +167,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
 	)
 	if creds := opts.TransportCredentials; creds != nil {
 		scheme = "https"
-		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn)
+		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
 		if err != nil {
-			// Credentials handshake errors are typically considered permanent
-			// to avoid retrying on e.g. bad certificates.
-			temp := isTemporary(err)
-			return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
+			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
 		}
 		isSecure = true
 	}
@@ -208,7 +199,6 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
 	t := &http2Client{
 		ctx:        ctx,
 		cancel:     cancel,
-		target:     addr.Addr,
 		userAgent:  opts.UserAgent,
 		md:         addr.Metadata,
 		conn:       conn,
@@ -225,6 +215,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
 		controlBuf:        newControlBuffer(),
 		fc:                &inFlow{limit: uint32(icwz)},
 		sendQuotaPool:     newQuotaPool(defaultWindowSize),
+		localSendQuota:    newQuotaPool(defaultLocalSendQuota),
 		scheme:            scheme,
 		state:             reachable,
 		activeStreams:     make(map[uint32]*Stream),
@@ -236,6 +227,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
 		kp:                kp,
 		statsHandler:      opts.StatsHandler,
 		initialWindowSize: initialWindowSize,
+		onSuccess:         onSuccess,
 	}
 	if opts.InitialWindowSize >= defaultWindowSize {
 		t.initialWindowSize = opts.InitialWindowSize
@@ -296,7 +288,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
 	t.framer.writer.Flush()
 	go func() {
 		loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
-		t.Close()
+		t.conn.Close()
 	}()
 	if t.kp.Time != infinity {
 		go t.keepalive()
@@ -315,8 +307,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 		buf:            newRecvBuffer(),
 		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
 		sendQuotaPool:  newQuotaPool(int(t.streamSendQuota)),
-		localSendQuota: newQuotaPool(defaultLocalSendQuota),
 		headerChan:     make(chan struct{}),
+		contentSubtype: callHdr.ContentSubtype,
 	}
 	t.nextID += 2
 	s.requestRead = func(n int) {
@@ -336,7 +328,12 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 			t.updateWindow(s, uint32(n))
 		},
 	}
-
+	s.waiters = waiters{
+		ctx:    s.ctx,
+		tctx:   t.ctx,
+		done:   s.done,
+		goAway: s.goAway,
+	}
 	return s
 }
 
@@ -369,7 +366,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	for _, c := range t.creds {
 		data, err := c.GetRequestMetadata(ctx, audience)
 		if err != nil {
-			return nil, streamErrorf(codes.Internal, "transport: %v", err)
+			if _, ok := status.FromError(err); ok {
+				return nil, err
+			}
+
+			return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err)
 		}
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2.
@@ -402,22 +403,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	}
 	if t.state == draining {
 		t.mu.Unlock()
-		return nil, ErrStreamDrain
+		return nil, errStreamDrain
 	}
 	if t.state != reachable {
 		t.mu.Unlock()
 		return nil, ErrConnClosing
 	}
 	t.mu.Unlock()
-	sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire())
-	if err != nil {
+	// Get a quota of 1 from streamsQuota.
+	if _, _, err := t.streamsQuota.get(1, waiters{ctx: ctx, tctx: t.ctx}); err != nil {
 		return nil, err
 	}
-	// Returns the quota balance back.
-	if sq > 1 {
-		t.streamsQuota.add(sq - 1)
-	}
-	// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
 	// first and create a slice of that exact size.
 	// Make the slice of certain predictable size to reduce allocations made by append.
 	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
@@ -427,7 +424,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
 
@@ -452,7 +449,22 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	if b := stats.OutgoingTrace(ctx); b != nil {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
 	}
-	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+
+	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+		var k string
+		for _, vv := range added {
+			for i, v := range vv {
+				if i%2 == 0 {
+					k = v
+					continue
+				}
+				// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
+				if isReservedHeader(k) {
+					continue
+				}
+				headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+			}
+		}
 		for k, vv := range md {
 			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
 			if isReservedHeader(k) {
@@ -477,7 +489,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	if t.state == draining {
 		t.mu.Unlock()
 		t.streamsQuota.add(1)
-		return nil, ErrStreamDrain
+		return nil, errStreamDrain
 	}
 	if t.state != reachable {
 		t.mu.Unlock()
@@ -505,10 +517,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	})
 	t.mu.Unlock()
 
-	s.mu.Lock()
-	s.bytesSent = true
-	s.mu.Unlock()
-
 	if t.statsHandler != nil {
 		outHeader := &stats.OutHeader{
 			Client:      true,
@@ -573,7 +581,7 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
 	}
 	s.state = streamDone
 	s.mu.Unlock()
-	if _, ok := err.(StreamError); ok {
+	if err != nil && !rstStream {
 		rstStream = true
 		rstError = http2.ErrCodeCancel
 	}
@@ -582,16 +590,16 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
 // Close kicks off the shutdown process of the transport. This should be called
 // only once on a transport. Once it is called, the transport should not be
 // accessed any more.
-func (t *http2Client) Close() (err error) {
+func (t *http2Client) Close() error {
 	t.mu.Lock()
 	if t.state == closing {
 		t.mu.Unlock()
-		return
+		return nil
 	}
 	t.state = closing
 	t.mu.Unlock()
 	t.cancel()
-	err = t.conn.Close()
+	err := t.conn.Close()
 	t.mu.Lock()
 	streams := t.activeStreams
 	t.activeStreams = nil
@@ -642,6 +650,8 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	select {
 	case <-s.ctx.Done():
 		return ContextErr(s.ctx.Err())
+	case <-s.done:
+		return io.EOF
 	case <-t.ctx.Done():
 		return ErrConnClosing
 	default:
@@ -659,44 +669,46 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	}
 	hdr = append(hdr, data[:emptyLen]...)
 	data = data[emptyLen:]
+	var (
+		streamQuota    int
+		streamQuotaVer uint32
+		err            error
+	)
 	for idx, r := range [][]byte{hdr, data} {
 		for len(r) > 0 {
 			size := http2MaxFrameLen
-			// Wait until the stream has some quota to send the data.
-			quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
-			sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan)
-			if err != nil {
-				return err
+			if size > len(r) {
+				size = len(r)
+			}
+			if streamQuota == 0 { // Used up all the locally cached stream quota.
+				// Get all the stream quota there is.
+				streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
+				if err != nil {
+					return err
+				}
+			}
+			if size > streamQuota {
+				size = streamQuota
 			}
-			// Wait until the transport has some quota to send the data.
-			tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire())
+
+			// Get size worth quota from transport.
+			tq, _, err := t.sendQuotaPool.get(size, s.waiters)
 			if err != nil {
 				return err
 			}
-			if sq < size {
-				size = sq
-			}
 			if tq < size {
 				size = tq
 			}
-			if size > len(r) {
-				size = len(r)
-			}
-			p := r[:size]
-			ps := len(p)
-			if ps < tq {
-				// Overbooked transport quota. Return it back.
-				t.sendQuotaPool.add(tq - ps)
-			}
-			// Acquire local send quota to be able to write to the controlBuf.
-			ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire())
+			ltq, _, err := t.localSendQuota.get(size, s.waiters)
 			if err != nil {
-				if _, ok := err.(ConnectionError); !ok {
-					t.sendQuotaPool.add(ps)
-				}
+				// Add the acquired quota back to transport.
+				t.sendQuotaPool.add(tq)
 				return err
 			}
-			s.localSendQuota.add(ltq - ps) // It's ok if we make it negative.
+			// Even if ltq is smaller than size we don't adjust size, since
+			// ltq is only a soft limit.
+			streamQuota -= size
+			p := r[:size]
 			var endStream bool
 			// See if this is the last frame to be written.
 			if opts.Last {
@@ -711,21 +723,25 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 				}
 			}
 			success := func() {
-				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }})
-				if ps < sq {
-					s.sendQuotaPool.lockedAdd(sq - ps)
-				}
-				r = r[ps:]
+				ltq := ltq
+				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { t.localSendQuota.add(ltq) }})
+				r = r[size:]
 			}
-			failure := func() {
-				s.sendQuotaPool.lockedAdd(sq)
+			failure := func() { // The stream quota version must have changed.
+				// Our streamQuota cache is invalidated now, so give it back.
+				s.sendQuotaPool.lockedAdd(streamQuota + size)
 			}
-			if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
-				t.sendQuotaPool.add(ps)
-				s.localSendQuota.add(ps)
+			if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+				// Couldn't send this chunk out.
+				t.sendQuotaPool.add(size)
+				t.localSendQuota.add(ltq)
+				streamQuota = 0
 			}
 		}
 	}
+	if streamQuota > 0 { // Add the left-over quota back to the stream.
+		s.sendQuotaPool.add(streamQuota)
+	}
 	if !opts.Last {
 		return nil
 	}
@@ -791,7 +807,6 @@ func (t *http2Client) updateFlowControl(n uint32) {
 	t.mu.Unlock()
 	t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
 	t.controlBuf.put(&settings{
-		ack: false,
 		ss: []http2.Setting{
 			{
 				ID:  http2.SettingInitialWindowSize,
@@ -894,7 +909,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 		close(s.headerChan)
 		s.headerDone = true
 	}
-	statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
+
+	code := http2.ErrCode(f.ErrCode)
+	if code == http2.ErrCodeRefusedStream {
+		// The stream was unprocessed by the server.
+		s.unprocessed = true
+	}
+	statusCode, ok := http2ErrConvTab[code]
 	if !ok {
 		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
 		statusCode = codes.Unknown
@@ -904,17 +925,48 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 	s.write(recvMsg{err: io.EOF})
 }
 
-func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
 	if f.IsAck() {
 		return
 	}
-	var ss []http2.Setting
+	var rs []http2.Setting
+	var ps []http2.Setting
+	isMaxConcurrentStreamsMissing := true
 	f.ForeachSetting(func(s http2.Setting) error {
-		ss = append(ss, s)
+		if s.ID == http2.SettingMaxConcurrentStreams {
+			isMaxConcurrentStreamsMissing = false
+		}
+		if t.isRestrictive(s) {
+			rs = append(rs, s)
+		} else {
+			ps = append(ps, s)
+		}
 		return nil
 	})
-	// The settings will be applied once the ack is sent.
-	t.controlBuf.put(&settings{ack: true, ss: ss})
+	if isFirst && isMaxConcurrentStreamsMissing {
+		// This means server is imposing no limits on
+		// maximum number of concurrent streams initiated by client.
+		// So we must remove our self-imposed limit.
+		ps = append(ps, http2.Setting{
+			ID:  http2.SettingMaxConcurrentStreams,
+			Val: math.MaxUint32,
+		})
+	}
+	t.applySettings(rs)
+	t.controlBuf.put(&settingsAck{})
+	t.applySettings(ps)
+}
+
+func (t *http2Client) isRestrictive(s http2.Setting) bool {
+	switch s.ID {
+	case http2.SettingMaxConcurrentStreams:
+		return int(s.Val) < t.maxStreams
+	case http2.SettingInitialWindowSize:
+		// Note: we don't acquire a lock here to read streamSendQuota
+		// because the same goroutine updates it later.
+		return s.Val < t.streamSendQuota
+	}
+	return false
 }
 
 func (t *http2Client) handlePing(f *http2.PingFrame) {
@@ -945,12 +997,16 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 		t.Close()
 		return
 	}
-	// A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387).
-	// The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay
-	// with the ID of the last stream the server will process.
-	// Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we
-	// close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server
-	// was being sent don't get killed.
+	// A client can receive multiple GoAways from the server (see
+	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
+	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+	// sent after an RTT delay with the ID of the last stream the server will
+	// process.
+	//
+	// Therefore, when we get the first GoAway we don't necessarily close any
+	// streams. While in case of second GoAway we close all streams created after
+	// the GoAwayId. This way streams that were in-flight while the GoAway from
+	// server was being sent don't get killed.
 	select {
 	case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways).
 		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
@@ -972,6 +1028,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 	}
 	for streamID, stream := range t.activeStreams {
 		if streamID > id && streamID <= upperLimit {
+			// The stream was unprocessed by the server.
+			stream.mu.Lock()
+			stream.unprocessed = true
+			stream.finish(statusGoAway)
+			stream.mu.Unlock()
 			close(stream.goAway)
 		}
 	}
@@ -988,11 +1049,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 // It expects a lock on the transport's mutex to be held by
 // the caller.
 func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
-	t.goAwayReason = NoReason
+	t.goAwayReason = GoAwayNoReason
 	switch f.ErrCode {
 	case http2.ErrCodeEnhanceYourCalm:
 		if string(f.DebugData()) == "too_many_pings" {
-			t.goAwayReason = TooManyPings
+			t.goAwayReason = GoAwayTooManyPings
 		}
 	}
 }
@@ -1058,22 +1119,22 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 	}()
 
 	s.mu.Lock()
-	if !endStream {
-		s.recvCompress = state.encoding
-	}
 	if !s.headerDone {
-		if !endStream && len(state.mdata) > 0 {
-			s.header = state.mdata
+		if !endStream {
+			// Headers frame is not actually a trailers-only frame.
+			isHeader = true
+			s.recvCompress = state.encoding
+			if len(state.mdata) > 0 {
+				s.header = state.mdata
+			}
 		}
 		close(s.headerChan)
 		s.headerDone = true
-		isHeader = true
 	}
 	if !endStream || s.state == streamDone {
 		s.mu.Unlock()
 		return
 	}
-
 	if len(state.mdata) > 0 {
 		s.trailer = state.mdata
 	}
@@ -1111,7 +1172,8 @@ func (t *http2Client) reader() {
 		t.Close()
 		return
 	}
-	t.handleSettings(sf)
+	t.onSuccess()
+	t.handleSettings(sf, true)
 
 	// loop to keep reading incoming messages on this transport.
 	for {
@@ -1144,7 +1206,7 @@ func (t *http2Client) reader() {
 		case *http2.RSTStreamFrame:
 			t.handleRSTStream(frame)
 		case *http2.SettingsFrame:
-			t.handleSettings(frame)
+			t.handleSettings(frame, false)
 		case *http2.PingFrame:
 			t.handlePing(frame)
 		case *http2.GoAwayFrame:
@@ -1167,10 +1229,8 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
 			if s.Val > math.MaxInt32 {
 				s.Val = math.MaxInt32
 			}
-			t.mu.Lock()
 			ms := t.maxStreams
 			t.maxStreams = int(s.Val)
-			t.mu.Unlock()
 			t.streamsQuota.add(int(s.Val) - ms)
 		case http2.SettingInitialWindowSize:
 			t.mu.Lock()
@@ -1187,14 +1247,19 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
 // TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
 // is duplicated between the client and the server.
 // The transport layer needs to be refactored to take care of this.
-func (t *http2Client) itemHandler(i item) error {
-	var err error
+func (t *http2Client) itemHandler(i item) (err error) {
+	defer func() {
+		if err != nil {
+			errorf(" error in itemHandler: %v", err)
+		}
+	}()
 	switch i := i.(type) {
 	case *dataFrame:
-		err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
-		if err == nil {
-			i.f()
+		if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+			return err
 		}
+		i.f()
+		return nil
 	case *headerFrame:
 		t.hBuf.Reset()
 		for _, f := range i.hf {
@@ -1228,34 +1293,33 @@ func (t *http2Client) itemHandler(i item) error {
 				return err
 			}
 		}
+		return nil
 	case *windowUpdate:
-		err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+		return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
 	case *settings:
-		if i.ack {
-			t.applySettings(i.ss)
-			err = t.framer.fr.WriteSettingsAck()
-		} else {
-			err = t.framer.fr.WriteSettings(i.ss...)
-		}
+		return t.framer.fr.WriteSettings(i.ss...)
+	case *settingsAck:
+		return t.framer.fr.WriteSettingsAck()
 	case *resetStream:
 		// If the server needs to be intimated about stream closing,
 		// then we need to make sure the RST_STREAM frame is written to
 		// the wire before the headers of the next stream waiting on
 		// streamQuota. We ensure this by adding to the streamsQuota pool
 		// only after having acquired the writableChan to send RST_STREAM.
-		err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
+		err := t.framer.fr.WriteRSTStream(i.streamID, i.code)
 		t.streamsQuota.add(1)
+		return err
 	case *flushIO:
-		err = t.framer.writer.Flush()
+		return t.framer.writer.Flush()
 	case *ping:
 		if !i.ack {
 			t.bdpEst.timesnap(i.data)
 		}
-		err = t.framer.fr.WritePing(i.ack, i.data)
+		return t.framer.fr.WritePing(i.ack, i.data)
 	default:
-		errorf("transport: http2Client.controller got unexpected item type %v\n", i)
+		errorf("transport: http2Client.controller got unexpected item type %v", i)
+		return fmt.Errorf("transport: http2Client.controller got unexpected item type %v", i)
 	}
-	return err
 }
 
 // keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
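
The restrictive/permissive split in handleSettings above fixes an ordering hazard: limits that tighten are applied before the SETTINGS ACK is queued, limits that loosen only after, so this side never acts on a looser limit than the peer believes has been acknowledged. A small classification sketch, assuming local limits of 100 streams and a 64KiB window:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// isRestrictive mirrors the check above, with the current local limits
// passed in explicitly for illustration.
func isRestrictive(s http2.Setting, maxStreams int, streamSendQuota uint32) bool {
	switch s.ID {
	case http2.SettingMaxConcurrentStreams:
		return int(s.Val) < maxStreams
	case http2.SettingInitialWindowSize:
		return s.Val < streamSendQuota
	}
	return false
}

func main() {
	maxStreams, streamSendQuota := 100, uint32(65535)
	for _, s := range []http2.Setting{
		{ID: http2.SettingMaxConcurrentStreams, Val: 10},   // restrictive: applied before the ACK
		{ID: http2.SettingInitialWindowSize, Val: 1 << 20}, // permissive: applied after the ACK
	} {
		fmt.Println(s, "restrictive:", isRestrictive(s, maxStreams, streamSendQuota))
	}
}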

+ 111 - 79
vendor/google.golang.org/grpc/transport/http2_server.go

@@ -70,7 +70,10 @@ type http2Server struct {
 	fc         *inFlow
 	// sendQuotaPool provides flow control to outbound message.
 	sendQuotaPool *quotaPool
-	stats         stats.Handler
+	// localSendQuota limits the amount of data that can be scheduled
+	// for writing before it is actually written out.
+	localSendQuota *quotaPool
+	stats          stats.Handler
 	// Flag to keep track of reading activity on transport.
 	// 1 is true and 0 is false.
 	activity uint32 // Accessed atomically.
@@ -199,6 +202,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 		controlBuf:        newControlBuffer(),
 		fc:                &inFlow{limit: uint32(icwz)},
 		sendQuotaPool:     newQuotaPool(defaultWindowSize),
+		localSendQuota:    newQuotaPool(defaultLocalSendQuota),
 		state:             reachable,
 		activeStreams:     make(map[uint32]*Stream),
 		streamSendQuota:   defaultWindowSize,
@@ -224,6 +228,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 	}
 	t.framer.writer.Flush()
 
+	defer func() {
+		if err != nil {
+			t.Close()
+		}
+	}()
+
 	// Check the validity of client preface.
 	preface := make([]byte, len(clientPreface))
 	if _, err := io.ReadFull(t.conn, preface); err != nil {
@@ -235,8 +245,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 
 	frame, err := t.framer.fr.ReadFrame()
 	if err == io.EOF || err == io.ErrUnexpectedEOF {
-		t.Close()
-		return
+		return nil, err
 	}
 	if err != nil {
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
@@ -250,7 +259,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 
 	go func() {
 		loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
-		t.Close()
+		t.conn.Close()
 	}()
 	go t.keepalive()
 	return t, nil
@@ -272,12 +281,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 
 	buf := newRecvBuffer()
 	s := &Stream{
-		id:           streamID,
-		st:           t,
-		buf:          buf,
-		fc:           &inFlow{limit: uint32(t.initialWindowSize)},
-		recvCompress: state.encoding,
-		method:       state.method,
+		id:             streamID,
+		st:             t,
+		buf:            buf,
+		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
+		recvCompress:   state.encoding,
+		method:         state.method,
+		contentSubtype: state.contentSubtype,
 	}
 
 	if frame.StreamEnded() {
@@ -297,10 +307,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 		pr.AuthInfo = t.authInfo
 	}
 	s.ctx = peer.NewContext(s.ctx, pr)
-	// Cache the current stream to the context so that the server application
-	// can find out. Required when the server wants to send some metadata
-	// back to the client (unary call only).
-	s.ctx = newContextWithStream(s.ctx, s)
 	// Attach the received metadata to the context.
 	if len(state.mdata) > 0 {
 		s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
@@ -341,7 +347,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 	}
 	t.maxStreamID = streamID
 	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
-	s.localSendQuota = newQuotaPool(defaultLocalSendQuota)
 	t.activeStreams[streamID] = s
 	if len(t.activeStreams) == 1 {
 		t.idle = time.Time{}
@@ -371,6 +376,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 			t.updateWindow(s, uint32(n))
 		},
 	}
+	s.waiters = waiters{
+		ctx:  s.ctx,
+		tctx: t.ctx,
+	}
 	handle(s)
 	return
 }
@@ -486,7 +495,6 @@ func (t *http2Server) updateFlowControl(n uint32) {
 	t.mu.Unlock()
 	t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
 	t.controlBuf.put(&settings{
-		ack: false,
 		ss: []http2.Setting{
 			{
 				ID:  http2.SettingInitialWindowSize,
@@ -584,12 +592,29 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
 	if f.IsAck() {
 		return
 	}
-	var ss []http2.Setting
+	var rs []http2.Setting
+	var ps []http2.Setting
 	f.ForeachSetting(func(s http2.Setting) error {
-		ss = append(ss, s)
+		if t.isRestrictive(s) {
+			rs = append(rs, s)
+		} else {
+			ps = append(ps, s)
+		}
 		return nil
 	})
-	t.controlBuf.put(&settings{ack: true, ss: ss})
+	t.applySettings(rs)
+	t.controlBuf.put(&settingsAck{})
+	t.applySettings(ps)
+}
+
+func (t *http2Server) isRestrictive(s http2.Setting) bool {
+	switch s.ID {
+	case http2.SettingInitialWindowSize:
+		// Note: we don't acquire a lock here to read streamSendQuota
+		// because the same goroutine updates it later.
+		return s.Val < t.streamSendQuota
+	}
+	return false
 }
 
 func (t *http2Server) applySettings(ss []http2.Setting) {
@@ -656,7 +681,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
 
 	if t.pingStrikes > maxPingStrikes {
 		// Send goaway and close the connection.
-		errorf("transport: Got to too many pings from the client, closing the connection.")
+		errorf("transport: Got too many pings from the client, closing the connection.")
 		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
 	}
 }
@@ -698,11 +723,11 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	}
 	md = s.header
 	s.mu.Unlock()
-	// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
 	if s.sendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
 	}
@@ -721,9 +746,9 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 		endStream: false,
 	})
 	if t.stats != nil {
-		outHeader := &stats.OutHeader{
-		//WireLength: // TODO(mmukhi): Revisit this later, if needed.
-		}
+		// Note: WireLength is not set in outHeader.
+		// TODO(mmukhi): Revisit this later, if needed.
+		outHeader := &stats.OutHeader{}
 		t.stats.HandleRPC(s.Context(), outHeader)
 	}
 	return nil
@@ -759,12 +784,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 		headersSent = true
 	}
 
-	// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
 	if !headersSent {
 		headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-		headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+		headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
 	}
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
@@ -803,7 +828,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 
 // Write converts the data into HTTP2 data frame and sends it out. Non-nil error
 // is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) {
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
 	select {
 	case <-s.ctx.Done():
 		return ContextErr(s.ctx.Err())
@@ -814,10 +839,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (
 
 	var writeHeaderFrame bool
 	s.mu.Lock()
-	if s.state == streamDone {
-		s.mu.Unlock()
-		return streamErrorf(codes.Unknown, "the stream has been done")
-	}
 	if !s.headerOk {
 		writeHeaderFrame = true
 	}
@@ -832,66 +853,68 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (
 	}
 	hdr = append(hdr, data[:emptyLen]...)
 	data = data[emptyLen:]
+	var (
+		streamQuota    int
+		streamQuotaVer uint32
+		err            error
+	)
 	for _, r := range [][]byte{hdr, data} {
 		for len(r) > 0 {
 			size := http2MaxFrameLen
-			// Wait until the stream has some quota to send the data.
-			quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
-			sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan)
-			if err != nil {
-				return err
+			if size > len(r) {
+				size = len(r)
 			}
-			// Wait until the transport has some quota to send the data.
-			tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire())
+			if streamQuota == 0 { // Used up all the locally cached stream quota.
+				// Get all the stream quota there is.
+				streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
+				if err != nil {
+					return err
+				}
+			}
+			if size > streamQuota {
+				size = streamQuota
+			}
+			// Get size worth quota from transport.
+			tq, _, err := t.sendQuotaPool.get(size, s.waiters)
 			if err != nil {
 				return err
 			}
-			if sq < size {
-				size = sq
-			}
 			if tq < size {
 				size = tq
 			}
-			if size > len(r) {
-				size = len(r)
-			}
-			p := r[:size]
-			ps := len(p)
-			if ps < tq {
-				// Overbooked transport quota. Return it back.
-				t.sendQuotaPool.add(tq - ps)
-			}
-			// Acquire local send quota to be able to write to the controlBuf.
-			ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire())
+			ltq, _, err := t.localSendQuota.get(size, s.waiters)
 			if err != nil {
-				if _, ok := err.(ConnectionError); !ok {
-					t.sendQuotaPool.add(ps)
-				}
+				// Add the acquired quota back to transport.
+				t.sendQuotaPool.add(tq)
 				return err
 			}
-			s.localSendQuota.add(ltq - ps) // It's ok we make this negative.
-			// Reset ping strikes when sending data since this might cause
-			// the peer to send ping.
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
+			// Even if ltq is smaller than size we don't adjust size, since
+			// ltq is only a soft limit.
+			streamQuota -= size
+			p := r[:size]
 			success := func() {
+				ltq := ltq
 				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
-					s.localSendQuota.add(ps)
+					t.localSendQuota.add(ltq)
 				}})
-				if ps < sq {
-					// Overbooked stream quota. Return it back.
-					s.sendQuotaPool.lockedAdd(sq - ps)
-				}
-				r = r[ps:]
+				r = r[size:]
 			}
-			failure := func() {
-				s.sendQuotaPool.lockedAdd(sq)
+			failure := func() { // The stream quota version must have changed.
+				// Our streamQuota cache is invalidated now, so give it back.
+				s.sendQuotaPool.lockedAdd(streamQuota + size)
 			}
-			if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
-				t.sendQuotaPool.add(ps)
-				s.localSendQuota.add(ps)
+			if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+				// Couldn't send this chunk out.
+				t.sendQuotaPool.add(size)
+				t.localSendQuota.add(ltq)
+				streamQuota = 0
 			}
 		}
 	}
+	if streamQuota > 0 {
+		// Add the left-over quota back to the stream.
+		s.sendQuotaPool.add(streamQuota)
+	}
 	return nil
 }
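
The rewritten Write pulls a whole batch of stream quota at once (asking sendQuotaPool for math.MaxInt32) and then carves frame-sized chunks out of the cached amount. A simplified sketch of the carving arithmetic, with the compare-and-execute machinery and the quota refills omitted (so streamQuota is assumed to cover n):

package main

import "fmt"

const http2MaxFrameLen = 16384

// frameSizes returns the sizes of the frames used to send n bytes, each
// capped by the frame limit and by the cached stream quota.
func frameSizes(n, streamQuota int) []int {
	var sizes []int
	for n > 0 {
		size := http2MaxFrameLen
		if size > n {
			size = n
		}
		if size > streamQuota {
			size = streamQuota
		}
		sizes = append(sizes, size)
		n -= size
		streamQuota -= size
	}
	return sizes
}

func main() {
	fmt.Println(frameSizes(40000, 100000)) // [16384 16384 7232]
}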
 
@@ -983,6 +1006,9 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
 func (t *http2Server) itemHandler(i item) error {
 	switch i := i.(type) {
 	case *dataFrame:
+		// Reset ping strikes when sending data since this might cause
+		// the peer to send ping.
+		atomic.StoreUint32(&t.resetPingStrikes, 1)
 		if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
 			return err
 		}
@@ -1027,11 +1053,9 @@ func (t *http2Server) itemHandler(i item) error {
 	case *windowUpdate:
 		return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
 	case *settings:
-		if i.ack {
-			t.applySettings(i.ss)
-			return t.framer.fr.WriteSettingsAck()
-		}
 		return t.framer.fr.WriteSettings(i.ss...)
+	case *settingsAck:
+		return t.framer.fr.WriteSettingsAck()
 	case *resetStream:
 		return t.framer.fr.WriteRSTStream(i.streamID, i.code)
 	case *goAway:
@@ -1045,6 +1069,9 @@ func (t *http2Server) itemHandler(i item) error {
 		if !i.headsUp {
 			// Stop accepting more streams now.
 			t.state = draining
+			if len(t.activeStreams) == 0 {
+				i.closeConn = true
+			}
 			t.mu.Unlock()
 			if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
 				return err
@@ -1052,8 +1079,7 @@ func (t *http2Server) itemHandler(i item) error {
 			if i.closeConn {
 				// Abruptly close the connection following the GoAway (via
 				// loopywriter).  But flush out what's inside the buffer first.
-				t.framer.writer.Flush()
-				return fmt.Errorf("transport: Connection closing")
+				t.controlBuf.put(&flushIO{closeTr: true})
 			}
 			return nil
 		}
@@ -1083,7 +1109,13 @@ func (t *http2Server) itemHandler(i item) error {
 		}()
 		return nil
 	case *flushIO:
-		return t.framer.writer.Flush()
+		if err := t.framer.writer.Flush(); err != nil {
+			return err
+		}
+		if i.closeTr {
+			return ErrConnClosing
+		}
+		return nil
 	case *ping:
 		if !i.ack {
 			t.bdpEst.timesnap(i.data)
@@ -1131,7 +1163,7 @@ func (t *http2Server) closeStream(s *Stream) {
 		t.idle = time.Now()
 	}
 	if t.state == draining && len(t.activeStreams) == 0 {
-		defer t.Close()
+		defer t.controlBuf.put(&flushIO{closeTr: true})
 	}
 	t.mu.Unlock()
 	// In case stream sending and receiving are invoked in separate
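
Note the new shutdown choreography: instead of calling t.Close() from inside the writer, a flushIO{closeTr: true} item is queued; the loopy writer flushes, returns ErrConnClosing, and the goroutine that owns the loop closes the connection. A minimal sketch of the pattern, with illustrative names:

package main

import (
	"errors"
	"fmt"
)

var errConnClosing = errors.New("transport is closing")

type item interface{}

type flushIO struct{ closeTr bool }

// loopy drains items until the handler reports an error; the owner of the
// loop, not the handler, then performs the actual conn.Close().
func loopy(items <-chan item, handle func(item) error) error {
	for i := range items {
		if err := handle(i); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	items := make(chan item, 1)
	items <- &flushIO{closeTr: true}

	err := loopy(items, func(i item) error {
		if f, ok := i.(*flushIO); ok {
			fmt.Println("flushing buffered frames")
			if f.closeTr {
				return errConnClosing // unwinds to the goroutine that owns the conn
			}
		}
		return nil
	})
	fmt.Println("writer exited:", err) // conn.Close() happens here
}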

+ 56 - 15
vendor/google.golang.org/grpc/transport/http_util.go

@@ -46,6 +46,12 @@ const (
 	// http2IOBufSize specifies the buffer size for sending frames.
 	defaultWriteBufSize = 32 * 1024
 	defaultReadBufSize  = 32 * 1024
+	// baseContentType is the base content-type for gRPC.  This is a valid
+	// content-type on its own, but can also include a content-subtype such as
+	// "proto" as a suffix after "+" or ";".  See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	baseContentType = "application/grpc"
 )
 
 var (
@@ -64,7 +70,7 @@ var (
 		http2.ErrCodeConnect:            codes.Internal,
 		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
 		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
-		http2.ErrCodeHTTP11Required:     codes.FailedPrecondition,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
 	}
 	statusCodeConvTab = map[codes.Code]http2.ErrCode{
 		codes.Internal:          http2.ErrCodeInternal,
@@ -111,9 +117,10 @@ type decodeState struct {
 	timeout    time.Duration
 	method     string
 	// key-value metadata map from the peer.
-	mdata      map[string][]string
-	statsTags  []byte
-	statsTrace []byte
+	mdata          map[string][]string
+	statsTags      []byte
+	statsTrace     []byte
+	contentSubtype string
 }
 
 // isReservedHeader checks whether hdr belongs to HTTP2 headers
@@ -149,17 +156,44 @@ func isWhitelistedPseudoHeader(hdr string) bool {
 	}
 }
 
-func validContentType(t string) bool {
-	e := "application/grpc"
-	if !strings.HasPrefix(t, e) {
-		return false
+// contentSubtype returns the content-subtype for the given content-type.  The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func contentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// guaranteed since != baseContentType and has baseContentType prefix
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// this will return true for "application/grpc+" or "application/grpc;"
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
 	}
-	// Support variations on the content-type
-	// (e.g. "application/grpc+blah", "application/grpc;blah").
-	if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
-		return false
+}
+
+// contentType builds the content-type header value for the given
+// content-subtype, which is assumed to be lowercase.
+func contentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
 	}
-	return true
+	return baseContentType + "+" + contentSubtype
 }
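
The parser and builder above round-trip as follows; since both helpers are unexported, a sketch like this would live inside package transport (e.g. in a test):

package transport

import "fmt"

func demoContentSubtype() {
	for _, ct := range []string{
		"application/grpc",       // -> "", true (no subtype)
		"application/grpc+proto", // -> "proto", true
		"application/grpc;proto", // -> "proto", true
		"application/grpc+",      // -> "", true (degenerate but accepted)
		"application/json",       // -> "", false (not gRPC)
	} {
		sub, ok := contentSubtype(ct)
		fmt.Printf("%q -> %q %v\n", ct, sub, ok)
	}
	fmt.Println(contentType("proto")) // "application/grpc+proto"
	fmt.Println(contentType(""))      // "application/grpc"
}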
 
 func (d *decodeState) status() *status.Status {
@@ -247,9 +281,16 @@ func (d *decodeState) addMetadata(k, v string) {
 func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
 	switch f.Name {
 	case "content-type":
-		if !validContentType(f.Value) {
-			return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
+		contentSubtype, validContentType := contentSubtype(f.Value)
+		if !validContentType {
+			return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
 		}
+		d.contentSubtype = contentSubtype
+		// TODO: do we want to propagate the whole content-type in the metadata,
+		// or come up with a way to just propagate the content-subtype if it was set?
+		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
+		// in the metadata?
+		d.addMetadata(f.Name, f.Value)
 	case "grpc-encoding":
 		d.encoding = f.Value
 	case "grpc-status":

+ 116 - 122
vendor/google.golang.org/grpc/transport/transport.go

@@ -17,16 +17,15 @@
  */
 
 // Package transport defines and implements message oriented communication
-// channel to complete various transactions (e.g., an RPC).
+// channel to complete various transactions (e.g., an RPC).  It is meant for
+// grpc-internal usage and is not intended to be imported directly by users.
 package transport // import "google.golang.org/grpc/transport"
 
 import (
-	stdctx "context"
 	"fmt"
 	"io"
 	"net"
 	"sync"
-	"time"
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
@@ -134,7 +133,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
 	case <-r.ctx.Done():
 		return 0, ContextErr(r.ctx.Err())
 	case <-r.goAway:
-		return 0, ErrStreamDrain
+		return 0, errStreamDrain
 	case m := <-r.recv.get():
 		r.recv.load()
 		if m.err != nil {
@@ -211,66 +210,71 @@ const (
 
 // Stream represents an RPC in the transport layer.
 type Stream struct {
-	id uint32
-	// nil for client side Stream.
-	st ServerTransport
-	// ctx is the associated context of the stream.
-	ctx context.Context
-	// cancel is always nil for client side Stream.
-	cancel context.CancelFunc
-	// done is closed when the final status arrives.
-	done chan struct{}
-	// goAway is closed when the server sent GoAways signal before this stream was initiated.
-	goAway chan struct{}
-	// method records the associated RPC method of the stream.
-	method       string
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed when the final status arrives
+	goAway       chan struct{}      // closed when a GOAWAY control message is received
+	method       string             // the associated RPC method of the stream
 	recvCompress string
 	sendCompress string
 	buf          *recvBuffer
 	trReader     io.Reader
 	fc           *inFlow
 	recvQuota    uint32
-
-	// TODO: Remote this unused variable.
-	// The accumulated inbound quota pending for window update.
-	updateQuota uint32
+	waiters      waiters
 
 	// Callback to state application's intentions to read data. This
-	// is used to adjust flow control, if need be.
+	// is used to adjust flow control, if needed.
 	requestRead func(int)
 
-	sendQuotaPool  *quotaPool
-	localSendQuota *quotaPool
-	// Close headerChan to indicate the end of reception of header metadata.
-	headerChan chan struct{}
-	// header caches the received header metadata.
-	header metadata.MD
-	// The key-value map of trailer metadata.
-	trailer metadata.MD
-
-	mu sync.RWMutex // guard the following
-	// headerOK becomes true from the first header is about to send.
-	headerOk bool
+	sendQuotaPool *quotaPool
+	headerChan    chan struct{} // closed to indicate the end of header metadata.
+	headerDone    bool          // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	header        metadata.MD   // the received header metadata.
+	trailer       metadata.MD   // the key-value map of trailer metadata.
+
+	mu       sync.RWMutex // guard the following
+	headerOk bool         // becomes true when the first header is about to be sent
 	state    streamState
-	// true iff headerChan is closed. Used to avoid closing headerChan
-	// multiple times.
-	headerDone bool
-	// the status error received from the server.
-	status *status.Status
-	// rstStream indicates whether a RST_STREAM frame needs to be sent
-	// to the server to signify that this stream is closing.
-	rstStream bool
-	// rstError is the error that needs to be sent along with the RST_STREAM frame.
-	rstError http2.ErrCode
-	// bytesSent and bytesReceived indicates whether any bytes have been sent or
-	// received on this stream.
-	bytesSent     bool
-	bytesReceived bool
+
+	status *status.Status // the status error received from the server
+
+	rstStream bool          // indicates whether a RST_STREAM frame needs to be sent
+	rstError  http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
+
+	bytesReceived bool // indicates whether any bytes have been received on this stream
+	unprocessed   bool // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// This must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+func (s *Stream) waitOnHeader() error {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return nil
+	}
+	wc := s.waiters
+	select {
+	case <-wc.ctx.Done():
+		return ContextErr(wc.ctx.Err())
+	case <-wc.goAway:
+		return errStreamDrain
+	case <-s.headerChan:
+		return nil
+	}
 }
 
 // RecvCompress returns the compression algorithm applied to the inbound
 // message. It is empty string if there is no compression applied.
 func (s *Stream) RecvCompress() string {
+	if err := s.waitOnHeader(); err != nil {
+		return ""
+	}
 	return s.recvCompress
 }
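
With waitOnHeader in place, client-side accessors such as RecvCompress now block until header metadata has actually arrived, the RPC context ends, or a GOAWAY drains the connection. A standalone sketch of that select, using stand-in channels instead of the vendored Stream type:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errStreamDrain = errors.New("the connection is draining")

// waitOnHeader blocks until headers arrive, the RPC context ends, or a
// GOAWAY drains the connection, whichever fires first.
func waitOnHeader(ctx context.Context, goAway, headerChan <-chan struct{}) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-goAway:
		return errStreamDrain
	case <-headerChan:
		return nil
	}
}

func main() {
	headerChan := make(chan struct{})
	goAway := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(headerChan) // simulate header metadata arriving
	}()
	fmt.Println("err:", waitOnHeader(context.Background(), goAway, headerChan))
}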
 
@@ -295,15 +299,7 @@ func (s *Stream) GoAway() <-chan struct{} {
 // is available. It blocks until i) the metadata is ready or ii) there is no
 // header metadata or iii) the stream is canceled/expired.
 func (s *Stream) Header() (metadata.MD, error) {
-	var err error
-	select {
-	case <-s.ctx.Done():
-		err = ContextErr(s.ctx.Err())
-	case <-s.goAway:
-		err = ErrStreamDrain
-	case <-s.headerChan:
-		return s.header.Copy(), nil
-	}
+	err := s.waitOnHeader()
 	// Even if the stream is closed, header is returned if available.
 	select {
 	case <-s.headerChan:
@@ -329,6 +325,15 @@ func (s *Stream) ServerTransport() ServerTransport {
 	return s.st
 }
 
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase.  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
 // Context returns the context of the stream.
 func (s *Stream) Context() context.Context {
 	return s.ctx
@@ -361,6 +366,14 @@ func (s *Stream) SetHeader(md metadata.MD) error {
 	return nil
 }
 
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	t := s.ServerTransport()
+	return t.WriteHeader(s, md)
+}
+
 // SetTrailer sets the trailer metadata which will be sent with the RPC status
 // by the server. This can be called multiple times. Server side only.
 func (s *Stream) SetTrailer(md metadata.MD) error {
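
Per its doc comment, SendHeader writes the union of the given metadata and anything staged by earlier SetHeader calls. A small sketch of that merge using the metadata.Join helper from the metadata package (the keys and values here are made up):

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	staged := metadata.Pairs("stage", "validated") // e.g. an earlier SetHeader call
	final := metadata.Pairs("served-by", "node-1") // passed to SendHeader
	merged := metadata.Join(staged, final)         // what actually hits the wire
	fmt.Println(merged)
}
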
@@ -417,18 +430,19 @@ func (s *Stream) finish(st *status.Status) {
 	close(s.done)
 }
 
-// BytesSent indicates whether any bytes have been sent on this stream.
-func (s *Stream) BytesSent() bool {
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
 	s.mu.Lock()
-	bs := s.bytesSent
+	br := s.bytesReceived
 	s.mu.Unlock()
-	return bs
+	return br
 }
 
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
 	s.mu.Lock()
-	br := s.bytesReceived
+	br := s.unprocessed
 	s.mu.Unlock()
 	return br
 }
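
The Unprocessed flag exists for retry safety: an RPC the server provably never processed (a refused stream, or a GOAWAY including this stream ID) can be transparently retried on another connection. A standalone sketch of the mutex-guarded-flag pattern used by both accessors, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// stream is a cut-down stand-in: writers set the flag while holding the
// stream mutex, and the accessor takes the same lock, mirroring how
// BytesReceived and Unprocessed are guarded above.
type stream struct {
	mu          sync.Mutex
	unprocessed bool
}

func (s *stream) markUnprocessed() {
	s.mu.Lock()
	s.unprocessed = true
	s.mu.Unlock()
}

func (s *stream) Unprocessed() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.unprocessed
}

func main() {
	s := &stream{}
	s.markUnprocessed() // e.g. the server refused the stream
	fmt.Println("safe to retry elsewhere:", s.Unprocessed())
}
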
@@ -439,21 +453,6 @@ func (s *Stream) GoString() string {
 	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
 }
 
-// The key to save transport.Stream in the context.
-type streamKey struct{}
-
-// newContextWithStream creates a new context from ctx and attaches stream
-// to it.
-func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
-	return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// StreamFromContext returns the stream saved in ctx.
-func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
-	s, ok = ctx.Value(streamKey{}).(*Stream)
-	return
-}
-
 // state of transport
 type transportState int
 
@@ -514,14 +513,15 @@ type ConnectOptions struct {
 
 // TargetInfo contains the information of the target such as network address and metadata.
 type TargetInfo struct {
-	Addr     string
-	Metadata interface{}
+	Addr      string
+	Metadata  interface{}
+	Authority string
 }
 
 // NewClientTransport establishes the transport with the required ConnectOptions
 // and returns it to the caller.
-func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) {
-	return newHTTP2Client(ctx, target, opts, timeout)
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
 }
 
 // Options provides additional hints and information for message
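
The new NewClientTransport signature separates two lifetimes: connectCtx bounds only connection establishment, while ctx governs the transport after that, and the onSuccess callback replaces the old timeout parameter by telling the caller when the handshake finished. A hypothetical stand-in showing the split (the names and the fake handshake are illustrative, not the vendored API):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// newTransport aborts the handshake if connectCtx expires, but once
// connected the transport's lifetime is governed by ctx alone.
func newTransport(connectCtx, ctx context.Context, onSuccess func()) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend handshake work
		onSuccess() // e.g. the caller can stop its connection timer now
		return nil
	case <-connectCtx.Done():
		return errors.New("connect deadline exceeded")
	case <-ctx.Done():
		return ctx.Err() // the transport itself was torn down
	}
}

func main() {
	connectCtx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := newTransport(connectCtx, context.Background(), func() { fmt.Println("connected") })
	fmt.Println("err:", err)
}
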
@@ -545,10 +545,6 @@ type CallHdr struct {
 	// Method specifies the operation to perform.
 	Method string
 
-	// RecvCompress specifies the compression algorithm applied on
-	// inbound messages.
-	RecvCompress string
-
 	// SendCompress specifies the compression algorithm applied on
 	// outbound message.
 	SendCompress string
@@ -563,6 +559,14 @@ type CallHdr struct {
 	// for performance purposes.
 	// If it's false, new stream will never be flushed.
 	Flush bool
+
+	// ContentSubtype specifies the content-subtype for a request. For example, a
+	// content-subtype of "proto" will result in a content-type of
+	// "application/grpc+proto". The value of ContentSubtype must be all
+	// lowercase, otherwise the behavior is undefined. See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	ContentSubtype string
 }
 
 // ClientTransport is the common interface for all gRPC client-side transport
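
CallHdr.ContentSubtype is the client-side counterpart of the parsing shown earlier: the transport composes the wire content-type from it. A trivial sketch of that composition, with a hypothetical helper name:

package main

import "fmt"

// contentType builds the on-the-wire content-type from a CallHdr-style
// content-subtype; an empty subtype yields the bare "application/grpc".
func contentType(contentSubtype string) string {
	if contentSubtype == "" {
		return "application/grpc"
	}
	return "application/grpc+" + contentSubtype
}

func main() {
	fmt.Println(contentType(""))      // application/grpc
	fmt.Println(contentType("proto")) // application/grpc+proto
}
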
@@ -686,9 +690,13 @@ func (e ConnectionError) Origin() error {
 var (
 	// ErrConnClosing indicates that the transport is closing.
 	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
-	// ErrStreamDrain indicates that the stream is rejected by the server because
-	// the server stops accepting new RPCs.
-	ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
 )
 
 // TODO: See if we can replace StreamError with status package errors.
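
The split above keeps a transport-level stream error (errStreamDrain) alongside a ready-made *status.Status (statusGoAway) that can be handed straight to the RPC layer. A short example with the real status and codes packages, just to show the shape of the latter:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
	fmt.Println(st.Code(), "-", st.Message())
	// codes.Unavailable is what retry/balancer logic keys off when
	// deciding whether to try another address.
	fmt.Println("retryable:", st.Code() == codes.Unavailable)
}
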
@@ -703,44 +711,27 @@ func (e StreamError) Error() string {
 	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
 }
 
-// wait blocks until it can receive from one of the provided contexts or channels
-func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
-	select {
-	case <-ctx.Done():
-		return 0, ContextErr(ctx.Err())
-	case <-done:
-		return 0, io.EOF
-	case <-goAway:
-		return 0, ErrStreamDrain
-	case <-tctx.Done():
-		return 0, ErrConnClosing
-	case i := <-proceed:
-		return i, nil
-	}
-}
-
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
-	switch err {
-	case context.DeadlineExceeded, stdctx.DeadlineExceeded:
-		return streamErrorf(codes.DeadlineExceeded, "%v", err)
-	case context.Canceled, stdctx.Canceled:
-		return streamErrorf(codes.Canceled, "%v", err)
-	}
-	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
+// waiters are passed to quotaPool get methods to
+// wait on in addition to waiting on quota.
+type waiters struct {
+	ctx    context.Context
+	tctx   context.Context
+	done   chan struct{}
+	goAway chan struct{}
 }
 
 // GoAwayReason contains the reason for the GoAway frame received.
 type GoAwayReason uint8
 
 const (
-	// Invalid indicates that no GoAway frame is received.
-	Invalid GoAwayReason = 0
-	// NoReason is the default value when GoAway frame is received.
-	NoReason GoAwayReason = 1
-	// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
-	// was received and that the debug data said "too_many_pings".
-	TooManyPings GoAwayReason = 2
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
 )
 
 // loopyWriter is run in a separate go routine. It is the single code path that will
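
The waiters struct replaces the removed wait() helper: instead of threading four parameters through every quota-blocked code path, callers carry one value and select over its fields. A standalone sketch of how a quota getter might use it (the get method and the quota channel are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

var (
	errConnClosing = errors.New("transport is closing")
	errStreamDrain = errors.New("the connection is draining")
)

// waiters bundles the abort conditions a blocked sender cares about.
type waiters struct {
	ctx    context.Context // per-RPC context
	tctx   context.Context // transport-lifetime context
	done   chan struct{}   // closed when the final status arrives
	goAway chan struct{}   // closed on GOAWAY
}

// get selects over the abort conditions and the quota channel, mirroring
// the shape of the removed wait() helper.
func (w waiters) get(quota <-chan int) (int, error) {
	select {
	case <-w.ctx.Done():
		return 0, w.ctx.Err()
	case <-w.done:
		return 0, io.EOF
	case <-w.goAway:
		return 0, errStreamDrain
	case <-w.tctx.Done():
		return 0, errConnClosing
	case n := <-quota:
		return n, nil
	}
}

func main() {
	quota := make(chan int, 1)
	quota <- 16384
	w := waiters{
		ctx:    context.Background(),
		tctx:   context.Background(),
		done:   make(chan struct{}),
		goAway: make(chan struct{}),
	}
	n, err := w.get(quota)
	fmt.Println(n, err)
}
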
@@ -751,6 +742,7 @@ func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) er
 		case i := <-cbuf.get():
 			cbuf.load()
 			if err := handler(i); err != nil {
+				errorf("transport: Error while handling item. Err: %v", err)
 				return
 			}
 		case <-ctx.Done():
@@ -762,12 +754,14 @@ func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) er
 			case i := <-cbuf.get():
 				cbuf.load()
 				if err := handler(i); err != nil {
+					errorf("transport: Error while handling item. Err: %v", err)
 					return
 				}
 			case <-ctx.Done():
 				return
 			default:
 				if err := handler(&flushIO{}); err != nil {
+					errorf("transport: Error while flushing. Err: %v", err)
 					return
 				}
 				break hasData
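
The now-logged loopyWriter follows a drain-then-flush pattern: handle every queued control item, and only flush once the queue goes empty, so a single write syscall can cover a batch of frames. A standalone sketch of that loop over a plain string channel (the frame names and log lines are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// loopyWriter drains every queued item first and only "flushes" once the
// queue is empty, mirroring the batching structure of the vendored loop;
// handler errors are logged before returning, as in the hunk above.
func loopyWriter(ctx context.Context, items <-chan string, handle func(string) error) {
	for {
		select {
		case it := <-items:
			if err := handle(it); err != nil {
				fmt.Println("transport: error while handling item:", err)
				return
			}
		hasData:
			for {
				select {
				case it := <-items:
					if err := handle(it); err != nil {
						fmt.Println("transport: error while handling item:", err)
						return
					}
				case <-ctx.Done():
					return
				default:
					if err := handle("flush"); err != nil {
						fmt.Println("transport: error while flushing:", err)
						return
					}
					break hasData
				}
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	items := make(chan string, 2)
	items <- "HEADERS frame"
	items <- "DATA frame"
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		loopyWriter(ctx, items, func(s string) error { fmt.Println("write:", s); return nil })
		close(done)
	}()
	time.Sleep(50 * time.Millisecond) // let the writer drain and flush
	cancel()
	<-done
}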

Some files were not shown because too many files changed in this diff