Browse Source

vendor: upgrade grpc-go to 1.23.0

https://github.com/grpc/grpc-go/releases/tag/v1.23.0

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
Gyuho Lee 6 years ago
parent
commit
02b2779814

+ 1 - 1
go.mod

@@ -42,7 +42,7 @@ require (
 	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
 	golang.org/x/net v0.0.0-20190311183353-d8887717615a
 	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
-	google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25
+	google.golang.org/grpc v1.23.0
 	gopkg.in/cheggaaa/pb.v1 v1.0.25
 	gopkg.in/yaml.v2 v2.2.2
 	sigs.k8s.io/yaml v1.1.0

+ 2 - 2
go.sum

@@ -177,8 +177,8 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25 h1:lS/LGci7282xXbzMwFpHD7RKjsfKUK3KYwk34RYtlK0=
-google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

+ 81 - 3
vendor/google.golang.org/grpc/internal/transport/controlbuf.go

@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"runtime"
 	"sync"
+	"sync/atomic"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
@@ -84,12 +85,24 @@ func (il *itemList) isEmpty() bool {
 // the control buffer of transport. They represent different aspects of
 // control tasks, e.g., flow control, settings, streaming resetting, etc.
 
+// maxQueuedTransportResponseFrames is the most queued "transport response"
+// frames we will buffer before preventing new reads from occurring on the
+// transport.  These are control frames sent in response to client requests,
+// such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
 // registerStream is used to register an incoming stream with loopy writer.
 type registerStream struct {
 	streamID uint32
 	wq       *writeQuota
 }
 
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
 // headerFrame is also used to register stream on the client-side.
 type headerFrame struct {
 	streamID   uint32
@@ -102,6 +115,10 @@ type headerFrame struct {
 	onOrphaned func(error)    // Valid on client-side
 }
 
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
 type cleanupStream struct {
 	streamID uint32
 	rst      bool
@@ -109,6 +126,8 @@ type cleanupStream struct {
 	onWrite  func()
 }
 
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
 type dataFrame struct {
 	streamID  uint32
 	endStream bool
@@ -119,27 +138,41 @@ type dataFrame struct {
 	onEachWrite func()
 }
 
+func (*dataFrame) isTransportResponseFrame() bool { return false }
+
 type incomingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
+
 type outgoingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
+	return false // window updates are throttled by thresholds
+}
+
 type incomingSettings struct {
 	ss []http2.Setting
 }
 
+func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
+
 type outgoingSettings struct {
 	ss []http2.Setting
 }
 
+func (*outgoingSettings) isTransportResponseFrame() bool { return false }
+
 type incomingGoAway struct {
 }
 
+func (*incomingGoAway) isTransportResponseFrame() bool { return false }
+
 type goAway struct {
 	code      http2.ErrCode
 	debugData []byte
@@ -147,15 +180,21 @@ type goAway struct {
 	closeConn bool
 }
 
+func (*goAway) isTransportResponseFrame() bool { return false }
+
 type ping struct {
 	ack  bool
 	data [8]byte
 }
 
+func (*ping) isTransportResponseFrame() bool { return true }
+
 type outFlowControlSizeRequest struct {
 	resp chan uint32
 }
 
+func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+
 type outStreamState int
 
 const (
@@ -238,6 +277,14 @@ type controlBuffer struct {
 	consumerWaiting bool
 	list            *itemList
 	err             error
+
+	// transportResponseFrames counts the number of queued items that represent
+	// the response of an action initiated by the peer.  trfChan is created
+	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+	// closed and nilled when transportResponseFrames drops below the
+	// threshold.  Both fields are protected by mu.
+	transportResponseFrames int
+	trfChan                 atomic.Value // *chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -248,12 +295,24 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
 	}
 }
 
-func (c *controlBuffer) put(it interface{}) error {
+// throttle blocks if there are too many incomingSettings/cleanupStreams in the
+// controlbuf.
+func (c *controlBuffer) throttle() {
+	ch, _ := c.trfChan.Load().(*chan struct{})
+	if ch != nil {
+		select {
+		case <-*ch:
+		case <-c.done:
+		}
+	}
+}
+
+func (c *controlBuffer) put(it cbItem) error {
 	_, err := c.executeAndPut(nil, it)
 	return err
 }
 
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
 	var wakeUp bool
 	c.mu.Lock()
 	if c.err != nil {
@@ -271,6 +330,15 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{
 		c.consumerWaiting = false
 	}
 	c.list.enqueue(it)
+	if it.isTransportResponseFrame() {
+		c.transportResponseFrames++
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are adding the frame that puts us over the threshold; create
+			// a throttling channel.
+			ch := make(chan struct{})
+			c.trfChan.Store(&ch)
+		}
+	}
 	c.mu.Unlock()
 	if wakeUp {
 		select {
@@ -304,7 +372,17 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
 			return nil, c.err
 		}
 		if !c.list.isEmpty() {
-			h := c.list.dequeue()
+			h := c.list.dequeue().(cbItem)
+			if h.isTransportResponseFrame() {
+				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+					// We are removing the frame that put us over the
+					// threshold; close and clear the throttling channel.
+					ch := c.trfChan.Load().(*chan struct{})
+					close(*ch)
+					c.trfChan.Store((*chan struct{})(nil))
+				}
+				c.transportResponseFrames--
+			}
 			c.mu.Unlock()
 			return h, nil
 		}

+ 1 - 2
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go

@@ -149,6 +149,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 		n = uint32(math.MaxInt32)
 	}
 	f.mu.Lock()
+	defer f.mu.Unlock()
 	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
 	// can send without a window update.
 	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@@ -169,10 +170,8 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
 			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
 			f.delta = n
 		}
-		f.mu.Unlock()
 		return f.delta
 	}
-	f.mu.Unlock()
 	return 0
 }
 
 

+ 1 - 0
vendor/google.golang.org/grpc/internal/transport/http2_client.go

@@ -1245,6 +1245,7 @@ func (t *http2Client) reader() {
 
 	// loop to keep reading incoming messages on this transport.
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		if t.keepaliveEnabled {
 			atomic.CompareAndSwapUint32(&t.activity, 0, 1)

+ 1 - 0
vendor/google.golang.org/grpc/internal/transport/http2_server.go

@@ -436,6 +436,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	defer close(t.readerDone)
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		atomic.StoreUint32(&t.activity, 1)
 		if err != nil {

+ 21 - 0
vendor/google.golang.org/grpc/internal/transport/transport.go

@@ -184,6 +184,19 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
 	// r.readAdditional acts on that message and returns the necessary error.
 	select {
 	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like a unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
 		r.closeStream(ContextErr(r.ctx.Err()))
 		m := <-r.recv.get()
 		return r.readAdditional(m, p)
@@ -298,6 +311,14 @@ func (s *Stream) waitOnHeader() error {
 	}
 	select {
 	case <-s.ctx.Done():
+		// We prefer success over failure when reading messages because we delay
+		// context error in stream.Read(). To keep behavior consistent, we also
+		// prefer success here.
+		select {
+		case <-s.headerChan:
+			return nil
+		default:
+		}
 		return ContextErr(s.ctx.Err())
 	case <-s.headerChan:
 		return nil

+ 11 - 1
vendor/google.golang.org/grpc/stream.go

@@ -327,13 +327,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	return cs, nil
 }
 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
+// newAttemptLocked creates a new attempt with a transport.
+// If it succeeds, then it replaces clientStream's attempt with this new attempt.
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
 	newAttempt := &csAttempt{
 		cs:           cs,
 		dc:           cs.cc.dopts.dc,
 		statsHandler: sh,
 		trInfo:       trInfo,
 	}
+	defer func() {
+		if retErr != nil {
+			// This attempt is not set in the clientStream, so it's finish won't
+			// be called. Call it here for stats and trace in case they are not
+			// nil.
+			newAttempt.finish(retErr)
+		}
+	}()
 
 	if err := cs.ctx.Err(); err != nil {
 		return toRPCErr(err)

+ 1 - 1
vendor/google.golang.org/grpc/version.go

@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.23.0-dev"
+const Version = "1.23.0"