Browse Source

*: introduce grpc dependency

Xiang Li 10 years ago
parent
commit
436bacd77a
100 changed files with 24620 additions and 0 deletions
  1. 24 0
      Godeps/Godeps.json
  2. 1 0
      Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore
  3. 19 0
      Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS
  4. 19 0
      Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS
  5. 44 0
      Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile
  6. 5 0
      Godeps/_workspace/src/github.com/bradfitz/http2/HACKING
  7. 7 0
      Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE
  8. 3 0
      Godeps/_workspace/src/github.com/bradfitz/http2/Makefile
  9. 17 0
      Godeps/_workspace/src/github.com/bradfitz/http2/README
  10. 75 0
      Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go
  11. 73 0
      Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go
  12. 78 0
      Godeps/_workspace/src/github.com/bradfitz/http2/errors.go
  13. 27 0
      Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go
  14. 51 0
      Godeps/_workspace/src/github.com/bradfitz/http2/flow.go
  15. 54 0
      Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go
  16. 1113 0
      Godeps/_workspace/src/github.com/bradfitz/http2/frame.go
  17. 578 0
      Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go
  18. 169 0
      Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go
  19. 33 0
      Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go
  20. 5 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore
  21. 5 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile
  22. 16 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README
  23. 426 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go
  24. 279 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go
  25. 27 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key
  26. 26 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem
  27. 1 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl
  28. 20 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt
  29. 27 0
      Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key
  30. 80 0
      Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go
  31. 252 0
      Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go
  32. 331 0
      Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go
  33. 445 0
      Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go
  34. 648 0
      Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go
  35. 159 0
      Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go
  36. 353 0
      Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go
  37. 249 0
      Godeps/_workspace/src/github.com/bradfitz/http2/http2.go
  38. 152 0
      Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go
  39. 43 0
      Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go
  40. 24 0
      Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go
  41. 121 0
      Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go
  42. 1777 0
      Godeps/_workspace/src/github.com/bradfitz/http2/server.go
  43. 2252 0
      Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go
  44. 5021 0
      Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml
  45. 553 0
      Godeps/_workspace/src/github.com/bradfitz/http2/transport.go
  46. 168 0
      Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go
  47. 204 0
      Godeps/_workspace/src/github.com/bradfitz/http2/write.go
  48. 286 0
      Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go
  49. 357 0
      Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go
  50. 191 0
      Godeps/_workspace/src/github.com/golang/glog/LICENSE
  51. 44 0
      Godeps/_workspace/src/github.com/golang/glog/README
  52. 1177 0
      Godeps/_workspace/src/github.com/golang/glog/glog.go
  53. 124 0
      Godeps/_workspace/src/github.com/golang/glog/glog_file.go
  54. 415 0
      Godeps/_workspace/src/github.com/golang/glog/glog_test.go
  55. 14 0
      Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
  56. 3 0
      Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
  57. 31 0
      Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
  58. 3 0
      Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
  59. 27 0
      Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
  60. 64 0
      Godeps/_workspace/src/golang.org/x/oauth2/README.md
  61. 24 0
      Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
  62. 45 0
      Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
  63. 16 0
      Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go
  64. 16 0
      Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
  65. 83 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
  66. 13 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
  67. 154 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/default.go
  68. 150 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
  69. 145 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
  70. 67 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go
  71. 168 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
  72. 46 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
  73. 122 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
  74. 2 0
      Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties
  75. 69 0
      Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
  76. 62 0
      Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
  77. 160 0
      Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
  78. 31 0
      Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
  79. 147 0
      Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
  80. 134 0
      Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
  81. 16 0
      Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go
  82. 523 0
      Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
  83. 435 0
      Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
  84. 16 0
      Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
  85. 22 0
      Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
  86. 104 0
      Godeps/_workspace/src/golang.org/x/oauth2/token.go
  87. 50 0
      Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
  88. 138 0
      Godeps/_workspace/src/golang.org/x/oauth2/transport.go
  89. 53 0
      Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
  90. 16 0
      Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go
  91. 37 0
      Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go
  92. 267 0
      Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
  93. 128 0
      Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
  94. 1633 0
      Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go
  95. 594 0
      Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto
  96. 57 0
      Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
  97. 10 0
      Godeps/_workspace/src/google.golang.org/grpc/.travis.yml
  98. 27 0
      Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md
  99. 28 0
      Godeps/_workspace/src/google.golang.org/grpc/LICENSE
  100. 22 0
      Godeps/_workspace/src/google.golang.org/grpc/PATENTS

+ 24 - 0
Godeps/Godeps.json

@@ -23,6 +23,10 @@
 			"Comment": "v1.0-71-g71f28ea",
 			"Rev": "71f28eaecbebd00604d87bb1de0dae8fcfa54bbd"
 		},
+		{
+			"ImportPath": "github.com/bradfitz/http2",
+			"Rev": "3e36af6d3af0e56fa3da71099f864933dea3d9fb"
+		},
 		{
 			"ImportPath": "github.com/codegangsta/cli",
 			"Comment": "1.2.0-26-gf7ebb76",
@@ -45,6 +49,10 @@
 			"ImportPath": "github.com/gogo/protobuf/proto",
 			"Rev": "64f27bf06efee53589314a6e5a4af34cdd85adf6"
 		},
+		{
+			"ImportPath": "github.com/golang/glog",
+			"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
+		},
 		{
 			"ImportPath": "github.com/golang/protobuf/proto",
 			"Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca"
@@ -104,6 +112,22 @@
 		{
 			"ImportPath": "golang.org/x/net/context",
 			"Rev": "7dbad50ab5b31073856416cdcfeb2796d682f844"
+		},
+		{
+			"ImportPath": "golang.org/x/oauth2",
+			"Rev": "3046bc76d6dfd7d3707f6640f85e42d9c4050f50"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud/compute/metadata",
+			"Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud/internal",
+			"Rev": "f20d6dcccb44ed49de45ae3703312cb46e627db1"
+		},
+		{
+			"ImportPath": "google.golang.org/grpc",
+			"Rev": "f5ebd86be717593ab029545492c93ddf8914832b"
 		}
 	]
 }

+ 1 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/.gitignore

@@ -0,0 +1 @@
+*~

+ 19 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/AUTHORS

@@ -0,0 +1,19 @@
+# This file is like Go's AUTHORS file: it lists Copyright holders.
+# The list of humans who have contributed is in the CONTRIBUTORS file.
+#
+# To contribute to this project, because it will eventually be folded
+# back in to Go itself, you need to submit a CLA:
+#
+#    http://golang.org/doc/contribute.html#copyright
+#
+# Then you get added to CONTRIBUTORS and you or your company get added
+# to the AUTHORS file.
+
+Blake Mizerany <blake.mizerany@gmail.com> github=bmizerany
+Daniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing
+Gabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr
+Google, Inc.
+Keith Rarick <kr@xph.us> github=kr
+Matthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan
+Matt Layher <mdlayher@gmail.com> github=mdlayher
+Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t

+ 19 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/CONTRIBUTORS

@@ -0,0 +1,19 @@
+# This file is like Go's CONTRIBUTORS file: it lists humans.
+# The list of copyright holders (which may be companies) are in the AUTHORS file.
+#
+# To contribute to this project, because it will eventually be folded
+# back in to Go itself, you need to submit a CLA:
+#
+#    http://golang.org/doc/contribute.html#copyright
+#
+# Then you get added to CONTRIBUTORS and you or your company get added
+# to the AUTHORS file.
+
+Blake Mizerany <blake.mizerany@gmail.com> github=bmizerany
+Brad Fitzpatrick <bradfitz@golang.org> github=bradfitz
+Daniel Morsing <daniel.morsing@gmail.com> github=DanielMorsing
+Gabriel Aszalos <gabriel.aszalos@gmail.com> github=gbbr
+Keith Rarick <kr@xph.us> github=kr
+Matthew Keenan <tank.en.mate@gmail.com> <github@mattkeenan.net> github=mattkeenan
+Matt Layher <mdlayher@gmail.com> github=mdlayher
+Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> github=tatsuhiro-t

+ 44 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/Dockerfile

@@ -0,0 +1,44 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+       autotools-dev libtool pkg-config zlib1g-dev \
+       libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+       automake autoconf
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER af24f8394e43f4
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.40.0.tar.gz
+RUN tar -zxvf curl-7.40.0.tar.gz
+WORKDIR /root/curl-7.40.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+

+ 5 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/HACKING

@@ -0,0 +1,5 @@
+We only accept contributions from users who have gone through Go's
+contribution process (signed a CLA).
+
+Please acknowledge whether you have (and use the same email) if
+sending a pull request.

+ 7 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/LICENSE

@@ -0,0 +1,7 @@
+Copyright 2014 Google & the Go AUTHORS
+
+Go AUTHORS are:
+See https://code.google.com/p/go/source/browse/AUTHORS
+
+Licensed under the terms of Go itself:
+https://code.google.com/p/go/source/browse/LICENSE

+ 3 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/Makefile

@@ -0,0 +1,3 @@
+curlimage:
+	docker build -t gohttp2/curl .
+

+ 17 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/README

@@ -0,0 +1,17 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use.  It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/github.com/bradfitz/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome.

+ 75 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/buffer.go

@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"errors"
+)
+
+// buffer is an io.ReadWriteCloser backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+//
+// Invariant: 0 <= r <= w <= len(buf); buf[r:w] holds the unread bytes.
+type buffer struct {
+	buf    []byte // fixed backing storage; never reallocated
+	r, w   int    // read and write cursors into buf
+	closed bool   // set by Close; subsequent Writes fail
+	err    error  // err to return to reader once drained, set by Close
+}
+
+var (
+	errReadEmpty = errors.New("read from empty buffer")
+	errWriteFull = errors.New("write on full buffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+// Once the buffer has been closed and fully drained, Read returns
+// the error that was passed to Close (e.g. io.EOF).
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n = copy(p, b.buf[b.r:b.w])
+	b.r += n
+	if b.closed && b.r == b.w {
+		// Closed and now empty: surface the close error.
+		err = b.err
+	} else if b.r == b.w && n == 0 {
+		// Nothing buffered and not closed: reading is an error here.
+		err = errReadEmpty
+	}
+	return n, err
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *buffer) Len() int {
+	return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+// On a short write it returns the number of bytes consumed along
+// with errWriteFull.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	if b.closed {
+		return 0, errors.New("closed")
+	}
+
+	// Slide existing data to beginning.
+	// Done only when p would not fit in the tail space, so the
+	// common append path avoids the extra copy.
+	if b.r > 0 && len(p) > len(b.buf)-b.w {
+		copy(b.buf, b.buf[b.r:b.w])
+		b.w -= b.r
+		b.r = 0
+	}
+
+	// Write new data.
+	n = copy(b.buf[b.w:], p)
+	b.w += n
+	if n < len(p) {
+		err = errWriteFull
+	}
+	return n, err
+}
+
+// Close marks the buffer as closed. Future calls to Write will
+// return an error. Future calls to Read, once the buffer is
+// empty, will return err.
+// Only the first Close takes effect; later calls are no-ops and
+// do not overwrite the stored error.
+func (b *buffer) Close(err error) {
+	if !b.closed {
+		b.closed = true
+		b.err = err
+	}
+}

+ 73 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/buffer_test.go

@@ -0,0 +1,73 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"io"
+	"reflect"
+	"testing"
+)
+
+// bufferReadTests is a table of single-Read scenarios.
+// Each case starts from a hand-built buffer state and checks the
+// returned count, error, bytes read, and the buffer state afterwards.
+var bufferReadTests = []struct {
+	buf      buffer // initial buffer state
+	read, wn int    // size of the destination slice; wanted n
+	werr     error  // wanted error
+	wp       []byte // wanted bytes copied out
+	wbuf     buffer // wanted buffer state after the Read
+}{
+	// One unread byte at the front of an open buffer.
+	{
+		buffer{[]byte{'a', 0}, 0, 1, false, nil},
+		5, 1, nil, []byte{'a'},
+		buffer{[]byte{'a', 0}, 1, 1, false, nil},
+	},
+	// Same, but closed: draining the last byte surfaces io.EOF.
+	{
+		buffer{[]byte{'a', 0}, 0, 1, true, io.EOF},
+		5, 1, io.EOF, []byte{'a'},
+		buffer{[]byte{'a', 0}, 1, 1, true, io.EOF},
+	},
+	// One unread byte in the middle (r > 0) of an open buffer.
+	{
+		buffer{[]byte{0, 'a'}, 1, 2, false, nil},
+		5, 1, nil, []byte{'a'},
+		buffer{[]byte{0, 'a'}, 2, 2, false, nil},
+	},
+	// Same, but closed.
+	{
+		buffer{[]byte{0, 'a'}, 1, 2, true, io.EOF},
+		5, 1, io.EOF, []byte{'a'},
+		buffer{[]byte{0, 'a'}, 2, 2, true, io.EOF},
+	},
+	// Empty open buffer: reading is an error (errReadEmpty).
+	{
+		buffer{[]byte{}, 0, 0, false, nil},
+		5, 0, errReadEmpty, []byte{},
+		buffer{[]byte{}, 0, 0, false, nil},
+	},
+	// Empty closed buffer: the close error is returned.
+	{
+		buffer{[]byte{}, 0, 0, true, io.EOF},
+		5, 0, io.EOF, []byte{},
+		buffer{[]byte{}, 0, 0, true, io.EOF},
+	},
+}
+
+// TestBufferRead runs every bufferReadTests case and verifies both
+// the Read result and the post-Read buffer state.
+func TestBufferRead(t *testing.T) {
+	for i, tt := range bufferReadTests {
+		read := make([]byte, tt.read)
+		n, err := tt.buf.Read(read)
+		if n != tt.wn {
+			t.Errorf("#%d: wn = %d want %d", i, n, tt.wn)
+			continue
+		}
+		if err != tt.werr {
+			t.Errorf("#%d: werr = %v want %v", i, err, tt.werr)
+			continue
+		}
+		read = read[:n]
+		if !reflect.DeepEqual(read, tt.wp) {
+			t.Errorf("#%d: read = %+v want %+v", i, read, tt.wp)
+		}
+		if !reflect.DeepEqual(tt.buf, tt.wbuf) {
+			t.Errorf("#%d: buf = %+v want %+v", i, tt.buf, tt.wbuf)
+		}
+	}
+}

+ 78 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/errors.go

@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "fmt"
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+// The spec's registered error codes, 0x0 through 0xd.
+const (
+	ErrCodeNo                 ErrCode = 0x0
+	ErrCodeProtocol           ErrCode = 0x1
+	ErrCodeInternal           ErrCode = 0x2
+	ErrCodeFlowControl        ErrCode = 0x3
+	ErrCodeSettingsTimeout    ErrCode = 0x4
+	ErrCodeStreamClosed       ErrCode = 0x5
+	ErrCodeFrameSize          ErrCode = 0x6
+	ErrCodeRefusedStream      ErrCode = 0x7
+	ErrCodeCancel             ErrCode = 0x8
+	ErrCodeCompression        ErrCode = 0x9
+	ErrCodeConnect            ErrCode = 0xa
+	ErrCodeEnhanceYourCalm    ErrCode = 0xb
+	ErrCodeInadequateSecurity ErrCode = 0xc
+	ErrCodeHTTP11Required     ErrCode = 0xd
+)
+
+// errCodeName maps each registered code to its spec name.
+var errCodeName = map[ErrCode]string{
+	ErrCodeNo:                 "NO_ERROR",
+	ErrCodeProtocol:           "PROTOCOL_ERROR",
+	ErrCodeInternal:           "INTERNAL_ERROR",
+	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
+	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
+	ErrCodeStreamClosed:       "STREAM_CLOSED",
+	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
+	ErrCodeRefusedStream:      "REFUSED_STREAM",
+	ErrCodeCancel:             "CANCEL",
+	ErrCodeCompression:        "COMPRESSION_ERROR",
+	ErrCodeConnect:            "CONNECT_ERROR",
+	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
+	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
+}
+
+// String returns the spec name for e, or a hex-formatted fallback
+// for codes not in errCodeName.
+func (e ErrCode) String() string {
+	if s, ok := errCodeName[e]; ok {
+		return s
+	}
+	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+// Error implements the error interface.
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+	StreamID uint32
+	Code     ErrCode
+}
+
+// Error implements the error interface.
+func (e StreamError) Error() string {
+	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+// Error implements the error interface.
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

+ 27 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/errors_test.go

@@ -0,0 +1,27 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "testing"
+
+// TestErrCodeString checks ErrCode.String for a named code, the
+// highest registered code (0xd), and an unregistered code (0xf),
+// which must take the hex fallback path.
+func TestErrCodeString(t *testing.T) {
+	tests := []struct {
+		err  ErrCode
+		want string
+	}{
+		{ErrCodeProtocol, "PROTOCOL_ERROR"},
+		{0xd, "HTTP_1_1_REQUIRED"},
+		{0xf, "unknown error code 0xf"},
+	}
+	for i, tt := range tests {
+		got := tt.err.String()
+		if got != tt.want {
+			t.Errorf("%d. Error = %q; want %q", i, got, tt.want)
+		}
+	}
+}

+ 51 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/flow.go

@@ -0,0 +1,51 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+	// n is the number of DATA bytes we're allowed to send.
+	// A flow is kept both on a conn and a per-stream.
+	n int32
+
+	// conn points to the shared connection-level flow that is
+	// shared by all streams on that conn. It is nil for the flow
+	// that's on the conn directly.
+	conn *flow
+}
+
+// setConnFlow links f (a per-stream flow) to the connection-level flow cf.
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+// available returns how many bytes may be sent now: f's own window,
+// further capped by the connection-level window when one is linked.
+func (f *flow) available() int32 {
+	n := f.n
+	if f.conn != nil && f.conn.n < n {
+		n = f.conn.n
+	}
+	return n
+}
+
+// take consumes n bytes from the window, debiting both the stream
+// and (if linked) the connection window. Callers must check
+// available() first; taking more than is available panics.
+func (f *flow) take(n int32) {
+	if n > f.available() {
+		panic("internal error: took too much")
+	}
+	f.n -= n
+	if f.conn != nil {
+		f.conn.n -= n
+	}
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+// Note that add adjusts only f itself, never the linked conn flow.
+func (f *flow) add(n int32) bool {
+	remain := (1<<31 - 1) - f.n
+	if n > remain {
+		return false
+	}
+	f.n += n
+	return true
+}

+ 54 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/flow_test.go

@@ -0,0 +1,54 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "testing"
+
+// TestFlow verifies that a stream flow linked to a connection flow is
+// capped by the smaller connection window, and that take debits both.
+func TestFlow(t *testing.T) {
+	var st flow
+	var conn flow
+	st.add(3)
+	conn.add(2)
+
+	// Unlinked: only the stream's own window applies.
+	if got, want := st.available(), int32(3); got != want {
+		t.Errorf("available = %d; want %d", got, want)
+	}
+	st.setConnFlow(&conn)
+	// Linked: capped by the smaller connection window.
+	if got, want := st.available(), int32(2); got != want {
+		t.Errorf("after parent setup, available = %d; want %d", got, want)
+	}
+
+	st.take(2)
+	// take must debit both windows.
+	if got, want := conn.available(), int32(0); got != want {
+		t.Errorf("after taking 2, conn = %d; want %d", got, want)
+	}
+	if got, want := st.available(), int32(0); got != want {
+		t.Errorf("after taking 2, stream = %d; want %d", got, want)
+	}
+}
+
+// TestFlowAdd exercises add with positive, negative, maximal, and
+// overflowing increments (the 2^31-1 cap).
+func TestFlowAdd(t *testing.T) {
+	var f flow
+	if !f.add(1) {
+		t.Fatal("failed to add 1")
+	}
+	if !f.add(-1) {
+		t.Fatal("failed to add -1")
+	}
+	if got, want := f.available(), int32(0); got != want {
+		t.Fatalf("size = %d; want %d", got, want)
+	}
+	if !f.add(1<<31 - 1) {
+		t.Fatal("failed to add 2^31-1")
+	}
+	if got, want := f.available(), int32(1<<31-1); got != want {
+		t.Fatalf("size = %d; want %d", got, want)
+	}
+	if f.add(1) {
+		t.Fatal("adding 1 to max shouldn't be allowed")
+	}
+
+}

+ 1113 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/frame.go

@@ -0,0 +1,1113 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+	FrameData         FrameType = 0x0
+	FrameHeaders      FrameType = 0x1
+	FramePriority     FrameType = 0x2
+	FrameRSTStream    FrameType = 0x3
+	FrameSettings     FrameType = 0x4
+	FramePushPromise  FrameType = 0x5
+	FramePing         FrameType = 0x6
+	FrameGoAway       FrameType = 0x7
+	FrameWindowUpdate FrameType = 0x8
+	FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+	FrameData:         "DATA",
+	FrameHeaders:      "HEADERS",
+	FramePriority:     "PRIORITY",
+	FrameRSTStream:    "RST_STREAM",
+	FrameSettings:     "SETTINGS",
+	FramePushPromise:  "PUSH_PROMISE",
+	FramePing:         "PING",
+	FrameGoAway:       "GOAWAY",
+	FrameWindowUpdate: "WINDOW_UPDATE",
+	FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+	if s, ok := frameName[t]; ok {
+		return s
+	}
+	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+	return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+	// Data Frame
+	FlagDataEndStream Flags = 0x1
+	FlagDataPadded    Flags = 0x8
+
+	// Headers Frame
+	FlagHeadersEndStream  Flags = 0x1
+	FlagHeadersEndHeaders Flags = 0x4
+	FlagHeadersPadded     Flags = 0x8
+	FlagHeadersPriority   Flags = 0x20
+
+	// Settings Frame
+	FlagSettingsAck Flags = 0x1
+
+	// Ping Frame
+	FlagPingAck Flags = 0x1
+
+	// Continuation Frame
+	FlagContinuationEndHeaders Flags = 0x4
+
+	FlagPushPromiseEndHeaders = 0x4
+	FlagPushPromisePadded     = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+	FrameData: {
+		FlagDataEndStream: "END_STREAM",
+		FlagDataPadded:    "PADDED",
+	},
+	FrameHeaders: {
+		FlagHeadersEndStream:  "END_STREAM",
+		FlagHeadersEndHeaders: "END_HEADERS",
+		FlagHeadersPadded:     "PADDED",
+		FlagHeadersPriority:   "PRIORITY",
+	},
+	FrameSettings: {
+		FlagSettingsAck: "ACK",
+	},
+	FramePing: {
+		FlagPingAck: "ACK",
+	},
+	FrameContinuation: {
+		FlagContinuationEndHeaders: "END_HEADERS",
+	},
+	FramePushPromise: {
+		FlagPushPromiseEndHeaders: "END_HEADERS",
+		FlagPushPromisePadded:     "PADDED",
+	},
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+	FrameData:         parseDataFrame,
+	FrameHeaders:      parseHeadersFrame,
+	FramePriority:     parsePriorityFrame,
+	FrameRSTStream:    parseRSTStreamFrame,
+	FrameSettings:     parseSettingsFrame,
+	FramePushPromise:  parsePushPromise,
+	FramePing:         parsePingFrame,
+	FrameGoAway:       parseGoAwayFrame,
+	FrameWindowUpdate: parseWindowUpdateFrame,
+	FrameContinuation: parseContinuationFrame,
+}
+
+// typeFrameParser returns the parser registered for t in
+// frameParsers, falling back to parseUnknownFrame for extension
+// (unregistered) frame types.
+func typeFrameParser(t FrameType) frameParser {
+	if f := frameParsers[t]; f != nil {
+		return f
+	}
+	return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+	valid bool // caller can access []byte fields in the Frame
+
+	// Type is the 1 byte frame type. There are ten standard frame
+	// types, but extension frame types may be written by WriteRawFrame
+	// and will be returned by ReadFrame (as UnknownFrame).
+	Type FrameType
+
+	// Flags are the 1 byte of 8 potential bit flags per frame.
+	// They are specific to the frame type.
+	Flags Flags
+
+	// Length is the length of the frame, not including the 9 byte header.
+	// The maximum size is one byte less than 16MB (uint24), but only
+	// frames up to 16KB are allowed without peer agreement.
+	Length uint32
+
+	// StreamID is which stream this frame is for. Certain frames
+	// are not stream-specific, in which case this field is 0.
+	StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+// String renders the header for debugging, e.g.
+// "[FrameHeader SETTINGS flags=ACK len=0]". Named flags are joined
+// with '|'; flag bits with no name for this frame type are printed
+// in hex.
+func (h FrameHeader) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("[FrameHeader ")
+	buf.WriteString(h.Type.String())
+	if h.Flags != 0 {
+		buf.WriteString(" flags=")
+		set := 0
+		// Walk all 8 possible flag bits, low to high.
+		for i := uint8(0); i < 8; i++ {
+			if h.Flags&(1<<i) == 0 {
+				continue
+			}
+			set++
+			if set > 1 {
+				buf.WriteByte('|')
+			}
+			name := flagName[h.Type][Flags(1<<i)]
+			if name != "" {
+				buf.WriteString(name)
+			} else {
+				// No name registered for this bit/type combo.
+				fmt.Fprintf(&buf, "0x%x", 1<<i)
+			}
+		}
+	}
+	if h.StreamID != 0 {
+		fmt.Fprintf(&buf, " stream=%d", h.StreamID)
+	}
+	fmt.Fprintf(&buf, " len=%d]", h.Length)
+	return buf.String()
+}
+
+func (h *FrameHeader) checkValid() {
+	if !h.valid {
+		panic("Frame accessor called on non-owned Frame")
+	}
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, frameHeaderLen)
+		return &buf
+	},
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+	bufp := fhBytes.Get().(*[]byte)
+	defer fhBytes.Put(bufp)
+	return readFrameHeader(*bufp, r)
+}
+
+// readFrameHeader reads and decodes a 9-byte frame header from r
+// into the caller-supplied scratch buffer buf, which must be at
+// least frameHeaderLen bytes long.
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+	_, err := io.ReadFull(r, buf[:frameHeaderLen])
+	if err != nil {
+		return FrameHeader{}, err
+	}
+	return FrameHeader{
+		// Length is a 24-bit big-endian integer.
+		Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+		Type:   FrameType(buf[3]),
+		Flags:  Flags(buf[4]),
+		// Mask off the reserved high bit of the stream identifier.
+		StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+		valid:    true,
+	}, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+	Header() FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to mark this
+	// frame's buffers as invalid, since the subsequent
+	// frame will reuse them.
+	invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+	r         io.Reader
+	lastFrame Frame
+
+	maxReadSize uint32
+	headerBuf   [frameHeaderLen]byte
+
+	// TODO: let getReadBuf be configurable, and use a less memory-pinning
+	// allocator in server.go to minimize memory pinned for many idle conns.
+	// Will probably also need to make frame invalidation have a hook too.
+	getReadBuf func(size uint32) []byte
+	readBuf    []byte // cache for default getReadBuf
+
+	maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+	w    io.Writer
+	wbuf []byte
+
+	// AllowIllegalWrites permits the Framer's Write methods to
+	// write frames that do not conform to the HTTP/2 spec.  This
+	// permits using the Framer to test other HTTP/2
+	// implementations' conformance to the spec.
+	// If false, the Write methods will prefer to return an error
+	// rather than comply.
+	AllowIllegalWrites bool
+
+	// TODO: track which type of frame & with which flags was sent
+	// last.  Then return an error (unless AllowIllegalWrites) if
+	// we're in the middle of a header block and a
+	// non-Continuation or Continuation on a different stream is
+	// attempted to be written.
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+	// Write the FrameHeader.
+	f.wbuf = append(f.wbuf[:0],
+		0, // 3 bytes of length, filled in in endWrite
+		0,
+		0,
+		byte(ftype),
+		byte(flags),
+		byte(streamID>>24),
+		byte(streamID>>16),
+		byte(streamID>>8),
+		byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+	// Now that we know the final size, fill in the FrameHeader in
+	// the space previously reserved for it. Abuse append.
+	length := len(f.wbuf) - frameHeaderLen
+	if length >= (1 << 24) {
+		return ErrFrameTooLarge
+	}
+	_ = append(f.wbuf[:0],
+		byte(length>>16),
+		byte(length>>8),
+		byte(length))
+	n, err := f.w.Write(f.wbuf)
+	if err == nil && n != len(f.wbuf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+func (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte)  { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+	f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+	minMaxFrameSize = 1 << 14
+	maxFrameSize    = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+	fr := &Framer{
+		w: w,
+		r: r,
+	}
+	fr.getReadBuf = func(size uint32) []byte {
+		if cap(fr.readBuf) >= int(size) {
+			return fr.readBuf[:size]
+		}
+		fr.readBuf = make([]byte, size)
+		return fr.readBuf
+	}
+	fr.SetMaxReadFrameSize(maxFrameSize)
+	return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+	if v > maxFrameSize {
+		v = maxFrameSize
+	}
+	fr.maxReadSize = v
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+// If the frame is larger than previously set with SetMaxReadFrameSize,
+// the returned error is ErrFrameTooLarge.
+func (fr *Framer) ReadFrame() (Frame, error) {
+	if fr.lastFrame != nil {
+		fr.lastFrame.invalidate()
+	}
+	fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+	if err != nil {
+		return nil, err
+	}
+	if fh.Length > fr.maxReadSize {
+		return nil, ErrFrameTooLarge
+	}
+	payload := fr.getReadBuf(fh.Length)
+	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		return nil, err
+	}
+	f, err := typeFrameParser(fh.Type)(fh, payload)
+	if err != nil {
+		return nil, err
+	}
+	fr.lastFrame = f
+	return f, nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+	FrameHeader
+	data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+	f.checkValid()
+	return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		// DATA frames MUST be associated with a stream. If a
+		// DATA frame is received whose stream identifier
+		// field is 0x0, the recipient MUST respond with a
+		// connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	f := &DataFrame{
+		FrameHeader: fh,
+	}
+	var padSize byte
+	if fh.Flags.Has(FlagDataPadded) {
+		var err error
+		payload, padSize, err = readByte(payload)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if int(padSize) > len(payload) {
+		// If the length of the padding is greater than the
+		// length of the frame payload, the recipient MUST
+		// treat this as a connection error.
+		// Filed: https://github.com/http2/http2-spec/issues/610
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	f.data = payload[:len(payload)-int(padSize)]
+	return f, nil
+}
+
+var errStreamID = errors.New("invalid streamid")
+
+func validStreamID(streamID uint32) bool {
+	return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+	// TODO: ignoring padding for now. will add when somebody cares.
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if endStream {
+		flags |= FlagDataEndStream
+	}
+	f.startWrite(FrameData, flags, streamID)
+	f.wbuf = append(f.wbuf, data...)
+	return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+	FrameHeader
+	p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+		// When this (ACK 0x1) bit is set, the payload of the
+		// SETTINGS frame MUST be empty.  Receipt of a
+		// SETTINGS frame with the ACK flag set and a length
+		// field value other than 0 MUST be treated as a
+		// connection error (Section 5.4.1) of type
+		// FRAME_SIZE_ERROR.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		// SETTINGS frames always apply to a connection,
+		// never a single stream.  The stream identifier for a
+		// SETTINGS frame MUST be zero (0x0).  If an endpoint
+		// receives a SETTINGS frame whose stream identifier
+		// field is anything other than 0x0, the endpoint MUST
+		// respond with a connection error (Section 5.4.1) of
+		// type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p)%6 != 0 {
+		// Expecting a whole multiple of 6-byte settings.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	f := &SettingsFrame{FrameHeader: fh, p: p}
+	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+		// Values above the maximum flow control window size of 2^31 - 1 MUST
+		// be treated as a connection error (Section 5.4.1) of type
+		// FLOW_CONTROL_ERROR.
+		return nil, ConnectionError(ErrCodeFlowControl)
+	}
+	return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+	return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+		if settingID == s {
+			return binary.BigEndian.Uint32(buf[2:6]), true
+		}
+		buf = buf[6:]
+	}
+	return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		if err := fn(Setting{
+			SettingID(binary.BigEndian.Uint16(buf[:2])),
+			binary.BigEndian.Uint32(buf[2:6]),
+		}); err != nil {
+			return err
+		}
+		buf = buf[6:]
+	}
+	return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+	f.startWrite(FrameSettings, 0, 0)
+	for _, s := range settings {
+		f.writeUint16(uint16(s.ID))
+		f.writeUint32(s.Val)
+	}
+	return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+	f.startWrite(FrameSettings, FlagSettingsAck, 0)
+	return f.endWrite()
+}
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+	FrameHeader
+	Data [8]byte
+}
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if len(payload) != 8 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	f := &PingFrame{FrameHeader: fh}
+	copy(f.Data[:], payload)
+	return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+	var flags Flags
+	if ack {
+		flags = FlagPingAck
+	}
+	f.startWrite(FramePing, flags, 0)
+	f.writeBytes(data[:])
+	return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+	FrameHeader
+	LastStreamID uint32
+	ErrCode      ErrCode
+	debugData    []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+	f.checkValid()
+	return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if fh.StreamID != 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p) < 8 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	return &GoAwayFrame{
+		FrameHeader:  fh,
+		LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+		ErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),
+		debugData:    p[8:],
+	}, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+	f.startWrite(FrameGoAway, 0, 0)
+	f.writeUint32(maxStreamID & (1<<31 - 1))
+	f.writeUint32(uint32(code))
+	f.writeBytes(debugData)
+	return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+	FrameHeader
+	p []byte
+}
+
+// Payload returns the frame's payload (after the header).  It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+	f.checkValid()
+	return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+	return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+	FrameHeader
+	Increment uint32
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+	if inc == 0 {
+		// A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+		// increment of 0 as a stream error (Section 5.4.2) of
+		// type PROTOCOL_ERROR; errors on the connection flow
+		// control window MUST be treated as a connection
+		// error (Section 5.4.1).
+		if fh.StreamID == 0 {
+			return nil, ConnectionError(ErrCodeProtocol)
+		}
+		return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+	}
+	return &WindowUpdateFrame{
+		FrameHeader: fh,
+		Increment:   inc,
+	}, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+	// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+	if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+		return errors.New("illegal window increment value")
+	}
+	f.startWrite(FrameWindowUpdate, 0, streamID)
+	f.writeUint32(incr)
+	return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+	FrameHeader
+
+	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
+	Priority PriorityParam
+
+	headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+	hf := &HeadersFrame{
+		FrameHeader: fh,
+	}
+	if fh.StreamID == 0 {
+		// HEADERS frames MUST be associated with a stream.  If a HEADERS frame
+		// is received whose stream identifier field is 0x0, the recipient MUST
+		// respond with a connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	var padLength uint8
+	if fh.Flags.Has(FlagHeadersPadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+	if fh.Flags.Has(FlagHeadersPriority) {
+		var v uint32
+		p, v, err = readUint32(p)
+		if err != nil {
+			return nil, err
+		}
+		hf.Priority.StreamDep = v & 0x7fffffff
+		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+		p, hf.Priority.Weight, err = readByte(p)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if len(p)-int(padLength) <= 0 {
+		return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+	}
+	hf.headerFragBuf = p[:len(p)-int(padLength)]
+	return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndStream indicates that the header block is the last that
+	// the endpoint will send for the identified stream. Setting
+	// this flag causes the stream to enter one of "half closed"
+	// states.
+	EndStream bool
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+
+	// Priority, if non-zero, includes stream priority information
+	// in the HEADER frame.
+	Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagHeadersPadded
+	}
+	if p.EndStream {
+		flags |= FlagHeadersEndStream
+	}
+	if p.EndHeaders {
+		flags |= FlagHeadersEndHeaders
+	}
+	if !p.Priority.IsZero() {
+		flags |= FlagHeadersPriority
+	}
+	f.startWrite(FrameHeaders, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !p.Priority.IsZero() {
+		v := p.Priority.StreamDep
+		if !validStreamID(v) && !f.AllowIllegalWrites {
+			return errors.New("invalid dependent stream id")
+		}
+		if p.Priority.Exclusive {
+			v |= 1 << 31
+		}
+		f.writeUint32(v)
+		f.writeByte(p.Priority.Weight)
+	}
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+	FrameHeader
+	PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+	// StreamDep is a 31-bit stream identifier for the
+	// stream that this stream depends on. Zero means no
+	// dependency.
+	StreamDep uint32
+
+	// Exclusive is whether the dependency is exclusive.
+	Exclusive bool
+
+	// Weight is the stream's zero-indexed weight. It should be
+	// set together with StreamDep, or neither should be set.  Per
+	// the spec, "Add one to the value to obtain a weight between
+	// 1 and 256."
+	Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+	return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(payload) != 5 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	v := binary.BigEndian.Uint32(payload[:4])
+	streamID := v & 0x7fffffff // mask off high bit
+	return &PriorityFrame{
+		FrameHeader: fh,
+		PriorityParam: PriorityParam{
+			Weight:    payload[4],
+			StreamDep: streamID,
+			Exclusive: streamID != v, // was high bit set?
+		},
+	}, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.startWrite(FramePriority, 0, streamID)
+	v := p.StreamDep
+	if p.Exclusive {
+		v |= 1 << 31
+	}
+	f.writeUint32(v)
+	f.writeByte(p.Weight)
+	return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+	FrameHeader
+	ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID == 0 {
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.startWrite(FrameRSTStream, 0, streamID)
+	f.writeUint32(uint32(code))
+	return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+	FrameHeader
+	headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+	return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if endHeaders {
+		flags |= FlagContinuationEndHeaders
+	}
+	f.startWrite(FrameContinuation, flags, streamID)
+	f.wbuf = append(f.wbuf, headerBlockFragment...)
+	return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+	FrameHeader
+	PromiseID     uint32
+	headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+	pp := &PushPromiseFrame{
+		FrameHeader: fh,
+	}
+	if pp.StreamID == 0 {
+		// PUSH_PROMISE frames MUST be associated with an existing,
+		// peer-initiated stream. The stream identifier of a
+		// PUSH_PROMISE frame indicates the stream it is associated
+		// with. If the stream identifier field specifies the value
+		// 0x0, a recipient MUST respond with a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	// The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames
+	var padLength uint8
+	if fh.Flags.Has(FlagPushPromisePadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+
+	p, pp.PromiseID, err = readUint32(p)
+	if err != nil {
+		return
+	}
+	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+	if int(padLength) > len(p) {
+		// like the DATA frame, error out if padding is longer than the body.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	pp.headerFragBuf = p[:len(p)-int(padLength)]
+	return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+
+	// PromiseID is the required Stream ID which this
+	// Push Promise announces.
+	PromiseID uint32
+
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagPushPromisePadded
+	}
+	if p.EndHeaders {
+		flags |= FlagPushPromiseEndHeaders
+	}
+	f.startWrite(FramePushPromise, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.writeUint32(p.PromiseID)
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+	f.startWrite(t, flags, streamID)
+	f.writeBytes(payload)
+	return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+	if len(p) == 0 {
+		return nil, 0, io.ErrUnexpectedEOF
+	}
+	return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+	if len(p) < 4 {
+		return nil, 0, io.ErrUnexpectedEOF
+	}
+	return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+	StreamEnded() bool
+}
+
+type headersEnder interface {
+	HeadersEnded() bool
+}

+ 578 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/frame_test.go

@@ -0,0 +1,578 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"bytes"
+	"reflect"
+	"strings"
+	"testing"
+	"unsafe"
+)
+
+func testFramer() (*Framer, *bytes.Buffer) {
+	buf := new(bytes.Buffer)
+	return NewFramer(buf, buf), buf
+}
+
+func TestFrameSizes(t *testing.T) {
+	// Catch people rearranging the FrameHeader fields.
+	if got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want {
+		t.Errorf("FrameHeader size = %d; want %d", got, want)
+	}
+}
+
+func TestWriteRST(t *testing.T) {
+	fr, buf := testFramer()
+	var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+	var errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4
+	fr.WriteRSTStream(streamID, ErrCode(errCode))
+	const wantEnc = "\x00\x00\x04\x03\x00\x01\x02\x03\x04\x07\x06\x05\x04"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &RSTStreamFrame{
+		FrameHeader: FrameHeader{
+			valid:    true,
+			Type:     0x3,
+			Flags:    0x0,
+			Length:   0x4,
+			StreamID: 0x1020304,
+		},
+		ErrCode: 0x7060504,
+	}
+	if !reflect.DeepEqual(f, want) {
+		t.Errorf("parsed back %#v; want %#v", f, want)
+	}
+}
+
+func TestWriteData(t *testing.T) {
+	fr, buf := testFramer()
+	var streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4
+	data := []byte("ABC")
+	fr.WriteData(streamID, true, data)
+	const wantEnc = "\x00\x00\x03\x00\x01\x01\x02\x03\x04ABC"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	df, ok := f.(*DataFrame)
+	if !ok {
+		t.Fatalf("got %T; want *DataFrame", f)
+	}
+	if !bytes.Equal(df.Data(), data) {
+		t.Errorf("got %q; want %q", df.Data(), data)
+	}
+	if f.Header().Flags&1 == 0 {
+		t.Errorf("didn't see END_STREAM flag")
+	}
+}
+
+func TestWriteHeaders(t *testing.T) {
+	tests := []struct {
+		name      string
+		p         HeadersFrameParam
+		wantEnc   string
+		wantFrame *HeadersFrame
+	}{
+		{
+			"basic",
+			HeadersFrameParam{
+				StreamID:      42,
+				BlockFragment: []byte("abc"),
+				Priority:      PriorityParam{},
+			},
+			"\x00\x00\x03\x01\x00\x00\x00\x00*abc",
+			&HeadersFrame{
+				FrameHeader: FrameHeader{
+					valid:    true,
+					StreamID: 42,
+					Type:     FrameHeaders,
+					Length:   uint32(len("abc")),
+				},
+				Priority:      PriorityParam{},
+				headerFragBuf: []byte("abc"),
+			},
+		},
+		{
+			"basic + end flags",
+			HeadersFrameParam{
+				StreamID:      42,
+				BlockFragment: []byte("abc"),
+				EndStream:     true,
+				EndHeaders:    true,
+				Priority:      PriorityParam{},
+			},
+			"\x00\x00\x03\x01\x05\x00\x00\x00*abc",
+			&HeadersFrame{
+				FrameHeader: FrameHeader{
+					valid:    true,
+					StreamID: 42,
+					Type:     FrameHeaders,
+					Flags:    FlagHeadersEndStream | FlagHeadersEndHeaders,
+					Length:   uint32(len("abc")),
+				},
+				Priority:      PriorityParam{},
+				headerFragBuf: []byte("abc"),
+			},
+		},
+		{
+			"with padding",
+			HeadersFrameParam{
+				StreamID:      42,
+				BlockFragment: []byte("abc"),
+				EndStream:     true,
+				EndHeaders:    true,
+				PadLength:     5,
+				Priority:      PriorityParam{},
+			},
+			"\x00\x00\t\x01\r\x00\x00\x00*\x05abc\x00\x00\x00\x00\x00",
+			&HeadersFrame{
+				FrameHeader: FrameHeader{
+					valid:    true,
+					StreamID: 42,
+					Type:     FrameHeaders,
+					Flags:    FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded,
+					Length:   uint32(1 + len("abc") + 5), // pad length + contents + padding
+				},
+				Priority:      PriorityParam{},
+				headerFragBuf: []byte("abc"),
+			},
+		},
+		{
+			"with priority",
+			HeadersFrameParam{
+				StreamID:      42,
+				BlockFragment: []byte("abc"),
+				EndStream:     true,
+				EndHeaders:    true,
+				PadLength:     2,
+				Priority: PriorityParam{
+					StreamDep: 15,
+					Exclusive: true,
+					Weight:    127,
+				},
+			},
+			"\x00\x00\v\x01-\x00\x00\x00*\x02\x80\x00\x00\x0f\u007fabc\x00\x00",
+			&HeadersFrame{
+				FrameHeader: FrameHeader{
+					valid:    true,
+					StreamID: 42,
+					Type:     FrameHeaders,
+					Flags:    FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,
+					Length:   uint32(1 + 5 + len("abc") + 2), // pad length + priority + contents + padding
+				},
+				Priority: PriorityParam{
+					StreamDep: 15,
+					Exclusive: true,
+					Weight:    127,
+				},
+				headerFragBuf: []byte("abc"),
+			},
+		},
+	}
+	for _, tt := range tests {
+		fr, buf := testFramer()
+		if err := fr.WriteHeaders(tt.p); err != nil {
+			t.Errorf("test %q: %v", tt.name, err)
+			continue
+		}
+		if buf.String() != tt.wantEnc {
+			t.Errorf("test %q: encoded %q; want %q", tt.name, buf.Bytes(), tt.wantEnc)
+		}
+		f, err := fr.ReadFrame()
+		if err != nil {
+			t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+			continue
+		}
+		if !reflect.DeepEqual(f, tt.wantFrame) {
+			t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+		}
+	}
+}
+
+func TestWriteContinuation(t *testing.T) {
+	const streamID = 42
+	tests := []struct {
+		name string
+		end  bool
+		frag []byte
+
+		wantFrame *ContinuationFrame
+	}{
+		{
+			"not end",
+			false,
+			[]byte("abc"),
+			&ContinuationFrame{
+				FrameHeader: FrameHeader{
+					valid:    true,
+					StreamID: streamID,
+					Type:     FrameContinuation,
+					Length:   uint32(len("abc")),
+				},
+				headerFragBuf: []byte("abc"),
+			},
+		},
+		{
+			"end",
+			true,
+			[]byte("def"),
+			&ContinuationFrame{
+				FrameHeader: FrameHeader{
+					valid:    true,
+					StreamID: streamID,
+					Type:     FrameContinuation,
+					Flags:    FlagContinuationEndHeaders,
+					Length:   uint32(len("def")),
+				},
+				headerFragBuf: []byte("def"),
+			},
+		},
+	}
+	for _, tt := range tests {
+		fr, _ := testFramer()
+		if err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil {
+			t.Errorf("test %q: %v", tt.name, err)
+			continue
+		}
+		f, err := fr.ReadFrame()
+		if err != nil {
+			t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+			continue
+		}
+		if !reflect.DeepEqual(f, tt.wantFrame) {
+			t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+		}
+	}
+}
+
+func TestWritePriority(t *testing.T) {
+	const streamID = 42
+	tests := []struct {
+		name      string
+		priority  PriorityParam
+		wantFrame *PriorityFrame
+	}{
+		{
+			"not exclusive",
+			PriorityParam{
+				StreamDep: 2,
+				Exclusive: false,
+				Weight:    127,
+			},
+			&PriorityFrame{
+				FrameHeader{
+					valid:    true,
+					StreamID: streamID,
+					Type:     FramePriority,
+					Length:   5,
+				},
+				PriorityParam{
+					StreamDep: 2,
+					Exclusive: false,
+					Weight:    127,
+				},
+			},
+		},
+
+		{
+			"exclusive",
+			PriorityParam{
+				StreamDep: 3,
+				Exclusive: true,
+				Weight:    77,
+			},
+			&PriorityFrame{
+				FrameHeader{
+					valid:    true,
+					StreamID: streamID,
+					Type:     FramePriority,
+					Length:   5,
+				},
+				PriorityParam{
+					StreamDep: 3,
+					Exclusive: true,
+					Weight:    77,
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		fr, _ := testFramer()
+		if err := fr.WritePriority(streamID, tt.priority); err != nil {
+			t.Errorf("test %q: %v", tt.name, err)
+			continue
+		}
+		f, err := fr.ReadFrame()
+		if err != nil {
+			t.Errorf("test %q: failed to read the frame back: %v", tt.name, err)
+			continue
+		}
+		if !reflect.DeepEqual(f, tt.wantFrame) {
+			t.Errorf("test %q: mismatch.\n got: %#v\nwant: %#v\n", tt.name, f, tt.wantFrame)
+		}
+	}
+}
+
+func TestWriteSettings(t *testing.T) {
+	fr, buf := testFramer()
+	settings := []Setting{{1, 2}, {3, 4}}
+	fr.WriteSettings(settings...)
+	const wantEnc = "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x03\x00\x00\x00\x04"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sf, ok := f.(*SettingsFrame)
+	if !ok {
+		t.Fatalf("Got a %T; want a SettingsFrame", f)
+	}
+	var got []Setting
+	sf.ForeachSetting(func(s Setting) error {
+		got = append(got, s)
+		valBack, ok := sf.Value(s.ID)
+		if !ok || valBack != s.Val {
+			t.Errorf("Value(%d) = %v, %v; want %v, true", s.ID, valBack, ok, s.Val)
+		}
+		return nil
+	})
+	if !reflect.DeepEqual(settings, got) {
+		t.Errorf("Read settings %+v != written settings %+v", got, settings)
+	}
+}
+
+func TestWriteSettingsAck(t *testing.T) {
+	fr, buf := testFramer()
+	fr.WriteSettingsAck()
+	const wantEnc = "\x00\x00\x00\x04\x01\x00\x00\x00\x00"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+}
+
+func TestWriteWindowUpdate(t *testing.T) {
+	fr, buf := testFramer()
+	const streamID = 1<<24 + 2<<16 + 3<<8 + 4
+	const incr = 7<<24 + 6<<16 + 5<<8 + 4
+	if err := fr.WriteWindowUpdate(streamID, incr); err != nil {
+		t.Fatal(err)
+	}
+	const wantEnc = "\x00\x00\x04\x08\x00\x01\x02\x03\x04\x07\x06\x05\x04"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &WindowUpdateFrame{
+		FrameHeader: FrameHeader{
+			valid:    true,
+			Type:     0x8,
+			Flags:    0x0,
+			Length:   0x4,
+			StreamID: 0x1020304,
+		},
+		Increment: 0x7060504,
+	}
+	if !reflect.DeepEqual(f, want) {
+		t.Errorf("parsed back %#v; want %#v", f, want)
+	}
+}
+
+func TestWritePing(t *testing.T)    { testWritePing(t, false) }
+func TestWritePingAck(t *testing.T) { testWritePing(t, true) }
+
+func testWritePing(t *testing.T, ack bool) {
+	fr, buf := testFramer()
+	if err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
+		t.Fatal(err)
+	}
+	var wantFlags Flags
+	if ack {
+		wantFlags = FlagPingAck
+	}
+	var wantEnc = "\x00\x00\x08\x06" + string(wantFlags) + "\x00\x00\x00\x00" + "\x01\x02\x03\x04\x05\x06\x07\x08"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &PingFrame{
+		FrameHeader: FrameHeader{
+			valid:    true,
+			Type:     0x6,
+			Flags:    wantFlags,
+			Length:   0x8,
+			StreamID: 0,
+		},
+		Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
+	}
+	if !reflect.DeepEqual(f, want) {
+		t.Errorf("parsed back %#v; want %#v", f, want)
+	}
+}
+
+func TestReadFrameHeader(t *testing.T) {
+	tests := []struct {
+		in   string
+		want FrameHeader
+	}{
+		{in: "\x00\x00\x00" + "\x00" + "\x00" + "\x00\x00\x00\x00", want: FrameHeader{}},
+		{in: "\x01\x02\x03" + "\x04" + "\x05" + "\x06\x07\x08\x09", want: FrameHeader{
+			Length: 66051, Type: 4, Flags: 5, StreamID: 101124105,
+		}},
+		// Ignore high bit:
+		{in: "\xff\xff\xff" + "\xff" + "\xff" + "\xff\xff\xff\xff", want: FrameHeader{
+			Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
+		{in: "\xff\xff\xff" + "\xff" + "\xff" + "\x7f\xff\xff\xff", want: FrameHeader{
+			Length: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},
+	}
+	for i, tt := range tests {
+		got, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in))
+		if err != nil {
+			t.Errorf("%d. readFrameHeader(%q) = %v", i, tt.in, err)
+			continue
+		}
+		tt.want.valid = true
+		if got != tt.want {
+			t.Errorf("%d. readFrameHeader(%q) = %+v; want %+v", i, tt.in, got, tt.want)
+		}
+	}
+}
+
+func TestReadWriteFrameHeader(t *testing.T) {
+	tests := []struct {
+		len      uint32
+		typ      FrameType
+		flags    Flags
+		streamID uint32
+	}{
+		{len: 0, typ: 255, flags: 1, streamID: 0},
+		{len: 0, typ: 255, flags: 1, streamID: 1},
+		{len: 0, typ: 255, flags: 1, streamID: 255},
+		{len: 0, typ: 255, flags: 1, streamID: 256},
+		{len: 0, typ: 255, flags: 1, streamID: 65535},
+		{len: 0, typ: 255, flags: 1, streamID: 65536},
+
+		{len: 0, typ: 1, flags: 255, streamID: 1},
+		{len: 255, typ: 1, flags: 255, streamID: 1},
+		{len: 256, typ: 1, flags: 255, streamID: 1},
+		{len: 65535, typ: 1, flags: 255, streamID: 1},
+		{len: 65536, typ: 1, flags: 255, streamID: 1},
+		{len: 16777215, typ: 1, flags: 255, streamID: 1},
+	}
+	for _, tt := range tests {
+		fr, buf := testFramer()
+		fr.startWrite(tt.typ, tt.flags, tt.streamID)
+		fr.writeBytes(make([]byte, tt.len))
+		fr.endWrite()
+		fh, err := ReadFrameHeader(buf)
+		if err != nil {
+			t.Errorf("ReadFrameHeader(%+v) = %v", tt, err)
+			continue
+		}
+		if fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID {
+			t.Errorf("ReadFrameHeader(%+v) = %+v; mismatch", tt, fh)
+		}
+	}
+
+}
+
+// TestWriteTooLargeFrame verifies that endWrite rejects a payload
+// larger than the 24-bit HTTP/2 frame length field can express
+// (1<<24 bytes does not fit, so the write must fail).
+func TestWriteTooLargeFrame(t *testing.T) {
+	fr, _ := testFramer()
+	fr.startWrite(0, 1, 1)
+	fr.writeBytes(make([]byte, 1<<24))
+	err := fr.endWrite()
+	if err != ErrFrameTooLarge {
+		t.Errorf("endWrite = %v; want ErrFrameTooLarge", err)
+	}
+}
+
+func TestWriteGoAway(t *testing.T) {
+	const debug = "foo"
+	fr, buf := testFramer()
+	if err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil {
+		t.Fatal(err)
+	}
+	const wantEnc = "\x00\x00\v\a\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08" + debug
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := &GoAwayFrame{
+		FrameHeader: FrameHeader{
+			valid:    true,
+			Type:     0x7,
+			Flags:    0,
+			Length:   uint32(4 + 4 + len(debug)),
+			StreamID: 0,
+		},
+		LastStreamID: 0x01020304,
+		ErrCode:      0x05060708,
+		debugData:    []byte(debug),
+	}
+	if !reflect.DeepEqual(f, want) {
+		t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
+	}
+	if got := string(f.(*GoAwayFrame).DebugData()); got != debug {
+		t.Errorf("debug data = %q; want %q", got, debug)
+	}
+}
+
+func TestWritePushPromise(t *testing.T) {
+	pp := PushPromiseParam{
+		StreamID:      42,
+		PromiseID:     42,
+		BlockFragment: []byte("abc"),
+	}
+	fr, buf := testFramer()
+	if err := fr.WritePushPromise(pp); err != nil {
+		t.Fatal(err)
+	}
+	const wantEnc = "\x00\x00\x07\x05\x00\x00\x00\x00*\x00\x00\x00*abc"
+	if buf.String() != wantEnc {
+		t.Errorf("encoded as %q; want %q", buf.Bytes(), wantEnc)
+	}
+	f, err := fr.ReadFrame()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, ok := f.(*PushPromiseFrame)
+	if !ok {
+		t.Fatalf("got %T; want *PushPromiseFrame", f)
+	}
+	want := &PushPromiseFrame{
+		FrameHeader: FrameHeader{
+			valid:    true,
+			Type:     0x5,
+			Flags:    0x0,
+			Length:   0x7,
+			StreamID: 42,
+		},
+		PromiseID:     42,
+		headerFragBuf: []byte("abc"),
+	}
+	if !reflect.DeepEqual(f, want) {
+		t.Fatalf("parsed back:\n%#v\nwant:\n%#v", f, want)
+	}
+}

+ 169 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/gotrack.go

@@ -0,0 +1,169 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+// DebugGoroutines enables the goroutine-ownership checks below.
+// When false, check and checkNotOn return immediately, so the
+// instrumentation costs almost nothing in normal operation.
+var DebugGoroutines = false
+
+// goroutineLock holds the ID of the goroutine that is supposed to own
+// some piece of state: the goroutine that called newGoroutineLock.
+type goroutineLock uint64
+
+// newGoroutineLock binds a lock value to the calling goroutine's ID.
+func newGoroutineLock() goroutineLock {
+	return goroutineLock(curGoroutineID())
+}
+
+// check panics if called from a goroutine other than the one that
+// created g. No-op unless DebugGoroutines is set.
+func (g goroutineLock) check() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() != uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+// checkNotOn is the inverse of check: it panics if called FROM the
+// goroutine that created g. No-op unless DebugGoroutines is set.
+func (g goroutineLock) checkNotOn() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() == uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+// goroutineSpace is the prefix of the first line of a stack dump,
+// stripped off before parsing the numeric goroutine ID.
+var goroutineSpace = []byte("goroutine ")
+
+// curGoroutineID returns the current goroutine's numeric ID by parsing
+// the first line of runtime.Stack output ("goroutine 4707 [...]").
+// There is no public runtime API for this, hence the stack parse.
+// It panics if the stack header does not have the expected shape.
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := parseUintBytes(b, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
+
+// littleBuf pools small scratch buffers for curGoroutineID; 64 bytes
+// is enough to capture the first line of a runtime.Stack dump. The
+// pool stores *[]byte to avoid an allocation per Get/Put round trip.
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+// It lets curGoroutineID parse the goroutine ID without allocating a
+// string for the stack-dump slice. base 0 means "infer from prefix"
+// (0x.. hex, 0.. octal, else decimal); bitSize 0 means the native int
+// size. On failure it returns a *strconv.NumError wrapping
+// strconv.ErrSyntax or strconv.ErrRange, matching the stdlib contract.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		// A digit valid in some base but not this one (e.g. '9' in
+		// base 8) is a syntax error, not a range error.
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// cutoff64 returns the first number n such that n*base >= 1<<64, i.e.
+// the smallest accumulator value for which one more multiply by base
+// would overflow uint64; parseUintBytes checks against it before each
+// multiply. base < 2 is invalid, signalled by returning 0.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}

+ 33 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/gotrack_test.go

@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+)
+
+// TestGoroutineLock exercises both sides of the goroutineLock
+// contract: check() succeeds on the owning goroutine, and panics with
+// a "wrong goroutine" message when invoked from any other goroutine.
+func TestGoroutineLock(t *testing.T) {
+	DebugGoroutines = true
+	g := newGoroutineLock()
+	g.check()
+
+	sawPanic := make(chan interface{})
+	go func() {
+		defer func() { sawPanic <- recover() }()
+		g.check() // should panic
+	}()
+	e := <-sawPanic
+	if e == nil {
+		t.Fatal("did not see panic from check in other goroutine")
+	}
+	if !strings.Contains(fmt.Sprint(e), "wrong goroutine") {
+		t.Errorf("expected to see panic about running on the wrong goroutine; got %v", e)
+	}
+}

+ 5 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/.gitignore

@@ -0,0 +1,5 @@
+h2demo
+h2demo.linux
+client-id.dat
+client-secret.dat
+token.dat

+ 5 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/Makefile

@@ -0,0 +1,5 @@
+h2demo.linux: h2demo.go
+	GOOS=linux go build --tags=h2demo -o h2demo.linux .
+
+upload: h2demo.linux
+	cat h2demo.linux | go run launch.go --write_object=http2-demo-server-tls/h2demo --write_object_is_public

+ 16 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/README

@@ -0,0 +1,16 @@
+
+Client:
+ -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
+ -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)
+
+Make CA:
+$ openssl genrsa -out rootCA.key 2048
+$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
+... install that to Firefox
+
+Make cert:
+$ openssl genrsa -out server.key 2048
+$ openssl req -new -key server.key -out server.csr
+$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500
+
+

+ 426 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/h2demo.go

@@ -0,0 +1,426 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// +build h2demo
+
+package main
+
+import (
+	"bytes"
+	"crypto/tls"
+	"flag"
+	"fmt"
+	"hash/crc32"
+	"image"
+	"image/jpeg"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"os/exec"
+	"path"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"camlistore.org/pkg/googlestorage"
+	"camlistore.org/pkg/singleflight"
+	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bradfitz/http2"
+)
+
+var (
+	openFirefox = flag.Bool("openff", false, "Open Firefox")
+	addr        = flag.String("addr", "localhost:4430", "TLS address to listen on")
+	httpAddr    = flag.String("httpaddr", "", "If non-empty, address to listen for regular HTTP on")
+	prod        = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.")
+)
+
+func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, `<html>
+<body>
+<h1>Go + HTTP/2</h1>
+<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
+<p>Unfortunately, you're <b>not</b> using HTTP/2 right now. To do so:</p>
+<ul>
+   <li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
+   <li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
+</ul>
+<p>See code & instructions for connecting at <a href="https://github.com/bradfitz/http2">https://github.com/bradfitz/http2</a>.</p>
+
+</body></html>`)
+}
+
+func home(w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path != "/" {
+		http.NotFound(w, r)
+		return
+	}
+	io.WriteString(w, `<html>
+<body>
+<h1>Go + HTTP/2</h1>
+
+<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
+href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
+
+<p>Congratulations, <b>you're using HTTP/2 right now</b>.</p>
+
+<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>
+
+<p> The code is currently at <a
+href="https://github.com/bradfitz/http2">github.com/bradfitz/http2</a>
+but will move to the Go standard library at some point in the future
+(enabled by default, without users needing to change their code).</p>
+
+<p>Contact info: <i>bradfitz@golang.org</i>, or <a
+href="https://github.com/bradfitz/http2/issues">file a bug</a>.</p>
+
+<h2>Handlers for testing</h2>
+<ul>
+  <li>GET <a href="/reqinfo">/reqinfo</a> to dump the request + headers received</li>
+  <li>GET <a href="/clockstream">/clockstream</a> streams the current time every second</li>
+  <li>GET <a href="/gophertiles">/gophertiles</a> to see a page with a bunch of images</li>
+  <li>GET <a href="/file/gopher.png">/file/gopher.png</a> for a small file (does If-Modified-Since, Content-Range, etc)</li>
+  <li>GET <a href="/file/go.src.tar.gz">/file/go.src.tar.gz</a> for a larger file (~10 MB)</li>
+  <li>GET <a href="/redirect">/redirect</a> to redirect back to / (this page)</li>
+  <li>GET <a href="/goroutines">/goroutines</a> to see all active goroutines in this server</li>
+  <li>PUT something to <a href="/crc32">/crc32</a> to get a count of number of bytes and its CRC-32</li>
+</ul>
+
+</body></html>`)
+}
+
+func reqInfoHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "text/plain")
+	fmt.Fprintf(w, "Method: %s\n", r.Method)
+	fmt.Fprintf(w, "Protocol: %s\n", r.Proto)
+	fmt.Fprintf(w, "Host: %s\n", r.Host)
+	fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr)
+	fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI)
+	fmt.Fprintf(w, "URL: %#v\n", r.URL)
+	fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength)
+	fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close)
+	fmt.Fprintf(w, "TLS: %#v\n", r.TLS)
+	fmt.Fprintf(w, "\nHeaders:\n")
+	r.Header.Write(w)
+}
+
+// crcHandler reads a PUT request body and responds with the number of
+// bytes received and their IEEE CRC-32. Any other method gets a 400.
+// NOTE(review): a read error from r.Body is silently dropped and the
+// client receives an empty 200 — presumably acceptable for a demo
+// server, but worth confirming.
+func crcHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "PUT" {
+		http.Error(w, "PUT required.", 400)
+		return
+	}
+	crc := crc32.NewIEEE()
+	n, err := io.Copy(crc, r.Body)
+	if err == nil {
+		w.Header().Set("Content-Type", "text/plain")
+		fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil))
+	}
+}
+
+var (
+	fsGrp   singleflight.Group
+	fsMu    sync.Mutex // guards fsCache
+	fsCache = map[string]http.Handler{}
+)
+
+// fileServer returns a file-serving handler that proxies URL.
+// It lazily fetches URL on the first access and caches its contents forever.
+func fileServer(url string) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		hi, err := fsGrp.Do(url, func() (interface{}, error) {
+			fsMu.Lock()
+			if h, ok := fsCache[url]; ok {
+				fsMu.Unlock()
+				return h, nil
+			}
+			fsMu.Unlock()
+
+			res, err := http.Get(url)
+			if err != nil {
+				return nil, err
+			}
+			defer res.Body.Close()
+			slurp, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+
+			modTime := time.Now()
+			var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp))
+			})
+			fsMu.Lock()
+			fsCache[url] = h
+			fsMu.Unlock()
+			return h, nil
+		})
+		if err != nil {
+			http.Error(w, err.Error(), 500)
+			return
+		}
+		hi.(http.Handler).ServeHTTP(w, r)
+	})
+}
+
+func clockStreamHandler(w http.ResponseWriter, r *http.Request) {
+	clientGone := w.(http.CloseNotifier).CloseNotify()
+	w.Header().Set("Content-Type", "text/plain")
+	ticker := time.NewTicker(1 * time.Second)
+	defer ticker.Stop()
+	fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n")
+	io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13))
+
+	for {
+		fmt.Fprintf(w, "%v\n", time.Now())
+		w.(http.Flusher).Flush()
+		select {
+		case <-ticker.C:
+		case <-clientGone:
+			log.Printf("Client %v disconnected from the clock", r.RemoteAddr)
+			return
+		}
+	}
+}
+
+func registerHandlers() {
+	tiles := newGopherTilesHandler()
+
+	mux2 := http.NewServeMux()
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		if r.TLS == nil {
+			if r.URL.Path == "/gophertiles" {
+				tiles.ServeHTTP(w, r)
+				return
+			}
+			http.Redirect(w, r, "https://http2.golang.org/", http.StatusFound)
+			return
+		}
+		if r.ProtoMajor == 1 {
+			if r.URL.Path == "/reqinfo" {
+				reqInfoHandler(w, r)
+				return
+			}
+			homeOldHTTP(w, r)
+			return
+		}
+		mux2.ServeHTTP(w, r)
+	})
+	mux2.HandleFunc("/", home)
+	mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png"))
+	mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz"))
+	mux2.HandleFunc("/reqinfo", reqInfoHandler)
+	mux2.HandleFunc("/crc32", crcHandler)
+	mux2.HandleFunc("/clockstream", clockStreamHandler)
+	mux2.Handle("/gophertiles", tiles)
+	mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) {
+		http.Redirect(w, r, "/", http.StatusFound)
+	})
+	stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`)
+	mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		buf := make([]byte, 2<<20)
+		w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil))
+	})
+}
+
+func newGopherTilesHandler() http.Handler {
+	const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg"
+	res, err := http.Get(gopherURL)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if res.StatusCode != 200 {
+		log.Fatalf("Error fetching %s: %v", gopherURL, res.Status)
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	res.Body.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+	im, err := jpeg.Decode(bytes.NewReader(slurp))
+	if err != nil {
+		if len(slurp) > 1024 {
+			slurp = slurp[:1024]
+		}
+		log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp)
+	}
+
+	type subImager interface {
+		SubImage(image.Rectangle) image.Image
+	}
+	const tileSize = 32
+	xt := im.Bounds().Max.X / tileSize
+	yt := im.Bounds().Max.Y / tileSize
+	var tile [][][]byte // y -> x -> jpeg bytes
+	for yi := 0; yi < yt; yi++ {
+		var row [][]byte
+		for xi := 0; xi < xt; xi++ {
+			si := im.(subImager).SubImage(image.Rectangle{
+				Min: image.Point{xi * tileSize, yi * tileSize},
+				Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize},
+			})
+			buf := new(bytes.Buffer)
+			if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil {
+				log.Fatal(err)
+			}
+			row = append(row, buf.Bytes())
+		}
+		tile = append(tile, row)
+	}
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ms, _ := strconv.Atoi(r.FormValue("latency"))
+		const nanosPerMilli = 1e6
+		if r.FormValue("x") != "" {
+			x, _ := strconv.Atoi(r.FormValue("x"))
+			y, _ := strconv.Atoi(r.FormValue("y"))
+			if ms <= 1000 {
+				time.Sleep(time.Duration(ms) * nanosPerMilli)
+			}
+			if x >= 0 && x < xt && y >= 0 && y < yt {
+				http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x]))
+				return
+			}
+		}
+		io.WriteString(w, "<html><body>")
+		fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
+		for _, ms := range []int{0, 30, 200, 1000} {
+			d := time.Duration(ms) * nanosPerMilli
+			fmt.Fprintf(w, "[<a href='https://%s/gophertiles?latency=%d'>HTTP/2, %v latency</a>] [<a href='http://%s/gophertiles?latency=%d'>HTTP/1, %v latency</a>]<br>\n",
+				httpsHost(), ms, d,
+				httpHost(), ms, d,
+			)
+		}
+		io.WriteString(w, "<p>\n")
+		cacheBust := time.Now().UnixNano()
+		for y := 0; y < yt; y++ {
+			for x := 0; x < xt; x++ {
+				fmt.Fprintf(w, "<img width=%d height=%d src='/gophertiles?x=%d&y=%d&cachebust=%d&latency=%d'>",
+					tileSize, tileSize, x, y, cacheBust, ms)
+			}
+			io.WriteString(w, "<br/>\n")
+		}
+		io.WriteString(w, "<hr><a href='/'>&lt;&lt Back to Go HTTP/2 demo server</a></body></html>")
+	})
+}
+
+// httpsHost returns the host[:port] clients should use to reach the
+// TLS server: the production hostname under --prod, otherwise the
+// --addr flag value, with "localhost" prepended when the flag is a
+// bare ":port".
+func httpsHost() string {
+	if *prod {
+		return "http2.golang.org"
+	}
+	if v := *addr; strings.HasPrefix(v, ":") {
+		return "localhost" + v
+	} else {
+		return v
+	}
+}
+
+// httpHost is the plain-HTTP counterpart of httpsHost: the production
+// hostname under --prod, otherwise the --httpaddr flag value, with
+// "localhost" prepended when the flag is a bare ":port".
+func httpHost() string {
+	if *prod {
+		return "http2.golang.org"
+	}
+	if v := *httpAddr; strings.HasPrefix(v, ":") {
+		return "localhost" + v
+	} else {
+		return v
+	}
+}
+
+func serveProdTLS() error {
+	c, err := googlestorage.NewServiceClient()
+	if err != nil {
+		return err
+	}
+	slurp := func(key string) ([]byte, error) {
+		const bucket = "http2-demo-server-tls"
+		rc, _, err := c.GetObject(&googlestorage.Object{
+			Bucket: bucket,
+			Key:    key,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err)
+		}
+		defer rc.Close()
+		return ioutil.ReadAll(rc)
+	}
+	certPem, err := slurp("http2.golang.org.chained.pem")
+	if err != nil {
+		return err
+	}
+	keyPem, err := slurp("http2.golang.org.key")
+	if err != nil {
+		return err
+	}
+	cert, err := tls.X509KeyPair(certPem, keyPem)
+	if err != nil {
+		return err
+	}
+	srv := &http.Server{
+		TLSConfig: &tls.Config{
+			Certificates: []tls.Certificate{cert},
+		},
+	}
+	http2.ConfigureServer(srv, &http2.Server{})
+	ln, err := net.Listen("tcp", ":443")
+	if err != nil {
+		return err
+	}
+	return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
+}
+
+// tcpKeepAliveListener wraps a *net.TCPListener so that every accepted
+// connection has TCP keep-alives enabled, mirroring what
+// net/http.ListenAndServe sets up internally.
+type tcpKeepAliveListener struct {
+	*net.TCPListener
+}
+
+// Accept accepts a TCP connection and enables keep-alives with a
+// 3-minute period before returning it to the caller.
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+	tc, err := ln.AcceptTCP()
+	if err != nil {
+		return
+	}
+	tc.SetKeepAlive(true)
+	tc.SetKeepAlivePeriod(3 * time.Minute)
+	return tc, nil
+}
+
+// serveProd runs the production plain-HTTP (:80) and TLS (:443)
+// servers concurrently and returns the first error either produces;
+// the channel is buffered so the losing goroutine does not leak
+// blocked on send.
+func serveProd() error {
+	errc := make(chan error, 2)
+	go func() { errc <- http.ListenAndServe(":80", nil) }()
+	go func() { errc <- serveProdTLS() }()
+	return <-errc
+}
+
+func main() {
+	var srv http.Server
+	flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
+	flag.Parse()
+	srv.Addr = *addr
+
+	registerHandlers()
+
+	if *prod {
+		*httpAddr = "http2.golang.org"
+		log.Fatal(serveProd())
+	}
+
+	url := "https://" + *addr + "/"
+	log.Printf("Listening on " + url)
+	http2.ConfigureServer(&srv, &http2.Server{})
+
+	if *httpAddr != "" {
+		go func() { log.Fatal(http.ListenAndServe(*httpAddr, nil)) }()
+	}
+
+	go func() {
+		log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
+	}()
+	if *openFirefox && runtime.GOOS == "darwin" {
+		time.Sleep(250 * time.Millisecond)
+		exec.Command("open", "-b", "org.mozilla.nightly", "https://localhost:4430/").Run()
+	}
+	select {}
+}

+ 279 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/launch.go

@@ -0,0 +1,279 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"code.google.com/p/goauth2/oauth"
+	compute "code.google.com/p/google-api-go-client/compute/v1"
+)
+
+var (
+	proj     = flag.String("project", "symbolic-datum-552", "name of Project")
+	zone     = flag.String("zone", "us-central1-a", "GCE zone")
+	mach     = flag.String("machinetype", "n1-standard-1", "Machine type")
+	instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
+	sshPub   = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
+	staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
+
+	writeObject  = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.")
+	publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
+)
+
+func readFile(v string) string {
+	slurp, err := ioutil.ReadFile(v)
+	if err != nil {
+		log.Fatalf("Error reading %s: %v", v, err)
+	}
+	return strings.TrimSpace(string(slurp))
+}
+
+var config = &oauth.Config{
+	// The client-id and secret should be for an "Installed Application" when using
+	// the CLI. Later we'll use a web application with a callback.
+	ClientId:     readFile("client-id.dat"),
+	ClientSecret: readFile("client-secret.dat"),
+	Scope: strings.Join([]string{
+		compute.DevstorageFull_controlScope,
+		compute.ComputeScope,
+		"https://www.googleapis.com/auth/sqlservice",
+		"https://www.googleapis.com/auth/sqlservice.admin",
+	}, " "),
+	AuthURL:     "https://accounts.google.com/o/oauth2/auth",
+	TokenURL:    "https://accounts.google.com/o/oauth2/token",
+	RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+}
+
+const baseConfig = `#cloud-config
+coreos:
+  units:
+    - name: h2demo.service
+      command: start
+      content: |
+        [Unit]
+        Description=HTTP2 Demo
+        
+        [Service]
+        ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
+        ExecStart=/opt/bin/h2demo --prod
+        RestartSec=5s
+        Restart=always
+        Type=simple
+        
+        [Install]
+        WantedBy=multi-user.target
+`
+
+func main() {
+	flag.Parse()
+	if *proj == "" {
+		log.Fatalf("Missing --project flag")
+	}
+	prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
+	machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
+
+	tr := &oauth.Transport{
+		Config: config,
+	}
+
+	tokenCache := oauth.CacheFile("token.dat")
+	token, err := tokenCache.Token()
+	if err != nil {
+		if *writeObject != "" {
+			log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
+		}
+		log.Printf("Error getting token from %s: %v", string(tokenCache), err)
+		log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
+		fmt.Print("\nEnter auth code: ")
+		sc := bufio.NewScanner(os.Stdin)
+		sc.Scan()
+		authCode := strings.TrimSpace(sc.Text())
+		token, err = tr.Exchange(authCode)
+		if err != nil {
+			log.Fatalf("Error exchanging auth code for a token: %v", err)
+		}
+		tokenCache.PutToken(token)
+	}
+
+	tr.Token = token
+	oauthClient := &http.Client{Transport: tr}
+	if *writeObject != "" {
+		writeCloudStorageObject(oauthClient)
+		return
+	}
+
+	computeService, _ := compute.New(oauthClient)
+
+	natIP := *staticIP
+	if natIP == "" {
+		// Try to find it by name.
+		aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
+		if err != nil {
+			log.Fatal(err)
+		}
+		// http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
+	IPLoop:
+		for _, asl := range aggAddrList.Items {
+			for _, addr := range asl.Addresses {
+				if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
+					natIP = addr.Address
+					break IPLoop
+				}
+			}
+		}
+	}
+
+	cloudConfig := baseConfig
+	if *sshPub != "" {
+		key := strings.TrimSpace(readFile(*sshPub))
+		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", key)
+	}
+	if os.Getenv("USER") == "bradfitz" {
+		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
+	}
+	const maxCloudConfig = 32 << 10 // per compute API docs
+	if len(cloudConfig) > maxCloudConfig {
+		log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
+	}
+
+	instance := &compute.Instance{
+		Name:        *instName,
+		Description: "Go Builder",
+		MachineType: machType,
+		Disks:       []*compute.AttachedDisk{instanceDisk(computeService)},
+		Tags: &compute.Tags{
+			Items: []string{"http-server", "https-server"},
+		},
+		Metadata: &compute.Metadata{
+			Items: []*compute.MetadataItems{
+				{
+					Key:   "user-data",
+					Value: cloudConfig,
+				},
+			},
+		},
+		NetworkInterfaces: []*compute.NetworkInterface{
+			&compute.NetworkInterface{
+				AccessConfigs: []*compute.AccessConfig{
+					&compute.AccessConfig{
+						Type:  "ONE_TO_ONE_NAT",
+						Name:  "External NAT",
+						NatIP: natIP,
+					},
+				},
+				Network: prefix + "/global/networks/default",
+			},
+		},
+		ServiceAccounts: []*compute.ServiceAccount{
+			{
+				Email: "default",
+				Scopes: []string{
+					compute.DevstorageFull_controlScope,
+					compute.ComputeScope,
+				},
+			},
+		},
+	}
+
+	log.Printf("Creating instance...")
+	op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
+	if err != nil {
+		log.Fatalf("Failed to create instance: %v", err)
+	}
+	opName := op.Name
+	log.Printf("Created. Waiting on operation %v", opName)
+OpLoop:
+	for {
+		time.Sleep(2 * time.Second)
+		op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
+		if err != nil {
+			log.Fatalf("Failed to get op %s: %v", opName, err)
+		}
+		switch op.Status {
+		case "PENDING", "RUNNING":
+			log.Printf("Waiting on operation %v", opName)
+			continue
+		case "DONE":
+			if op.Error != nil {
+				for _, operr := range op.Error.Errors {
+					log.Printf("Error: %+v", operr)
+				}
+				log.Fatalf("Failed to start.")
+			}
+			log.Printf("Success. %+v", op)
+			break OpLoop
+		default:
+			log.Fatalf("Unknown status %q: %+v", op.Status, op)
+		}
+	}
+
+	inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
+	if err != nil {
+		log.Fatalf("Error getting instance after creation: %v", err)
+	}
+	ij, _ := json.MarshalIndent(inst, "", "    ")
+	log.Printf("Instance: %s", ij)
+}
+
+func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
+	const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
+	diskName := *instName + "-disk"
+
+	return &compute.AttachedDisk{
+		AutoDelete: true,
+		Boot:       true,
+		Type:       "PERSISTENT",
+		InitializeParams: &compute.AttachedDiskInitializeParams{
+			DiskName:    diskName,
+			SourceImage: imageURL,
+			DiskSizeGb:  50,
+		},
+	}
+}
+
+func writeCloudStorageObject(httpClient *http.Client) {
+	content := os.Stdin
+	const maxSlurp = 1 << 20
+	var buf bytes.Buffer
+	n, err := io.CopyN(&buf, content, maxSlurp)
+	if err != nil && err != io.EOF {
+		log.Fatalf("Error reading from stdin: %v, %v", n, err)
+	}
+	contentType := http.DetectContentType(buf.Bytes())
+
+	req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
+	if err != nil {
+		log.Fatal(err)
+	}
+	req.Header.Set("x-goog-api-version", "2")
+	if *publicObject {
+		req.Header.Set("x-goog-acl", "public-read")
+	}
+	req.Header.Set("Content-Type", contentType)
+	res, err := httpClient.Do(req)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if res.StatusCode != 200 {
+		res.Write(os.Stderr)
+		log.Fatalf("Failed.")
+	}
+	log.Printf("Success.")
+	os.Exit(0)
+}

+ 27 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
+62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
+XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
+mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
+JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
+SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
+nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
+/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
+qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
+hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
+NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
+LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
+8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
+0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
+K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
+bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
+QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
+nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
+b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
+gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
+WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
+C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
+x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
+hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
+-----END RSA PRIVATE KEY-----

+ 26 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.pem

@@ -0,0 +1,26 @@
+-----BEGIN CERTIFICATE-----
+MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
+A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
+DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
+NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
+cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
+c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
+R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
+ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
+JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
+mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
+caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
+A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
+hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
+MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
+MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
+bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
+U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
+eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
+UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
+58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
+sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
+kPe6XoSbiLm/kxk32T0=
+-----END CERTIFICATE-----

+ 1 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/rootCA.srl

@@ -0,0 +1 @@
+E2CE26BF3285059C

+ 20 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.crt

@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
+C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
+DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
+RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
+ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
+gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
+dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
+/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
+F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
+AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
+rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
+EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
+KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
+dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
+90p6/CbU71bGbfpM2PHot2fm
+-----END CERTIFICATE-----

+ 27 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/h2demo/server.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
+fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
+J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
+b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
+mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
+fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
+3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
+qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
+NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
+LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
+a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
+Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
+W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
+gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
+S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
+Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
+V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
+KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
+yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
+drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
+ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
+48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
+c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
+nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
+IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
+-----END RSA PRIVATE KEY-----

+ 80 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/headermap.go

@@ -0,0 +1,80 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"net/http"
+	"strings"
+)
+
// Two-way lookup tables between Go's canonical header case and the
// lowercase wire form, populated for common header names in init.
var (
	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
)
+
// init precomputes, for a fixed set of common header names, the
// two-way mapping between Go's canonical header case (via
// http.CanonicalHeaderKey, e.g. "Content-Type") and the lowercase
// form used on the wire.
func init() {
	for _, v := range []string{
		"accept",
		"accept-charset",
		"accept-encoding",
		"accept-language",
		"accept-ranges",
		"age",
		"access-control-allow-origin",
		"allow",
		"authorization",
		"cache-control",
		"content-disposition",
		"content-encoding",
		"content-language",
		"content-length",
		"content-location",
		"content-range",
		"content-type",
		"cookie",
		"date",
		"etag",
		"expect",
		"expires",
		"from",
		"host",
		"if-match",
		"if-modified-since",
		"if-none-match",
		"if-unmodified-since",
		"last-modified",
		"link",
		"location",
		"max-forwards",
		"proxy-authenticate",
		"proxy-authorization",
		"range",
		"referer",
		"refresh",
		"retry-after",
		"server",
		"set-cookie",
		"strict-transport-security",
		"transfer-encoding",
		"user-agent",
		"vary",
		"via",
		"www-authenticate",
	} {
		chk := http.CanonicalHeaderKey(v)
		commonLowerHeader[chk] = v
		commonCanonHeader[v] = chk
	}
}
+
+func lowerHeader(v string) string {
+	if s, ok := commonLowerHeader[v]; ok {
+		return s
+	}
+	return strings.ToLower(v)
+}

+ 252 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode.go

@@ -0,0 +1,252 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+	"io"
+)
+
const (
	// uint32Max is the largest uint32; used as the "unset" sentinel
	// for Encoder.minSize.
	uint32Max = ^uint32(0)
	// initialHeaderTableSize is the default dynamic header table
	// size from the HPACK specification (4,096 octets).
	initialHeaderTableSize = 4096
)
+
// An Encoder performs HPACK encoding of header fields, writing the
// encoded bytes for each field to an underlying io.Writer.
type Encoder struct {
	dynTab dynamicTable
	// minSize is the minimum table size set by
	// SetMaxDynamicTableSize after the previous Header Table Size
	// Update.
	minSize uint32
	// maxSizeLimit is the maximum table size this encoder
	// supports. This will protect the encoder from too large
	// size.
	maxSizeLimit uint32
	// tableSizeUpdate indicates whether "Header Table Size
	// Update" is required.
	tableSizeUpdate bool
	w               io.Writer
	buf             []byte // scratch buffer, reused across WriteField calls
}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding. An
+// encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{
+		minSize:         uint32Max,
+		maxSizeLimit:    initialHeaderTableSize,
+		tableSizeUpdate: false,
+		w:               w,
+	}
+	e.dynTab.setMaxSize(initialHeaderTableSize)
+	return e
+}
+
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary.  If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0]

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		if e.minSize < e.dynTab.maxSize {
			// The table size dipped below its final value since the
			// last update, so that intermediate minimum must be
			// signaled before the final size.
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		// Exact name+value hit: emit the compact indexed form.
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		if idx == 0 {
			// No name match either: literal field with a new name.
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			// Name-only match: literal field referencing the indexed name.
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}
+
// searchTable searches f in both static and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match for both name and value, the dynamic header table is
// then searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
	for idx, hf := range staticTable {
		if !constantTimeStringCompare(hf.Name, f.Name) {
			continue
		}
		if i == 0 {
			// Remember the first name-only match; HPACK indices are 1-based.
			i = uint64(idx + 1)
		}
		// Sensitive fields never report a value match, so their values
		// cannot be emitted via the compact indexed form.
		if f.Sensitive {
			continue
		}
		if !constantTimeStringCompare(hf.Value, f.Value) {
			continue
		}
		i = uint64(idx + 1)
		nameValueMatch = true
		return
	}

	// Dynamic-table indices come after the static table's range.
	j, nameValueMatch := e.dynTab.search(f)
	if nameValueMatch || (i == 0 && j != 0) {
		i = j + uint64(len(staticTable))
	}
	return
}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same size of the default dynamic header table
+// size described in HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
// appendNewName appends f, as encoded in one of "Literal Header field
// - New Name" representation variants, to dst and returns the
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
	// Type byte first, then the name and value as length-prefixed
	// (possibly Huffman-coded) string literals.
	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
	dst = appendHpackString(dst, f.Name)
	return appendHpackString(dst, f.Value)
}
+
// appendIndexedName appends f and index i referring indexed name
// entry, as encoded in one of "Literal Header field - Indexed Name"
// representation variants, to dst and returns the extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
	first := len(dst)
	var n byte
	if indexing {
		n = 6 // 6-bit prefix for "Incremental Indexing"
	} else {
		n = 4 // 4-bit prefix for "Without Indexing" / "Never Indexed"
	}
	dst = appendVarInt(dst, n, i)
	// OR the representation pattern into the first octet of the index.
	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
	return appendHpackString(dst, f.Value)
}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
	huffmanLength := HuffmanEncodeLength(s)
	if huffmanLength < uint64(len(s)) {
		first := len(dst)
		// Length prefix is 7 bits; the high bit flags Huffman coding.
		dst = appendVarInt(dst, 7, huffmanLength)
		dst = AppendHuffmanString(dst, s)
		dst[first] |= 0x80
	} else {
		// Raw octets: length prefix with the Huffman flag left clear.
		dst = appendVarInt(dst, 7, uint64(len(s)))
		dst = append(dst, s...)
	}
	return dst
}
+
// encodeTypeByte returns type byte. If sensitive is true, type byte
// for "Never Indexed" representation is returned. If sensitive is
// false and indexing is true, type byte for "Incremental Indexing"
// representation is returned. Otherwise, type byte for "Without
// Indexing" is returned.
func encodeTypeByte(indexing, sensitive bool) byte {
	switch {
	case sensitive:
		return 0x10 // 0b0001xxxx: Never Indexed
	case indexing:
		return 0x40 // 0b01xxxxxx: Incremental Indexing
	default:
		return 0 // 0b0000xxxx: Without Indexing
	}
}

+ 331 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/hpack/encode_test.go

@@ -0,0 +1,331 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+	"bytes"
+	"encoding/hex"
+	"reflect"
+	"strings"
+	"testing"
+)
+
// TestEncoderTableSizeUpdate verifies that pending "Header Table Size
// Update" bytes — including the intermediate minimum when the table
// shrank before growing again — are emitted before the first field.
func TestEncoderTableSizeUpdate(t *testing.T) {
	tests := []struct {
		size1, size2 uint32
		wantHex      string
	}{
		// Should emit 2 table size updates (2048 and 4096)
		{2048, 4096, "3fe10f 3fe11f 82"},

		// Should emit 1 table size update (2048)
		{16384, 2048, "3fe10f 82"},
	}
	for _, tt := range tests {
		var buf bytes.Buffer
		e := NewEncoder(&buf)
		e.SetMaxDynamicTableSize(tt.size1)
		e.SetMaxDynamicTableSize(tt.size2)
		if err := e.WriteField(pair(":method", "GET")); err != nil {
			t.Fatal(err)
		}
		want := removeSpace(tt.wantHex)
		if got := hex.EncodeToString(buf.Bytes()); got != want {
			t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want)
		}
	}
}
+
// TestEncoderWriteField round-trips successive header lists through a
// shared Encoder/Decoder pair, exercising dynamic-table reuse across
// header blocks.
func TestEncoderWriteField(t *testing.T) {
	var buf bytes.Buffer
	e := NewEncoder(&buf)
	var got []HeaderField
	d := NewDecoder(4<<10, func(f HeaderField) {
		got = append(got, f)
	})

	tests := []struct {
		hdrs []HeaderField
	}{
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "http"),
			pair(":path", "/"),
			pair(":authority", "www.example.com"),
		}},
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "http"),
			pair(":path", "/"),
			pair(":authority", "www.example.com"),
			pair("cache-control", "no-cache"),
		}},
		{[]HeaderField{
			pair(":method", "GET"),
			pair(":scheme", "https"),
			pair(":path", "/index.html"),
			pair(":authority", "www.example.com"),
			pair("custom-key", "custom-value"),
		}},
	}
	for i, tt := range tests {
		buf.Reset()
		got = got[:0]
		for _, hf := range tt.hdrs {
			if err := e.WriteField(hf); err != nil {
				t.Fatal(err)
			}
		}
		_, err := d.Write(buf.Bytes())
		if err != nil {
			t.Errorf("%d. Decoder Write = %v", i, err)
		}
		if !reflect.DeepEqual(got, tt.hdrs) {
			t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs)
		}
	}
}
+
+func TestEncoderSearchTable(t *testing.T) {
+	e := NewEncoder(nil)
+
+	e.dynTab.add(pair("foo", "bar"))
+	e.dynTab.add(pair("blake", "miz"))
+	e.dynTab.add(pair(":method", "GET"))
+
+	tests := []struct {
+		hf        HeaderField
+		wantI     uint64
+		wantMatch bool
+	}{
+		// Name and Value match
+		{pair("foo", "bar"), uint64(len(staticTable) + 3), true},
+		{pair("blake", "miz"), uint64(len(staticTable) + 2), true},
+		{pair(":method", "GET"), 2, true},
+
+		// Only name match because Sensitive == true
+		{HeaderField{":method", "GET", true}, 2, false},
+
+		// Only Name matches
+		{pair("foo", "..."), uint64(len(staticTable) + 3), false},
+		{pair("blake", "..."), uint64(len(staticTable) + 2), false},
+		{pair(":method", "..."), 2, false},
+
+		// None match
+		{pair("foo-", "bar"), 0, false},
+	}
+	for _, tt := range tests {
+		if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
+			t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
+		}
+	}
+}
+
// TestAppendVarInt covers the largest value that fits each prefix
// width plus a multi-byte continuation case.
func TestAppendVarInt(t *testing.T) {
	tests := []struct {
		n    byte
		i    uint64
		want []byte
	}{
		// Fits in a byte:
		{1, 0, []byte{0}},
		{2, 2, []byte{2}},
		{3, 6, []byte{6}},
		{4, 14, []byte{14}},
		{5, 30, []byte{30}},
		{6, 62, []byte{62}},
		{7, 126, []byte{126}},
		{8, 254, []byte{254}},

		// Multiple bytes:
		{5, 1337, []byte{31, 154, 10}},
	}
	for _, tt := range tests {
		got := appendVarInt(nil, tt.n, tt.i)
		if !bytes.Equal(got, tt.want) {
			t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want)
		}
	}
}
+
// TestAppendHpackString checks that Huffman coding is used only when
// strictly shorter than the raw octets, including the empty string.
func TestAppendHpackString(t *testing.T) {
	tests := []struct {
		s, wantHex string
	}{
		// Huffman encoded
		{"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"},

		// Not Huffman encoded
		{"a", "01 61"},

		// zero length
		{"", "00"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendHpackString(nil, tt.s)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want)
		}
	}
}
+
+func TestAppendIndexed(t *testing.T) {
+	tests := []struct {
+		i       uint64
+		wantHex string
+	}{
+		// 1 byte
+		{1, "81"},
+		{126, "fe"},
+
+		// 2 bytes
+		{127, "ff00"},
+		{128, "ff01"},
+	}
+	for _, tt := range tests {
+		want := removeSpace(tt.wantHex)
+		buf := appendIndexed(nil, tt.i)
+		if got := hex.EncodeToString(buf); want != got {
+			t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want)
+		}
+	}
+}
+
// TestAppendNewName checks all three literal-with-new-name variants;
// Sensitive forces "Never Indexed" regardless of the indexing flag.
func TestAppendNewName(t *testing.T) {
	tests := []struct {
		f        HeaderField
		indexing bool
		wantHex  string
	}{
		// Incremental indexing
		{HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},

		// Without indexing
		{HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},

		// Never indexed
		{HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
		{HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendNewName(nil, tt.f, tt.indexing)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
		}
	}
}
+
+func TestAppendIndexedName(t *testing.T) {
+	tests := []struct {
+		f        HeaderField
+		i        uint64
+		indexing bool
+		wantHex  string
+	}{
+		// Incremental indexing
+		{HeaderField{":status", "302", false}, 8, true, "48 82 6402"},
+
+		// Without indexing
+		{HeaderField{":status", "302", false}, 8, false, "08 82 6402"},
+
+		// Never indexed
+		{HeaderField{":status", "302", true}, 8, true, "18 82 6402"},
+		{HeaderField{":status", "302", true}, 8, false, "18 82 6402"},
+	}
+	for _, tt := range tests {
+		want := removeSpace(tt.wantHex)
+		buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)
+		if got := hex.EncodeToString(buf); want != got {
+			t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want)
+		}
+	}
+}
+
// TestAppendTableSize checks the 5-bit prefix boundary of the table
// size update representation (30 is the last single-byte value).
func TestAppendTableSize(t *testing.T) {
	tests := []struct {
		i       uint32
		wantHex string
	}{
		// Fits into 1 byte
		{30, "3e"},

		// Extra byte
		{31, "3f00"},
		{32, "3f01"},
	}
	for _, tt := range tests {
		want := removeSpace(tt.wantHex)
		buf := appendTableSize(nil, tt.i)
		if got := hex.EncodeToString(buf); want != got {
			t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want)
		}
	}
}
+
+func TestEncoderSetMaxDynamicTableSize(t *testing.T) {
+	var buf bytes.Buffer
+	e := NewEncoder(&buf)
+	tests := []struct {
+		v           uint32
+		wantUpdate  bool
+		wantMinSize uint32
+		wantMaxSize uint32
+	}{
+		// Set new table size to 2048
+		{2048, true, 2048, 2048},
+
+		// Set new table size to 16384, but still limited to
+		// 4096
+		{16384, true, 2048, 4096},
+	}
+	for _, tt := range tests {
+		e.SetMaxDynamicTableSize(tt.v)
+		if got := e.tableSizeUpdate; tt.wantUpdate != got {
+			t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate)
+		}
+		if got := e.minSize; tt.wantMinSize != got {
+			t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize)
+		}
+		if got := e.dynTab.maxSize; tt.wantMaxSize != got {
+			t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize)
+		}
+	}
+}
+
// TestEncoderSetMaxDynamicTableSizeLimit checks that lowering the
// limit truncates the current table size (and flags an update), while
// raising the limit leaves the current size untouched.
func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {
	e := NewEncoder(nil)
	// 4095 < initialHeaderTableSize means maxSize is truncated to
	// 4095.
	e.SetMaxDynamicTableSizeLimit(4095)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	if got, want := e.maxSizeLimit, uint32(4095); got != want {
		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
	}
	if got, want := e.tableSizeUpdate, true; got != want {
		t.Errorf("e.tableSizeUpdate = %v; want %v", got, want)
	}
	// maxSize will be truncated to maxSizeLimit
	e.SetMaxDynamicTableSize(16384)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	// 8192 > current maxSizeLimit, so maxSize does not change.
	e.SetMaxDynamicTableSizeLimit(8192)
	if got, want := e.dynTab.maxSize, uint32(4095); got != want {
		t.Errorf("e.dynTab.maxSize = %v; want %v", got, want)
	}
	if got, want := e.maxSizeLimit, uint32(8192); got != want {
		t.Errorf("e.maxSizeLimit = %v; want %v", got, want)
	}
}
+
// removeSpace strips every space from s, letting the test tables
// write expected hex with readable grouping.
func removeSpace(s string) string {
	return strings.Join(strings.Split(s, " "), "")
}

+ 445 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack.go

@@ -0,0 +1,445 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+	Err error
+}
+
+func (de DecodingError) Error() string {
+	return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int

// Error implements the error interface, reporting the bad index.
func (e InvalidIndexError) Error() string {
	idx := int(e)
	return fmt.Sprintf("invalid indexed representation index %d", idx)
}
+
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}

// size returns the dynamic-table entry size of hf as defined by
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
// the octet lengths of the name and value plus a 32-octet overhead,
// measured without any Huffman encoding applied.
//
// The sum can overflow for absurdly large hand-built fields, but the
// wire encoding cannot produce such lengths, so we don't care.
func (hf *HeaderField) size() uint32 {
	const entryOverhead = 32
	return uint32(len(hf.Name)) + uint32(len(hf.Value)) + entryOverhead
}
+
// A Decoder is the decoding context for incremental processing of
// header blocks. Each decoded field is delivered through the emit
// callback supplied to NewDecoder.
type Decoder struct {
	dynTab dynamicTable
	emit   func(f HeaderField)

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf     []byte // usually not owned
	saveBuf bytes.Buffer
}
+
+func NewDecoder(maxSize uint32, emitFunc func(f HeaderField)) *Decoder {
+	d := &Decoder{
+		emit: emitFunc,
+	}
+	d.dynTab.allowedMaxSize = maxSize
+	d.dynTab.setMaxSize(maxSize)
+	return d
+}
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
// SetMaxDynamicTableSize changes the decoder's dynamic header table
// size to v, evicting entries if the table shrinks below its
// current contents.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}
+
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to. This bounds what a remote encoder can demand of this decoder.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}
+
// A dynamicTable holds the HPACK dynamic header table entries and
// their size accounting.
type dynamicTable struct {
	// ents is the FIFO described at
	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
	// The newest (low index) is append at the end, and items are
	// evicted from the front.
	ents           []HeaderField
	size           uint32 // sum of ents[i].size()
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
+
// setMaxSize sets the table's maximum size to v and immediately
// evicts entries until the current contents fit.
func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+//
+// Then make add increment the size. maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+	dt.ents = append(dt.ents, f)
+	dt.size += f.size()
+	dt.evict()
+}
+
// evict drops entries from the front (oldest) of the table until its
// size is within maxSize.
func (dt *dynamicTable) evict() {
	base := dt.ents // keep base pointer of slice
	for dt.size > dt.maxSize {
		dt.size -= dt.ents[0].size()
		dt.ents = dt.ents[1:]
	}

	// Shift slice contents down if we evicted things. Reusing the
	// original backing array keeps future appends from growing a
	// fresh array on every eviction cycle.
	if len(dt.ents) != len(base) {
		copy(base, dt.ents)
		dt.ents = base[:len(dt.ents)]
	}
}
+
// constantTimeStringCompare compares string a and b in a constant
// time manner. (The length check short-circuits, so only the content
// comparison itself is constant-time for equal-length inputs.)
func constantTimeStringCompare(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	var diff byte
	// Accumulate every byte difference; branch only once at the end.
	for i := 0; i < len(a); i++ {
		diff |= a[i] ^ b[i]
	}
	return diff == 0
}
+
// search searches f in the table. The return value i is 0 if there is
// no name match. If there is name match or name/value match, i is the
// index of that entry (1-based). If both name and value match,
// nameValueMatch becomes true.
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
	l := len(dt.ents)
	// Walk newest-to-oldest so the smallest (newest) index wins.
	for j := l - 1; j >= 0; j-- {
		ent := dt.ents[j]
		if !constantTimeStringCompare(ent.Name, f.Name) {
			continue
		}
		if i == 0 {
			i = uint64(l - j)
		}
		// Sensitive fields never report a value match, so their
		// values cannot leak via the compact indexed representation.
		if f.Sensitive {
			continue
		}
		if !constantTimeStringCompare(ent.Value, f.Value) {
			continue
		}
		i = uint64(l - j)
		nameValueMatch = true
		return
	}
	return
}
+
+func (d *Decoder) maxTableIndex() int {
+	return len(d.dynTab.ents) + len(staticTable)
+}
+
// at returns the header field at 1-based index i, where indices
// 1..len(staticTable) address the static table and higher indices
// address the dynamic table with the newest entry first. ok is false
// when i is out of range.
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	if i < 1 {
		return
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	if i <= uint64(len(staticTable)) {
		return staticTable[i-1], true
	}
	dents := d.dynTab.ents
	// Dynamic entries are stored oldest-first, so index from the end.
	return dents[len(dents)-(int(i)-len(staticTable))], true
}
+
// DecodeFull decodes an entire header block and returns its fields,
// temporarily replacing the Decoder's emit callback with a collector.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
	var hf []HeaderField
	saveFunc := d.emit
	defer func() { d.emit = saveFunc }()
	d.emit = func(f HeaderField) { hf = append(hf, f) }
	if _, err := d.Write(p); err != nil {
		return nil, err
	}
	// Close errors if p ended mid-field.
	if err := d.Close(); err != nil {
		return nil, err
	}
	return hf, nil
}
+
+func (d *Decoder) Close() error {
+	if d.saveBuf.Len() > 0 {
+		d.saveBuf.Reset()
+		return DecodingError{errors.New("truncated headers")}
+	}
+	return nil
+}
+
// Write feeds header block bytes to the decoder, emitting fields as
// complete representations are parsed. A field truncated at the end
// of p is buffered and resumed on the next Write. It always reports
// len(p) consumed unless a fatal decoding error occurs.
func (d *Decoder) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		// Prevent state machine CPU attacks (making us redo
		// work up to the point of finding out we don't have
		// enough data)
		return
	}
	// Only copy the data if we have to. Optimistically assume
	// that p will contain a complete header block.
	if d.saveBuf.Len() == 0 {
		d.buf = p
	} else {
		d.saveBuf.Write(p)
		d.buf = d.saveBuf.Bytes()
		d.saveBuf.Reset()
	}

	for len(d.buf) > 0 {
		err = d.parseHeaderFieldRepr()
		if err != nil {
			if err == errNeedMore {
				// Truncated mid-field: stash the remainder for the
				// next Write and report success for this call.
				err = nil
				d.saveBuf.Write(d.buf)
			}
			break
		}
	}

	return len(p), err
}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+// indexType describes how a literal header field representation
+// interacts with the dynamic table.
+type indexType int
+
+const (
+	// indexedTrue: literal with incremental indexing (added to the table).
+	indexedTrue indexType = iota
+	// indexedFalse: literal without indexing.
+	indexedFalse
+	// indexedNever: literal never indexed (sensitive field).
+	indexedNever
+)
+
+// indexed reports whether the field should be added to the dynamic table.
+func (v indexType) indexed() bool   { return v == indexedTrue }
+
+// sensitive reports whether the field must never be indexed.
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// parseHeaderFieldRepr dispatches on the first byte of d.buf to the
+// parser for that header field representation.
+//
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+	b := d.buf[0]
+	switch {
+	case b&128 != 0:
+		// Indexed representation.
+		// High bit set?
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// parseFieldIndexed parses an Indexed Header Field representation:
+// a 7-bit prefixed table index referencing both name and value.
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	// Emit a copy so the caller never aliases table storage.
+	d.emit(HeaderField{Name: hf.Name, Value: hf.Value})
+	d.buf = buf
+	return nil
+}
+
+// parseFieldLiteral parses a literal header field representation with
+// an n-bit prefixed name index. The name is either a table reference
+// (nameIdx > 0) or a literal string; the value is always literal.
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = readString(buf)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = readString(buf)
+	if err != nil {
+		return err
+	}
+	// Only consume d.buf once the whole field parsed successfully.
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	d.emit(hf)
+	return nil
+}
+
+// parseDynamicTableSizeUpdate parses a Dynamic Table Size Update:
+// a 5-bit prefixed new maximum size for the dynamic table.
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	// The encoder may only shrink-or-restore up to the externally
+	// allowed maximum; anything larger is a decoding error.
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	// Mask off the representation flag bits above the n-bit prefix.
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	// A prefix below its maximum encodes the whole value.
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	// Saturated prefix: accumulate 7-bit continuation groups,
+	// least-significant first, until a byte without the high bit.
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString reads an HPACK string literal (length-prefixed, possibly
+// Huffman coded) off the beginning of p. The returned remain buffer is
+// a suffix of p, or err != nil; errNeedMore means p is truncated.
+func readString(p []byte) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	// High bit of the first length byte flags Huffman coding.
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		return string(p[:strLen]), p[strLen:], nil
+	}
+
+	// TODO: optimize this garbage:
+	// NOTE(review): HuffmanDecode does not appear to validate EOS
+	// padding bits — confirm whether strict validation is required here.
+	var buf bytes.Buffer
+	if _, err := HuffmanDecode(&buf, p[:strLen]); err != nil {
+		return "", nil, err
+	}
+	return buf.String(), p[strLen:], nil
+}

+ 648 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/hpack/hpack_test.go

@@ -0,0 +1,648 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// TestStaticTable checks that staticTable agrees, entry by entry, with
+// the index table printed in the HPACK spec (as pasted below).
+func TestStaticTable(t *testing.T) {
+	fromSpec := `
+          +-------+-----------------------------+---------------+
+          | 1     | :authority                  |               |
+          | 2     | :method                     | GET           |
+          | 3     | :method                     | POST          |
+          | 4     | :path                       | /             |
+          | 5     | :path                       | /index.html   |
+          | 6     | :scheme                     | http          |
+          | 7     | :scheme                     | https         |
+          | 8     | :status                     | 200           |
+          | 9     | :status                     | 204           |
+          | 10    | :status                     | 206           |
+          | 11    | :status                     | 304           |
+          | 12    | :status                     | 400           |
+          | 13    | :status                     | 404           |
+          | 14    | :status                     | 500           |
+          | 15    | accept-charset              |               |
+          | 16    | accept-encoding             | gzip, deflate |
+          | 17    | accept-language             |               |
+          | 18    | accept-ranges               |               |
+          | 19    | accept                      |               |
+          | 20    | access-control-allow-origin |               |
+          | 21    | age                         |               |
+          | 22    | allow                       |               |
+          | 23    | authorization               |               |
+          | 24    | cache-control               |               |
+          | 25    | content-disposition         |               |
+          | 26    | content-encoding            |               |
+          | 27    | content-language            |               |
+          | 28    | content-length              |               |
+          | 29    | content-location            |               |
+          | 30    | content-range               |               |
+          | 31    | content-type                |               |
+          | 32    | cookie                      |               |
+          | 33    | date                        |               |
+          | 34    | etag                        |               |
+          | 35    | expect                      |               |
+          | 36    | expires                     |               |
+          | 37    | from                        |               |
+          | 38    | host                        |               |
+          | 39    | if-match                    |               |
+          | 40    | if-modified-since           |               |
+          | 41    | if-none-match               |               |
+          | 42    | if-range                    |               |
+          | 43    | if-unmodified-since         |               |
+          | 44    | last-modified               |               |
+          | 45    | link                        |               |
+          | 46    | location                    |               |
+          | 47    | max-forwards                |               |
+          | 48    | proxy-authenticate          |               |
+          | 49    | proxy-authorization         |               |
+          | 50    | range                       |               |
+          | 51    | referer                     |               |
+          | 52    | refresh                     |               |
+          | 53    | retry-after                 |               |
+          | 54    | server                      |               |
+          | 55    | set-cookie                  |               |
+          | 56    | strict-transport-security   |               |
+          | 57    | transfer-encoding           |               |
+          | 58    | user-agent                  |               |
+          | 59    | vary                        |               |
+          | 60    | via                         |               |
+          | 61    | www-authenticate            |               |
+          +-------+-----------------------------+---------------+
+`
+	// Parse each table row into (index, name, optional value).
+	bs := bufio.NewScanner(strings.NewReader(fromSpec))
+	re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`)
+	for bs.Scan() {
+		l := bs.Text()
+		if !strings.Contains(l, "|") {
+			continue
+		}
+		m := re.FindStringSubmatch(l)
+		if m == nil {
+			continue
+		}
+		i, err := strconv.Atoi(m[1])
+		if err != nil {
+			t.Errorf("Bogus integer on line %q", l)
+			continue
+		}
+		if i < 1 || i > len(staticTable) {
+			t.Errorf("Bogus index %d on line %q", i, l)
+			continue
+		}
+		if got, want := staticTable[i-1].Name, m[2]; got != want {
+			t.Errorf("header index %d name = %q; want %q", i, got, want)
+		}
+		if got, want := staticTable[i-1].Value, m[3]; got != want {
+			t.Errorf("header index %d value = %q; want %q", i, got, want)
+		}
+	}
+	if err := bs.Err(); err != nil {
+		t.Error(err)
+	}
+}
+
+// mustAt is a test helper: like Decoder.at, but panics on a bad index.
+func (d *Decoder) mustAt(idx int) HeaderField {
+	if hf, ok := d.at(uint64(idx)); !ok {
+		panic(fmt.Sprintf("bogus index %d", idx))
+	} else {
+		return hf
+	}
+}
+
+// TestDynamicTableAt exercises Decoder.at on both sides of the
+// static/dynamic table boundary (dynamic entries are newest-first).
+func TestDynamicTableAt(t *testing.T) {
+	d := NewDecoder(4096, nil)
+	at := d.mustAt
+	if got, want := at(2), (pair(":method", "GET")); got != want {
+		t.Errorf("at(2) = %v; want %v", got, want)
+	}
+	d.dynTab.add(pair("foo", "bar"))
+	d.dynTab.add(pair("blake", "miz"))
+	if got, want := at(len(staticTable)+1), (pair("blake", "miz")); got != want {
+		t.Errorf("at(dyn 1) = %v; want %v", got, want)
+	}
+	if got, want := at(len(staticTable)+2), (pair("foo", "bar")); got != want {
+		t.Errorf("at(dyn 2) = %v; want %v", got, want)
+	}
+	if got, want := at(3), (pair(":method", "POST")); got != want {
+		t.Errorf("at(3) = %v; want %v", got, want)
+	}
+}
+
+// TestDynamicTableSearch covers name/value matches, name-only matches,
+// sensitive fields (never value-matched), and misses.
+func TestDynamicTableSearch(t *testing.T) {
+	dt := dynamicTable{}
+	dt.setMaxSize(4096)
+
+	dt.add(pair("foo", "bar"))
+	dt.add(pair("blake", "miz"))
+	dt.add(pair(":method", "GET"))
+
+	tests := []struct {
+		hf        HeaderField
+		wantI     uint64
+		wantMatch bool
+	}{
+		// Name and Value match
+		{pair("foo", "bar"), 3, true},
+		{pair(":method", "GET"), 1, true},
+
+		// Only name match because of Sensitive == true
+		{HeaderField{"blake", "miz", true}, 2, false},
+
+		// Only Name matches
+		{pair("foo", "..."), 3, false},
+		{pair("blake", "..."), 2, false},
+		{pair(":method", "..."), 1, false},
+
+		// None match
+		{pair("foo-", "bar"), 0, false},
+	}
+	for _, tt := range tests {
+		if gotI, gotMatch := dt.search(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {
+			t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)
+		}
+	}
+}
+
+// TestDynamicTableSizeEvict checks size accounting (name+value length
+// plus 32 bytes overhead per entry) and eviction on setMaxSize/add.
+func TestDynamicTableSizeEvict(t *testing.T) {
+	d := NewDecoder(4096, nil)
+	if want := uint32(0); d.dynTab.size != want {
+		t.Fatalf("size = %d; want %d", d.dynTab.size, want)
+	}
+	add := d.dynTab.add
+	add(pair("blake", "eats pizza"))
+	if want := uint32(15 + 32); d.dynTab.size != want {
+		t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want)
+	}
+	add(pair("foo", "bar"))
+	if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {
+		t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want)
+	}
+	// Shrinking the max size must evict the oldest entry.
+	d.dynTab.setMaxSize(15 + 32 + 1 /* slop */)
+	if want := uint32(6 + 32); d.dynTab.size != want {
+		t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want)
+	}
+	if got, want := d.mustAt(len(staticTable)+1), (pair("foo", "bar")); got != want {
+		t.Errorf("at(dyn 1) = %v; want %v", got, want)
+	}
+	// An entry larger than the table evicts everything, including itself.
+	add(pair("long", strings.Repeat("x", 500)))
+	if want := uint32(0); d.dynTab.size != want {
+		t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want)
+	}
+}
+
+// TestDecoderDecode runs the single-field examples from the spec's
+// Appendix C.2, checking both emitted fields and dynamic table state.
+func TestDecoderDecode(t *testing.T) {
+	tests := []struct {
+		name       string
+		in         []byte
+		want       []HeaderField
+		wantDynTab []HeaderField // newest entry first
+	}{
+		// C.2.1 Literal Header Field with Indexing
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1
+		{"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"),
+			[]HeaderField{pair("custom-key", "custom-header")},
+			[]HeaderField{pair("custom-key", "custom-header")},
+		},
+
+		// C.2.2 Literal Header Field without Indexing
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2
+		{"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"),
+			[]HeaderField{pair(":path", "/sample/path")},
+			[]HeaderField{}},
+
+		// C.2.3 Literal Header Field never Indexed
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3
+		{"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"),
+			[]HeaderField{{"password", "secret", true}},
+			[]HeaderField{}},
+
+		// C.2.4 Indexed Header Field
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4
+		{"C.2.4", []byte("\x82"),
+			[]HeaderField{pair(":method", "GET")},
+			[]HeaderField{}},
+	}
+	for _, tt := range tests {
+		d := NewDecoder(4096, nil)
+		hf, err := d.DecodeFull(tt.in)
+		if err != nil {
+			t.Errorf("%s: %v", tt.name, err)
+			continue
+		}
+		if !reflect.DeepEqual(hf, tt.want) {
+			t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want)
+		}
+		gotDynTab := d.dynTab.reverseCopy()
+		if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) {
+			t.Errorf("%s: dynamic table after = %v; want %v", tt.name, gotDynTab, tt.wantDynTab)
+		}
+	}
+}
+
+// reverseCopy returns the dynamic table entries newest-first, the
+// order used by the test expectations.
+func (dt *dynamicTable) reverseCopy() (hf []HeaderField) {
+	hf = make([]HeaderField, len(dt.ents))
+	for i := range hf {
+		hf[i] = dt.ents[len(dt.ents)-1-i]
+	}
+	return
+}
+
+// encAndWant is one step of a decode series: an encoded header block
+// and the expected results after decoding it.
+type encAndWant struct {
+	enc         []byte
+	want        []HeaderField
+	wantDynTab  []HeaderField // expected dynamic table, newest entry first
+	wantDynSize uint32
+}
+
+// C.3 Request Examples without Huffman Coding
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3
+// Three consecutive request header blocks decoded with one Decoder.
+func TestDecodeC3_NoHuffman(t *testing.T) {
+	testDecodeSeries(t, 4096, []encAndWant{
+		{dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"),
+			[]HeaderField{
+				pair(":method", "GET"),
+				pair(":scheme", "http"),
+				pair(":path", "/"),
+				pair(":authority", "www.example.com"),
+			},
+			[]HeaderField{
+				pair(":authority", "www.example.com"),
+			},
+			57,
+		},
+		{dehex("8286 84be 5808 6e6f 2d63 6163 6865"),
+			[]HeaderField{
+				pair(":method", "GET"),
+				pair(":scheme", "http"),
+				pair(":path", "/"),
+				pair(":authority", "www.example.com"),
+				pair("cache-control", "no-cache"),
+			},
+			[]HeaderField{
+				pair("cache-control", "no-cache"),
+				pair(":authority", "www.example.com"),
+			},
+			110,
+		},
+		{dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"),
+			[]HeaderField{
+				pair(":method", "GET"),
+				pair(":scheme", "https"),
+				pair(":path", "/index.html"),
+				pair(":authority", "www.example.com"),
+				pair("custom-key", "custom-value"),
+			},
+			[]HeaderField{
+				pair("custom-key", "custom-value"),
+				pair("cache-control", "no-cache"),
+				pair(":authority", "www.example.com"),
+			},
+			164,
+		},
+	})
+}
+
+// C.4 Request Examples with Huffman Coding
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4
+// Same requests as C.3, with Huffman-coded string literals.
+func TestDecodeC4_Huffman(t *testing.T) {
+	testDecodeSeries(t, 4096, []encAndWant{
+		{dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"),
+			[]HeaderField{
+				pair(":method", "GET"),
+				pair(":scheme", "http"),
+				pair(":path", "/"),
+				pair(":authority", "www.example.com"),
+			},
+			[]HeaderField{
+				pair(":authority", "www.example.com"),
+			},
+			57,
+		},
+		{dehex("8286 84be 5886 a8eb 1064 9cbf"),
+			[]HeaderField{
+				pair(":method", "GET"),
+				pair(":scheme", "http"),
+				pair(":path", "/"),
+				pair(":authority", "www.example.com"),
+				pair("cache-control", "no-cache"),
+			},
+			[]HeaderField{
+				pair("cache-control", "no-cache"),
+				pair(":authority", "www.example.com"),
+			},
+			110,
+		},
+		{dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"),
+			[]HeaderField{
+				pair(":method", "GET"),
+				pair(":scheme", "https"),
+				pair(":path", "/index.html"),
+				pair(":authority", "www.example.com"),
+				pair("custom-key", "custom-value"),
+			},
+			[]HeaderField{
+				pair("custom-key", "custom-value"),
+				pair("cache-control", "no-cache"),
+				pair(":authority", "www.example.com"),
+			},
+			164,
+		},
+	})
+}
+
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5
+// "This section shows several consecutive header lists, corresponding
+// to HTTP responses, on the same connection. The HTTP/2 setting
+// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
+// octets, causing some evictions to occur."
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) {
+	testDecodeSeries(t, 256, []encAndWant{
+		{dehex(`
+4803 3330 3258 0770 7269 7661 7465 611d
+4d6f 6e2c 2032 3120 4f63 7420 3230 3133
+2032 303a 3133 3a32 3120 474d 546e 1768
+7474 7073 3a2f 2f77 7777 2e65 7861 6d70
+6c65 2e63 6f6d
+`),
+			[]HeaderField{
+				pair(":status", "302"),
+				pair("cache-control", "private"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("location", "https://www.example.com"),
+			},
+			[]HeaderField{
+				pair("location", "https://www.example.com"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("cache-control", "private"),
+				pair(":status", "302"),
+			},
+			222,
+		},
+		{dehex("4803 3330 37c1 c0bf"),
+			[]HeaderField{
+				pair(":status", "307"),
+				pair("cache-control", "private"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("location", "https://www.example.com"),
+			},
+			[]HeaderField{
+				pair(":status", "307"),
+				pair("location", "https://www.example.com"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("cache-control", "private"),
+			},
+			222,
+		},
+		{dehex(`
+88c1 611d 4d6f 6e2c 2032 3120 4f63 7420
+3230 3133 2032 303a 3133 3a32 3220 474d
+54c0 5a04 677a 6970 7738 666f 6f3d 4153
+444a 4b48 514b 425a 584f 5157 454f 5049
+5541 5851 5745 4f49 553b 206d 6178 2d61
+6765 3d33 3630 303b 2076 6572 7369 6f6e
+3d31
+`),
+			[]HeaderField{
+				pair(":status", "200"),
+				pair("cache-control", "private"),
+				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+				pair("location", "https://www.example.com"),
+				pair("content-encoding", "gzip"),
+				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+			},
+			[]HeaderField{
+				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+				pair("content-encoding", "gzip"),
+				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+			},
+			215,
+		},
+	})
+}
+
+// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6
+// "This section shows the same examples as the previous section, but
+// using Huffman encoding for the literal values. The HTTP/2 setting
+// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256
+// octets, causing some evictions to occur. The eviction mechanism
+// uses the length of the decoded literal values, so the same
+// evictions occurs as in the previous section."
+func TestDecodeC6_ResponsesHuffman(t *testing.T) {
+	testDecodeSeries(t, 256, []encAndWant{
+		{dehex(`
+4882 6402 5885 aec3 771a 4b61 96d0 7abe
+9410 54d4 44a8 2005 9504 0b81 66e0 82a6
+2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8
+e9ae 82ae 43d3
+`),
+			[]HeaderField{
+				pair(":status", "302"),
+				pair("cache-control", "private"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("location", "https://www.example.com"),
+			},
+			[]HeaderField{
+				pair("location", "https://www.example.com"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("cache-control", "private"),
+				pair(":status", "302"),
+			},
+			222,
+		},
+		{dehex("4883 640e ffc1 c0bf"),
+			[]HeaderField{
+				pair(":status", "307"),
+				pair("cache-control", "private"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("location", "https://www.example.com"),
+			},
+			[]HeaderField{
+				pair(":status", "307"),
+				pair("location", "https://www.example.com"),
+				pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
+				pair("cache-control", "private"),
+			},
+			222,
+		},
+		{dehex(`
+88c1 6196 d07a be94 1054 d444 a820 0595
+040b 8166 e084 a62d 1bff c05a 839b d9ab
+77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b
+3960 d5af 2708 7f36 72c1 ab27 0fb5 291f
+9587 3160 65c0 03ed 4ee5 b106 3d50 07
+`),
+			[]HeaderField{
+				pair(":status", "200"),
+				pair("cache-control", "private"),
+				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+				pair("location", "https://www.example.com"),
+				pair("content-encoding", "gzip"),
+				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+			},
+			[]HeaderField{
+				pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"),
+				pair("content-encoding", "gzip"),
+				pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
+			},
+			215,
+		},
+	})
+}
+
+// testDecodeSeries decodes consecutive header blocks with a single
+// Decoder of the given table size, checking the emitted fields and the
+// dynamic table contents and size after each step.
+func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {
+	d := NewDecoder(size, nil)
+	for i, step := range steps {
+		hf, err := d.DecodeFull(step.enc)
+		if err != nil {
+			t.Fatalf("Error at step index %d: %v", i, err)
+		}
+		if !reflect.DeepEqual(hf, step.want) {
+			t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want)
+		}
+		gotDynTab := d.dynTab.reverseCopy()
+		if !reflect.DeepEqual(gotDynTab, step.wantDynTab) {
+			t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab)
+		}
+		if d.dynTab.size != step.wantDynSize {
+			t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize)
+		}
+	}
+}
+
+// TestHuffmanDecode checks HuffmanDecode against the Huffman-coded
+// strings used in the spec's examples.
+func TestHuffmanDecode(t *testing.T) {
+	tests := []struct {
+		inHex, want string
+	}{
+		{"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"},
+		{"a8eb 1064 9cbf", "no-cache"},
+		{"25a8 49e9 5ba9 7d7f", "custom-key"},
+		{"25a8 49e9 5bb8 e8b4 bf", "custom-value"},
+		{"6402", "302"},
+		{"aec3 771a 4b", "private"},
+		{"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"},
+		{"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"},
+		{"9bd9 ab", "gzip"},
+		{"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07",
+			"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
+	}
+	for i, tt := range tests {
+		var buf bytes.Buffer
+		in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1))
+		if err != nil {
+			t.Errorf("%d. hex input error: %v", i, err)
+			continue
+		}
+		if _, err := HuffmanDecode(&buf, in); err != nil {
+			t.Errorf("%d. decode error: %v", i, err)
+			continue
+		}
+		if got := buf.String(); tt.want != got {
+			t.Errorf("%d. decode = %q; want %q", i, got, tt.want)
+		}
+	}
+}
+
+// TestAppendHuffmanString checks the encoder against the same spec
+// strings as TestHuffmanDecode, in the opposite direction.
+func TestAppendHuffmanString(t *testing.T) {
+	tests := []struct {
+		in, want string
+	}{
+		{"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"},
+		{"no-cache", "a8eb 1064 9cbf"},
+		{"custom-key", "25a8 49e9 5ba9 7d7f"},
+		{"custom-value", "25a8 49e9 5bb8 e8b4 bf"},
+		{"302", "6402"},
+		{"private", "aec3 771a 4b"},
+		{"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"},
+		{"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"},
+		{"gzip", "9bd9 ab"},
+		{"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
+			"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"},
+	}
+	for i, tt := range tests {
+		buf := []byte{}
+		want := strings.Replace(tt.want, " ", "", -1)
+		buf = AppendHuffmanString(buf, tt.in)
+		if got := hex.EncodeToString(buf); want != got {
+			t.Errorf("%d. encode = %q; want %q", i, got, want)
+		}
+	}
+}
+
+// TestReadVarInt table-tests readVarInt: single-byte values, saturated
+// prefixes needing continuation bytes, truncation, and overflow.
+func TestReadVarInt(t *testing.T) {
+	type res struct {
+		i        uint64
+		consumed int
+		err      error
+	}
+	tests := []struct {
+		n    byte
+		p    []byte
+		want res
+	}{
+		// Fits in a byte:
+		{1, []byte{0}, res{0, 1, nil}},
+		{2, []byte{2}, res{2, 1, nil}},
+		{3, []byte{6}, res{6, 1, nil}},
+		{4, []byte{14}, res{14, 1, nil}},
+		{5, []byte{30}, res{30, 1, nil}},
+		{6, []byte{62}, res{62, 1, nil}},
+		{7, []byte{126}, res{126, 1, nil}},
+		{8, []byte{254}, res{254, 1, nil}},
+
+		// Doesn't fit in a byte:
+		{1, []byte{1}, res{0, 0, errNeedMore}},
+		{2, []byte{3}, res{0, 0, errNeedMore}},
+		{3, []byte{7}, res{0, 0, errNeedMore}},
+		{4, []byte{15}, res{0, 0, errNeedMore}},
+		{5, []byte{31}, res{0, 0, errNeedMore}},
+		{6, []byte{63}, res{0, 0, errNeedMore}},
+		{7, []byte{127}, res{0, 0, errNeedMore}},
+		{8, []byte{255}, res{0, 0, errNeedMore}},
+
+		// Ignoring top bits:
+		{5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111
+		{5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100
+		{5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101
+
+		// Extra byte:
+		{5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte
+
+		// Short a byte:
+		{5, []byte{191, 154}, res{0, 0, errNeedMore}},
+
+		// integer overflow:
+		{1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},
+	}
+	for _, tt := range tests {
+		i, remain, err := readVarInt(tt.n, tt.p)
+		consumed := len(tt.p) - len(remain)
+		got := res{i, consumed, err}
+		if got != tt.want {
+			t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want)
+		}
+	}
+}
+
+// dehex decodes spec-style hex dumps (spaces and newlines ignored),
+// panicking on malformed input since inputs are test constants.
+func dehex(s string) []byte {
+	s = strings.Replace(s, " ", "", -1)
+	s = strings.Replace(s, "\n", "", -1)
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}

+ 159 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/hpack/huffman.go

@@ -0,0 +1,159 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+import (
+	"bytes"
+	"io"
+	"sync"
+)
+
+// bufPool recycles the scratch buffers used by HuffmanDecode.
+var bufPool = sync.Pool{
+	New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+//
+// NOTE(review): trailing padding bits are not validated against the
+// EOS prefix here — confirm whether strict padding checks are needed.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+
+	// Walk the decoding table a byte of code bits at a time; n is the
+	// current node, cur/nbits hold not-yet-consumed bits.
+	n := rootHuffmanNode
+	cur, nbits := uint(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		nbits += 8
+		for nbits >= 8 {
+			n = n.children[byte(cur>>(nbits-8))]
+			if n.children == nil {
+				// Leaf: emit the symbol; unused bits stay in cur.
+				buf.WriteByte(n.sym)
+				nbits -= n.codeLen
+				n = rootHuffmanNode
+			} else {
+				nbits -= 8
+			}
+		}
+	}
+	// Drain codes completed inside the final partial byte.
+	for nbits > 0 {
+		n = n.children[byte(cur<<(8-nbits))]
+		if n.children != nil || n.codeLen > nbits {
+			break
+		}
+		buf.WriteByte(n.sym)
+		nbits -= n.codeLen
+		n = rootHuffmanNode
+	}
+	return w.Write(buf.Bytes())
+}
+
+// node is a node in the 256-ary Huffman decoding table built by init.
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+// newInternalNode allocates an internal node with a full 256-entry
+// child table, one slot per possible next byte of code bits.
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+// rootHuffmanNode is the root of the decoding table, populated once at
+// package init from huffmanCodes and huffmanCodeLen.
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	for i, code := range huffmanCodes {
+		if i > 255 {
+			panic("too many huffman codes")
+		}
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+// addDecoderNode inserts sym into the decoding table. Full 8-bit
+// chunks of the code descend through internal nodes; the final partial
+// byte fans out so every child slot sharing the code's prefix points
+// at the same leaf.
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer. The final partial byte, if any, is
+// padded with the most significant bits of the EOS symbol.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			// Previous byte is full; open a fresh one.
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			// The whole remaining code fits in the current byte.
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		// Fill the current byte with the top rembits bits of the code.
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}

+ 353 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/hpack/tables.go

@@ -0,0 +1,353 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package hpack
+
+func pair(name, value string) HeaderField {
+	return HeaderField{Name: name, Value: value}
+}
+
// staticTable is the HPACK static header table from
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
// Indexing is 1-based in the spec, so staticTable[0] is index 1.
var staticTable = []HeaderField{
	pair(":authority", ""), // index 1 (1-based)
	pair(":method", "GET"),
	pair(":method", "POST"),
	pair(":path", "/"),
	pair(":path", "/index.html"),
	pair(":scheme", "http"),
	pair(":scheme", "https"),
	pair(":status", "200"),
	pair(":status", "204"),
	pair(":status", "206"),
	pair(":status", "304"),
	pair(":status", "400"),
	pair(":status", "404"),
	pair(":status", "500"),
	pair("accept-charset", ""),
	pair("accept-encoding", "gzip, deflate"),
	pair("accept-language", ""),
	pair("accept-ranges", ""),
	pair("accept", ""),
	pair("access-control-allow-origin", ""),
	pair("age", ""),
	pair("allow", ""),
	pair("authorization", ""),
	pair("cache-control", ""),
	pair("content-disposition", ""),
	pair("content-encoding", ""),
	pair("content-language", ""),
	pair("content-length", ""),
	pair("content-location", ""),
	pair("content-range", ""),
	pair("content-type", ""),
	pair("cookie", ""),
	pair("date", ""),
	pair("etag", ""),
	pair("expect", ""),
	pair("expires", ""),
	pair("from", ""),
	pair("host", ""),
	pair("if-match", ""),
	pair("if-modified-since", ""),
	pair("if-none-match", ""),
	pair("if-range", ""),
	pair("if-unmodified-since", ""),
	pair("last-modified", ""),
	pair("link", ""),
	pair("location", ""),
	pair("max-forwards", ""),
	pair("proxy-authenticate", ""),
	pair("proxy-authorization", ""),
	pair("range", ""),
	pair("referer", ""),
	pair("refresh", ""),
	pair("retry-after", ""),
	pair("server", ""),
	pair("set-cookie", ""),
	pair("strict-transport-security", ""),
	pair("transfer-encoding", ""),
	pair("user-agent", ""),
	pair("vary", ""),
	pair("via", ""),
	pair("www-authenticate", ""),
}
+
// huffmanCodes holds the static Huffman code for each byte value
// 0-255, from the HPACK spec's Huffman code appendix. The code for
// byte b occupies the low huffmanCodeLen[b] bits of huffmanCodes[b]
// (see appendByteToHuffmanCode).
var huffmanCodes = []uint32{
	0x1ff8,
	0x7fffd8,
	0xfffffe2,
	0xfffffe3,
	0xfffffe4,
	0xfffffe5,
	0xfffffe6,
	0xfffffe7,
	0xfffffe8,
	0xffffea,
	0x3ffffffc,
	0xfffffe9,
	0xfffffea,
	0x3ffffffd,
	0xfffffeb,
	0xfffffec,
	0xfffffed,
	0xfffffee,
	0xfffffef,
	0xffffff0,
	0xffffff1,
	0xffffff2,
	0x3ffffffe,
	0xffffff3,
	0xffffff4,
	0xffffff5,
	0xffffff6,
	0xffffff7,
	0xffffff8,
	0xffffff9,
	0xffffffa,
	0xffffffb,
	0x14,
	0x3f8,
	0x3f9,
	0xffa,
	0x1ff9,
	0x15,
	0xf8,
	0x7fa,
	0x3fa,
	0x3fb,
	0xf9,
	0x7fb,
	0xfa,
	0x16,
	0x17,
	0x18,
	0x0,
	0x1,
	0x2,
	0x19,
	0x1a,
	0x1b,
	0x1c,
	0x1d,
	0x1e,
	0x1f,
	0x5c,
	0xfb,
	0x7ffc,
	0x20,
	0xffb,
	0x3fc,
	0x1ffa,
	0x21,
	0x5d,
	0x5e,
	0x5f,
	0x60,
	0x61,
	0x62,
	0x63,
	0x64,
	0x65,
	0x66,
	0x67,
	0x68,
	0x69,
	0x6a,
	0x6b,
	0x6c,
	0x6d,
	0x6e,
	0x6f,
	0x70,
	0x71,
	0x72,
	0xfc,
	0x73,
	0xfd,
	0x1ffb,
	0x7fff0,
	0x1ffc,
	0x3ffc,
	0x22,
	0x7ffd,
	0x3,
	0x23,
	0x4,
	0x24,
	0x5,
	0x25,
	0x26,
	0x27,
	0x6,
	0x74,
	0x75,
	0x28,
	0x29,
	0x2a,
	0x7,
	0x2b,
	0x76,
	0x2c,
	0x8,
	0x9,
	0x2d,
	0x77,
	0x78,
	0x79,
	0x7a,
	0x7b,
	0x7ffe,
	0x7fc,
	0x3ffd,
	0x1ffd,
	0xffffffc,
	0xfffe6,
	0x3fffd2,
	0xfffe7,
	0xfffe8,
	0x3fffd3,
	0x3fffd4,
	0x3fffd5,
	0x7fffd9,
	0x3fffd6,
	0x7fffda,
	0x7fffdb,
	0x7fffdc,
	0x7fffdd,
	0x7fffde,
	0xffffeb,
	0x7fffdf,
	0xffffec,
	0xffffed,
	0x3fffd7,
	0x7fffe0,
	0xffffee,
	0x7fffe1,
	0x7fffe2,
	0x7fffe3,
	0x7fffe4,
	0x1fffdc,
	0x3fffd8,
	0x7fffe5,
	0x3fffd9,
	0x7fffe6,
	0x7fffe7,
	0xffffef,
	0x3fffda,
	0x1fffdd,
	0xfffe9,
	0x3fffdb,
	0x3fffdc,
	0x7fffe8,
	0x7fffe9,
	0x1fffde,
	0x7fffea,
	0x3fffdd,
	0x3fffde,
	0xfffff0,
	0x1fffdf,
	0x3fffdf,
	0x7fffeb,
	0x7fffec,
	0x1fffe0,
	0x1fffe1,
	0x3fffe0,
	0x1fffe2,
	0x7fffed,
	0x3fffe1,
	0x7fffee,
	0x7fffef,
	0xfffea,
	0x3fffe2,
	0x3fffe3,
	0x3fffe4,
	0x7ffff0,
	0x3fffe5,
	0x3fffe6,
	0x7ffff1,
	0x3ffffe0,
	0x3ffffe1,
	0xfffeb,
	0x7fff1,
	0x3fffe7,
	0x7ffff2,
	0x3fffe8,
	0x1ffffec,
	0x3ffffe2,
	0x3ffffe3,
	0x3ffffe4,
	0x7ffffde,
	0x7ffffdf,
	0x3ffffe5,
	0xfffff1,
	0x1ffffed,
	0x7fff2,
	0x1fffe3,
	0x3ffffe6,
	0x7ffffe0,
	0x7ffffe1,
	0x3ffffe7,
	0x7ffffe2,
	0xfffff2,
	0x1fffe4,
	0x1fffe5,
	0x3ffffe8,
	0x3ffffe9,
	0xffffffd,
	0x7ffffe3,
	0x7ffffe4,
	0x7ffffe5,
	0xfffec,
	0xfffff3,
	0xfffed,
	0x1fffe6,
	0x3fffe9,
	0x1fffe7,
	0x1fffe8,
	0x7ffff3,
	0x3fffea,
	0x3fffeb,
	0x1ffffee,
	0x1ffffef,
	0xfffff4,
	0xfffff5,
	0x3ffffea,
	0x7ffff4,
	0x3ffffeb,
	0x7ffffe6,
	0x3ffffec,
	0x3ffffed,
	0x7ffffe7,
	0x7ffffe8,
	0x7ffffe9,
	0x7ffffea,
	0x7ffffeb,
	0xffffffe,
	0x7ffffec,
	0x7ffffed,
	0x7ffffee,
	0x7ffffef,
	0x7fffff0,
	0x3ffffee,
}
+
// huffmanCodeLen holds the bit length of the Huffman code for each
// byte value; huffmanCodeLen[b] pairs with huffmanCodes[b].
// 16 entries per row, so row r covers byte values 16*r .. 16*r+15.
var huffmanCodeLen = []uint8{
	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}

+ 249 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/http2.go

@@ -0,0 +1,249 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This is a work in progress. This package is low-level and intended
+// to be used directly by very few people. Most users will use it
+// indirectly through integration with the net/http package. See
+// ConfigureServer. That ConfigureServer call will likely be automatic
+// or available via an empty import in the future.
+//
+// This package currently targets draft-14. See http://http2.github.io/
+package http2
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"sync"
+)
+
// VerboseLogs enables verbose HTTP/2 debug logging (settable in tests
// via the verboseh2 flag).
var VerboseLogs = false
+
const (
	// ClientPreface is the string that must be sent by new
	// connections from clients.
	ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

	// initialMaxFrameSize is the SETTINGS_MAX_FRAME_SIZE default.
	// http://http2.github.io/http2-spec/#rfc.section.6.5.2
	initialMaxFrameSize = 16384

	// NextProtoTLS is the NPN/ALPN protocol negotiated during
	// HTTP/2's TLS setup.
	NextProtoTLS = "h2"

	// initialHeaderTableSize is the default HPACK header table size.
	// http://http2.github.io/http2-spec/#SettingValues
	initialHeaderTableSize = 4096

	initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size

	// defaultMaxReadFrameSize is used when Server.MaxReadFrameSize
	// is zero or out of the valid range.
	defaultMaxReadFrameSize = 1 << 20
)
+
var (
	// clientPreface is the precomputed byte form of ClientPreface.
	clientPreface = []byte(ClientPreface)
)
+
// streamState is the HTTP/2 stream state.
type streamState int

const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateResvLocal
	stateResvRemote
	stateClosed
)

// stateName maps each streamState to its human-readable name.
var stateName = [...]string{
	stateIdle:             "Idle",
	stateOpen:             "Open",
	stateHalfClosedLocal:  "HalfClosedLocal",
	stateHalfClosedRemote: "HalfClosedRemote",
	stateResvLocal:        "ResvLocal",
	stateResvRemote:       "ResvRemote",
	stateClosed:           "Closed",
}

// String returns the human-readable name of the state.
func (st streamState) String() string {
	return stateName[st]
}
+
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
	// ID is which setting is being set.
	// See http://http2.github.io/http2-spec/#SettingValues
	ID SettingID

	// Val is the value.
	Val uint32
}

// String returns a "[ID = Val]" representation for debugging.
func (s Setting) String() string {
	return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+	// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+	switch s.ID {
+	case SettingEnablePush:
+		if s.Val != 1 && s.Val != 0 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	case SettingInitialWindowSize:
+		if s.Val > 1<<31-1 {
+			return ConnectionError(ErrCodeFlowControl)
+		}
+	case SettingMaxFrameSize:
+		if s.Val < 16384 || s.Val > 1<<24-1 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	}
+	return nil
+}
+
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16

// Setting IDs registered by the HTTP/2 spec.
const (
	SettingHeaderTableSize      SettingID = 0x1
	SettingEnablePush           SettingID = 0x2
	SettingMaxConcurrentStreams SettingID = 0x3
	SettingInitialWindowSize    SettingID = 0x4
	SettingMaxFrameSize         SettingID = 0x5
	SettingMaxHeaderListSize    SettingID = 0x6
)
+
// settingName maps known SettingIDs to their spec names; used by
// SettingID.String.
var settingName = map[SettingID]string{
	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
	SettingEnablePush:           "ENABLE_PUSH",
	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
}
+
+func (s SettingID) String() string {
+	if v, ok := settingName[s]; ok {
+		return v
+	}
+	return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
// validHeader reports whether v is a usable HTTP/2 header field name:
// non-empty, ASCII, and free of uppercase letters.
func validHeader(v string) bool {
	if v == "" {
		return false
	}
	for _, r := range v {
		// "Just as in HTTP/1.x, header field names are
		// strings of ASCII characters that are compared in a
		// case-insensitive fashion. However, header field
		// names MUST be converted to lowercase prior to their
		// encoding in HTTP/2. "
		switch {
		case r >= 127:
			return false
		case 'A' <= r && r <= 'Z':
			return false
		}
	}
	return true
}
+
// httpCodeStringCommon caches the decimal string form of every status
// code known to net/http, so httpCodeString can skip strconv.Itoa in
// the common case.
var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)

func init() {
	// Pre-render all codes net/http has text for.
	for i := 100; i <= 999; i++ {
		if v := http.StatusText(i); v != "" {
			httpCodeStringCommon[i] = strconv.Itoa(i)
		}
	}
}
+
+func httpCodeString(code int) string {
+	if s, ok := httpCodeStringCommon[code]; ok {
+		return s
+	}
+	return strconv.Itoa(code)
+}
+
// stringWriter is the WriteString interface, copied from pkg io.
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// A gate lets two goroutines coordinate their activities.
type gate chan struct{}

// Done signals the peer goroutine by sending on the gate.
func (g gate) Done() { g <- struct{}{} }

// Wait blocks until the peer goroutine calls Done.
func (g gate) Wait() { <-g }
+
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}

// Init makes a closeWaiter usable.
// It exists so a closeWaiter value can be placed inside a larger
// struct and initialized in place.
func (cw *closeWaiter) Init() {
	*cw = make(chan struct{})
}

// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw closeWaiter) Close() {
	close(cw)
}

// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
	<-cw
}
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+	w  io.Writer     // immutable
+	bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+	return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+	New: func() interface{} {
+		// TODO: pick something better? this is a bit under
+		// (3 x typical 1500 byte MTU) at least.
+		return bufio.NewWriterSize(nil, 4<<10)
+	},
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+	if w.bw == nil {
+		bw := bufWriterPool.Get().(*bufio.Writer)
+		bw.Reset(w.w)
+		w.bw = bw
+	}
+	return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+	bw := w.bw
+	if bw == nil {
+		return nil
+	}
+	err := bw.Flush()
+	bw.Reset(nil)
+	bufWriterPool.Put(bw)
+	w.bw = nil
+	return err
+}

+ 152 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/http2_test.go

@@ -0,0 +1,152 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"net/http"
+	"os/exec"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bradfitz/http2/hpack"
+)
+
// knownFailing gates tests that are expected to fail; they run only
// when the --known_failing flag is given.
var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.")

// condSkipFailingTest skips t unless --known_failing was given.
func condSkipFailingTest(t *testing.T) {
	if !*knownFailing {
		t.Skip("Skipping known-failing test without --known_failing")
	}
}
+
func init() {
	// Enable DebugGoroutines for all tests, and expose a flag for
	// verbose HTTP/2 logging.
	DebugGoroutines = true
	flag.BoolVar(&VerboseLogs, "verboseh2", false, "Verbose HTTP/2 debug logging")
}
+
// TestSettingString verifies Setting's String format for both known
// and unknown setting IDs.
func TestSettingString(t *testing.T) {
	tests := []struct {
		s    Setting
		want string
	}{
		{Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"},
		{Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"},
	}
	for i, tt := range tests {
		got := fmt.Sprint(tt.s)
		if got != tt.want {
			t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want)
		}
	}
}
+
// twriter is an io.Writer that forwards writes to a test's log,
// optionally dropping lines that match the serverTester's log filter.
type twriter struct {
	t  testing.TB
	st *serverTester // optional
}

// Write logs p via t.Logf unless it contains a filtered phrase.
func (w twriter) Write(p []byte) (n int, err error) {
	if w.st != nil {
		ps := string(p)
		for _, phrase := range w.st.logFilter {
			if strings.Contains(ps, phrase) {
				return len(p), nil // no logging
			}
		}
	}
	w.t.Logf("%s", p)
	return len(p), nil
}
+
// encodeHeaderNoImplicit is like encodeHeader, but doesn't add
// implicit pseudo headers. headers must be an even-length list of
// key/value pairs.
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for len(headers) > 0 {
		k, v := headers[0], headers[1]
		headers = headers[2:]
		if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {
			t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
		}
	}
	return buf.Bytes()
}
+
// requireCurl skips t unless a dockerized curl is available and
// reports HTTP2 support.
func requireCurl(t *testing.T) {
	out, err := dockerLogs(curl(t, "--version"))
	if err != nil {
		t.Skipf("failed to determine curl features; skipping test")
	}
	if !strings.Contains(string(out), "HTTP2") {
		t.Skip("curl doesn't support HTTP2; skipping test")
	}
}
+
// curl starts a detached gohttp2/curl docker container running curl
// with args and returns the container ID. The test is skipped if the
// container cannot be started.
func curl(t *testing.T, args ...string) (container string) {
	out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).CombinedOutput()
	if err != nil {
		t.Skipf("Failed to run curl in docker: %v, %s", err, out)
	}
	return strings.TrimSpace(string(out))
}
+
// puppetCommand is one instruction for a handlerPuppet: run fn
// against the handler's ResponseWriter/Request, then signal done.
type puppetCommand struct {
	fn   func(w http.ResponseWriter, r *http.Request)
	done chan<- bool
}

// handlerPuppet lets a test drive a live handler invocation step by
// step from outside the handler goroutine.
type handlerPuppet struct {
	ch chan puppetCommand
}

// newHandlerPuppet returns a ready-to-use handlerPuppet.
func newHandlerPuppet() *handlerPuppet {
	return &handlerPuppet{
		ch: make(chan puppetCommand),
	}
}

// act runs inside the handler: it executes queued commands until the
// puppet's channel is closed via done.
func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {
	for cmd := range p.ch {
		cmd.fn(w, r)
		cmd.done <- true
	}
}

// done releases act, letting the handler return.
func (p *handlerPuppet) done() { close(p.ch) }

// do runs fn inside the handler and blocks until it completes.
func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
	done := make(chan bool)
	p.ch <- puppetCommand{fn, done}
	<-done
}
// dockerLogs waits for container to exit, returns its combined log
// output, and removes the container. A non-zero exit status is
// reported as an error alongside the logs.
func dockerLogs(container string) ([]byte, error) {
	out, err := exec.Command("docker", "wait", container).CombinedOutput()
	if err != nil {
		return out, err
	}
	exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
	if err != nil {
		return out, errors.New("unexpected exit status from docker wait")
	}
	out, err = exec.Command("docker", "logs", container).CombinedOutput()
	// Best-effort cleanup; removal errors are ignored.
	exec.Command("docker", "rm", container).Run()
	if err == nil && exitStatus != 0 {
		err = fmt.Errorf("exit status %d: %s", exitStatus, out)
	}
	return out, err
}
+
// kill force-stops and removes the given docker container,
// best-effort: all errors are ignored.
func kill(container string) {
	for _, sub := range []string{"kill", "rm"} {
		exec.Command("docker", sub, container).Run()
	}
}

+ 43 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/pipe.go

@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"sync"
+)
+
+type pipe struct {
+	b buffer
+	c sync.Cond
+	m sync.Mutex
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (r *pipe) Read(p []byte) (n int, err error) {
+	r.c.L.Lock()
+	defer r.c.L.Unlock()
+	for r.b.Len() == 0 && !r.b.closed {
+		r.c.Wait()
+	}
+	return r.b.Read(p)
+}
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (w *pipe) Write(p []byte) (n int, err error) {
+	w.c.L.Lock()
+	defer w.c.L.Unlock()
+	defer w.c.Signal()
+	return w.b.Write(p)
+}
+
+func (c *pipe) Close(err error) {
+	c.c.L.Lock()
+	defer c.c.L.Unlock()
+	defer c.c.Signal()
+	c.b.Close(err)
+}

+ 24 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/pipe_test.go

@@ -0,0 +1,24 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"errors"
+	"testing"
+)
+
// TestPipeClose verifies that after two Close calls, a Read reports
// the error from the first Close (the second is a no-op).
func TestPipeClose(t *testing.T) {
	var p pipe
	p.c.L = &p.m
	a := errors.New("a")
	b := errors.New("b")
	p.Close(a)
	p.Close(b)
	_, err := p.Read(make([]byte, 1))
	if err != a {
		t.Errorf("err = %v want %v", err, a)
	}
}

+ 121 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/priority_test.go

@@ -0,0 +1,121 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"testing"
+)
+
// TestPriority re-parents stream A onto its own child B and checks
// that both streams end up with the expected parent and weight.
func TestPriority(t *testing.T) {
	// A -> B
	// move A's parent to B
	streams := make(map[uint32]*stream)
	a := &stream{
		parent: nil,
		weight: 16,
	}
	streams[1] = a
	b := &stream{
		parent: a,
		weight: 16,
	}
	streams[2] = b
	adjustStreamPriority(streams, 1, PriorityParam{
		Weight:    20,
		StreamDep: 2,
	})
	if a.parent != b {
		t.Errorf("Expected A's parent to be B")
	}
	if a.weight != 20 {
		t.Errorf("Expected A's weight to be 20; got %d", a.weight)
	}
	if b.parent != nil {
		t.Errorf("Expected B to have no parent")
	}
	if b.weight != 16 {
		t.Errorf("Expected B's weight to be 16; got %d", b.weight)
	}
}
+
+func TestPriorityExclusiveZero(t *testing.T) {
+	// A B and C are all children of the 0 stream.
+	// Exclusive reprioritization to any of the streams
+	// should bring the rest of the streams under the
+	// reprioritized stream
+	streams := make(map[uint32]*stream)
+	a := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[1] = a
+	b := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[2] = b
+	c := &stream{
+		parent: nil,
+		weight: 16,
+	}
+	streams[3] = c
+	adjustStreamPriority(streams, 3, PriorityParam{
+		Weight:    20,
+		StreamDep: 0,
+		Exclusive: true,
+	})
+	if a.parent != c {
+		t.Errorf("Expected A's parent to be C")
+	}
+	if a.weight != 16 {
+		t.Errorf("Expected A's weight to be 16; got %d", a.weight)
+	}
+	if b.parent != c {
+		t.Errorf("Expected B's parent to be C")
+	}
+	if b.weight != 16 {
+		t.Errorf("Expected B's weight to be 16; got %d", b.weight)
+	}
+	if c.parent != nil {
+		t.Errorf("Expected C to have no parent")
+	}
+	if c.weight != 20 {
+		t.Errorf("Expected C's weight to be 20; got %d", b.weight)
+	}
+}
+
// TestPriorityOwnParent makes stream A depend on itself and checks
// the self-dependency is resolved without changing the tree shape.
func TestPriorityOwnParent(t *testing.T) {
	streams := make(map[uint32]*stream)
	a := &stream{
		parent: nil,
		weight: 16,
	}
	streams[1] = a
	b := &stream{
		parent: a,
		weight: 16,
	}
	streams[2] = b
	adjustStreamPriority(streams, 1, PriorityParam{
		Weight:    20,
		StreamDep: 1,
	})
	if a.parent != nil {
		t.Errorf("Expected A's parent to be nil")
	}
	if a.weight != 20 {
		t.Errorf("Expected A's weight to be 20; got %d", a.weight)
	}
	if b.parent != a {
		t.Errorf("Expected B's parent to be A")
	}
	if b.weight != 16 {
		t.Errorf("Expected B's weight to be 16; got %d", b.weight)
	}

}

+ 1777 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/server.go

@@ -0,0 +1,1777 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: finish GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during a shutdown race.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable?  or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to going into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bradfitz/http2/hpack"
+)
+
const (
	prefaceTimeout        = 10 * time.Second // max wait for the client connection preface
	firstSettingsTimeout  = 2 * time.Second  // should be in-flight with preface anyway
	handlerChunkWriteSize = 4 << 10          // bufio size used for handler response writes (see responseWriterStatePool)
	defaultMaxStreams     = 250              // TODO: make this 100 as the GFE seems to?
)
+
// Sentinel errors used by the server internals.
var (
	errClientDisconnected = errors.New("client disconnected")
	errClosedBody         = errors.New("body closed by handler")
	errStreamBroken       = errors.New("http2: stream broken")
)
+
// responseWriterStatePool reuses responseWriterState values — each
// carrying a chunk-sized bufio.Writer — across requests to reduce
// allocations.
var responseWriterStatePool = sync.Pool{
	New: func() interface{} {
		rws := &responseWriterState{}
		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
		return rws
	},
}
+
// Test hooks. All are nil/unset except in tests.
var (
	// testHookOnConn runs at the start of each new connection.
	testHookOnConn func()
	// testHookGetServerConn observes each serverConn before its
	// serve loop starts.
	testHookGetServerConn func(*serverConn)
	// testHookOnPanicMu is nil except in tests.
	testHookOnPanicMu *sync.Mutex
	// testHookOnPanic is consulted on handler panics; rePanic
	// controls whether the panic is re-raised.
	testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
)
+
// Server is an HTTP/2 server.
type Server struct {
	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
	// which may run at a time over all connections.
	// Negative or zero means no limit.
	// TODO: implement
	MaxHandlers int

	// MaxConcurrentStreams optionally specifies the number of
	// concurrent streams that each client may have open at a
	// time. This is unrelated to the number of http.Handler goroutines
	// which may be active globally, which is MaxHandlers.
	// If zero, MaxConcurrentStreams defaults to at least 100, per
	// the HTTP/2 spec's recommendations.
	MaxConcurrentStreams uint32

	// MaxReadFrameSize optionally specifies the largest frame
	// this server is willing to read. A valid value is between
	// 16k and 16M, inclusive. If zero or otherwise invalid, a
	// default value is used.
	MaxReadFrameSize uint32

	// PermitProhibitedCipherSuites, if true, permits the use of
	// cipher suites prohibited by the HTTP/2 spec.
	PermitProhibitedCipherSuites bool
}
+
+func (s *Server) maxReadFrameSize() uint32 {
+	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+		return v
+	}
+	return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+	if v := s.MaxConcurrentStreams; v > 0 {
+		return v
+	}
+	return defaultMaxStreams
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
//
// ConfigureServer must be called before s begins serving.
func ConfigureServer(s *http.Server, conf *Server) {
	if conf == nil {
		conf = new(Server)
	}
	if s.TLSConfig == nil {
		s.TLSConfig = new(tls.Config)
	}

	// Note: not setting MinVersion to tls.VersionTLS12,
	// as we don't want to interfere with HTTP/1.1 traffic
	// on the user's server. We enforce TLS 1.2 later once
	// we accept a connection. Ideally this should be done
	// during next-proto selection, but using TLS <1.2 with
	// HTTP/2 is still the client's bug.

	// Be sure we advertise tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
	// at least.
	// TODO: enable PreferServerCipherSuites?
	if s.TLSConfig.CipherSuites != nil {
		const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
		haveRequired := false
		for _, v := range s.TLSConfig.CipherSuites {
			if v == requiredCipher {
				haveRequired = true
				break
			}
		}
		if !haveRequired {
			s.TLSConfig.CipherSuites = append(s.TLSConfig.CipherSuites, requiredCipher)
		}
	}

	// Advertise "h2" via NPN/ALPN unless the caller already did.
	haveNPN := false
	for _, p := range s.TLSConfig.NextProtos {
		if p == NextProtoTLS {
			haveNPN = true
			break
		}
	}
	if !haveNPN {
		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
	}
	// h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
	// to switch to "h2".
	s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")

	if s.TLSNextProto == nil {
		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
	}
	// Route negotiated HTTP/2 connections into this package.
	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
		if testHookOnConn != nil {
			testHookOnConn()
		}
		conf.handleConn(hs, c, h)
	}
	s.TLSNextProto[NextProtoTLS] = protoHandler
	s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
}
+
// handleConn builds a serverConn for the accepted connection c,
// validates its TLS properties (version, SNI, cipher suite), and then
// runs the serve loop until the connection ends.
func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
	sc := &serverConn{
		srv:              srv,
		hs:               hs,
		conn:             c,
		remoteAddrStr:    c.RemoteAddr().String(),
		bw:               newBufferedWriter(c),
		handler:          h,
		streams:          make(map[uint32]*stream),
		readFrameCh:      make(chan frameAndGate),
		readFrameErrCh:   make(chan error, 1), // must be buffered for 1
		wantWriteFrameCh: make(chan frameWriteMsg, 8),
		wroteFrameCh:     make(chan struct{}, 1), // buffered; one send in reading goroutine
		bodyReadCh:       make(chan bodyReadMsg), // buffering doesn't matter either way
		doneServing:      make(chan struct{}),
		advMaxStreams:    srv.maxConcurrentStreams(),
		writeSched: writeScheduler{
			maxFrameSize: initialMaxFrameSize,
		},
		initialWindowSize: initialWindowSize,
		headerTableSize:   initialHeaderTableSize,
		serveG:            newGoroutineLock(),
		pushEnabled:       true,
	}
	sc.flow.add(initialWindowSize)
	sc.inflow.add(initialWindowSize)
	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
	sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)

	fr := NewFramer(sc.bw, c)
	fr.SetMaxReadFrameSize(srv.maxReadFrameSize())
	sc.framer = fr

	if tc, ok := c.(*tls.Conn); ok {
		sc.tlsState = new(tls.ConnectionState)
		*sc.tlsState = tc.ConnectionState()
		// 9.2 Use of TLS Features
		// An implementation of HTTP/2 over TLS MUST use TLS
		// 1.2 or higher with the restrictions on feature set
		// and cipher suite described in this section. Due to
		// implementation limitations, it might not be
		// possible to fail TLS negotiation. An endpoint MUST
		// immediately terminate an HTTP/2 connection that
		// does not meet the TLS requirements described in
		// this section with a connection error (Section
		// 5.4.1) of type INADEQUATE_SECURITY.
		if sc.tlsState.Version < tls.VersionTLS12 {
			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
			return
		}

		if sc.tlsState.ServerName == "" {
			// Client must use SNI, but we don't enforce that anymore,
			// since it was causing problems when connecting to bare IP
			// addresses during development.
			//
			// TODO: optionally enforce? Or enforce at the time we receive
			// a new request, and verify that the ServerName matches the :authority?
			// But that precludes proxy situations, perhaps.
			//
			// So for now, do nothing here again.
		}

		if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
			// "Endpoints MAY choose to generate a connection error
			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
			// the prohibited cipher suites are negotiated."
			//
			// We choose that. In my opinion, the spec is weak
			// here. It also says both parties must support at least
			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
			// excuses here. If we really must, we could allow an
			// "AllowInsecureWeakCiphers" option on the server later.
			// Let's see how it plays out first.
			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
			return
		}
	}

	if hook := testHookGetServerConn; hook != nil {
		hook(sc)
	}
	sc.serve()
}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+	switch cipher {
+	case tls.TLS_RSA_WITH_RC4_128_SHA,
+		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+		// Reject cipher suites from Appendix A.
+		// "This list includes those cipher suites that do not
+		// offer an ephemeral key exchange and those that are
+		// based on the TLS null, stream or block cipher type"
+		return true
+	default:
+		return false
+	}
+}
+
// rejectConn tells the peer, best-effort, why it is being rejected
// (via GOAWAY with err and debug data) and closes the connection.
func (sc *serverConn) rejectConn(err ErrCode, debug string) {
	log.Printf("REJECTING conn: %v, %s", err, debug)
	// ignoring errors. hanging up anyway.
	sc.framer.WriteGoAway(0, err, []byte(debug))
	sc.bw.Flush()
	sc.conn.Close()
}
+
+// frameAndGate coordinates the readFrames and serve
+// goroutines. Because the Framer interface only permits the most
+// recently-read Frame from being accessed, the readFrames goroutine
+// blocks until it has a frame, passes it to serve, and then waits for
+// serve to be done with it before reading the next one.
+type frameAndGate struct {
+	f Frame
+	g gate // g.Done() is called by serve when f may be recycled
+}
+
+// serverConn is the state of a single HTTP/2 server connection.
+// Fields are grouped by ownership: immutable after setup, owned by
+// the serve goroutine, or owned by the writeFrameAsync goroutine.
+type serverConn struct {
+	// Immutable:
+	srv              *Server
+	hs               *http.Server
+	conn             net.Conn
+	bw               *bufferedWriter // writing to conn
+	handler          http.Handler
+	framer           *Framer
+	hpackDecoder     *hpack.Decoder
+	doneServing      chan struct{}     // closed when serverConn.serve ends
+	readFrameCh      chan frameAndGate // written by serverConn.readFrames
+	readFrameErrCh   chan error // receives the read error before readFrameCh is closed
+	wantWriteFrameCh chan frameWriteMsg   // from handlers -> serve
+	wroteFrameCh     chan struct{}        // from writeFrameAsync -> serve, tickles more frame writes
+	bodyReadCh       chan bodyReadMsg     // from handlers -> serve
+	testHookCh       chan func()          // code to run on the serve loop
+	flow             flow                 // conn-wide (not stream-specific) outbound flow control
+	inflow           flow                 // conn-wide inbound flow control
+	tlsState         *tls.ConnectionState // shared by all handlers, like net/http
+	remoteAddrStr    string
+
+	// Everything following is owned by the serve loop; use serveG.check():
+	serveG                goroutineLock // used to verify funcs are on serve()
+	pushEnabled           bool
+	sawFirstSettings      bool // got the initial SETTINGS frame after the preface
+	needToSendSettingsAck bool
+	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
+	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
+	curOpenStreams        uint32 // client's number of open streams
+	maxStreamID           uint32 // max ever seen
+	streams               map[uint32]*stream
+	initialWindowSize     int32
+	headerTableSize       uint32
+	maxHeaderListSize     uint32            // zero means unknown (default)
+	canonHeader           map[string]string // http2-lower-case -> Go-Canonical-Case
+	req                   requestParam      // non-zero while reading request headers
+	writingFrame          bool              // started write goroutine but haven't heard back on wroteFrameCh
+	needsFrameFlush       bool              // last frame write wasn't a flush
+	writeSched            writeScheduler
+	inGoAway              bool // we've started to or sent GOAWAY
+	needToSendGoAway      bool // we need to schedule a GOAWAY frame write
+	goAwayCode            ErrCode
+	shutdownTimerCh       <-chan time.Time // nil until used
+	shutdownTimer         *time.Timer      // nil until used
+
+	// Owned by the writeFrameAsync goroutine:
+	headerWriteBuf bytes.Buffer
+	hpackEncoder   *hpack.Encoder
+}
+
+// requestParam is the state of the next request, initialized over
+// potentially several frames: HEADERS + zero or more CONTINUATION
+// frames. It is reset (via resetPendingRequest) once the header
+// block is complete.
+type requestParam struct {
+	// stream is non-nil if we're reading (HEADER or CONTINUATION)
+	// frames for a request (but not DATA).
+	stream            *stream
+	header            http.Header
+	method, path      string
+	scheme, authority string
+	sawRegularHeader  bool // saw a non-pseudo header already
+	invalidHeader     bool // an invalid header was seen
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+	// immutable:
+	id   uint32
+	body *pipe       // non-nil if expecting DATA frames
+	cw   closeWaiter // closed when the stream transitions to the closed state
+
+	// owned by serverConn's serve loop:
+	bodyBytes     int64   // body bytes seen so far
+	declBodyBytes int64   // or -1 if undeclared
+	flow          flow    // limits writing from Handler to client
+	inflow        flow    // what the client is allowed to POST/etc to us
+	parent        *stream // or nil
+	weight        uint8
+	state         streamState
+	sentReset     bool // only true once detached from streams map
+	gotReset      bool // only true once detached from streams map
+}
+
+// Framer, CloseConn, Flush and HeaderEncoder expose the connection's
+// shared machinery (they satisfy the interface the frame writers use).
+func (sc *serverConn) Framer() *Framer  { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
+// HeaderEncoder returns the hpack encoder and the buffer it encodes into.
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+	return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+// state returns the stream's current state and, if the stream is
+// tracked in sc.streams, the *stream itself. Untracked stream IDs are
+// reported as closed or idle depending on sc.maxStreamID.
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+	sc.serveG.check()
+	// http://http2.github.io/http2-spec/#rfc.section.5.1
+	if st, ok := sc.streams[streamID]; ok {
+		return st.state, st
+	}
+	// "The first use of a new stream identifier implicitly closes all
+	// streams in the "idle" state that might have been initiated by
+	// that peer with a lower-valued stream identifier. For example, if
+	// a client sends a HEADERS frame on stream 7 without ever sending a
+	// frame on stream 5, then stream 5 transitions to the "closed"
+	// state when the first frame for stream 7 is sent or received."
+	if streamID <= sc.maxStreamID {
+		return stateClosed, nil
+	}
+	return stateIdle, nil
+}
+
+// vlogf logs only when the package-level VerboseLogs flag is set.
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+	if VerboseLogs {
+		sc.logf(format, args...)
+	}
+}
+
+// logf logs to the http.Server's ErrorLog if configured, otherwise to
+// the standard logger.
+func (sc *serverConn) logf(format string, args ...interface{}) {
+	if lg := sc.hs.ErrorLog; lg != nil {
+		lg.Printf(format, args...)
+	} else {
+		log.Printf(format, args...)
+	}
+}
+
+// condlogf logs conditionally on err: nothing if err is nil, verbose-only
+// for boring/expected errors (EOF, closed connection), normal otherwise.
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+	if err == nil {
+		return
+	}
+	str := err.Error()
+	if err == io.EOF || strings.Contains(str, "use of closed network connection") {
+		// Boring, expected errors.
+		sc.vlogf(format, args...)
+	} else {
+		sc.logf(format, args...)
+	}
+}
+
+// onNewHeaderField is the hpack.Decoder callback for each decoded
+// header field of the in-progress request (sc.req). It validates
+// pseudo-header ordering/uniqueness and accumulates regular headers.
+func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) {
+	sc.serveG.check()
+	sc.vlogf("got header field %+v", f)
+	switch {
+	case !validHeader(f.Name):
+		sc.req.invalidHeader = true
+	case strings.HasPrefix(f.Name, ":"):
+		// Pseudo-headers must precede all regular headers.
+		if sc.req.sawRegularHeader {
+			sc.logf("pseudo-header after regular header")
+			sc.req.invalidHeader = true
+			return
+		}
+		var dst *string
+		switch f.Name {
+		case ":method":
+			dst = &sc.req.method
+		case ":path":
+			dst = &sc.req.path
+		case ":scheme":
+			dst = &sc.req.scheme
+		case ":authority":
+			dst = &sc.req.authority
+		default:
+			// 8.1.2.1 Pseudo-Header Fields
+			// "Endpoints MUST treat a request or response
+			// that contains undefined or invalid
+			// pseudo-header fields as malformed (Section
+			// 8.1.2.6)."
+			sc.logf("invalid pseudo-header %q", f.Name)
+			sc.req.invalidHeader = true
+			return
+		}
+		if *dst != "" {
+			sc.logf("duplicate pseudo-header %q sent", f.Name)
+			sc.req.invalidHeader = true
+			return
+		}
+		*dst = f.Value
+	case f.Name == "cookie":
+		sc.req.sawRegularHeader = true
+		// Multiple cookie fields are concatenated with "; ", matching
+		// how net/http expects a single Cookie header.
+		if s, ok := sc.req.header["Cookie"]; ok && len(s) == 1 {
+			s[0] = s[0] + "; " + f.Value
+		} else {
+			sc.req.header.Add("Cookie", f.Value)
+		}
+	default:
+		sc.req.sawRegularHeader = true
+		sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value)
+	}
+}
+
+// canonicalHeader converts an HTTP/2 lowercase header name to Go's
+// canonical form, consulting a package-wide table of common names
+// first and then a lazily-built per-connection cache.
+func (sc *serverConn) canonicalHeader(v string) string {
+	sc.serveG.check()
+	cv, ok := commonCanonHeader[v]
+	if ok {
+		return cv
+	}
+	cv, ok = sc.canonHeader[v]
+	if ok {
+		return cv
+	}
+	if sc.canonHeader == nil {
+		sc.canonHeader = make(map[string]string)
+	}
+	cv = http.CanonicalHeaderKey(v)
+	sc.canonHeader[v] = cv
+	return cv
+}
+
+// readFrames is the loop that reads incoming frames.
+// It's run on its own goroutine.
+// On read error it delivers the error on readFrameErrCh and then
+// closes readFrameCh, which is serve's signal to fetch that error.
+func (sc *serverConn) readFrames() {
+	g := make(gate, 1)
+	for {
+		f, err := sc.framer.ReadFrame()
+		if err != nil {
+			sc.readFrameErrCh <- err
+			close(sc.readFrameCh)
+			return
+		}
+		sc.readFrameCh <- frameAndGate{f, g}
+		// We can't read another frame until this one is
+		// processed, as the ReadFrame interface doesn't copy
+		// memory.  The Frame accessor methods access the last
+		// frame's (shared) buffer. So we wait for the
+		// serve goroutine to tell us it's done:
+		g.Wait()
+	}
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+// If wm.done is set it must be buffered: the send below is
+// non-blocking and panics rather than stall the write path.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+	err := wm.write.writeFrame(sc)
+	if ch := wm.done; ch != nil {
+		select {
+		case ch <- err:
+		default:
+			panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+		}
+	}
+	sc.wroteFrameCh <- struct{}{} // tickle frame selection scheduler
+}
+
+// closeAllStreamsOnConnClose closes every remaining open stream when
+// the connection itself is going away (run from serve's defers).
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+	sc.serveG.check()
+	for _, st := range sc.streams {
+		sc.closeStream(st, errClientDisconnected)
+	}
+}
+
+// stopShutdownTimer stops the GOAWAY shutdown timer, if one was armed.
+func (sc *serverConn) stopShutdownTimer() {
+	sc.serveG.check()
+	if t := sc.shutdownTimer; t != nil {
+		t.Stop()
+	}
+}
+
+// notePanic reports a recovered panic to the test hook (if installed)
+// and re-panics when the hook asks for it. It is a deferred call from
+// serve, so recover() here sees serve's panics.
+func (sc *serverConn) notePanic() {
+	if testHookOnPanicMu != nil {
+		testHookOnPanicMu.Lock()
+		defer testHookOnPanicMu.Unlock()
+	}
+	if testHookOnPanic != nil {
+		if e := recover(); e != nil {
+			if testHookOnPanic(sc, e) {
+				panic(e)
+			}
+		}
+	}
+}
+
+// serve is the connection's main loop, run on the serve goroutine.
+// It sends the initial SETTINGS frame, checks the client preface,
+// starts the frame-reading goroutine, and then multiplexes frame
+// reads, write completions, handler body-reads and timers until the
+// connection ends.
+func (sc *serverConn) serve() {
+	sc.serveG.check()
+	defer sc.notePanic()
+	defer sc.conn.Close()
+	defer sc.closeAllStreamsOnConnClose()
+	defer sc.stopShutdownTimer()
+	defer close(sc.doneServing) // unblocks handlers trying to send
+
+	sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+
+	sc.writeFrame(frameWriteMsg{
+		write: writeSettings{
+			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+			{SettingMaxConcurrentStreams, sc.advMaxStreams},
+
+			// TODO: more actual settings, notably
+			// SettingInitialWindowSize, but then we also
+			// want to bump up the conn window size the
+			// same amount here right after the settings
+		},
+	})
+	sc.unackedSettings++
+
+	if err := sc.readPreface(); err != nil {
+		sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+		return
+	}
+
+	go sc.readFrames() // closed by defer sc.conn.Close above
+
+	settingsTimer := time.NewTimer(firstSettingsTimeout)
+	for {
+		select {
+		case wm := <-sc.wantWriteFrameCh:
+			sc.writeFrame(wm)
+		case <-sc.wroteFrameCh:
+			sc.writingFrame = false
+			sc.scheduleFrameWrite()
+		case fg, ok := <-sc.readFrameCh:
+			if !ok {
+				// readFrames hit an error; leave the channel nil so
+				// this case never fires again.
+				sc.readFrameCh = nil
+			}
+			if !sc.processFrameFromReader(fg, ok) {
+				return
+			}
+			if settingsTimer.C != nil {
+				// Got a frame, so the client is alive; disarm the
+				// SETTINGS timeout (a nil channel blocks forever).
+				settingsTimer.Stop()
+				settingsTimer.C = nil
+			}
+		case m := <-sc.bodyReadCh:
+			sc.noteBodyRead(m.st, m.n)
+		case <-settingsTimer.C:
+			sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+			return
+		case <-sc.shutdownTimerCh:
+			sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+			return
+		case fn := <-sc.testHookCh:
+			fn()
+		}
+	}
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+// The read runs in a helper goroutine so it can be bounded by
+// prefaceTimeout; errc is buffered so that goroutine never leaks.
+func (sc *serverConn) readPreface() error {
+	errc := make(chan error, 1)
+	go func() {
+		// Read the client preface
+		buf := make([]byte, len(ClientPreface))
+		if _, err := io.ReadFull(sc.conn, buf); err != nil {
+			errc <- err
+		} else if !bytes.Equal(buf, clientPreface) {
+			errc <- fmt.Errorf("bogus greeting %q", buf)
+		} else {
+			errc <- nil
+		}
+	}()
+	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+		return errors.New("timeout waiting for client preface")
+	case err := <-errc:
+		if err == nil {
+			sc.vlogf("client %v said hello", sc.conn.RemoteAddr())
+		}
+		return err
+	}
+}
+
+// writeDataFromHandler writes the data described in req to stream.id.
+//
+// The provided ch is used to avoid allocating new channels for each
+// write operation. It's expected that the caller reuses writeData and ch
+// over time.
+//
+// The flow control currently happens in the Handler where it waits
+// for 1 or more bytes to be available to then write here.  So at this
+// point we know that we have flow control. But this might have to
+// change when priority is implemented, so the serve goroutine knows
+// the total amount of bytes waiting to be sent and can can have more
+// scheduling decisions available.
+func (sc *serverConn) writeDataFromHandler(stream *stream, writeData *writeData, ch chan error) error {
+	sc.writeFrameFromHandler(frameWriteMsg{
+		write:  writeData,
+		stream: stream,
+		done:   ch,
+	})
+	// Wait for the write to complete, or for the connection or
+	// stream to be torn down while we wait.
+	select {
+	case err := <-ch:
+		return err
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-stream.cw:
+		return errStreamBroken
+	}
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) {
+	sc.serveG.checkNotOn() // NOT
+	select {
+	case sc.wantWriteFrameCh <- wm:
+	case <-sc.doneServing:
+		// Client has closed their connection to the server.
+	}
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+	sc.serveG.check()
+	sc.writeSched.add(wm)
+	sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wm.
+// Only one frame write may be in flight at a time (sc.writingFrame).
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+	sc.serveG.check()
+	if sc.writingFrame {
+		panic("internal error: can only be writing one frame at a time")
+	}
+
+	st := wm.stream
+	if st != nil {
+		switch st.state {
+		case stateHalfClosedLocal:
+			panic("internal error: attempt to send frame on half-closed-local stream")
+		case stateClosed:
+			if st.sentReset || st.gotReset {
+				// Skip this frame. But fake the frame write to reschedule:
+				sc.wroteFrameCh <- struct{}{}
+				return
+			}
+			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+		}
+	}
+
+	sc.writingFrame = true
+	sc.needsFrameFlush = true
+	if endsStream(wm.write) {
+		if st == nil {
+			panic("internal error: expecting non-nil stream")
+		}
+		switch st.state {
+		case stateOpen:
+			// Here we would go to stateHalfClosedLocal in
+			// theory, but since our handler is done and
+			// the net/http package provides no mechanism
+			// for finishing writing to a ResponseWriter
+			// while still reading data (see possible TODO
+			// at top of this file), we go into closed
+			// state here anyway, after telling the peer
+			// we're hanging up on them.
+			st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+			errCancel := StreamError{st.id, ErrCodeCancel}
+			sc.resetStream(errCancel)
+		case stateHalfClosedRemote:
+			sc.closeStream(st, nil)
+		}
+	}
+	go sc.writeFrameAsync(wm)
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+	sc.serveG.check()
+	if sc.writingFrame {
+		return
+	}
+	// Priority order: GOAWAY, SETTINGS ack, scheduled stream frames
+	// (suppressed once in GOAWAY), then a flush if one is pending.
+	if sc.needToSendGoAway {
+		sc.needToSendGoAway = false
+		sc.startFrameWrite(frameWriteMsg{
+			write: &writeGoAway{
+				maxStreamID: sc.maxStreamID,
+				code:        sc.goAwayCode,
+			},
+		})
+		return
+	}
+	if sc.needToSendSettingsAck {
+		sc.needToSendSettingsAck = false
+		sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+		return
+	}
+	if !sc.inGoAway {
+		if wm, ok := sc.writeSched.take(); ok {
+			sc.startFrameWrite(wm)
+			return
+		}
+	}
+	if sc.needsFrameFlush {
+		sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+		sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+		return
+	}
+}
+
+// goAway starts the connection shutdown: it schedules a GOAWAY frame
+// with the given code and arms a timer after which the connection is
+// closed (sooner for error shutdowns than for graceful ones).
+// Idempotent: subsequent calls after the first are no-ops.
+func (sc *serverConn) goAway(code ErrCode) {
+	sc.serveG.check()
+	if sc.inGoAway {
+		return
+	}
+	if code != ErrCodeNo {
+		sc.shutDownIn(250 * time.Millisecond)
+	} else {
+		// TODO: configurable
+		sc.shutDownIn(1 * time.Second)
+	}
+	sc.inGoAway = true
+	sc.needToSendGoAway = true
+	sc.goAwayCode = code
+	sc.scheduleFrameWrite()
+}
+
+// shutDownIn arms the shutdown timer; when it fires, serve's select
+// loop (watching shutdownTimerCh) tears the connection down.
+func (sc *serverConn) shutDownIn(d time.Duration) {
+	sc.serveG.check()
+	sc.shutdownTimer = time.NewTimer(d)
+	sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+// resetStream schedules an RST_STREAM frame for se.StreamID and, if
+// the stream is still tracked, marks it reset and closes it locally.
+func (sc *serverConn) resetStream(se StreamError) {
+	sc.serveG.check()
+	sc.writeFrame(frameWriteMsg{write: se})
+	if st, ok := sc.streams[se.StreamID]; ok {
+		st.sentReset = true
+		sc.closeStream(st, se)
+	}
+}
+
+// curHeaderStreamID returns the stream ID of the header block we're
+// currently in the middle of reading. If this returns non-zero, the
+// next frame must be a CONTINUATION with this stream id.
+func (sc *serverConn) curHeaderStreamID() uint32 {
+	sc.serveG.check()
+	st := sc.req.stream
+	if st == nil {
+		return 0
+	}
+	return st.id
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// fgValid is false when readFrameCh was closed, in which case the read
+// error is fetched from readFrameErrCh instead of a frame.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(fg frameAndGate, fgValid bool) bool {
+	sc.serveG.check()
+	var clientGone bool
+	var err error
+	if !fgValid {
+		err = <-sc.readFrameErrCh
+		if err == ErrFrameTooLarge {
+			sc.goAway(ErrCodeFrameSize)
+			return true // goAway will close the loop
+		}
+		clientGone = err == io.EOF || strings.Contains(err.Error(), "use of closed network connection")
+		if clientGone {
+			// TODO: could we also get into this state if
+			// the peer does a half close
+			// (e.g. CloseWrite) because they're done
+			// sending frames but they're still wanting
+			// our open replies?  Investigate.
+			// TODO: add CloseWrite to crypto/tls.Conn first
+			// so we have a way to test this? I suppose
+			// just for testing we could have a non-TLS mode.
+			return false
+		}
+	}
+
+	if fgValid {
+		f := fg.f
+		sc.vlogf("got %v: %#v", f.Header(), f)
+		err = sc.processFrame(f)
+		fg.g.Done() // unblock the readFrames goroutine
+		if err == nil {
+			return true
+		}
+	}
+
+	// Classify the error: stream-level errors reset one stream;
+	// connection-level errors start a GOAWAY; anything else drops
+	// the connection immediately.
+	switch ev := err.(type) {
+	case StreamError:
+		sc.resetStream(ev)
+		return true
+	case goAwayFlowError:
+		sc.goAway(ErrCodeFlowControl)
+		return true
+	case ConnectionError:
+		sc.logf("%v: %v", sc.conn.RemoteAddr(), ev)
+		sc.goAway(ErrCode(ev))
+		return true // goAway will handle shutdown
+	default:
+		if !fgValid {
+			sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+		} else {
+			sc.logf("disconnection due to other error: %v", err)
+		}
+	}
+	return false
+}
+
+// processFrame enforces connection-wide frame-ordering rules (first
+// frame must be SETTINGS; an open header block only accepts its own
+// CONTINUATIONs) and then dispatches f to the per-type handler.
+func (sc *serverConn) processFrame(f Frame) error {
+	sc.serveG.check()
+
+	// First frame received must be SETTINGS.
+	if !sc.sawFirstSettings {
+		if _, ok := f.(*SettingsFrame); !ok {
+			return ConnectionError(ErrCodeProtocol)
+		}
+		sc.sawFirstSettings = true
+	}
+
+	if s := sc.curHeaderStreamID(); s != 0 {
+		if cf, ok := f.(*ContinuationFrame); !ok {
+			return ConnectionError(ErrCodeProtocol)
+		} else if cf.Header().StreamID != s {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	}
+
+	switch f := f.(type) {
+	case *SettingsFrame:
+		return sc.processSettings(f)
+	case *HeadersFrame:
+		return sc.processHeaders(f)
+	case *ContinuationFrame:
+		return sc.processContinuation(f)
+	case *WindowUpdateFrame:
+		return sc.processWindowUpdate(f)
+	case *PingFrame:
+		return sc.processPing(f)
+	case *DataFrame:
+		return sc.processData(f)
+	case *RSTStreamFrame:
+		return sc.processResetStream(f)
+	case *PriorityFrame:
+		return sc.processPriority(f)
+	case *PushPromiseFrame:
+		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+		return ConnectionError(ErrCodeProtocol)
+	default:
+		// Unknown frame types are ignored per the spec.
+		log.Printf("Ignoring frame: %v", f.Header())
+		return nil
+	}
+}
+
+// processPing handles an incoming PING frame: PING ACKs from the peer
+// are ignored, PINGs on a non-zero stream are a protocol error, and
+// everything else gets a PING ACK scheduled echoing the same payload.
+func (sc *serverConn) processPing(f *PingFrame) error {
+	sc.serveG.check()
+	// Bug fix: the ACK bit was previously tested with FlagSettingsAck,
+	// which only worked because FlagSettingsAck and FlagPingAck share
+	// the value 0x1. Use the PING-specific flag constant.
+	if f.Flags.Has(FlagPingAck) {
+		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
+		// containing this flag."
+		return nil
+	}
+	if f.StreamID != 0 {
+		// "PING frames are not associated with any individual
+		// stream. If a PING frame is received with a stream
+		// identifier field value other than 0x0, the recipient MUST
+		// respond with a connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR."
+		return ConnectionError(ErrCodeProtocol)
+	}
+	sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+	return nil
+}
+
+// processWindowUpdate applies a WINDOW_UPDATE to either one stream's
+// outbound flow-control window (StreamID != 0) or the connection-wide
+// window, then re-runs the frame scheduler since writes may now fit.
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+	sc.serveG.check()
+	switch {
+	case f.StreamID != 0: // stream-level flow control
+		st := sc.streams[f.StreamID]
+		if st == nil {
+			// "WINDOW_UPDATE can be sent by a peer that has sent a
+			// frame bearing the END_STREAM flag. This means that a
+			// receiver could receive a WINDOW_UPDATE frame on a "half
+			// closed (remote)" or "closed" stream. A receiver MUST
+			// NOT treat this as an error, see Section 5.1."
+			return nil
+		}
+		if !st.flow.add(int32(f.Increment)) {
+			return StreamError{f.StreamID, ErrCodeFlowControl}
+		}
+	default: // connection-level flow control
+		if !sc.flow.add(int32(f.Increment)) {
+			return goAwayFlowError{}
+		}
+	}
+	sc.scheduleFrameWrite()
+	return nil
+}
+
+// processResetStream handles an incoming RST_STREAM: resets on idle
+// streams are a connection error; on known streams we mark the reset
+// and close the stream locally.
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+	sc.serveG.check()
+
+	state, st := sc.state(f.StreamID)
+	if state == stateIdle {
+		// 6.4 "RST_STREAM frames MUST NOT be sent for a
+		// stream in the "idle" state. If a RST_STREAM frame
+		// identifying an idle stream is received, the
+		// recipient MUST treat this as a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR."
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if st != nil {
+		st.gotReset = true
+		sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+	}
+	return nil
+}
+
+// closeStream transitions st to the closed state and releases its
+// resources: removes it from the streams map, closes its body pipe
+// with err, fires its closeWaiter, and drops its queued writes.
+func (sc *serverConn) closeStream(st *stream, err error) {
+	sc.serveG.check()
+	if st.state == stateIdle || st.state == stateClosed {
+		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+	}
+	st.state = stateClosed
+	sc.curOpenStreams--
+	delete(sc.streams, st.id)
+	if p := st.body; p != nil {
+		p.Close(err)
+	}
+	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+	sc.writeSched.forgetStream(st.id)
+}
+
+// processSettings handles a SETTINGS frame: ACKs decrement our
+// unacked-settings counter (an unsolicited ACK is a protocol error);
+// non-ACK frames have each setting applied and then get ACKed.
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+	sc.serveG.check()
+	if f.IsAck() {
+		sc.unackedSettings--
+		if sc.unackedSettings < 0 {
+			// Why is the peer ACKing settings we never sent?
+			// The spec doesn't mention this case, but
+			// hang up on them anyway.
+			return ConnectionError(ErrCodeProtocol)
+		}
+		return nil
+	}
+	if err := f.ForeachSetting(sc.processSetting); err != nil {
+		return err
+	}
+	sc.needToSendSettingsAck = true
+	sc.scheduleFrameWrite()
+	return nil
+}
+
+// processSetting validates and applies a single setting from a
+// SETTINGS frame to the connection's state.
+func (sc *serverConn) processSetting(s Setting) error {
+	sc.serveG.check()
+	if err := s.Valid(); err != nil {
+		return err
+	}
+	sc.vlogf("processing setting %v", s)
+	switch s.ID {
+	case SettingHeaderTableSize:
+		sc.headerTableSize = s.Val
+		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+	case SettingEnablePush:
+		sc.pushEnabled = s.Val != 0
+	case SettingMaxConcurrentStreams:
+		sc.clientMaxStreams = s.Val
+	case SettingInitialWindowSize:
+		return sc.processSettingInitialWindowSize(s.Val)
+	case SettingMaxFrameSize:
+		sc.writeSched.maxFrameSize = s.Val
+	case SettingMaxHeaderListSize:
+		sc.maxHeaderListSize = s.Val
+	default:
+		// Unknown setting: "An endpoint that receives a SETTINGS
+		// frame with any unknown or unsupported identifier MUST
+		// ignore that setting."
+	}
+	return nil
+}
+
+// processSettingInitialWindowSize applies a new
+// SETTINGS_INITIAL_WINDOW_SIZE by shifting every existing stream's
+// flow-control window by the delta (which may be negative).
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+	sc.serveG.check()
+	// Note: val already validated to be within range by
+	// processSetting's Valid call.
+
+	// "A SETTINGS frame can alter the initial flow control window
+	// size for all current streams. When the value of
+	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+	// adjust the size of all stream flow control windows that it
+	// maintains by the difference between the new value and the
+	// old value."
+	old := sc.initialWindowSize
+	sc.initialWindowSize = int32(val)
+	growth := sc.initialWindowSize - old // may be negative
+	for _, st := range sc.streams {
+		if !st.flow.add(growth) {
+			// 6.9.2 Initial Flow Control Window Size
+			// "An endpoint MUST treat a change to
+			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+			// control window to exceed the maximum size as a
+			// connection error (Section 5.4.1) of type
+			// FLOW_CONTROL_ERROR."
+			return ConnectionError(ErrCodeFlowControl)
+		}
+	}
+	return nil
+}
+
+// processData delivers a DATA frame's payload to the stream's body
+// pipe, enforcing stream state, declared Content-Length, and inbound
+// flow control; END_STREAM transitions the stream to half-closed (remote).
+func (sc *serverConn) processData(f *DataFrame) error {
+	sc.serveG.check()
+	// "If a DATA frame is received whose stream is not in "open"
+	// or "half closed (local)" state, the recipient MUST respond
+	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+	id := f.Header().StreamID
+	st, ok := sc.streams[id]
+	if !ok || st.state != stateOpen {
+		// This includes sending a RST_STREAM if the stream is
+		// in stateHalfClosedLocal (which currently means that
+		// the http.Handler returned, so it's done reading &
+		// done writing). Try to stop the client from sending
+		// more DATA.
+		return StreamError{id, ErrCodeStreamClosed}
+	}
+	if st.body == nil {
+		panic("internal error: should have a body in this state")
+	}
+	data := f.Data()
+
+	// Sender sending more than they'd declared?
+	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+		st.body.Close(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+		return StreamError{id, ErrCodeStreamClosed}
+	}
+	if len(data) > 0 {
+		// Check whether the client has flow control quota.
+		if int(st.inflow.available()) < len(data) {
+			return StreamError{id, ErrCodeFlowControl}
+		}
+		st.inflow.take(int32(len(data)))
+		wrote, err := st.body.Write(data)
+		if err != nil {
+			return StreamError{id, ErrCodeStreamClosed}
+		}
+		if wrote != len(data) {
+			panic("internal error: bad Writer")
+		}
+		st.bodyBytes += int64(len(data))
+	}
+	if f.StreamEnded() {
+		// Finish the body: error if the declared length wasn't met,
+		// otherwise a clean EOF.
+		if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+			st.body.Close(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+				st.declBodyBytes, st.bodyBytes))
+		} else {
+			st.body.Close(io.EOF)
+		}
+		st.state = stateHalfClosedRemote
+	}
+	return nil
+}
+
+// processHeaders opens a new stream for an incoming HEADERS frame:
+// it validates the stream ID, sets up the stream's flow-control
+// windows and priority, and begins decoding the header block.
+func (sc *serverConn) processHeaders(f *HeadersFrame) error {
+	sc.serveG.check()
+	id := f.Header().StreamID
+	if sc.inGoAway {
+		// Ignore.
+		return nil
+	}
+	// http://http2.github.io/http2-spec/#rfc.section.5.1.1
+	if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil {
+		// Streams initiated by a client MUST use odd-numbered
+		// stream identifiers. [...] The identifier of a newly
+		// established stream MUST be numerically greater than all
+		// streams that the initiating endpoint has opened or
+		// reserved. [...]  An endpoint that receives an unexpected
+		// stream identifier MUST respond with a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	// NOTE: always true here, since id <= sc.maxStreamID was
+	// rejected above; kept for clarity.
+	if id > sc.maxStreamID {
+		sc.maxStreamID = id
+	}
+	st := &stream{
+		id:    id,
+		state: stateOpen,
+	}
+	if f.StreamEnded() {
+		st.state = stateHalfClosedRemote
+	}
+	st.cw.Init()
+
+	st.flow.conn = &sc.flow // link to conn-level counter
+	st.flow.add(sc.initialWindowSize)
+	st.inflow.conn = &sc.inflow      // link to conn-level counter
+	st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+	sc.streams[id] = st
+	if f.HasPriority() {
+		adjustStreamPriority(sc.streams, st.id, f.Priority)
+	}
+	sc.curOpenStreams++
+	sc.req = requestParam{
+		stream: st,
+		header: make(http.Header),
+	}
+	return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+// processContinuation feeds a CONTINUATION frame's fragment into the
+// in-progress header block; it must match the stream currently being
+// decoded or the connection is torn down.
+func (sc *serverConn) processContinuation(f *ContinuationFrame) error {
+	sc.serveG.check()
+	st := sc.streams[f.Header().StreamID]
+	if st == nil || sc.curHeaderStreamID() != st.id {
+		return ConnectionError(ErrCodeProtocol)
+	}
+	return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
+}
+
+// processHeaderBlockFragment writes frag into the hpack decoder
+// (which fires onNewHeaderField per field). When end is true, the
+// header block is complete: the concurrent-stream limit is enforced
+// and the request's handler goroutine is started.
+func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {
+	sc.serveG.check()
+	if _, err := sc.hpackDecoder.Write(frag); err != nil {
+		// TODO: convert to stream error I assume?
+		return err
+	}
+	if !end {
+		return nil
+	}
+	if err := sc.hpackDecoder.Close(); err != nil {
+		// TODO: convert to stream error I assume?
+		return err
+	}
+	defer sc.resetPendingRequest()
+	if sc.curOpenStreams > sc.advMaxStreams {
+		// "Endpoints MUST NOT exceed the limit set by their
+		// peer. An endpoint that receives a HEADERS frame
+		// that causes their advertised concurrent stream
+		// limit to be exceeded MUST treat this as a stream
+		// error (Section 5.4.2) of type PROTOCOL_ERROR or
+		// REFUSED_STREAM."
+		if sc.unackedSettings == 0 {
+			// They should know better.
+			return StreamError{st.id, ErrCodeProtocol}
+		}
+		// Assume it's a network race, where they just haven't
+		// received our last SETTINGS update. But actually
+		// this can't happen yet, because we don't yet provide
+		// a way for users to adjust server parameters at
+		// runtime.
+		return StreamError{st.id, ErrCodeRefusedStream}
+	}
+
+	rw, req, err := sc.newWriterAndRequest()
+	if err != nil {
+		return err
+	}
+	st.body = req.Body.(*requestBody).pipe // may be nil
+	st.declBodyBytes = req.ContentLength
+	go sc.runHandler(rw, req)
+	return nil
+}
+
+// processPriority applies a PRIORITY frame to the dependency tree.
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+	adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+	return nil
+}
+
+// adjustStreamPriority reparents streamID in the dependency tree per
+// priority, implementing the RFC 7540 section 5.3.3 rules for cycles
+// and exclusive dependencies. Unknown stream IDs are ignored.
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+	st, ok := streams[streamID]
+	if !ok {
+		// TODO: not quite correct (this streamID might
+		// already exist in the dep tree, but be closed), but
+		// close enough for now.
+		return
+	}
+	st.weight = priority.Weight
+	parent := streams[priority.StreamDep] // might be nil
+	if parent == st {
+		// if client tries to set this stream to be the parent of itself
+		// ignore and keep going
+		return
+	}
+
+	// section 5.3.3: If a stream is made dependent on one of its
+	// own dependencies, the formerly dependent stream is first
+	// moved to be dependent on the reprioritized stream's previous
+	// parent. The moved dependency retains its weight.
+	for piter := parent; piter != nil; piter = piter.parent {
+		if piter == st {
+			parent.parent = st.parent
+			break
+		}
+	}
+	st.parent = parent
+	if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+		// Exclusive: all of the new parent's other children become
+		// children of st.
+		for _, openStream := range streams {
+			if openStream != st && openStream.parent == st.parent {
+				openStream.parent = st
+			}
+		}
+	}
+}
+
// resetPendingRequest zeros out all state related to a HEADERS frame
// and its zero or more CONTINUATION frames sent to start a new
// request. Must run on the serve goroutine (sc.req is unsynchronized).
func (sc *serverConn) resetPendingRequest() {
	sc.serveG.check()
	sc.req = requestParam{}
}
+
// newWriterAndRequest builds the http.Request and responseWriter for
// the request whose header block has been accumulated in sc.req.
// It returns a StreamError of type PROTOCOL_ERROR for malformed
// requests (RFC 7540 section 8.1.2.6). Must run on the serve goroutine.
func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) {
	sc.serveG.check()
	rp := &sc.req
	if rp.invalidHeader || rp.method == "" || rp.path == "" ||
		(rp.scheme != "https" && rp.scheme != "http") {
		// See 8.1.2.6 Malformed Requests and Responses:
		//
		// Malformed requests or responses that are detected
		// MUST be treated as a stream error (Section 5.4.2)
		// of type PROTOCOL_ERROR."
		//
		// 8.1.2.3 Request Pseudo-Header Fields
		// "All HTTP/2 requests MUST include exactly one valid
		// value for the :method, :scheme, and :path
		// pseudo-header fields"
		return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
	}
	var tlsState *tls.ConnectionState // nil if not scheme https
	if rp.scheme == "https" {
		tlsState = sc.tlsState
	}
	// :authority wins; fall back to a regular Host header.
	authority := rp.authority
	if authority == "" {
		authority = rp.header.Get("Host")
	}
	needsContinue := rp.header.Get("Expect") == "100-continue"
	if needsContinue {
		rp.header.Del("Expect")
	}
	// The stream is still open iff the HEADERS frame lacked END_STREAM,
	// i.e. DATA frames (a request body) are expected.
	bodyOpen := rp.stream.state == stateOpen
	body := &requestBody{
		conn:          sc,
		stream:        rp.stream,
		needsContinue: needsContinue,
	}
	// TODO: handle asterisk '*' requests + test
	url, err := url.ParseRequestURI(rp.path)
	if err != nil {
		// TODO: find the right error code?
		return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
	}
	req := &http.Request{
		Method:     rp.method,
		URL:        url,
		RemoteAddr: sc.remoteAddrStr,
		Header:     rp.header,
		RequestURI: rp.path,
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		ProtoMinor: 0,
		TLS:        tlsState,
		Host:       authority,
		Body:       body,
	}
	if bodyOpen {
		body.pipe = &pipe{
			b: buffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX
		}
		body.pipe.c.L = &body.pipe.m

		if vv, ok := rp.header["Content-Length"]; ok {
			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
		} else {
			req.ContentLength = -1
		}
	}

	// Reuse a pooled responseWriterState; only the bufio.Writer's
	// backing storage survives the zeroing below.
	rws := responseWriterStatePool.Get().(*responseWriterState)
	bwSave := rws.bw
	*rws = responseWriterState{} // zero all the fields
	rws.conn = sc
	rws.bw = bwSave
	rws.bw.Reset(chunkWriter{rws})
	rws.stream = rp.stream
	rws.req = req
	rws.body = body
	rws.frameWriteCh = make(chan error, 1)

	rw := &responseWriter{rws: rws}
	return rw, req, nil
}
+
// runHandler invokes the user's http.Handler for one request.
// Run on its own goroutine; handlerDone is always signaled, even if
// the handler returns early.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request) {
	defer rw.handlerDone()
	// TODO: catch panics like net/http.Server
	sc.handler.ServeHTTP(rw, req)
}
+
// writeHeaders schedules a HEADERS frame for st.
// Called from handler goroutines; headerData.h may be nil.
func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders, tempCh chan error) {
	sc.serveG.checkNotOn() // NOT on
	var errc chan error
	if headerData.h != nil {
		// There's a header map, which we don't own, so we have to
		// block on waiting for this frame to be written, so that an
		// http.Flush mid-handler writes out the correct value of keys
		// before a handler later potentially mutates the map.
		errc = tempCh
	}
	sc.writeFrameFromHandler(frameWriteMsg{
		write:  headerData,
		stream: st,
		done:   errc,
	})
	if errc != nil {
		select {
		case <-errc:
			// Ignore. Just for synchronization.
			// Any error will be handled in the writing goroutine.
		case <-sc.doneServing:
			// Client has closed the connection.
		}
	}
}
+
// write100ContinueHeaders schedules an informational 100 Continue
// response header frame for st. Called from handler goroutines.
func (sc *serverConn) write100ContinueHeaders(st *stream) {
	sc.writeFrameFromHandler(frameWriteMsg{
		write:  write100ContinueHeadersFrame{st.id},
		stream: st,
	})
}
+
// A bodyReadMsg tells the server loop that the http.Handler read n
// bytes of the DATA from the client on the given stream.
type bodyReadMsg struct {
	st *stream // stream whose body was read
	n  int     // number of bytes consumed by the handler
}
+
// noteBodyReadFromHandler is called from handler goroutines.
// It notes that the handler for the given stream ID read n bytes of
// its body and hands the accounting to the serve goroutine, which
// schedules flow-control tokens (WINDOW_UPDATE) to be sent.
func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
	sc.serveG.checkNotOn() // NOT on
	sc.bodyReadCh <- bodyReadMsg{st, n}
}
+
// noteBodyRead returns n bytes of flow-control credit to the client:
// always at the connection level, and also at the stream level unless
// the stream has already been closed by the peer. Serve goroutine only.
func (sc *serverConn) noteBodyRead(st *stream, n int) {
	sc.serveG.check()
	sc.sendWindowUpdate(nil, n) // conn-level
	if st.state != stateHalfClosedRemote && st.state != stateClosed {
		// Don't send this WINDOW_UPDATE if the stream is closed
		// remotely.
		sc.sendWindowUpdate(st, n)
	}
}
+
// sendWindowUpdate sends n bytes of flow-control credit, splitting the
// update into multiple frames if n exceeds the per-frame maximum.
// st may be nil for conn-level.
func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
	sc.serveG.check()
	// "The legal range for the increment to the flow control
	// window is 1 to 2^31-1 (2,147,483,647) octets."
	// A Go Read call on 64-bit machines could in theory read
	// a larger Read than this. Very unlikely, but we handle it here
	// rather than elsewhere for now.
	const maxUint31 = 1<<31 - 1
	for n >= maxUint31 {
		sc.sendWindowUpdate32(st, maxUint31)
		n -= maxUint31
	}
	sc.sendWindowUpdate32(st, int32(n))
}
+
// sendWindowUpdate32 queues a single WINDOW_UPDATE frame of n bytes
// and credits the matching inflow accounting. Panics on a negative n
// or on inflow overflow (both indicate internal bugs, not peer input).
// st may be nil for conn-level.
func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
	sc.serveG.check()
	if n == 0 {
		return
	}
	if n < 0 {
		panic("negative update")
	}
	var streamID uint32
	if st != nil {
		streamID = st.id
	}
	sc.writeFrame(frameWriteMsg{
		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
		stream: st,
	})
	// Mirror the frame in our own flow-control bookkeeping.
	var ok bool
	if st == nil {
		ok = sc.inflow.add(n)
	} else {
		ok = st.inflow.add(n)
	}
	if !ok {
		panic("internal error; sent too many window updates without decrements?")
	}
}
+
// requestBody is the Handler's Request.Body implementation. Bytes
// arrive from DATA frames via the stream's pipe; each Read reports
// consumption back to the serve loop for flow control.
type requestBody struct {
	stream        *stream
	conn          *serverConn
	closed        bool
	pipe          *pipe // non-nil if we have a HTTP entity message body
	needsContinue bool  // need to send a 100-continue
}
+
+func (b *requestBody) Close() error {
+	if b.pipe != nil {
+		b.pipe.Close(errClosedBody)
+	}
+	b.closed = true
+	return nil
+}
+
// Read implements io.Reader. The first Read triggers any deferred
// 100-continue response; a nil pipe means the request had no entity
// body, so Read reports immediate EOF. Bytes actually read are
// reported to the serve loop so flow-control credit can be returned.
func (b *requestBody) Read(p []byte) (n int, err error) {
	if b.needsContinue {
		b.needsContinue = false
		b.conn.write100ContinueHeaders(b.stream)
	}
	if b.pipe == nil {
		return 0, io.EOF
	}
	n, err = b.pipe.Read(p)
	if n > 0 {
		b.conn.noteBodyReadFromHandler(b.stream, n)
	}
	return
}
+
// responseWriter is the http.ResponseWriter implementation.  It's
// intentionally small (1 pointer wide) to minimize garbage.  The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
// and buffers are reused between multiple requests.
type responseWriter struct {
	rws *responseWriterState // nil once the handler has returned
}
+
// Optional http.ResponseWriter interfaces implemented.
// These compile-time assertions fail the build if *responseWriter
// stops satisfying any of them.
var (
	_ http.CloseNotifier = (*responseWriter)(nil)
	_ http.Flusher       = (*responseWriter)(nil)
	_ stringWriter       = (*responseWriter)(nil)
)
+
// responseWriterState holds the per-request state behind a
// responseWriter. Instances are pooled (responseWriterStatePool) and
// reused across requests; see newWriterAndRequest and handlerDone.
type responseWriterState struct {
	// immutable within a request:
	stream *stream
	req    *http.Request
	body   *requestBody // to close at end of request, if DATA frames didn't
	conn   *serverConn

	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}

	// mutated by http.Handler goroutine:
	handlerHeader http.Header // nil until called
	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
	status        int         // status code passed to WriteHeader
	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
	sentHeader    bool        // have we sent the header frame?
	handlerDone   bool        // handler has finished
	curWrite      writeData   // scratch frame reused for each DATA write
	frameWriteCh  chan error  // re-used whenever we need to block on a frame being written

	closeNotifierMu sync.Mutex // guards closeNotifierCh
	closeNotifierCh chan bool  // nil until first used
}
+
// chunkWriter adapts a responseWriterState to io.Writer so it can sit
// beneath the response's bufio.Writer; all writes funnel to writeChunk.
type chunkWriter struct{ rws *responseWriterState }

func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
// writeChunk writes chunks from the bufio.Writer. But because
// bufio.Writer may bypass its chunking, sometimes p may be
// arbitrarily large.
//
// writeChunk is also responsible (on the first chunk) for sending the
// HEADER response, including implicit Content-Type sniffing and, when
// the handler has already finished, an implicit Content-Length.
func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
	if !rws.wroteHeader {
		rws.writeHeader(200)
	}
	if !rws.sentHeader {
		rws.sentHeader = true
		var ctype, clen string // implicit ones, if we can calculate it
		if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" {
			clen = strconv.Itoa(len(p))
		}
		if rws.snapHeader.Get("Content-Type") == "" {
			ctype = http.DetectContentType(p)
		}
		// If the handler is done and wrote nothing, END_STREAM can ride
		// on the HEADERS frame and no DATA frame is needed.
		endStream := rws.handlerDone && len(p) == 0
		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
			streamID:      rws.stream.id,
			httpResCode:   rws.status,
			h:             rws.snapHeader,
			endStream:     endStream,
			contentType:   ctype,
			contentLength: clen,
		}, rws.frameWriteCh)
		if endStream {
			return 0, nil
		}
	}
	if len(p) == 0 && !rws.handlerDone {
		return 0, nil
	}
	// Reuse the scratch writeData frame to avoid a per-write allocation.
	curWrite := &rws.curWrite
	curWrite.streamID = rws.stream.id
	curWrite.p = p
	curWrite.endStream = rws.handlerDone
	if err := rws.conn.writeDataFromHandler(rws.stream, curWrite, rws.frameWriteCh); err != nil {
		return 0, err
	}
	return len(p), nil
}
+
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk with zero bytes, so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
+		rws.writeChunk(nil)
+	}
+}
+
// CloseNotify implements http.CloseNotifier. The returned channel
// receives a single value when the stream is closed. The channel is
// created lazily, at most once per request, guarded by closeNotifierMu.
func (w *responseWriter) CloseNotify() <-chan bool {
	rws := w.rws
	if rws == nil {
		panic("CloseNotify called after Handler finished")
	}
	rws.closeNotifierMu.Lock()
	ch := rws.closeNotifierCh
	if ch == nil {
		ch = make(chan bool, 1) // buffered so the waiter goroutine never blocks
		rws.closeNotifierCh = ch
		go func() {
			rws.stream.cw.Wait() // wait for close
			ch <- true
		}()
	}
	rws.closeNotifierMu.Unlock()
	return ch
}
+
// Header implements http.ResponseWriter, lazily allocating the
// handler's header map. It panics if called after the handler returned.
func (w *responseWriter) Header() http.Header {
	rws := w.rws
	if rws == nil {
		panic("Header called after Handler finished")
	}
	if rws.handlerHeader == nil {
		rws.handlerHeader = make(http.Header)
	}
	return rws.handlerHeader
}
+
// WriteHeader implements http.ResponseWriter. Only the first call has
// any effect (see responseWriterState.writeHeader). It panics if
// called after the handler returned.
func (w *responseWriter) WriteHeader(code int) {
	rws := w.rws
	if rws == nil {
		panic("WriteHeader called after Handler finished")
	}
	rws.writeHeader(code)
}
+
+func (rws *responseWriterState) writeHeader(code int) {
+	if !rws.wroteHeader {
+		rws.wroteHeader = true
+		rws.status = code
+		if len(rws.handlerHeader) > 0 {
+			rws.snapHeader = cloneHeader(rws.handlerHeader)
+		}
+	}
+}
+
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
// The Life Of A Write is like this:
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
//      (most of the magic; see comment there)
func (w *responseWriter) Write(p []byte) (n int, err error) {
	return w.write(len(p), p, "")
}
+
// WriteString implements stringWriter, avoiding a []byte conversion.
func (w *responseWriter) WriteString(s string) (n int, err error) {
	return w.write(len(s), nil, s)
}
+
+// either dataB or dataS is non-zero.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+	rws := w.rws
+	if rws == nil {
+		panic("Write called after Handler finished")
+	}
+	if !rws.wroteHeader {
+		w.WriteHeader(200)
+	}
+	if dataB != nil {
+		return rws.bw.Write(dataB)
+	} else {
+		return rws.bw.WriteString(dataS)
+	}
+}
+
// handlerDone runs after the handler returns: it flushes any pending
// output (sending headers and the final END_STREAM frame), detaches
// the state so further ResponseWriter calls panic, and returns the
// large responseWriterState to the pool for reuse.
func (w *responseWriter) handlerDone() {
	rws := w.rws
	if rws == nil {
		panic("handlerDone called twice")
	}
	rws.handlerDone = true
	w.Flush()
	w.rws = nil
	responseWriterStatePool.Put(rws)
}

+ 2252 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/server_test.go

@@ -0,0 +1,2252 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bradfitz/http2/hpack"
+)
+
+var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered")
+
+type serverTester struct {
+	cc        net.Conn // client conn
+	t         testing.TB
+	ts        *httptest.Server
+	fr        *Framer
+	logBuf    *bytes.Buffer
+	logFilter []string   // substrings to filter out
+	scMu      sync.Mutex // guards sc
+	sc        *serverConn
+
+	// writing headers:
+	headerBuf bytes.Buffer
+	hpackEnc  *hpack.Encoder
+
+	// reading frames:
+	frc       chan Frame
+	frErrc    chan error
+	readTimer *time.Timer
+}
+
+func init() {
+	testHookOnPanicMu = new(sync.Mutex)
+}
+
+func resetHooks() {
+	testHookOnPanicMu.Lock()
+	testHookOnPanic = nil
+	testHookOnPanicMu.Unlock()
+}
+
+type serverTesterOpt string
+
+var optOnlyServer = serverTesterOpt("only_server")
+
+func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
+	resetHooks()
+
+	logBuf := new(bytes.Buffer)
+	ts := httptest.NewUnstartedServer(handler)
+
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: true,
+		// The h2-14 is temporary, until curl is updated. (as used by unit tests
+		// in Docker)
+		NextProtos: []string{NextProtoTLS, "h2-14"},
+	}
+
+	onlyServer := false
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case func(*tls.Config):
+			v(tlsConfig)
+		case func(*httptest.Server):
+			v(ts)
+		case serverTesterOpt:
+			onlyServer = (v == optOnlyServer)
+		default:
+			t.Fatalf("unknown newServerTester option type %T", v)
+		}
+	}
+
+	ConfigureServer(ts.Config, &Server{})
+
+	st := &serverTester{
+		t:      t,
+		ts:     ts,
+		logBuf: logBuf,
+		frc:    make(chan Frame, 1),
+		frErrc: make(chan error, 1),
+	}
+	st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
+
+	var stderrv io.Writer = ioutil.Discard
+	if *stderrVerbose {
+		stderrv = os.Stderr
+	}
+
+	ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
+	ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv, twriter{t: t, st: st}, logBuf), "", log.LstdFlags)
+	ts.StartTLS()
+
+	if VerboseLogs {
+		t.Logf("Running test server at: %s", ts.URL)
+	}
+	testHookGetServerConn = func(v *serverConn) {
+		st.scMu.Lock()
+		defer st.scMu.Unlock()
+		st.sc = v
+		st.sc.testHookCh = make(chan func())
+	}
+	log.SetOutput(io.MultiWriter(stderrv, twriter{t: t, st: st}))
+	if !onlyServer {
+		cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
+		if err != nil {
+			t.Fatal(err)
+		}
+		st.cc = cc
+		st.fr = NewFramer(cc, cc)
+	}
+
+	return st
+}
+
+func (st *serverTester) closeConn() {
+	st.scMu.Lock()
+	defer st.scMu.Unlock()
+	st.sc.conn.Close()
+}
+
+func (st *serverTester) addLogFilter(phrase string) {
+	st.logFilter = append(st.logFilter, phrase)
+}
+
+func (st *serverTester) stream(id uint32) *stream {
+	ch := make(chan *stream, 1)
+	st.sc.testHookCh <- func() {
+		ch <- st.sc.streams[id]
+	}
+	return <-ch
+}
+
+func (st *serverTester) streamState(id uint32) streamState {
+	ch := make(chan streamState, 1)
+	st.sc.testHookCh <- func() {
+		state, _ := st.sc.state(id)
+		ch <- state
+	}
+	return <-ch
+}
+
+func (st *serverTester) Close() {
+	st.ts.Close()
+	if st.cc != nil {
+		st.cc.Close()
+	}
+	log.SetOutput(os.Stderr)
+}
+
+// greet initiates the client's HTTP/2 connection into a state where
+// frames may be sent.
+func (st *serverTester) greet() {
+	st.writePreface()
+	st.writeInitialSettings()
+	st.wantSettings()
+	st.writeSettingsAck()
+	st.wantSettingsAck()
+}
+
+func (st *serverTester) writePreface() {
+	n, err := st.cc.Write(clientPreface)
+	if err != nil {
+		st.t.Fatalf("Error writing client preface: %v", err)
+	}
+	if n != len(clientPreface) {
+		st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
+	}
+}
+
+func (st *serverTester) writeInitialSettings() {
+	if err := st.fr.WriteSettings(); err != nil {
+		st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
+	}
+}
+
+func (st *serverTester) writeSettingsAck() {
+	if err := st.fr.WriteSettingsAck(); err != nil {
+		st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
+	}
+}
+
+func (st *serverTester) writeHeaders(p HeadersFrameParam) {
+	if err := st.fr.WriteHeaders(p); err != nil {
+		st.t.Fatalf("Error writing HEADERS: %v", err)
+	}
+}
+
+func (st *serverTester) encodeHeaderField(k, v string) {
+	err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+	if err != nil {
+		st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
+	}
+}
+
+// encodeHeader encodes headers and returns their HPACK bytes. headers
+// must contain an even number of key/value pairs.  There may be
+// multiple pairs for keys (e.g. "cookie").  The :method, :path, and
+// :scheme headers default to GET, / and https.
+func (st *serverTester) encodeHeader(headers ...string) []byte {
+	if len(headers)%2 == 1 {
+		panic("odd number of kv args")
+	}
+
+	st.headerBuf.Reset()
+
+	if len(headers) == 0 {
+		// Fast path, mostly for benchmarks, so test code doesn't pollute
+		// profiles when we're looking to improve server allocations.
+		st.encodeHeaderField(":method", "GET")
+		st.encodeHeaderField(":path", "/")
+		st.encodeHeaderField(":scheme", "https")
+		return st.headerBuf.Bytes()
+	}
+
+	if len(headers) == 2 && headers[0] == ":method" {
+		// Another fast path for benchmarks.
+		st.encodeHeaderField(":method", headers[1])
+		st.encodeHeaderField(":path", "/")
+		st.encodeHeaderField(":scheme", "https")
+		return st.headerBuf.Bytes()
+	}
+
+	pseudoCount := map[string]int{}
+	keys := []string{":method", ":path", ":scheme"}
+	vals := map[string][]string{
+		":method": {"GET"},
+		":path":   {"/"},
+		":scheme": {"https"},
+	}
+	for len(headers) > 0 {
+		k, v := headers[0], headers[1]
+		headers = headers[2:]
+		if _, ok := vals[k]; !ok {
+			keys = append(keys, k)
+		}
+		if strings.HasPrefix(k, ":") {
+			pseudoCount[k]++
+			if pseudoCount[k] == 1 {
+				vals[k] = []string{v}
+			} else {
+				// Allows testing of invalid headers w/ dup pseudo fields.
+				vals[k] = append(vals[k], v)
+			}
+		} else {
+			vals[k] = append(vals[k], v)
+		}
+	}
+	st.headerBuf.Reset()
+	for _, k := range keys {
+		for _, v := range vals[k] {
+			st.encodeHeaderField(k, v)
+		}
+	}
+	return st.headerBuf.Bytes()
+}
+
+// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set.
+func (st *serverTester) bodylessReq1(headers ...string) {
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(headers...),
+		EndStream:     true,
+		EndHeaders:    true,
+	})
+}
+
+func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
+	if err := st.fr.WriteData(streamID, endStream, data); err != nil {
+		st.t.Fatalf("Error writing DATA: %v", err)
+	}
+}
+
// readFrame reads the next frame from the server with a 2-second
// timeout, running the blocking ReadFrame on a goroutine so the
// timeout can be enforced. The single timer is lazily created and
// reused across calls to avoid per-call timer allocations.
// NOTE(review): on timeout the in-flight read is not cancelled; a
// frame that arrives late sits in the buffered frc channel and will be
// returned by the NEXT readFrame call — acceptable for these tests,
// but worth confirming if reused elsewhere.
func (st *serverTester) readFrame() (Frame, error) {
	go func() {
		fr, err := st.fr.ReadFrame()
		if err != nil {
			st.frErrc <- err
		} else {
			st.frc <- fr
		}
	}()
	t := st.readTimer
	if t == nil {
		t = time.NewTimer(2 * time.Second)
		st.readTimer = t
	}
	t.Reset(2 * time.Second)
	defer t.Stop()
	select {
	case f := <-st.frc:
		return f, nil
	case err := <-st.frErrc:
		return nil, err
	case <-t.C:
		return nil, errors.New("timeout waiting for frame")
	}
}
+
+func (st *serverTester) wantHeaders() *HeadersFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
+	}
+	hf, ok := f.(*HeadersFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *HeadersFrame", f)
+	}
+	return hf
+}
+
+func (st *serverTester) wantContinuation() *ContinuationFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
+	}
+	cf, ok := f.(*ContinuationFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *ContinuationFrame", f)
+	}
+	return cf
+}
+
+func (st *serverTester) wantData() *DataFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a DATA frame: %v", err)
+	}
+	df, ok := f.(*DataFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *DataFrame", f)
+	}
+	return df
+}
+
+func (st *serverTester) wantSettings() *SettingsFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
+	}
+	sf, ok := f.(*SettingsFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *SettingsFrame", f)
+	}
+	return sf
+}
+
+func (st *serverTester) wantPing() *PingFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a PING frame: %v", err)
+	}
+	pf, ok := f.(*PingFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *PingFrame", f)
+	}
+	return pf
+}
+
+func (st *serverTester) wantGoAway() *GoAwayFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
+	}
+	gf, ok := f.(*GoAwayFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *GoAwayFrame", f)
+	}
+	return gf
+}
+
+func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
+	}
+	rs, ok := f.(*RSTStreamFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
+	}
+	if rs.FrameHeader.StreamID != streamID {
+		st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
+	}
+	if rs.ErrCode != errCode {
+		st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
+	}
+}
+
+func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
+	}
+	wu, ok := f.(*WindowUpdateFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
+	}
+	if wu.FrameHeader.StreamID != streamID {
+		st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
+	}
+	if wu.Increment != incr {
+		st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
+	}
+}
+
+func (st *serverTester) wantSettingsAck() {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatal(err)
+	}
+	sf, ok := f.(*SettingsFrame)
+	if !ok {
+		st.t.Fatalf("Wanting a settings ACK, received a %T", f)
+	}
+	if !sf.Header().Flags.Has(FlagSettingsAck) {
+		st.t.Fatal("Settings Frame didn't have ACK set")
+	}
+
+}
+
+func TestServer(t *testing.T) {
+	gotReq := make(chan bool, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Foo", "Bar")
+		gotReq <- true
+	})
+	defer st.Close()
+
+	covers("3.5", `
+		The server connection preface consists of a potentially empty
+		SETTINGS frame ([SETTINGS]) that MUST be the first frame the
+		server sends in the HTTP/2 connection.
+	`)
+
+	st.writePreface()
+	st.writeInitialSettings()
+	st.wantSettings()
+	st.writeSettingsAck()
+	st.wantSettingsAck()
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(),
+		EndStream:     true, // no DATA frames
+		EndHeaders:    true,
+	})
+
+	select {
+	case <-gotReq:
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for request")
+	}
+}
+
+func TestServer_Request_Get(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader("foo-bar", "some-value"),
+			EndStream:     true, // no DATA frames
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Method != "GET" {
+			t.Errorf("Method = %q; want GET", r.Method)
+		}
+		if r.URL.Path != "/" {
+			t.Errorf("URL.Path = %q; want /", r.URL.Path)
+		}
+		if r.ContentLength != 0 {
+			t.Errorf("ContentLength = %v; want 0", r.ContentLength)
+		}
+		if r.Close {
+			t.Error("Close = true; want false")
+		}
+		if !strings.Contains(r.RemoteAddr, ":") {
+			t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr)
+		}
+		if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {
+			t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor)
+		}
+		wantHeader := http.Header{
+			"Foo-Bar": []string{"some-value"},
+		}
+		if !reflect.DeepEqual(r.Header, wantHeader) {
+			t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
+		}
+		if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
+			t.Errorf("Read = %d, %v; want 0, EOF", n, err)
+		}
+	})
+}
+
+func TestServer_Request_Get_PathSlashes(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":path", "/%2f/"),
+			EndStream:     true, // no DATA frames
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.RequestURI != "/%2f/" {
+			t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI)
+		}
+		if r.URL.Path != "///" {
+			t.Errorf("URL.Path = %q; want ///", r.URL.Path)
+		}
+	})
+}
+
+// TODO: add a test with EndStream=true on the HEADERS but setting a
+// Content-Length anyway.  Should we just omit it and force it to
+// zero?
+
+func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Method != "POST" {
+			t.Errorf("Method = %q; want POST", r.Method)
+		}
+		if r.ContentLength != 0 {
+			t.Errorf("ContentLength = %v; want 0", r.ContentLength)
+		}
+		if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
+			t.Errorf("Read = %d, %v; want 0, EOF", n, err)
+		}
+	})
+}
+
+func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {
+	testBodyContents(t, -1, "", func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false, // to say DATA frames are coming
+			EndHeaders:    true,
+		})
+		st.writeData(1, true, nil) // just kidding. empty body.
+	})
+}
+
+func TestServer_Request_Post_Body_OneData(t *testing.T) {
+	const content = "Some content"
+	testBodyContents(t, -1, content, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false, // to say DATA frames are coming
+			EndHeaders:    true,
+		})
+		st.writeData(1, true, []byte(content))
+	})
+}
+
+func TestServer_Request_Post_Body_TwoData(t *testing.T) {
+	const content = "Some content"
+	testBodyContents(t, -1, content, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false, // to say DATA frames are coming
+			EndHeaders:    true,
+		})
+		st.writeData(1, false, []byte(content[:5]))
+		st.writeData(1, true, []byte(content[5:]))
+	})
+}
+
+func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {
+	const content = "Some content"
+	testBodyContents(t, int64(len(content)), content, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(
+				":method", "POST",
+				"content-length", strconv.Itoa(len(content)),
+			),
+			EndStream:  false, // to say DATA frames are coming
+			EndHeaders: true,
+		})
+		st.writeData(1, true, []byte(content))
+	})
+}
+
+func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {
+	testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes",
+		func(st *serverTester) {
+			st.writeHeaders(HeadersFrameParam{
+				StreamID: 1, // clients send odd numbers
+				BlockFragment: st.encodeHeader(
+					":method", "POST",
+					"content-length", "3",
+				),
+				EndStream:  false, // to say DATA frames are coming
+				EndHeaders: true,
+			})
+			st.writeData(1, true, []byte("12"))
+		})
+}
+
+func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {
+	testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes",
+		func(st *serverTester) {
+			st.writeHeaders(HeadersFrameParam{
+				StreamID: 1, // clients send odd numbers
+				BlockFragment: st.encodeHeader(
+					":method", "POST",
+					"content-length", "4",
+				),
+				EndStream:  false, // to say DATA frames are coming
+				EndHeaders: true,
+			})
+			st.writeData(1, true, []byte("12345"))
+		})
+}
+
+func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {
+	testServerRequest(t, write, func(r *http.Request) {
+		if r.Method != "POST" {
+			t.Errorf("Method = %q; want POST", r.Method)
+		}
+		if r.ContentLength != wantContentLength {
+			t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
+		}
+		all, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(all) != wantBody {
+			t.Errorf("Read = %q; want %q", all, wantBody)
+		}
+		if err := r.Body.Close(); err != nil {
+			t.Fatalf("Close: %v", err)
+		}
+	})
+}
+
+// testBodyContentsFail performs the POST request produced by write and
+// checks, inside the handler, that reading the body fails with an error
+// whose message contains wantReadError, after the declared ContentLength
+// matched wantContentLength.
+func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {
+	testServerRequest(t, write, func(r *http.Request) {
+		if r.Method != "POST" {
+			t.Errorf("Method = %q; want POST", r.Method)
+		}
+		if r.ContentLength != wantContentLength {
+			t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
+		}
+		all, err := ioutil.ReadAll(r.Body)
+		if err == nil {
+			t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
+				wantReadError, all)
+		}
+		if !strings.Contains(err.Error(), wantReadError) {
+			t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError)
+		}
+		if err := r.Body.Close(); err != nil {
+			t.Fatalf("Close: %v", err)
+		}
+	})
+}
+
+// Using a Host header, instead of :authority
+func TestServer_Request_Get_Host(t *testing.T) {
+	const host = "example.com"
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader("host", host),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Host != host {
+			t.Errorf("Host = %q; want %q", r.Host, host)
+		}
+	})
+}
+
+// Using an :authority pseudo-header, instead of Host
+func TestServer_Request_Get_Authority(t *testing.T) {
+	const host = "example.com"
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":authority", host),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}, func(r *http.Request) {
+		if r.Host != host {
+			t.Errorf("Host = %q; want %q", r.Host, host)
+		}
+	})
+}
+
+func TestServer_Request_WithContinuation(t *testing.T) {
+	wantHeader := http.Header{
+		"Foo-One":   []string{"value-one"},
+		"Foo-Two":   []string{"value-two"},
+		"Foo-Three": []string{"value-three"},
+	}
+	testServerRequest(t, func(st *serverTester) {
+		fullHeaders := st.encodeHeader(
+			"foo-one", "value-one",
+			"foo-two", "value-two",
+			"foo-three", "value-three",
+		)
+		remain := fullHeaders
+		chunks := 0
+		for len(remain) > 0 {
+			const maxChunkSize = 5
+			chunk := remain
+			if len(chunk) > maxChunkSize {
+				chunk = chunk[:maxChunkSize]
+			}
+			remain = remain[len(chunk):]
+
+			if chunks == 0 {
+				st.writeHeaders(HeadersFrameParam{
+					StreamID:      1, // clients send odd numbers
+					BlockFragment: chunk,
+					EndStream:     true,  // no DATA frames
+					EndHeaders:    false, // we'll have continuation frames
+				})
+			} else {
+				err := st.fr.WriteContinuation(1, len(remain) == 0, chunk)
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+			chunks++
+		}
+		if chunks < 2 {
+			t.Fatal("too few chunks")
+		}
+	}, func(r *http.Request) {
+		if !reflect.DeepEqual(r.Header, wantHeader) {
+			t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
+		}
+	})
+}
+
+// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field")
+func TestServer_Request_CookieConcat(t *testing.T) {
+	const host = "example.com"
+	testServerRequest(t, func(st *serverTester) {
+		st.bodylessReq1(
+			":authority", host,
+			"cookie", "a=b",
+			"cookie", "c=d",
+			"cookie", "e=f",
+		)
+	}, func(r *http.Request) {
+		const want = "a=b; c=d; e=f"
+		if got := r.Header.Get("Cookie"); got != want {
+			t.Errorf("Cookie = %q; want %q", got, want)
+		}
+	})
+}
+
+func TestServer_Request_Reject_CapitalHeader(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") })
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {
+	// 8.1.2.3 Request Pseudo-Header Fields
+	// "All HTTP/2 requests MUST include exactly one valid value" ...
+	testRejectRequest(t, func(st *serverTester) {
+		st.addLogFilter("duplicate pseudo-header")
+		st.bodylessReq1(":method", "GET", ":method", "POST")
+	})
+}
+
+func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {
+	// 8.1.2.3 Request Pseudo-Header Fields
+	// "All pseudo-header fields MUST appear in the header block
+	// before regular header fields. Any request or response that
+	// contains a pseudo-header field that appears in a header
+	// block after a regular header field MUST be treated as
+	// malformed (Section 8.1.2.6)."
+	testRejectRequest(t, func(st *serverTester) {
+		st.addLogFilter("pseudo-header after regular header")
+		var buf bytes.Buffer
+		enc := hpack.NewEncoder(&buf)
+		enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+		enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"})
+		enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
+		enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: buf.Bytes(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") })
+}
+
+func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") })
+}
+
+func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {
+	testRejectRequest(t, func(st *serverTester) {
+		st.addLogFilter(`invalid pseudo-header ":unknown_thing"`)
+		st.bodylessReq1(":unknown_thing", "")
+	})
+}
+
+// testRejectRequest expects the request frames written by send to be
+// rejected by the server with RST_STREAM (PROTOCOL_ERROR) on stream 1,
+// without the request ever reaching the handler.
+func testRejectRequest(t *testing.T, send func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		t.Fatal("server request made it to handler; should've been rejected")
+	})
+	defer st.Close()
+
+	st.greet()
+	send(st)
+	st.wantRSTStream(1, ErrCodeProtocol)
+}
+
+// TestServer_Ping verifies that the server ignores PING frames that carry
+// the ACK flag and replies to non-ACK PINGs with an ACK echoing the same
+// 8 bytes of opaque data.
+func TestServer_Ping(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Server should ignore this one, since it has ACK set.
+	ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+	if err := st.fr.WritePing(true, ackPingData); err != nil {
+		t.Fatal(err)
+	}
+
+	// But the server should reply to this one, since ACK is false.
+	pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+	if err := st.fr.WritePing(false, pingData); err != nil {
+		t.Fatal(err)
+	}
+
+	pf := st.wantPing()
+	if !pf.Flags.Has(FlagPingAck) {
+		t.Error("response ping doesn't have ACK set")
+	}
+	if pf.Data != pingData {
+		t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+	}
+}
+
+// TestServer_RejectsLargeFrames verifies that the server hangs up with a
+// FRAME_SIZE_ERROR GOAWAY when it receives a frame larger than its
+// advertised maximum read frame size.
+func TestServer_RejectsLargeFrames(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Write too large of a frame (too large by one byte)
+	// We ignore the return value because it's expected that the server
+	// will only read the first 9 bytes (the header) and then disconnect.
+	st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFrameSize {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+	}
+}
+
+func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
+	puppet := newHandlerPuppet()
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		puppet.act(w, r)
+	})
+	defer st.Close()
+	defer puppet.done()
+
+	st.greet()
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1, // clients send odd numbers
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // data coming
+		EndHeaders:    true,
+	})
+	st.writeData(1, false, []byte("abcdef"))
+	puppet.do(readBodyHandler(t, "abc"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	puppet.do(readBodyHandler(t, "def"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(1, 3)
+
+	st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
+	puppet.do(readBodyHandler(t, "ghi"))
+	puppet.do(readBodyHandler(t, "jkl"))
+	st.wantWindowUpdate(0, 3)
+	st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
+}
+
+func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+	if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
+		t.Fatal(err)
+	}
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFlowControl {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
+	}
+	if gf.LastStreamID != 0 {
+		t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
+	}
+}
+
+func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
+	inHandler := make(chan bool)
+	blockHandler := make(chan bool)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		<-blockHandler
+	})
+	defer st.Close()
+	defer close(blockHandler)
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	// Send a bogus window update:
+	if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
+		t.Fatal(err)
+	}
+	st.wantRSTStream(1, ErrCodeFlowControl)
+}
+
+// testServerPostUnblock sends a hanging POST with unsent data to handler,
+// then runs fn once in the handler, and verifies that the error returned from
+// handler is acceptable. It fails if it takes over 5 seconds for the handler to exit.
+func testServerPostUnblock(t *testing.T,
+	handler func(http.ResponseWriter, *http.Request) error,
+	fn func(*serverTester),
+	checkErr func(error),
+	otherHeaders ...string) {
+	inHandler := make(chan bool)
+	errc := make(chan error, 1) // buffered so the handler goroutine never blocks on send
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		errc <- handler(w, r)
+	})
+	// Close via defer so the tester is torn down even when t.Fatal below
+	// aborts this function early. (Previously st.Close() ran only on the
+	// success path, leaking the server and connection on timeout.)
+	defer st.Close()
+	st.greet()
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	fn(st)
+	select {
+	case err := <-errc:
+		if checkErr != nil {
+			checkErr(err)
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout waiting for Handler to return")
+	}
+}
+
+func TestServer_RSTStream_Unblocks_Read(t *testing.T) {
+	testServerPostUnblock(t,
+		func(w http.ResponseWriter, r *http.Request) (err error) {
+			_, err = r.Body.Read(make([]byte, 1))
+			return
+		},
+		func(st *serverTester) {
+			if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+				t.Fatal(err)
+			}
+		},
+		func(err error) {
+			if err == nil {
+				t.Error("unexpected nil error from Request.Body.Read")
+			}
+		},
+	)
+}
+
+func TestServer_DeadConn_Unblocks_Read(t *testing.T) {
+	testServerPostUnblock(t,
+		func(w http.ResponseWriter, r *http.Request) (err error) {
+			_, err = r.Body.Read(make([]byte, 1))
+			return
+		},
+		func(st *serverTester) { st.cc.Close() },
+		func(err error) {
+			if err == nil {
+				t.Error("unexpected nil error from Request.Body.Read")
+			}
+		},
+	)
+}
+
+var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {
+	<-w.(http.CloseNotifier).CloseNotify()
+	return nil
+}
+
+func TestServer_CloseNotify_After_RSTStream(t *testing.T) {
+	testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
+		if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+			t.Fatal(err)
+		}
+	}, nil)
+}
+
+func TestServer_CloseNotify_After_ConnClose(t *testing.T) {
+	testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)
+}
+
+// Verify that CloseNotify unblocks after a stream error caused by the
+// client's own mistake, i.e. one unrelated to an explicit cancellation
+// (the explicit-cancel case is TestServer_CloseNotify_After_RSTStream above).
+func TestServer_CloseNotify_After_StreamError(t *testing.T) {
+	testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
+		// data longer than declared Content-Length => stream error
+		st.writeData(1, true, []byte("1234"))
+	}, nil, "content-length", "3")
+}
+
+func TestServer_StateTransitions(t *testing.T) {
+	var st *serverTester
+	inHandler := make(chan bool)
+	writeData := make(chan bool)
+	leaveHandler := make(chan bool)
+	st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		inHandler <- true
+		if st.stream(1) == nil {
+			t.Errorf("nil stream 1 in handler")
+		}
+		if got, want := st.streamState(1), stateOpen; got != want {
+			t.Errorf("in handler, state is %v; want %v", got, want)
+		}
+		writeData <- true
+		if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {
+			t.Errorf("body read = %d, %v; want 0, EOF", n, err)
+		}
+		if got, want := st.streamState(1), stateHalfClosedRemote; got != want {
+			t.Errorf("in handler, state is %v; want %v", got, want)
+		}
+
+		<-leaveHandler
+	})
+	st.greet()
+	if st.stream(1) != nil {
+		t.Fatal("stream 1 should be empty")
+	}
+	if got := st.streamState(1); got != stateIdle {
+		t.Fatalf("stream 1 should be idle; got %v", got)
+	}
+
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: st.encodeHeader(":method", "POST"),
+		EndStream:     false, // keep it open
+		EndHeaders:    true,
+	})
+	<-inHandler
+	<-writeData
+	st.writeData(1, true, nil)
+
+	leaveHandler <- true
+	hf := st.wantHeaders()
+	if !hf.StreamEnded() {
+		t.Fatal("expected END_STREAM flag")
+	}
+
+	if got, want := st.streamState(1), stateClosed; got != want {
+		t.Errorf("at end, state is %v; want %v", got, want)
+	}
+	if st.stream(1) != nil {
+		t.Fatal("at end, stream 1 should be gone")
+	}
+}
+
+// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
+func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		st.writeHeaders(HeadersFrameParam{ // Not a continuation.
+			StreamID:      3, // different stream.
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+// test HEADERS w/o EndHeaders + PING (should get rejected)
+func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		if err := st.fr.WritePing(false, [8]byte{}); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
+func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+		st.wantHeaders()
+		if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
+func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// No HEADERS on stream 0.
+func TestServer_Rejects_Headers0(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      0,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+// No CONTINUATION on stream 0.
+func TestServer_Rejects_Continuation0(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+func TestServer_Rejects_PushPromise(t *testing.T) {
+	testServerRejects(t, func(st *serverTester) {
+		pp := PushPromiseParam{
+			StreamID:  1,
+			PromiseID: 3,
+		}
+		if err := st.fr.WritePushPromise(pp); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// testServerRejects tests that the server hangs up with a GOAWAY
+// frame and a server close after the client does something
+// deserving a CONNECTION_ERROR.
+func testServerRejects(t *testing.T, writeReq func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+	st.addLogFilter("connection error: PROTOCOL_ERROR")
+	defer st.Close()
+	st.greet()
+	writeReq(st)
+
+	st.wantGoAway()
+	errc := make(chan error, 1)
+	go func() {
+		// After GOAWAY the server should close the connection; a clean
+		// disconnect surfaces as io.EOF from ReadFrame. Receiving any
+		// further frame instead of an error is itself a failure.
+		fr, err := st.fr.ReadFrame()
+		if err == nil {
+			err = fmt.Errorf("got frame of type %T", fr)
+		}
+		errc <- err
+	}()
+	select {
+	case err := <-errc:
+		if err != io.EOF {
+			t.Errorf("ReadFrame = %v; want io.EOF", err)
+		}
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for disconnect")
+	}
+}
+
+// testServerRequest sets up an idle HTTP/2 connection and lets you
+// write a single request with writeReq, and then verify that the
+// *http.Request is built correctly in checkReq.
+func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
+	gotReq := make(chan bool, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		if r.Body == nil {
+			t.Fatal("nil Body")
+		}
+		checkReq(r)
+		gotReq <- true
+	})
+	defer st.Close()
+
+	st.greet()
+	writeReq(st)
+
+	select {
+	case <-gotReq:
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for request")
+	}
+}
+
+func getSlash(st *serverTester) { st.bodylessReq1() }
+
+func TestServer_Response_NoData(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// Nothing.
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if !hf.StreamEnded() {
+			t.Fatal("want END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+	})
+}
+
+func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Foo-Bar", "some-value")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if !hf.StreamEnded() {
+			t.Fatal("want END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo-bar", "some-value"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", "0"},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Content-Type", "foo/bar")
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "foo/bar"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
+	const msg = "hi"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed only after the initial write.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed before the initial write and later mutated.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("foo", "proper value")
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo", "proper value"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+func TestServer_Response_Data_SniffLenType(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
+	const msg = "<html>this is HTML"
+	const msg2 = ", and this is the next chunk"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.(http.Flusher).Flush()
+		io.WriteString(w, msg2)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		{
+			df := st.wantData()
+			if df.StreamEnded() {
+				t.Error("unexpected END_STREAM flag")
+			}
+			if got := string(df.Data()); got != msg {
+				t.Errorf("got DATA %q; want %q", got, msg)
+			}
+		}
+		{
+			df := st.wantData()
+			if !df.StreamEnded() {
+				t.Error("wanted END_STREAM flag on last data chunk")
+			}
+			if got := string(df.Data()); got != msg2 {
+				t.Errorf("got DATA %q; want %q", got, msg2)
+			}
+		}
+	})
+}
+
+// TestServer_Response_LargeWrite verifies that a 1 MiB handler write is
+// delivered across multiple DATA frames, none larger than the client's
+// SETTINGS_MAX_FRAME_SIZE, once flow-control quota is granted.
+func TestServer_Response_LargeWrite(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, 0},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		// Give the handler quota to write:
+		if err := st.fr.WriteWindowUpdate(1, size); err != nil {
+			t.Fatal(err)
+		}
+		// Give the handler quota to write to connection-level
+		// window as well
+		if err := st.fr.WriteWindowUpdate(0, size); err != nil {
+			t.Fatal(err)
+		}
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		// gotBytes: renamed from "bytes", which shadowed the imported
+		// bytes package inside this closure.
+		var gotBytes, frames int
+		for {
+			df := st.wantData()
+			gotBytes += len(df.Data())
+			frames++
+			for _, b := range df.Data() {
+				if b != 'a' {
+					t.Fatal("non-'a' byte seen in DATA")
+				}
+			}
+			if df.StreamEnded() {
+				break
+			}
+		}
+		if gotBytes != size {
+			t.Errorf("Got %d bytes; want %d", gotBytes, size)
+		}
+		// Fix: report the expected frame count (want), not the byte
+		// size, which the original passed to Errorf by mistake.
+		if want := int(size / maxFrameSize); frames < want || frames > want*2 {
+			t.Errorf("Got %d frames; want %d", frames, want)
+		}
+	})
+}
+
+// Test that the handler can't write more than the client allows
+func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		// Set the window size to something explicit for this test.
+		// It's also how much initial data we expect.
+		const initWindowSize = 123
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, initWindowSize},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+		defer func() { st.fr.WriteRSTStream(1, ErrCodeCancel) }()
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		df := st.wantData()
+		if got := len(df.Data()); got != initWindowSize {
+			t.Fatalf("Initial window size = %d but got DATA with %d bytes", initWindowSize, got)
+		}
+
+		for _, quota := range []int{1, 13, 127} {
+			if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
+				t.Fatal(err)
+			}
+			df := st.wantData()
+			if int(quota) != len(df.Data()) {
+				t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
+			}
+		}
+
+		if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		errc := make(chan error, 1)
+		go func() {
+			_, err := w.Write(bytes.Repeat([]byte("a"), size))
+			errc <- err
+		}()
+		select {
+		case err := <-errc:
+			if err == nil {
+				return errors.New("unexpected nil error from Write in handler")
+			}
+			return nil
+		case <-time.After(2 * time.Second):
+			return errors.New("timeout waiting for Write in handler")
+		}
+	}, func(st *serverTester) {
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, 0},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		// Nothing; send empty DATA
+		return nil
+	}, func(st *serverTester) {
+		// Handler gets no data quota:
+		if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		df := st.wantData()
+		if got := len(df.Data()); got != 0 {
+			t.Fatalf("unexpected %d DATA bytes; want 0", got)
+		}
+		if !df.StreamEnded() {
+			t.Fatal("DATA didn't have END_STREAM")
+		}
+	})
+}
+
+func TestServer_Response_Automatic100Continue(t *testing.T) {
+	const msg = "foo"
+	const reply = "bar"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		if v := r.Header.Get("Expect"); v != "" {
+			t.Errorf("Expect header = %q; want empty", v)
+		}
+		buf := make([]byte, len(msg))
+		// This read should trigger the 100-continue being sent.
+		if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
+			return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
+		}
+		_, err := io.WriteString(w, reply)
+		return err
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := decodeHeader(t, hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "100"},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Fatalf("Got headers %v; want %v", goth, wanth)
+		}
+
+		// Okay, they sent status 100, so we can send our
+		// gigantic and/or sensitive "foo" payload now.
+		st.writeData(1, true, []byte(msg))
+
+		st.wantWindowUpdate(0, uint32(len(msg)))
+
+		hf = st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("expected data to follow")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth = decodeHeader(t, hf.HeaderBlockFragment())
+		wanth = [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(reply))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+
+		df := st.wantData()
+		if string(df.Data()) != reply {
+			t.Errorf("Client read %q; want %q", df.Data(), reply)
+		}
+		if !df.StreamEnded() {
+			t.Errorf("expect data stream end")
+		}
+	})
+}
+
+// TestServer_HandlerWriteErrorOnDisconnect verifies that a handler's
+// ResponseWriter.Write eventually returns an error once the client's
+// TCP connection has been closed, so handlers don't spin forever.
+func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
+	errc := make(chan error, 1)
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		p := []byte("some data.\n")
+		// Write in a loop until the dead connection surfaces as an error.
+		for {
+			_, err := w.Write(p)
+			if err != nil {
+				errc <- err
+				return nil
+			}
+		}
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		// Close the connection and wait for the handler to (hopefully) notice.
+		st.cc.Close()
+		select {
+		case <-errc:
+		case <-time.After(5 * time.Second):
+			t.Error("timeout")
+		}
+	})
+}
+
+// TestServer_Rejects_Too_Many_Streams verifies that once defaultMaxStreams
+// handlers are blocked in-flight, a new stream is refused with
+// RST_STREAM/PROTOCOL_ERROR, that the server still decodes the rejected
+// stream's header block (keeping the hpack context in sync), and that a
+// fresh stream is accepted again after one handler finishes.
+func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
+	const testPath = "/some/path"
+
+	// inHandler reports each handler's stream ID; leaveHandler releases them.
+	inHandler := make(chan uint32)
+	leaveHandler := make(chan bool)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		id := w.(*responseWriter).rws.stream.id
+		inHandler <- id
+		if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {
+			t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath)
+		}
+		<-leaveHandler
+	})
+	defer st.Close()
+	st.greet()
+	// streamID hands out successive odd (client-initiated) stream IDs.
+	nextStreamID := uint32(1)
+	streamID := func() uint32 {
+		defer func() { nextStreamID += 2 }()
+		return nextStreamID
+	}
+	sendReq := func(id uint32, headers ...string) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(headers...),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	}
+	// Fill the server up to its concurrent-stream limit.
+	for i := 0; i < defaultMaxStreams; i++ {
+		sendReq(streamID())
+		<-inHandler
+	}
+	defer func() {
+		for i := 0; i < defaultMaxStreams; i++ {
+			leaveHandler <- true
+		}
+	}()
+
+	// And this one should cross the limit:
+	// (It's also sent as a CONTINUATION, to verify we still track the decoder context,
+	// even if we're rejecting it)
+	rejectID := streamID()
+	headerBlock := st.encodeHeader(":path", testPath)
+	frag1, frag2 := headerBlock[:3], headerBlock[3:]
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      rejectID,
+		BlockFragment: frag1,
+		EndStream:     true,
+		EndHeaders:    false, // CONTINUATION coming
+	})
+	if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
+		t.Fatal(err)
+	}
+	st.wantRSTStream(rejectID, ErrCodeProtocol)
+
+	// But let a handler finish:
+	leaveHandler <- true
+	st.wantHeaders()
+
+	// And now another stream should be able to start:
+	goodID := streamID()
+	sendReq(goodID, ":path", testPath)
+	select {
+	case got := <-inHandler:
+		if got != goodID {
+			t.Errorf("Got stream %d; want %d", got, goodID)
+		}
+	case <-time.After(3 * time.Second):
+		t.Error("timeout waiting for handler")
+	}
+}
+
+// So many response headers that the server needs to use CONTINUATION frames:
+// the first HEADERS frame must not carry END_HEADERS, and the header block
+// must be completed by at least 5 CONTINUATION frames.
+func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		h := w.Header()
+		// 5000 distinct headers won't fit in a single frame's payload.
+		for i := 0; i < 5000; i++ {
+			h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
+		}
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.HeadersEnded() {
+			t.Fatal("got unwanted END_HEADERS flag")
+		}
+		// Count CONTINUATION frames until one carries END_HEADERS.
+		n := 0
+		for {
+			n++
+			cf := st.wantContinuation()
+			if cf.HeadersEnded() {
+				break
+			}
+		}
+		if n < 5 {
+			t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
+		}
+	})
+}
+
+// This previously crashed (reported by Mathieu Lonjaret as observed
+// while using Camlistore) because we got a DATA frame from the client
+// after the handler exited and our logic at the time was wrong,
+// keeping a stream in the map in stateClosed, which tickled an
+// invariant check later when we tried to remove that stream (via
+// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
+// ended.
+func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// nothing
+		return nil
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     false, // DATA is coming
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if !hf.HeadersEnded() || !hf.StreamEnded() {
+			t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
+		}
+
+		// Sent when a Handler closes while a client has
+		// indicated it's still sending DATA:
+		st.wantRSTStream(1, ErrCodeCancel)
+
+		// Now the handler has ended, so it's ended its
+		// stream, but the client hasn't closed its side
+		// (stateClosedLocal).  So send more data and verify
+		// it doesn't crash with an internal invariant panic, like
+		// it did before.
+		st.writeData(1, true, []byte("foo"))
+
+		// Sent after a peer sends data anyway (admittedly the
+		// previous RST_STREAM might've still been in-flight),
+		// but they'll get the more friendly 'cancel' code
+		// first.
+		st.wantRSTStream(1, ErrCodeStreamClosed)
+
+		// Set up a bunch of machinery to record the panic we saw
+		// previously.
+		var (
+			panMu    sync.Mutex
+			panicVal interface{}
+		)
+
+		// Install a test hook to capture (rather than crash on) any
+		// panic in the serve loop.
+		testHookOnPanicMu.Lock()
+		testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
+			panMu.Lock()
+			panicVal = pv
+			panMu.Unlock()
+			return true
+		}
+		testHookOnPanicMu.Unlock()
+
+		// Now force the serve loop to end, via closing the connection.
+		st.cc.Close()
+		select {
+		case <-st.sc.doneServing:
+			// Loop has exited.
+			panMu.Lock()
+			got := panicVal
+			panMu.Unlock()
+			if got != nil {
+				t.Errorf("Got panic: %v", got)
+			}
+		case <-time.After(5 * time.Second):
+			t.Error("timeout")
+		}
+	})
+}
+
+// HTTP/2 requires TLS 1.2+; connections capped below that must be rejected.
+func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
+func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
+
+// testRejectTLS connects with the client's TLS MaxVersion capped at max and
+// expects the server to reply with GOAWAY/INADEQUATE_SECURITY.
+func testRejectTLS(t *testing.T, max uint16) {
+	st := newServerTester(t, nil, func(c *tls.Config) {
+		c.MaxVersion = max
+	})
+	defer st.Close()
+	gf := st.wantGoAway()
+	if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
+		t.Errorf("Got error code %v; want %v", got, want)
+	}
+}
+
+// TestServer_Rejects_TLSBadCipher verifies that when the client offers only
+// cipher suites prohibited by the HTTP/2 spec, the server refuses the
+// connection with GOAWAY/INADEQUATE_SECURITY.
+func TestServer_Rejects_TLSBadCipher(t *testing.T) {
+	st := newServerTester(t, nil, func(c *tls.Config) {
+		// Only list bad ones:
+		c.CipherSuites = []uint16{
+			tls.TLS_RSA_WITH_RC4_128_SHA,
+			tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+			tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+			tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+			tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+			tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+		}
+	})
+	defer st.Close()
+	gf := st.wantGoAway()
+	if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
+		t.Errorf("Got error code %v; want %v", got, want)
+	}
+}
+
+// TestServer_Advertises_Common_Cipher verifies that ConfigureServer adds the
+// spec-required cipher suite to a server TLS config that lacks it, so a
+// client supporting only that suite can still complete the handshake.
+func TestServer_Advertises_Common_Cipher(t *testing.T) {
+	const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+	st := newServerTester(t, nil, func(c *tls.Config) {
+		// Have the client only support the one required by the spec.
+		c.CipherSuites = []uint16{requiredSuite}
+	}, func(ts *httptest.Server) {
+		var srv *http.Server = ts.Config
+		// Have the server configured with one specific cipher suite
+		// which is banned. This tests that ConfigureServer ends up
+		// adding the good one to this list.
+		srv.TLSConfig = &tls.Config{
+			CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, // just a banned one
+		}
+	})
+	defer st.Close()
+	// greet completes the HTTP/2 handshake; it fails if TLS setup failed.
+	st.greet()
+}
+
+// decodeHeader hpack-decodes headerBlock and returns the (name, value)
+// pairs in order. Any decode error fails the test.
+//
+// TODO: move this onto *serverTester, and re-use the same hpack
+// decoding context throughout.  We're just getting lucky here with
+// creating a new decoder each time.
+func decodeHeader(t *testing.T, headerBlock []byte) (pairs [][2]string) {
+	d := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) {
+		pairs = append(pairs, [2]string{f.Name, f.Value})
+	})
+	if _, err := d.Write(headerBlock); err != nil {
+		t.Fatalf("hpack decoding error: %v", err)
+	}
+	if err := d.Close(); err != nil {
+		t.Fatalf("hpack decoding error: %v", err)
+	}
+	return
+}
+
+// testServerResponse sets up an HTTP/2 connection, serves each request with
+// handler, and runs the provided client func against the serverTester (the
+// client func typically writes a request and verifies the server's response
+// frames). It fails the test if handler returns a non-nil error, or if the
+// client func or the handler doesn't finish in time.
+func testServerResponse(t testing.TB,
+	handler func(http.ResponseWriter, *http.Request) error,
+	client func(*serverTester),
+) {
+	errc := make(chan error, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		if r.Body == nil {
+			t.Fatal("nil Body")
+		}
+		errc <- handler(w, r)
+	})
+	defer st.Close()
+
+	// Run the client side in its own goroutine so we can time it out.
+	donec := make(chan bool)
+	go func() {
+		defer close(donec)
+		st.greet()
+		client(st)
+	}()
+
+	select {
+	case <-donec:
+		return
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout")
+	}
+
+	select {
+	case err := <-errc:
+		if err != nil {
+			t.Fatalf("Error in handler: %v", err)
+		}
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for handler to finish")
+	}
+}
+
+// readBodyHandler returns an http Handler func that reads len(want)
+// bytes from r.Body and fails t if the contents read were not
+// the value of want.
+func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		buf := make([]byte, len(want))
+		_, err := io.ReadFull(r.Body, buf)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		if string(buf) != want {
+			t.Errorf("read %q; want %q", buf, want)
+		}
+	}
+}
+
+// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
+//   https://github.com/tatsuhiro-t/nghttp2/issues/140 &
+//   http://sourceforge.net/p/curl/bugs/1472/
+func TestServerWithCurl(t *testing.T)                     { testServerWithCurl(t, false) }
+func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
+
+// testServerWithCurl runs curl (in Docker, Linux-only) against a real
+// TLS test server and checks curl's verbose output for the expected
+// response headers and body, and that an http2 connection was made.
+func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
+	if runtime.GOOS != "linux" {
+		t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
+	}
+	requireCurl(t)
+	const msg = "Hello from curl!\n"
+	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Foo", "Bar")
+		w.Header().Set("Client-Proto", r.Proto)
+		io.WriteString(w, msg)
+	}))
+	ConfigureServer(ts.Config, &Server{
+		PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
+	})
+	ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
+	ts.StartTLS()
+	defer ts.Close()
+
+	// gotConn is set by the hook when an http2 connection is accepted.
+	var gotConn int32
+	testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
+
+	t.Logf("Running test server for curl to hit at: %s", ts.URL)
+	container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
+	defer kill(container)
+	// resc receives either the container's log output ([]byte) or an error.
+	resc := make(chan interface{}, 1)
+	go func() {
+		res, err := dockerLogs(container)
+		if err != nil {
+			resc <- err
+		} else {
+			resc <- res
+		}
+	}()
+	select {
+	case res := <-resc:
+		if err, ok := res.(error); ok {
+			t.Fatal(err)
+		}
+		if !strings.Contains(string(res.([]byte)), "foo: Bar") {
+			t.Errorf("didn't see foo: Bar header")
+			t.Logf("Got: %s", res)
+		}
+		if !strings.Contains(string(res.([]byte)), "client-proto: HTTP/2") {
+			t.Errorf("didn't see client-proto: HTTP/2 header")
+			t.Logf("Got: %s", res)
+		}
+		if !strings.Contains(string(res.([]byte)), msg) {
+			t.Errorf("didn't see %q content", msg)
+			t.Logf("Got: %s", res)
+		}
+	case <-time.After(3 * time.Second):
+		t.Errorf("timeout waiting for curl")
+	}
+
+	if atomic.LoadInt32(&gotConn) == 0 {
+		t.Error("never saw an http2 connection")
+	}
+}
+
+// BenchmarkServerGets measures the cost of serving b.N sequential GET
+// requests, each on its own stream.
+func BenchmarkServerGets(b *testing.B) {
+	b.ReportAllocs()
+
+	const msg = "Hello, world"
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, msg)
+	})
+	defer st.Close()
+	st.greet()
+
+	// Give the server quota to reply. (plus it has the 64KB)
+	if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		id := 1 + uint32(i)*2 // client-initiated streams use odd IDs
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+		st.wantHeaders()
+		df := st.wantData()
+		if !df.StreamEnded() {
+			b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+		}
+	}
+}
+
+// BenchmarkServerPosts measures the cost of serving b.N sequential POST
+// requests (with empty bodies), each on its own stream.
+func BenchmarkServerPosts(b *testing.B) {
+	b.ReportAllocs()
+
+	const msg = "Hello, world"
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, msg)
+	})
+	defer st.Close()
+	st.greet()
+
+	// Give the server quota to reply. (plus it has the 64KB)
+	if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		id := 1 + uint32(i)*2 // client-initiated streams use odd IDs
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		// Empty DATA frame with END_STREAM finishes the request body.
+		st.writeData(id, true, nil)
+		st.wantHeaders()
+		df := st.wantData()
+		if !df.StreamEnded() {
+			b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+		}
+	}
+}

+ 5021 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/testdata/draft-ietf-httpbis-http2.xml

@@ -0,0 +1,5021 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="lib/rfc2629.xslt"?>
+<?rfc toc="yes" ?>
+<?rfc symrefs="yes" ?>
+<?rfc sortrefs="yes" ?>
+<?rfc compact="yes"?>
+<?rfc subcompact="no" ?>
+<?rfc linkmailto="no" ?>
+<?rfc editing="no" ?>
+<?rfc comments="yes" ?>
+<?rfc inline="yes"?>
+<?rfc rfcedstyle="yes"?>
+<?rfc-ext allow-markup-in-artwork="yes" ?>
+<?rfc-ext include-index="no" ?>
+
+<rfc ipr="trust200902"
+     category="std"
+     docName="draft-ietf-httpbis-http2-latest"
+     x:maturity-level="proposed"
+     xmlns:x="http://purl.org/net/xml2rfc/ext">
+  <x:feedback template="mailto:ietf-http-wg@w3.org?subject={docname},%20%22{section}%22&amp;body=&lt;{ref}&gt;:"/>
+  <front>
+    <title abbrev="HTTP/2">Hypertext Transfer Protocol version 2</title>
+
+    <author initials="M." surname="Belshe" fullname="Mike Belshe">
+      <organization>Twist</organization>
+      <address>
+        <email>mbelshe@chromium.org</email>
+      </address>
+    </author>
+
+    <author initials="R." surname="Peon" fullname="Roberto Peon">
+      <organization>Google, Inc</organization>
+      <address>
+        <email>fenix@google.com</email>
+      </address>
+    </author>
+
+    <author initials="M." surname="Thomson" fullname="Martin Thomson" role="editor">
+      <organization>Mozilla</organization>
+      <address>
+        <postal>
+          <street>331 E Evelyn Street</street>
+          <city>Mountain View</city>
+          <region>CA</region>
+          <code>94041</code>
+          <country>US</country>
+        </postal>
+        <email>martin.thomson@gmail.com</email>
+      </address>
+    </author>
+
+    <date year="2014" />
+    <area>Applications</area>
+    <workgroup>HTTPbis</workgroup>
+    <keyword>HTTP</keyword>
+    <keyword>SPDY</keyword>
+    <keyword>Web</keyword>
+
+    <abstract>
+      <t>
+        This specification describes an optimized expression of the semantics of the Hypertext
+        Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a
+        reduced perception of latency by introducing header field compression and allowing multiple
+        concurrent messages on the same connection. It also introduces unsolicited push of
+        representations from servers to clients.
+      </t>
+      <t>
+        This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax.
+        HTTP's existing semantics remain unchanged.
+      </t>
+    </abstract>
+
+    <note title="Editorial Note (To be removed by RFC Editor)">
+      <t>
+        Discussion of this draft takes place on the HTTPBIS working group mailing list
+        (ietf-http-wg@w3.org), which is archived at <eref
+        target="https://lists.w3.org/Archives/Public/ietf-http-wg/"/>.
+      </t>
+      <t>
+        Working Group information can be found at <eref
+        target="https://tools.ietf.org/wg/httpbis/"/>; that specific to HTTP/2 are at <eref
+        target="https://http2.github.io/"/>.
+      </t>
+      <t>
+        The changes in this draft are summarized in <xref
+        target="change.log"/>.
+      </t>
+    </note>
+
+  </front>
+
+  <middle>
+    <section anchor="intro" title="Introduction">
+
+      <t>
+        The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the
+        HTTP/1.1 message format (<xref target="RFC7230" x:fmt="," x:rel="#http.message"/>) has
+        several characteristics that have a negative overall effect on application performance
+        today.
+      </t>
+      <t>
+        In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given
+        TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed
+        request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1
+        clients that need to make many requests typically use multiple connections to a server in
+        order to achieve concurrency and thereby reduce latency.
+      </t>
+      <t>
+        Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary
+        network traffic, as well as causing the initial <xref target="TCP">TCP</xref> congestion
+        window to quickly fill. This can result in excessive latency when multiple requests are
+        made on a new TCP connection.
+      </t>
+      <t>
+        HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an
+        underlying connection. Specifically, it allows interleaving of request and response
+        messages on the same connection and uses an efficient coding for HTTP header fields. It
+        also allows prioritization of requests, letting more important requests complete more
+        quickly, further improving performance.
+      </t>
+      <t>
+        The resulting protocol is more friendly to the network, because fewer TCP connections can
+        be used in comparison to HTTP/1.x. This means less competition with other flows, and
+        longer-lived connections, which in turn leads to better utilization of available network
+        capacity.
+      </t>
+      <t>
+        Finally, HTTP/2 also enables more efficient processing of messages through use of binary
+        message framing.
+      </t>
+    </section>
+
+    <section anchor="Overview" title="HTTP/2 Protocol Overview">
+      <t>
+        HTTP/2 provides an optimized transport for HTTP semantics.  HTTP/2 supports all of the core
+        features of HTTP/1.1, but aims to be more efficient in several ways.
+      </t>
+      <t>
+        The basic protocol unit in HTTP/2 is a <xref target="FrameHeader">frame</xref>.  Each frame
+        type serves a different purpose.  For example, <x:ref>HEADERS</x:ref> and
+        <x:ref>DATA</x:ref> frames form the basis of <xref target="HttpSequence">HTTP requests and
+        responses</xref>; other frame types like <x:ref>SETTINGS</x:ref>,
+        <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PUSH_PROMISE</x:ref> are used in support of other
+        HTTP/2 features.
+      </t>
+      <t>
+        Multiplexing of requests is achieved by having each HTTP request-response exchange
+        associated with its own <xref target="StreamsLayer">stream</xref>. Streams are largely
+        independent of each other, so a blocked or stalled request or response does not prevent
+        progress on other streams.
+      </t>
+      <t>
+        Flow control and prioritization ensure that it is possible to efficiently use multiplexed
+        streams.  <xref target="FlowControl">Flow control</xref> helps to ensure that only data that
+        can be used by a receiver is transmitted.  <xref
+        target="StreamPriority">Prioritization</xref> ensures that limited resources can be directed
+        to the most important streams first.
+      </t>
+      <t>
+        HTTP/2 adds a new interaction mode, whereby a server can <xref target="PushResources">push
+        responses to a client</xref>.  Server push allows a server to speculatively send a client
+        data that the server anticipates the client will need, trading off some network usage
+        against a potential latency gain.  The server does this by synthesizing a request, which it
+        sends as a <x:ref>PUSH_PROMISE</x:ref> frame.  The server is then able to send a response to
+        the synthetic request on a separate stream.
+      </t>
+      <t>
+        Frames that contain HTTP header fields are <xref target="HeaderBlock">compressed</xref>.
+        HTTP requests can be highly redundant, so compression can reduce the size of requests and
+        responses significantly.
+      </t>
+
+      <section title="Document Organization">
+        <t>
+          The HTTP/2 specification is split into four parts:
+          <list style="symbols">
+            <t>
+              <xref target="starting">Starting HTTP/2</xref> covers how an HTTP/2 connection is
+              initiated.
+            </t>
+            <t>
+              The <xref target="FramingLayer">framing</xref> and <xref
+              target="StreamsLayer">streams</xref> layers describe the way HTTP/2 frames are
+              structured and formed into multiplexed streams.
+            </t>
+            <t>
+              <xref target="FrameTypes">Frame</xref> and <xref target="ErrorCodes">error</xref>
+              definitions include details of the frame and error types used in HTTP/2.
+            </t>
+            <t>
+              <xref target="HTTPLayer">HTTP mappings</xref> and <xref target="HttpExtra">additional
+              requirements</xref> describe how HTTP semantics are expressed using frames and
+              streams.
+          </t>
+          </list>
+        </t>
+        <t>
+          While some of the frame and stream layer concepts are isolated from HTTP, this
+          specification does not define a completely generic framing layer. The framing and streams
+          layers are tailored to the needs of the HTTP protocol and server push.
+        </t>
+      </section>
+
+      <section title="Conventions and Terminology">
+        <t>
+          The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD
+          NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as
+          described in <xref target="RFC2119">RFC 2119</xref>.
+        </t>
+        <t>
+          All numeric values are in network byte order.  Values are unsigned unless otherwise
+          indicated.  Literal values are provided in decimal or hexadecimal as appropriate.
+          Hexadecimal literals are prefixed with <spanx style="verb">0x</spanx> to distinguish them
+          from decimal literals.
+        </t>
+        <t>
+          The following terms are used:
+          <list style="hanging">
+            <t hangText="client:">
+              The endpoint initiating the HTTP/2 connection.
+            </t>
+            <t hangText="connection:">
+              A transport-layer connection between two endpoints.
+            </t>
+            <t hangText="connection error:">
+              An error that affects the entire HTTP/2 connection.
+            </t>
+            <t hangText="endpoint:">
+              Either the client or server of the connection.
+            </t>
+            <t hangText="frame:">
+              The smallest unit of communication within an HTTP/2 connection, consisting of a header
+              and a variable-length sequence of octets structured according to the frame type.
+            </t>
+            <t hangText="peer:">
+              An endpoint.  When discussing a particular endpoint, "peer" refers to the endpoint
+              that is remote to the primary subject of discussion.
+            </t>
+            <t hangText="receiver:">
+              An endpoint that is receiving frames.
+            </t>
+            <t hangText="sender:">
+              An endpoint that is transmitting frames.
+            </t>
+            <t hangText="server:">
+              The endpoint which did not initiate the HTTP/2 connection.
+            </t>
+            <t hangText="stream:">
+              A bi-directional flow of frames across a virtual channel within the HTTP/2 connection.
+            </t>
+            <t hangText="stream error:">
+              An error on the individual HTTP/2 stream.
+            </t>
+          </list>
+        </t>
+        <t>
+          Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined
+          in <xref target="RFC7230" x:fmt="of" x:rel="#intermediaries"/>.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="starting" title="Starting HTTP/2">
+      <t>
+        An HTTP/2 connection is an application layer protocol running on top of a TCP connection
+        (<xref target="TCP"/>). The client is the TCP connection initiator.
+      </t>
+      <t>
+        HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same
+        default port numbers: 80 for "http" URIs and 443 for "https" URIs.  As a result,
+        implementations processing requests for target resource URIs like <spanx
+        style="verb">http://example.org/foo</spanx> or <spanx
+        style="verb">https://example.com/bar</spanx> are required to first discover whether the
+        upstream server (the immediate peer to which the client wishes to establish a connection)
+        supports HTTP/2.
+      </t>
+
+      <t>
+        The means by which support for HTTP/2 is determined is different for "http" and "https"
+        URIs. Discovery for "http" URIs is described in <xref target="discover-http"/>.  Discovery
+        for "https" URIs is described in <xref target="discover-https"/>.
+      </t>
+
+      <section anchor="versioning" title="HTTP/2 Version Identification">
+        <t>
+          The protocol defined in this document has two identifiers.
+          <list style="symbols">
+            <x:lt>
+              <t>
+                The string "h2" identifies the protocol where HTTP/2 uses <xref
+                target="TLS12">TLS</xref>.  This identifier is used in the <xref
+                target="TLS-ALPN">TLS application layer protocol negotiation extension (ALPN)</xref>
+                field and any place that HTTP/2 over TLS is identified.
+              </t>
+              <t>
+                The "h2" string is serialized into an ALPN protocol identifier as the two octet
+                sequence: 0x68, 0x32.
+              </t>
+            </x:lt>
+            <x:lt>
+              <t>
+                The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP.
+                This identifier is used in the HTTP/1.1 Upgrade header field and any place that
+                HTTP/2 over TCP is identified.
+              </t>
+            </x:lt>
+          </list>
+        </t>
+        <t>
+          Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message
+          semantics described in this document.
+        </t>
+        <t>
+          <cref>RFC Editor's Note: please remove the remainder of this section prior to the
+          publication of a final version of this document.</cref>
+        </t>
+        <t>
+          Only implementations of the final, published RFC can identify themselves as "h2" or "h2c".
+          Until such an RFC exists, implementations MUST NOT identify themselves using these
+          strings.
+        </t>
+        <t>
+          Examples and text throughout the rest of this document use "h2" as a matter of
+          editorial convenience only.  Implementations of draft versions MUST NOT identify using
+          this string.
+        </t>
+        <t>
+          Implementations of draft versions of the protocol MUST add the string "-" and the
+          corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11
+          over TLS is identified using the string "h2-11".
+        </t>
+        <t>
+          Non-compatible experiments that are based on these draft versions MUST append the string
+          "-" and an experiment name to the identifier.  For example, an experimental implementation
+          of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself
+          as "h2-09-emo".  Note that any label MUST conform to the "token" syntax defined in
+          <xref target="RFC7230" x:fmt="of" x:rel="#field.components"/>.  Experimenters are
+          encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list.
+        </t>
+      </section>
+
+      <section anchor="discover-http" title="Starting HTTP/2 for &quot;http&quot; URIs">
+        <t>
+          A client that makes a request for an "http" URI without prior knowledge about support for
+          HTTP/2 uses the HTTP Upgrade mechanism (<xref target="RFC7230" x:fmt="of"
+          x:rel="#header.upgrade"/>).  The client makes an HTTP/1.1 request that includes an Upgrade
+          header field identifying HTTP/2 with the "h2c" token.  The HTTP/1.1 request MUST include
+          exactly one <xref target="Http2SettingsHeader">HTTP2-Settings</xref> header field.
+        </t>
+        <figure>
+          <preamble>For example:</preamble>
+          <artwork type="message/http; msgtype=&#34;request&#34;" x:indent-with="  "><![CDATA[
+GET / HTTP/1.1
+Host: server.example.com
+Connection: Upgrade, HTTP2-Settings
+Upgrade: h2c
+HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
+
+]]></artwork>
+        </figure>
+        <t>
+          Requests that contain an entity body MUST be sent in their entirety before the client can
+          send HTTP/2 frames.  This means that a large request entity can block the use of the
+          connection until it is completely sent.
+        </t>
+        <t>
+          If concurrency of an initial request with subsequent requests is important, an OPTIONS
+          request can be used to perform the upgrade to HTTP/2, at the cost of an additional
+          round-trip.
+        </t>
+        <t>
+          A server that does not support HTTP/2 can respond to the request as though the Upgrade
+          header field were absent:
+        </t>
+        <figure>
+          <artwork type="message/http; msgtype=&#34;response&#34;" x:indent-with="  ">
+HTTP/1.1 200 OK
+Content-Length: 243
+Content-Type: text/html
+
+...
+</artwork>
+        </figure>
+        <t>
+          A server MUST ignore a "h2" token in an Upgrade header field.  Presence of a token with
+          "h2" implies HTTP/2 over TLS, which is instead negotiated as described in <xref
+          target="discover-https"/>.
+        </t>
+        <t>
+          A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols)
+          response.  After the empty line that terminates the 101 response, the server can begin
+          sending HTTP/2 frames.  These frames MUST include a response to the request that initiated
+          the Upgrade.
+        </t>
+
+        <figure>
+          <preamble>
+            For example:
+          </preamble>
+          <artwork type="message/http; msgtype=&#34;response&#34;" x:indent-with="  ">
+HTTP/1.1 101 Switching Protocols
+Connection: Upgrade
+Upgrade: h2c
+
+[ HTTP/2 connection ...
+</artwork>
+        </figure>
+        <t>
+          The first HTTP/2 frame sent by the server is a <x:ref>SETTINGS</x:ref> frame (<xref
+          target="SETTINGS"/>) as the server connection preface (<xref
+          target="ConnectionHeader"/>). Upon receiving the 101 response, the client sends a <xref
+          target="ConnectionHeader">connection preface</xref>, which includes a
+          <x:ref>SETTINGS</x:ref> frame.
+        </t>
+        <t>
+          The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is
+          assigned <xref target="pri-default">default priority values</xref>.  Stream 1 is
+          implicitly half closed from the client toward the server, since the request is completed
+          as an HTTP/1.1 request.  After commencing the HTTP/2 connection, stream 1 is used for the
+          response.
+        </t>
+
+        <section anchor="Http2SettingsHeader" title="HTTP2-Settings Header Field">
+          <t>
+            A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one <spanx
+            style="verb">HTTP2-Settings</spanx> header field.  The <spanx
+            style="verb">HTTP2-Settings</spanx> header field is a connection-specific header field
+            that includes parameters that govern the HTTP/2 connection, provided in anticipation of
+            the server accepting the request to upgrade.
+          </t>
+          <figure>
+            <artwork type="abnf" x:indent-with="  "><![CDATA[
+HTTP2-Settings    = token68
+]]></artwork>
+          </figure>
+          <t>
+            A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present,
+            or if more than one is present. A server MUST NOT send this header field.
+          </t>
+
+          <t>
+            The content of the <spanx style="verb">HTTP2-Settings</spanx> header field is the
+            payload of a <x:ref>SETTINGS</x:ref> frame (<xref target="SETTINGS"/>), encoded as a
+            base64url string (that is, the URL- and filename-safe Base64 encoding described in <xref
+            target="RFC4648" x:fmt="of" x:sec="5"/>, with any trailing '=' characters omitted).  The
+            <xref target="RFC5234">ABNF</xref> production for <spanx style="verb">token68</spanx> is
+            defined in <xref target="RFC7235" x:fmt="of" x:rel="#challenge.and.response"/>.
+          </t>
+          <t>
+            Since the upgrade is only intended to apply to the immediate connection, a client
+            sending <spanx style="verb">HTTP2-Settings</spanx> MUST also send <spanx
+            style="verb">HTTP2-Settings</spanx> as a connection option in the <spanx
+            style="verb">Connection</spanx> header field to prevent it from being forwarded
+            downstream.
+          </t>
+          <t>
+            A server decodes and interprets these values as it would any other
+            <x:ref>SETTINGS</x:ref> frame.  <xref target="SettingsSync">Acknowledgement of the
+            SETTINGS parameters</xref> is not necessary, since a 101 response serves as implicit
+            acknowledgment.  Providing these values in the Upgrade request gives a client an
+            opportunity to provide parameters prior to receiving any frames from the server.
+          </t>
+        </section>
+      </section>
+
+      <section anchor="discover-https" title="Starting HTTP/2 for &quot;https&quot; URIs">
+        <t>
+          A client that makes a request to an "https" URI uses <xref target="TLS12">TLS</xref>
+          with the <xref target="TLS-ALPN">application layer protocol negotiation extension</xref>.
+        </t>
+        <t>
+          HTTP/2 over TLS uses the "h2" application token.  The "h2c" token MUST NOT be sent by a
+          client or selected by a server.
+        </t>
+        <t>
+          Once TLS negotiation is complete, both the client and the server send a <xref
+          target="ConnectionHeader">connection preface</xref>.
+        </t>
+      </section>
+
+      <section anchor="known-http" title="Starting HTTP/2 with Prior Knowledge">
+        <t>
+          A client can learn that a particular server supports HTTP/2 by other means.  For example,
+          <xref target="ALT-SVC"/> describes a mechanism for advertising this capability.
+        </t>
+        <t>
+          A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2,
+          after the <xref target="ConnectionHeader">connection preface</xref>; a server can
+          identify such a connection by the presence of the connection preface. This only affects
+          the establishment of HTTP/2 connections over cleartext TCP; implementations that support
+          HTTP/2 over TLS MUST use <xref target="TLS-ALPN">protocol negotiation in TLS</xref>.
+        </t>
+        <t>
+          Without additional information, prior support for HTTP/2 is not a strong signal that a
+          given server will support HTTP/2 for future connections. For example, it is possible for
+          server configurations to change, for configurations to differ between instances in
+          clustered servers, or for network conditions to change.
+        </t>
+      </section>
+
+      <section anchor="ConnectionHeader" title="HTTP/2 Connection Preface">
+        <t>
+          Upon establishment of a TCP connection and determination that HTTP/2 will be used by both
+          peers, each endpoint MUST send a connection preface as a final confirmation and to
+          establish the initial SETTINGS parameters for the HTTP/2 connection.  The client and
+          server each send a different connection preface.
+        </t>
+        <t>
+          The client connection preface starts with a sequence of 24 octets, which in hex notation
+          are:
+        </t>
+        <figure>
+          <artwork type="inline" x:indent-with="  "><![CDATA[
+0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
+]]></artwork>
+        </figure>
+        <t>
+          (the string <spanx style="verb">PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n</spanx>).  This sequence
+          is followed by a <x:ref>SETTINGS</x:ref> frame (<xref target="SETTINGS"/>).  The
+          <x:ref>SETTINGS</x:ref> frame MAY be empty.  The client sends the client connection
+          preface immediately upon receipt of a 101 Switching Protocols response (indicating a
+          successful upgrade), or as the first application data octets of a TLS connection. If
+          starting an HTTP/2 connection with prior knowledge of server support for the protocol, the
+          client connection preface is sent upon connection establishment.
+        </t>
+        <t>
+          <list>
+            <t>
+              The client connection preface is selected so that a large proportion of HTTP/1.1 or
+              HTTP/1.0 servers and intermediaries do not attempt to process further frames.  Note
+              that this does not address the concerns raised in <xref target="TALKING"/>.
+            </t>
+          </list>
+        </t>
+        <t>
+          The server connection preface consists of a potentially empty <x:ref>SETTINGS</x:ref>
+          frame (<xref target="SETTINGS"/>) that MUST be the first frame the server sends in the
+          HTTP/2 connection.
+        </t>
+        <t>
+          The <x:ref>SETTINGS</x:ref> frames received from a peer as part of the connection preface
+          MUST be acknowledged (see <xref target="SettingsSync"/>) after sending the connection
+          preface.
+        </t>
+        <t>
+          To avoid unnecessary latency, clients are permitted to send additional frames to the
+          server immediately after sending the client connection preface, without waiting to receive
+          the server connection preface.  It is important to note, however, that the server
+          connection preface <x:ref>SETTINGS</x:ref> frame might include parameters that necessarily
+          alter how a client is expected to communicate with the server. Upon receiving the
+          <x:ref>SETTINGS</x:ref> frame, the client is expected to honor any parameters established.
+          In some configurations, it is possible for the server to transmit <x:ref>SETTINGS</x:ref>
+          before the client sends additional frames, providing an opportunity to avoid this issue.
+        </t>
+        <t>
+          Clients and servers MUST treat an invalid connection preface as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.  A <x:ref>GOAWAY</x:ref> frame (<xref target="GOAWAY"/>)
+          MAY be omitted in this case, since an invalid preface indicates that the peer is not using
+          HTTP/2.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="FramingLayer" title="HTTP Frames">
+      <t>
+        Once the HTTP/2 connection is established, endpoints can begin exchanging frames.
+      </t>
+
+      <section anchor="FrameHeader" title="Frame Format">
+        <t>
+          All frames begin with a fixed 9-octet header followed by a variable-length payload.
+        </t>
+        <figure title="Frame Layout">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                 Length (24)                   |
+ +---------------+---------------+---------------+
+ |   Type (8)    |   Flags (8)   |
+ +-+-+-----------+---------------+-------------------------------+
+ |R|                 Stream Identifier (31)                      |
+ +=+=============================================================+
+ |                   Frame Payload (0...)                      ...
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The fields of the frame header are defined as:
+          <list style="hanging">
+            <x:lt hangText="Length:">
+              <t>
+                The length of the frame payload expressed as an unsigned 24-bit integer.  Values
+                greater than 2<x:sup>14</x:sup> (16,384) MUST NOT be sent unless the receiver has
+                set a larger value for <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref>.
+              </t>
+              <t>
+                The 9 octets of the frame header are not included in this value.
+              </t>
+            </x:lt>
+            <x:lt hangText="Type:">
+              <t>
+                The 8-bit type of the frame.  The frame type determines the format and semantics of
+                the frame.  Implementations MUST ignore and discard any frame that has a type that
+                is unknown.
+              </t>
+            </x:lt>
+            <x:lt hangText="Flags:">
+              <t>
+                An 8-bit field reserved for frame-type specific boolean flags.
+              </t>
+              <t>
+                Flags are assigned semantics specific to the indicated frame type.  Flags that have
+                no defined semantics for a particular frame type MUST be ignored, and MUST be left
+                unset (0) when sending.
+              </t>
+            </x:lt>
+            <x:lt hangText="R:">
+              <t>
+                A reserved 1-bit field.  The semantics of this bit are undefined and the bit MUST
+                remain unset (0) when sending and MUST be ignored when receiving.
+              </t>
+            </x:lt>
+            <x:lt hangText="Stream Identifier:">
+              <t>
+                A 31-bit stream identifier (see <xref target="StreamIdentifiers"/>).  The value 0 is
+                reserved for frames that are associated with the connection as a whole as opposed to
+                an individual stream.
+              </t>
+            </x:lt>
+          </list>
+        </t>
+        <t>
+          The structure and content of the frame payload is dependent entirely on the frame type.
+        </t>
+      </section>
+
+      <section anchor="FrameSize" title="Frame Size">
+        <t>
+          The size of a frame payload is limited by the maximum size that a receiver advertises in
+          the <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref> setting.  This setting can have any value
+          between 2<x:sup>14</x:sup> (16,384) and 2<x:sup>24</x:sup>-1 (16,777,215) octets,
+          inclusive.
+        </t>
+        <t>
+          All implementations MUST be capable of receiving and minimally processing frames up to
+          2<x:sup>14</x:sup> octets in length, plus the 9-octet <xref target="FrameHeader">frame
+          header</xref>.  The size of the frame header is not included when describing frame sizes.
+          <list style="hanging">
+            <t hangText="Note:">
+              Certain frame types, such as <xref target="PING">PING</xref>, impose additional limits
+              on the amount of payload data allowed.
+            </t>
+          </list>
+        </t>
+        <t>
+          If a frame size exceeds any defined limit, or is too small to contain mandatory frame
+          data, the endpoint MUST send a <x:ref>FRAME_SIZE_ERROR</x:ref> error. A frame size error
+          in a frame that could alter the state of the entire connection MUST be treated as a <xref
+          target="ConnectionErrorHandler">connection error</xref>; this includes any frame carrying
+          a <xref target="HeaderBlock">header block</xref> (that is, <x:ref>HEADERS</x:ref>,
+          <x:ref>PUSH_PROMISE</x:ref>, and <x:ref>CONTINUATION</x:ref>), <x:ref>SETTINGS</x:ref>,
+          and any <x:ref>WINDOW_UPDATE</x:ref> frame with a stream identifier of 0.
+        </t>
+        <t>
+          Endpoints are not obligated to use all available space in a frame. Responsiveness can be
+          improved by using frames that are smaller than the permitted maximum size. Sending large
+          frames can result in delays in sending time-sensitive frames (such as
+          <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>),
+          which, if blocked by the transmission of a large frame, could affect performance.
+        </t>
+      </section>
+
+      <section anchor="HeaderBlock" title="Header Compression and Decompression">
+        <t>
+          Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.
+          They are used within HTTP request and response messages as well as server push operations
+          (see <xref target="PushResources" />).
+        </t>
+        <t>
+          Header lists are collections of zero or more header fields.  When transmitted over a
+          connection, a header list is serialized into a header block using <xref
+          target="COMPRESSION">HTTP Header Compression</xref>.  The serialized header block is then
+          divided into one or more octet sequences, called header block fragments, and transmitted
+          within the payload of <xref target="HEADERS">HEADERS</xref>, <xref
+          target="PUSH_PROMISE">PUSH_PROMISE</xref> or <xref
+          target="CONTINUATION">CONTINUATION</xref> frames.
+        </t>
+        <t>
+          The <xref target="COOKIE">Cookie header field</xref> is treated specially by the HTTP
+          mapping (see <xref target="CompressCookie"/>).
+        </t>
+        <t>
+          A receiving endpoint reassembles the header block by concatenating its fragments, then
+          decompresses the block to reconstruct the header list.
+        </t>
+        <t>
+          A complete header block consists of either:
+          <list style="symbols">
+            <t>
+              a single <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame,
+              with the END_HEADERS flag set, or
+            </t>
+            <t>
+              a <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame with the END_HEADERS
+              flag cleared and one or more <x:ref>CONTINUATION</x:ref> frames,
+              where the last <x:ref>CONTINUATION</x:ref> frame has the END_HEADERS flag set.
+            </t>
+          </list>
+        </t>
+        <t>
+          Header compression is stateful.  One compression context and one decompression context are
+          used for the entire connection.  Each header block is processed as a discrete unit.
+          Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved
+          frames of any other type or from any other stream.  The last frame in a sequence of
+          <x:ref>HEADERS</x:ref> or <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS
+          flag set.  The last frame in a sequence of <x:ref>PUSH_PROMISE</x:ref> or
+          <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS flag set.  This allows a
+          header block to be logically equivalent to a single frame.
+        </t>
+        <t>
+          Header block fragments can only be sent as the payload of <x:ref>HEADERS</x:ref>,
+          <x:ref>PUSH_PROMISE</x:ref> or <x:ref>CONTINUATION</x:ref> frames, because these frames
+          carry data that can modify the compression context maintained by a receiver.  An endpoint
+          receiving <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or
+          <x:ref>CONTINUATION</x:ref> frames MUST reassemble header blocks and perform decompression
+          even if the frames are to be discarded.  A receiver MUST terminate the connection with a
+          <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>COMPRESSION_ERROR</x:ref> if it does not decompress a header block.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="StreamsLayer" title="Streams and Multiplexing">
+      <t>
+        A "stream" is an independent, bi-directional sequence of frames exchanged between the client
+        and server within an HTTP/2 connection.  Streams have several important characteristics:
+        <list style="symbols">
+          <t>
+            A single HTTP/2 connection can contain multiple concurrently open streams, with either
+            endpoint interleaving frames from multiple streams.
+          </t>
+          <t>
+            Streams can be established and used unilaterally or shared by either the client or
+            server.
+          </t>
+          <t>
+            Streams can be closed by either endpoint.
+          </t>
+          <t>
+            The order in which frames are sent on a stream is significant. Recipients process frames
+            in the order they are received.  In particular, the order of <x:ref>HEADERS</x:ref>
+            and <x:ref>DATA</x:ref> frames is semantically significant.
+          </t>
+          <t>
+            Streams are identified by an integer.  Stream identifiers are assigned to streams by the
+            endpoint initiating the stream.
+          </t>
+        </list>
+      </t>
+
+      <section anchor="StreamStates" title="Stream States">
+        <t>
+          The lifecycle of a stream is shown in <xref target="StreamStatesFigure"/>.
+        </t>
+
+        <figure anchor="StreamStatesFigure" title="Stream States">
+          <artwork type="drawing">
+            <![CDATA[
+                           +--------+
+                     PP    |        |    PP
+                  ,--------|  idle  |--------.
+                 /         |        |         \
+                v          +--------+          v
+         +----------+          |           +----------+
+         |          |          | H         |          |
+     ,---| reserved |          |           | reserved |---.
+     |   | (local)  |          v           | (remote) |   |
+     |   +----------+      +--------+      +----------+   |
+     |      |          ES  |        |  ES          |      |
+     |      | H    ,-------|  open  |-------.      | H    |
+     |      |     /        |        |        \     |      |
+     |      v    v         +--------+         v    v      |
+     |   +----------+          |           +----------+   |
+     |   |   half   |          |           |   half   |   |
+     |   |  closed  |          | R         |  closed  |   |
+     |   | (remote) |          |           | (local)  |   |
+     |   +----------+          |           +----------+   |
+     |        |                v                 |        |
+     |        |  ES / R    +--------+  ES / R    |        |
+     |        `----------->|        |<-----------'        |
+     |  R                  | closed |                  R  |
+     `-------------------->|        |<--------------------'
+                           +--------+
+
+       H:  HEADERS frame (with implied CONTINUATIONs)
+       PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+       ES: END_STREAM flag
+       R:  RST_STREAM frame
+]]>
+          </artwork>
+        </figure>
+
+        <t>
+          Note that this diagram shows stream state transitions and the frames and flags that affect
+          those transitions only.  In this regard, <x:ref>CONTINUATION</x:ref> frames do not result
+          in state transitions; they are effectively part of the <x:ref>HEADERS</x:ref> or
+          <x:ref>PUSH_PROMISE</x:ref> that they follow.  For this purpose, the END_STREAM flag is
+          processed as a separate event to the frame that bears it; a <x:ref>HEADERS</x:ref> frame
+          with the END_STREAM flag set can cause two state transitions.
+        </t>
+        <t>
+          Both endpoints have a subjective view of the state of a stream that could be different
+          when frames are in transit.  Endpoints do not coordinate the creation of streams; they are
+          created unilaterally by either endpoint.  The negative consequences of a mismatch in
+          states are limited to the "closed" state after sending <x:ref>RST_STREAM</x:ref>, where
+          frames might be received for some time after closing.
+        </t>
+        <t>
+          Streams have the following states:
+          <list style="hanging">
+
+            <x:lt hangText="idle:">
+              <t>
+                <vspace blankLines="0"/>
+                All streams start in the "idle" state.  In this state, no frames have been
+                exchanged.
+              </t>
+              <t>
+                The following transitions are valid from this state:
+                <list style="symbols">
+                  <t>
+                    Sending or receiving a <x:ref>HEADERS</x:ref> frame causes the stream to become
+                    "open".  The stream identifier is selected as described in <xref
+                    target="StreamIdentifiers"/>.  The same <x:ref>HEADERS</x:ref> frame can also
+                    cause a stream to immediately become "half closed".
+                  </t>
+                  <t>
+                    Sending a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream for
+                    later use.  The stream state for the reserved stream transitions to "reserved
+                    (local)".
+                  </t>
+                  <t>
+                    Receiving a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream as
+                    reserved by the remote peer.  The state of the stream becomes "reserved
+                    (remote)".
+                  </t>
+                </list>
+              </t>
+              <t>
+                Receiving any frames other than <x:ref>HEADERS</x:ref> or
+                <x:ref>PUSH_PROMISE</x:ref> on a stream in this state MUST be treated as a <xref
+                target="ConnectionErrorHandler">connection error</xref> of type
+                <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+            </x:lt>
+
+            <x:lt hangText="reserved (local):">
+              <t>
+                <vspace blankLines="0"/>
+                A stream in the "reserved (local)" state is one that has been promised by sending a
+                <x:ref>PUSH_PROMISE</x:ref> frame.  A <x:ref>PUSH_PROMISE</x:ref> frame reserves an
+                idle stream by associating the stream with an open stream that was initiated by the
+                remote peer (see <xref target="PushResources"/>).
+              </t>
+              <t>
+                In this state, only the following transitions are possible:
+                <list style="symbols">
+                  <t>
+                    The endpoint can send a <x:ref>HEADERS</x:ref> frame.  This causes the stream to
+                    open in a "half closed (remote)" state.
+                  </t>
+                  <t>
+                    Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream
+                    to become "closed".  This releases the stream reservation.
+                  </t>
+                </list>
+              </t>
+              <t>
+                An endpoint MUST NOT send any type of frame other than <x:ref>HEADERS</x:ref> or
+                <x:ref>RST_STREAM</x:ref> in this state.
+              </t>
+              <t>
+                A <x:ref>PRIORITY</x:ref> frame MAY be received in this state.  Receiving any type
+                of frame other than <x:ref>RST_STREAM</x:ref> or <x:ref>PRIORITY</x:ref> on a stream
+                in this state MUST be treated as a <xref target="ConnectionErrorHandler">connection
+                error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+            </x:lt>
+
+            <x:lt hangText="reserved (remote):">
+              <t>
+                <vspace blankLines="0"/>
+                A stream in the "reserved (remote)" state has been reserved by a remote peer.
+              </t>
+              <t>
+                In this state, only the following transitions are possible:
+                <list style="symbols">
+                  <t>
+                    Receiving a <x:ref>HEADERS</x:ref> frame causes the stream to transition to
+                    "half closed (local)".
+                  </t>
+                  <t>
+                    Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream
+                    to become "closed".  This releases the stream reservation.
+                  </t>
+                </list>
+              </t>
+              <t>
+                An endpoint MAY send a <x:ref>PRIORITY</x:ref> frame in this state to reprioritize
+                the reserved stream.  An endpoint MUST NOT send any type of frame other than
+                <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>
+                in this state.
+              </t>
+              <t>
+                Receiving any type of frame other than <x:ref>HEADERS</x:ref> or
+                <x:ref>RST_STREAM</x:ref> on a stream in this state MUST be treated as a <xref
+                target="ConnectionErrorHandler">connection error</xref> of type
+                <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+            </x:lt>
+
+            <x:lt hangText="open:">
+              <t>
+                <vspace blankLines="0"/>
+                A stream in the "open" state may be used by both peers to send frames of any type.
+                In this state, sending peers observe advertised <xref target="FlowControl">stream
+                level flow control limits</xref>.
+              </t>
+              <t>
+                From this state either endpoint can send a frame with an END_STREAM flag set, which
+                causes the stream to transition into one of the "half closed" states: an endpoint
+                sending an END_STREAM flag causes the stream state to become "half closed (local)";
+                an endpoint receiving an END_STREAM flag causes the stream state to become "half
+                closed (remote)".
+              </t>
+              <t>
+                Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame from this state, causing
+                it to transition immediately to "closed".
+              </t>
+            </x:lt>
+
+            <x:lt hangText="half closed (local):">
+              <t>
+                <vspace blankLines="0"/>
+                A stream that is in the "half closed (local)" state cannot be used for sending
+                frames.  Only <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> and
+                <x:ref>RST_STREAM</x:ref> frames can be sent in this state.
+              </t>
+              <t>
+                A stream transitions from this state to "closed" when a frame that contains an
+                END_STREAM flag is received, or when either peer sends a <x:ref>RST_STREAM</x:ref>
+                frame.
+              </t>
+              <t>
+                A receiver can ignore <x:ref>WINDOW_UPDATE</x:ref> frames in this state, which might
+                arrive for a short period after a frame bearing the END_STREAM flag is sent.
+              </t>
+              <t>
+                <x:ref>PRIORITY</x:ref> frames received in this state are used to reprioritize
+                streams that depend on the current stream.
+              </t>
+            </x:lt>
+
+            <x:lt hangText="half closed (remote):">
+              <t>
+                <vspace blankLines="0"/>
+                A stream that is "half closed (remote)" is no longer being used by the peer to send
+                frames.  In this state, an endpoint is no longer obligated to maintain a receiver
+                flow control window if it performs flow control.
+              </t>
+              <t>
+                If an endpoint receives additional frames for a stream that is in this state, other
+                than <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> or
+                <x:ref>RST_STREAM</x:ref>, it MUST respond with a <xref
+                target="StreamErrorHandler">stream error</xref> of type
+                <x:ref>STREAM_CLOSED</x:ref>.
+              </t>
+              <t>
+                A stream that is "half closed (remote)" can be used by the endpoint to send frames
+                of any type. In this state, the endpoint continues to observe advertised <xref
+                target="FlowControl">stream level flow control limits</xref>.
+              </t>
+              <t>
+                A stream can transition from this state to "closed" by sending a frame that contains
+                an END_STREAM flag, or when either peer sends a <x:ref>RST_STREAM</x:ref> frame.
+              </t>
+            </x:lt>
+
+            <x:lt hangText="closed:">
+              <t>
+                <vspace blankLines="0"/>
+                The "closed" state is the terminal state.
+              </t>
+              <t>
+                An endpoint MUST NOT send frames other than <x:ref>PRIORITY</x:ref> on a closed
+                stream.  An endpoint that receives any frame other than <x:ref>PRIORITY</x:ref>
+                after receiving a <x:ref>RST_STREAM</x:ref> MUST treat that as a <xref
+                target="StreamErrorHandler">stream error</xref> of type
+                <x:ref>STREAM_CLOSED</x:ref>.  Similarly, an endpoint that receives any frames after
+                receiving a frame with the END_STREAM flag set MUST treat that as a <xref
+                target="ConnectionErrorHandler">connection error</xref> of type
+                <x:ref>STREAM_CLOSED</x:ref>, unless the frame is permitted as described below.
+              </t>
+              <t>
+                <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames can be received in
+                this state for a short period after a <x:ref>DATA</x:ref> or <x:ref>HEADERS</x:ref>
+                frame containing an END_STREAM flag is sent.  Until the remote peer receives and
+                processes <x:ref>RST_STREAM</x:ref> or the frame bearing the END_STREAM flag, it
+                might send frames of these types.  Endpoints MUST ignore
+                <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames received in this
+                state, though endpoints MAY choose to treat frames that arrive a significant time
+                after sending END_STREAM as a <xref target="ConnectionErrorHandler">connection
+                error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+              <t>
+                <x:ref>PRIORITY</x:ref> frames can be sent on closed streams to prioritize streams
+                that are dependent on the closed stream.  Endpoints SHOULD process
+                <x:ref>PRIORITY</x:ref> frames, though they can be ignored if the stream has been
+                removed from the dependency tree (see <xref target="priority-gc"/>).
+              </t>
+              <t>
+                If this state is reached as a result of sending a <x:ref>RST_STREAM</x:ref> frame,
+                the peer that receives the <x:ref>RST_STREAM</x:ref> might have already sent - or
+                enqueued for sending - frames on the stream that cannot be withdrawn.  An endpoint
+                MUST ignore frames that it receives on closed streams after it has sent a
+                <x:ref>RST_STREAM</x:ref> frame.  An endpoint MAY choose to limit the period over
+                which it ignores frames and treat frames that arrive after this time as being in
+                error.
+              </t>
+              <t>
+                Flow controlled frames (i.e., <x:ref>DATA</x:ref>) received after sending
+                <x:ref>RST_STREAM</x:ref> are counted toward the connection flow control window.
+                Even though these frames might be ignored, because they are sent before the sender
+                receives the <x:ref>RST_STREAM</x:ref>, the sender will consider the frames to count
+                against the flow control window.
+              </t>
+              <t>
+                An endpoint might receive a <x:ref>PUSH_PROMISE</x:ref> frame after it sends
+                <x:ref>RST_STREAM</x:ref>.  <x:ref>PUSH_PROMISE</x:ref> causes a stream to become
+                "reserved" even if the associated stream has been reset.  Therefore, a
+                <x:ref>RST_STREAM</x:ref> is needed to close an unwanted promised stream.
+              </t>
+            </x:lt>
+          </list>
+        </t>
+        <t>
+          In the absence of more specific guidance elsewhere in this document, implementations
+          SHOULD treat the receipt of a frame that is not expressly permitted in the description of
+          a state as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.  Frames of unknown types are ignored.
+        </t>
+        <t>
+          An example of the state transitions for an HTTP request/response exchange can be found in
+          <xref target="HttpSequence"/>.  An example of the state transitions for server push can be
+          found in <xref target="PushRequests"/> and <xref target="PushResponses"/>.
+        </t>
+
+        <section anchor="StreamIdentifiers" title="Stream Identifiers">
+          <t>
+            Streams are identified with an unsigned 31-bit integer.  Streams initiated by a client
+            MUST use odd-numbered stream identifiers; those initiated by the server MUST use
+            even-numbered stream identifiers.  A stream identifier of zero (0x0) is used for
+            connection control messages; the stream identifier zero cannot be used to establish a
+            new stream.
+          </t>
+          <t>
+            HTTP/1.1 requests that are upgraded to HTTP/2 (see <xref target="discover-http"/>) are
+            responded to with a stream identifier of one (0x1).  After the upgrade
+            completes, stream 0x1 is "half closed (local)" to the client.  Therefore, stream 0x1
+            cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.
+          </t>
+          <t>
+            The identifier of a newly established stream MUST be numerically greater than all
+            streams that the initiating endpoint has opened or reserved.  This governs streams that
+            are opened using a <x:ref>HEADERS</x:ref> frame and streams that are reserved using
+            <x:ref>PUSH_PROMISE</x:ref>.  An endpoint that receives an unexpected stream identifier
+            MUST respond with a <xref target="ConnectionErrorHandler">connection error</xref> of
+            type <x:ref>PROTOCOL_ERROR</x:ref>.
+          </t>
+          <t>
+            The first use of a new stream identifier implicitly closes all streams in the "idle"
+            state that might have been initiated by that peer with a lower-valued stream identifier.
+            For example, if a client sends a <x:ref>HEADERS</x:ref> frame on stream 7 without ever
+            sending a frame on stream 5, then stream 5 transitions to the "closed" state when the
+            first frame for stream 7 is sent or received.
+          </t>
+          <t>
+            Stream identifiers cannot be reused.  Long-lived connections can result in an endpoint
+            exhausting the available range of stream identifiers.  A client that is unable to
+            establish a new stream identifier can establish a new connection for new streams.  A
+            server that is unable to establish a new stream identifier can send a
+            <x:ref>GOAWAY</x:ref> frame so that the client is forced to open a new connection for
+            new streams.
+          </t>
+        </section>
+
+        <section title="Stream Concurrency">
+          <t>
+            A peer can limit the number of concurrently active streams using the
+            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> parameter (see <xref
+            target="SettingValues"/>) within a <x:ref>SETTINGS</x:ref> frame. The maximum concurrent
+            streams setting is specific to each endpoint and applies only to the peer that receives
+            the setting. That is, clients specify the maximum number of concurrent streams the
+            server can initiate, and servers specify the maximum number of concurrent streams the
+            client can initiate.
+          </t>
+          <t>
+            Streams that are in the "open" state, or either of the "half closed" states count toward
+            the maximum number of streams that an endpoint is permitted to open.  Streams in any of
+            these three states count toward the limit advertised in the
+            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting.  Streams in either of the
+            "reserved" states do not count toward the stream limit.
+          </t>
+          <t>
+            Endpoints MUST NOT exceed the limit set by their peer.  An endpoint that receives a
+            <x:ref>HEADERS</x:ref> frame that causes their advertised concurrent stream limit to be
+            exceeded MUST treat this as a <xref target="StreamErrorHandler">stream error</xref>.  An
+            endpoint that wishes to reduce the value of
+            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> to a value that is below the current
+            number of open streams can either close streams that exceed the new value or allow
+            streams to complete.
+          </t>
+        </section>
+      </section>
+
+     <section anchor="FlowControl" title="Flow Control">
+        <t>
+          Using streams for multiplexing introduces contention over use of the TCP connection,
+          resulting in blocked streams.  A flow control scheme ensures that streams on the same
+          connection do not destructively interfere with each other.  Flow control is used for both
+          individual streams and for the connection as a whole.
+        </t>
+        <t>
+          HTTP/2 provides for flow control through use of the <xref
+          target="WINDOW_UPDATE">WINDOW_UPDATE frame</xref>.
+        </t>
+
+        <section anchor="fc-principles" title="Flow Control Principles">
+          <t>
+            HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be
+            used without requiring protocol changes. Flow control in HTTP/2 has the following
+            characteristics:
+            <list style="numbers">
+              <t>
+                Flow control is specific to a connection; i.e., it is "hop-by-hop", not
+                "end-to-end".
+              </t>
+              <t>
+                Flow control is based on window update frames.  Receivers advertise how many octets
+                they are prepared to receive on a stream and for the entire connection.  This is a
+                credit-based scheme.
+              </t>
+              <t>
+                Flow control is directional with overall control provided by the receiver.  A
+                receiver MAY choose to set any window size that it desires for each stream and for
+                the entire connection.  A sender MUST respect flow control limits imposed by a
+                receiver.  Clients, servers and intermediaries all independently advertise their
+                flow control window as a receiver and abide by the flow control limits set by
+                their peer when sending.
+              </t>
+              <t>
+                The initial value for the flow control window is 65,535 octets for both new streams
+                and the overall connection.
+              </t>
+              <t>
+                The frame type determines whether flow control applies to a frame.  Of the frames
+                specified in this document, only <x:ref>DATA</x:ref> frames are subject to flow
+                control; all other frame types do not consume space in the advertised flow control
+                window.  This ensures that important control frames are not blocked by flow control.
+              </t>
+              <t>
+                Flow control cannot be disabled.
+              </t>
+              <t>
+                HTTP/2 defines only the format and semantics of the <x:ref>WINDOW_UPDATE</x:ref>
+                frame (<xref target="WINDOW_UPDATE"/>).  This document does not stipulate how a
+                receiver decides when to send this frame or the value that it sends, nor does it
+                specify how a sender chooses to send packets.  Implementations are able to select
+                any algorithm that suits their needs.
+              </t>
+            </list>
+          </t>
+          <t>
+            Implementations are also responsible for managing how requests and responses are sent
+            based on priority; choosing how to avoid head of line blocking for requests; and
+            managing the creation of new streams.  Algorithm choices for these could interact with
+            any flow control algorithm.
+          </t>
+        </section>
+
+        <section anchor="DisableFlowControl" title="Appropriate Use of Flow Control">
+          <t>
+            Flow control is defined to protect endpoints that are operating under resource
+            constraints.  For example, a proxy needs to share memory between many connections, and
+            also might have a slow upstream connection and a fast downstream one.  Flow control
+            addresses cases where the receiver is unable to process data on one stream, yet wants to
+            continue to process other streams in the same connection.
+          </t>
+          <t>
+            Deployments that do not require this capability can advertise a flow control window of
+            the maximum size, incrementing the available space when new data is received.  This
+            effectively disables flow control for that receiver.  Conversely, a sender is always
+            subject to the flow control window advertised by the receiver.
+          </t>
+          <t>
+            Deployments with constrained resources (for example, memory) can employ flow control to
+            limit the amount of memory a peer can consume.  Note, however, that this can lead to
+            suboptimal use of available network resources if flow control is enabled without
+            knowledge of the bandwidth-delay product (see <xref target="RFC1323"/>).
+          </t>
+          <t>
+            Even with full awareness of the current bandwidth-delay product, implementation of flow
+            control can be difficult.  When using flow control, the receiver MUST read from the TCP
+            receive buffer in a timely fashion.  Failure to do so could lead to a deadlock when
+            critical frames, such as <x:ref>WINDOW_UPDATE</x:ref>, are not read and acted upon.
+          </t>
+        </section>
+      </section>
+
+      <section anchor="StreamPriority" title="Stream priority">
+        <t>
+          A client can assign a priority for a new stream by including prioritization information in
+          the <xref target="HEADERS">HEADERS frame</xref> that opens the stream.  For an existing
+          stream, the <xref target="PRIORITY">PRIORITY frame</xref> can be used to change the
+          priority.
+        </t>
+        <t>
+          The purpose of prioritization is to allow an endpoint to express how it would prefer its
+          peer allocate resources when managing concurrent streams.  Most importantly, priority can
+          be used to select streams for transmitting frames when there is limited capacity for
+          sending.
+        </t>
+        <t>
+          Streams can be prioritized by marking them as dependent on the completion of other streams
+          (<xref target="pri-depend"/>).  Each dependency is assigned a relative weight, a number
+          that is used to determine the relative proportion of available resources that are assigned
+          to streams dependent on the same stream.
+        </t>
+        <!--
+          Note that stream dependencies have not yet been validated in practice.  The theory
+          might be fairly sound, but there are no implementations currently sending these.  If it
+          turns out that they are not useful, or actively harmful, implementations will be requested
+          to avoid creating stream dependencies.
+        -->
+        <t>
+          Explicitly setting the priority for a stream is input to a prioritization process.  It
+          does not guarantee any particular processing or transmission order for the stream relative
+          to any other stream.  An endpoint cannot force a peer to process concurrent streams in a
+          particular order using priority.  Expressing priority is therefore only ever a suggestion.
+        </t>
+        <t>
+          Providing prioritization information is optional, so default values are used if no
+          explicit indicator is provided (<xref target="pri-default"/>).
+        </t>
+
+        <section title="Stream Dependencies" anchor="pri-depend">
+          <t>
+            Each stream can be given an explicit dependency on another stream.  Including a
+            dependency expresses a preference to allocate resources to the identified stream rather
+            than to the dependent stream.
+          </t>
+          <t>
+            A stream that is not dependent on any other stream is given a stream dependency of 0x0.
+            In other words, the non-existent stream 0 forms the root of the tree.
+          </t>
+          <t>
+            A stream that depends on another stream is a dependent stream. The stream upon which a
+            stream is dependent is a parent stream. A dependency on a stream that is not currently
+            in the tree - such as a stream in the "idle" state - results in that stream being given
+            a <xref target="pri-default">default priority</xref>.
+          </t>
+          <t>
+            When assigning a dependency on another stream, the stream is added as a new dependency
+            of the parent stream.  Dependent streams that share the same parent are not ordered with
+            respect to each other.  For example, if streams B and C are dependent on stream A, and
+            if stream D is created with a dependency on stream A, this results in a dependency order
+            of A followed by B, C, and D in any order.
+          </t>
+          <figure title="Example of Default Dependency Creation">
+            <artwork type="inline"><![CDATA[
+    A                 A
+   / \      ==>      /|\
+  B   C             B D C
+]]></artwork>
+          </figure>
+          <t>
+            An exclusive flag allows for the insertion of a new level of dependencies.  The
+            exclusive flag causes the stream to become the sole dependency of its parent stream,
+            causing other dependencies to become dependent on the exclusive stream.  In the
+            previous example, if stream D is created with an exclusive dependency on stream A, this
+            results in D becoming the dependency parent of B and C.
+          </t>
+          <figure title="Example of Exclusive Dependency Creation">
+            <artwork type="inline"><![CDATA[
+                      A
+    A                 |
+   / \      ==>       D
+  B   C              / \
+                    B   C
+]]></artwork>
+          </figure>
+          <t>
+            Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all
+            of the streams that it depends on (the chain of parent streams up to 0x0) are either
+            closed, or it is not possible to make progress on them.
+          </t>
+          <t>
+            A stream cannot depend on itself.  An endpoint MUST treat this as a <xref
+            target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+          </t>
+        </section>
+
+        <section title="Dependency Weighting">
+          <t>
+            All dependent streams are allocated an integer weight between 1 and 256 (inclusive).
+          </t>
+          <t>
+            Streams with the same parent SHOULD be allocated resources proportionally based on their
+            weight.  Thus, if stream B depends on stream A with weight 4, and C depends on stream A
+            with weight 12, and if no progress can be made on A, stream B ideally receives one third
+            of the resources allocated to stream C.
+          </t>
+        </section>
+
+        <section anchor="reprioritize" title="Reprioritization">
+          <t>
+            Stream priorities are changed using the <x:ref>PRIORITY</x:ref> frame.  Setting a
+            dependency causes a stream to become dependent on the identified parent stream.
+          </t>
+          <t>
+            Dependent streams move with their parent stream if the parent is reprioritized.  Setting
+            a dependency with the exclusive flag for a reprioritized stream moves all the
+            dependencies of the new parent stream to become dependent on the reprioritized stream.
+          </t>
+          <t>
+            If a stream is made dependent on one of its own dependencies, the formerly dependent
+            stream is first moved to be dependent on the reprioritized stream's previous parent.
+            The moved dependency retains its weight.
+          </t>
+          <figure title="Example of Dependency Reordering">
+            <preamble>
+              For example, consider an original dependency tree where B and C depend on A, D and E
+              depend on C, and F depends on D.  If A is made dependent on D, then D takes the place
+              of A.  All other dependency relationships stay the same, except for F, which becomes
+              dependent on A if the reprioritization is exclusive.
+            </preamble>
+            <artwork type="inline"><![CDATA[
+    ?                ?                ?                 ?
+    |               / \               |                 |
+    A              D   A              D                 D
+   / \            /   / \            / \                |
+  B   C     ==>  F   B   C   ==>    F   A       OR      A
+     / \                 |             / \             /|\
+    D   E                E            B   C           B C F
+    |                                     |             |
+    F                                     E             E
+               (intermediate)   (non-exclusive)    (exclusive)
+]]></artwork>
+          </figure>
+        </section>
+
+        <section anchor="priority-gc" title="Prioritization State Management">
+          <t>
+            When a stream is removed from the dependency tree, its dependencies can be moved to
+            become dependent on the parent of the closed stream.  The weights of new dependencies
+            are recalculated by distributing the weight of the dependency of the closed stream
+            proportionally based on the weights of its dependencies.
+          </t>
+          <t>
+            Streams that are removed from the dependency tree cause some prioritization information
+            to be lost.  Resources are shared between streams with the same parent stream, which
+            means that if a stream in that set closes or becomes blocked, any spare capacity
+            allocated to a stream is distributed to the immediate neighbors of the stream.  However,
+            if the common dependency is removed from the tree, those streams share resources with
+            streams at the next highest level.
+          </t>
+          <t>
+            For example, assume streams A and B share a parent, and streams C and D both depend on
+            stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,
+            then stream C receives all the resources dedicated to stream A.  If stream A is removed
+            from the tree, the weight of stream A is divided between streams C and D.  If stream D
+            is still unable to proceed, this results in stream C receiving a reduced proportion of
+            resources.  For equal starting weights, C receives one third, rather than one half, of
+            available resources.
+          </t>
+          <t>
+            It is possible for a stream to become closed while prioritization information that
+            creates a dependency on that stream is in transit.  If a stream identified in a
+            dependency has no associated priority information, then the dependent stream is instead
+            assigned a <xref target="pri-default">default priority</xref>.  This potentially creates
+            suboptimal prioritization, since the stream could be given a priority that is different
+            to what is intended.
+          </t>
+          <t>
+            To avoid these problems, an endpoint SHOULD retain stream prioritization state for a
+            period after streams become closed.  The longer state is retained, the lower the chance
+            that streams are assigned incorrect or default priority values.
+          </t>
+          <t>
+            This could create a large state burden for an endpoint, so this state MAY be limited.
+            An endpoint MAY apply a fixed upper limit on the number of closed streams for which
+            prioritization state is tracked to limit state exposure.  The amount of additional state
+            an endpoint maintains could be dependent on load; under high load, prioritization state
+            can be discarded to limit resource commitments.  In extreme cases, an endpoint could
+            even discard prioritization state for active or reserved streams. If a fixed limit is
+            applied, endpoints SHOULD maintain state for at least as many streams as allowed by
+            their setting for <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref>.
+          </t>
+          <t>
+            An endpoint receiving a <x:ref>PRIORITY</x:ref> frame that changes the priority of a
+            closed stream SHOULD alter the dependencies of the streams that depend on it, if it has
+            retained enough state to do so.
+          </t>
+        </section>
+
+        <section title="Default Priorities" anchor="pri-default">
+          <t>
+            Providing priority information is optional.  Streams are assigned a non-exclusive
+            dependency on stream 0x0 by default.  <xref target="PushResources">Pushed streams</xref>
+            initially depend on their associated stream.  In both cases, streams are assigned a
+            default weight of 16.
+          </t>
+        </section>
+      </section>
+
+      <section title="Error Handling">
+        <t>
+          HTTP/2 framing permits two classes of error:
+          <list style="symbols">
+            <t>
+              An error condition that renders the entire connection unusable is a connection error.
+            </t>
+            <t>
+              An error in an individual stream is a stream error.
+            </t>
+          </list>
+        </t>
+        <t>
+          A list of error codes is included in <xref target="ErrorCodes"/>.
+        </t>
+
+        <section anchor="ConnectionErrorHandler" title="Connection Error Handling">
+          <t>
+            A connection error is any error which prevents further processing of the framing layer,
+            or which corrupts any connection state.
+          </t>
+          <t>
+            An endpoint that encounters a connection error SHOULD first send a <x:ref>GOAWAY</x:ref>
+            frame (<xref target="GOAWAY"/>) with the stream identifier of the last stream that it
+            successfully received from its peer.  The <x:ref>GOAWAY</x:ref> frame includes an error
+            code that indicates why the connection is terminating.  After sending the
+            <x:ref>GOAWAY</x:ref> frame, the endpoint MUST close the TCP connection.
+          </t>
+          <t>
+            It is possible that the <x:ref>GOAWAY</x:ref> will not be reliably received by the
+            receiving endpoint (see <xref target="RFC7230" x:fmt=","
+            x:rel="#persistent.tear-down"/>).  In the event of a connection error,
+            <x:ref>GOAWAY</x:ref> only provides a best effort attempt to communicate with the peer
+            about why the connection is being terminated.
+          </t>
+          <t>
+            An endpoint can end a connection at any time.  In particular, an endpoint MAY choose to
+            treat a stream error as a connection error.  Endpoints SHOULD send a
+            <x:ref>GOAWAY</x:ref> frame when ending a connection, providing that circumstances
+            permit it.
+          </t>
+        </section>
+
+        <section anchor="StreamErrorHandler" title="Stream Error Handling">
+          <t>
+            A stream error is an error related to a specific stream that does not affect processing
+            of other streams.
+          </t>
+          <t>
+            An endpoint that detects a stream error sends a <x:ref>RST_STREAM</x:ref> frame (<xref
+            target="RST_STREAM"/>) that contains the stream identifier of the stream where the error
+            occurred.  The <x:ref>RST_STREAM</x:ref> frame includes an error code that indicates the
+            type of error.
+          </t>
+          <t>
+            A <x:ref>RST_STREAM</x:ref> is the last frame that an endpoint can send on a stream.
+            The peer that sends the <x:ref>RST_STREAM</x:ref> frame MUST be prepared to receive any
+            frames that were sent or enqueued for sending by the remote peer.  These frames can be
+            ignored, except where they modify connection state (such as the state maintained for
+            <xref target="HeaderBlock">header compression</xref>, or flow control).
+          </t>
+          <t>
+            Normally, an endpoint SHOULD NOT send more than one <x:ref>RST_STREAM</x:ref> frame for
+            any stream. However, an endpoint MAY send additional <x:ref>RST_STREAM</x:ref> frames if
+            it receives frames on a closed stream after more than a round-trip time.  This behavior
+            is permitted to deal with misbehaving implementations.
+          </t>
+          <t>
+            An endpoint MUST NOT send a <x:ref>RST_STREAM</x:ref> in response to an
+            <x:ref>RST_STREAM</x:ref> frame, to avoid looping.
+          </t>
+        </section>
+
+        <section title="Connection Termination">
+          <t>
+            If the TCP connection is closed or reset while streams remain in open or half closed
+            states, then the endpoint MUST assume that those streams were abnormally interrupted and
+            could be incomplete.
+          </t>
+        </section>
+      </section>
+
+      <section anchor="extensibility" title="Extending HTTP/2">
+        <t>
+          HTTP/2 permits extension of the protocol.  Protocol extensions can be used to provide
+          additional services or alter any aspect of the protocol, within the limitations described
+          in this section.  Extensions are effective only within the scope of a single HTTP/2
+          connection.
+        </t>
+        <t>
+          Extensions are permitted to use new <xref target="FrameHeader">frame types</xref>, new
+          <xref target="SettingValues">settings</xref>, or new <xref target="ErrorCodes">error
+          codes</xref>.  Registries are established for managing these extension points: <xref
+          target="iana-frames">frame types</xref>, <xref target="iana-settings">settings</xref> and
+          <xref target="iana-errors">error codes</xref>.
+        </t>
+        <t>
+          Implementations MUST ignore unknown or unsupported values in all extensible protocol
+          elements.  Implementations MUST discard frames that have unknown or unsupported types.
+          This means that any of these extension points can be safely used by extensions without
+          prior arrangement or negotiation.  However, extension frames that appear in the middle of
+          a <xref target="HeaderBlock">header block</xref> are not permitted; these MUST be treated
+          as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          However, extensions that could change the semantics of existing protocol components MUST
+          be negotiated before being used.  For example, an extension that changes the layout of the
+          <x:ref>HEADERS</x:ref> frame cannot be used until the peer has given a positive signal
+          that this is acceptable.  In this case, it could also be necessary to coordinate when the
+          revised layout comes into effect.  Note that treating any frame other than
+          <x:ref>DATA</x:ref> frames as flow controlled is such a change in semantics, and can only
+          be done through negotiation.
+        </t>
+        <t>
+          This document doesn't mandate a specific method for negotiating the use of an extension,
+          but notes that a <xref target="SettingValues">setting</xref> could be used for that
+          purpose.  If both peers set a value that indicates willingness to use the extension, then
+          the extension can be used.  If a setting is used for extension negotiation, the initial
+          value MUST be defined so that the extension is initially disabled.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="FrameTypes" title="Frame Definitions">
+      <t>
+        This specification defines a number of frame types, each identified by a unique 8-bit type
+        code. Each frame type serves a distinct purpose either in the establishment and management
+        of the connection as a whole, or of individual streams.
+      </t>
+      <t>
+        The transmission of specific frame types can alter the state of a connection. If endpoints
+        fail to maintain a synchronized view of the connection state, successful communication
+        within the connection will no longer be possible. Therefore, it is important that endpoints
+        have a shared comprehension of how the state is affected by the use of any given frame.
+      </t>
+
+      <section anchor="DATA" title="DATA">
+        <t>
+          DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated
+          with a stream. One or more DATA frames are used, for instance, to carry HTTP request or
+          response payloads.
+        </t>
+        <t>
+          DATA frames MAY also contain arbitrary padding.  Padding can be added to DATA frames to
+          obscure the size of messages.
+        </t>
+        <figure title="DATA Frame Payload">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +---------------+-----------------------------------------------+
+ |                            Data (*)                         ...
+ +---------------------------------------------------------------+
+ |                           Padding (*)                       ...
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The DATA frame contains the following fields:
+          <list style="hanging">
+            <t hangText="Pad Length:">
+              An 8-bit field containing the length of the frame padding in units of octets.  This
+              field is optional and is only present if the PADDED flag is set.
+            </t>
+            <t hangText="Data:">
+              Application data.  The amount of data is the remainder of the frame payload after
+              subtracting the length of the other fields that are present.
+            </t>
+            <t hangText="Padding:">
+              Padding octets that contain no application semantic value.  Padding octets MUST be set
+              to zero when sending and ignored when receiving.
+            </t>
+          </list>
+        </t>
+
+        <t>
+          The DATA frame defines the following flags:
+          <list style="hanging">
+            <t hangText="END_STREAM (0x1):">
+              Bit 1 being set indicates that this frame is the last that the endpoint will send for
+              the identified stream.  Setting this flag causes the stream to enter one of <xref
+              target="StreamStates">the "half closed" states or the "closed" state</xref>.
+            </t>
+            <t hangText="PADDED (0x8):">
+              Bit 4 being set indicates that the Pad Length field and any padding that it describes
+              is present.
+            </t>
+          </list>
+        </t>
+        <t>
+          DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
+          identifier field is 0x0, the recipient MUST respond with a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          DATA frames are subject to flow control and can only be sent when a stream is in the
+          "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
+          control, including Pad Length and Padding fields if present.  If a DATA frame is received
+          whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
+          with a <xref target="StreamErrorHandler">stream error</xref> of type
+          <x:ref>STREAM_CLOSED</x:ref>.
+        </t>
+        <t>
+          The total number of padding octets is determined by the value of the Pad Length field. If
+          the length of the padding is greater than the length of the frame payload, the recipient
+          MUST treat this as a <xref target="ConnectionErrorHandler">connection error</xref> of
+          type <x:ref>PROTOCOL_ERROR</x:ref>.
+          <list style="hanging">
+            <t hangText="Note:">
+              A frame can be increased in size by one octet by including a Pad Length field with a
+              value of zero.
+            </t>
+          </list>
+        </t>
+        <t>
+          Padding is a security feature; see <xref target="padding"/>.
+        </t>
+      </section>
+
+      <section anchor="HEADERS" title="HEADERS">
+        <t>
+          The HEADERS frame (type=0x1) is used to <xref target="StreamStates">open a stream</xref>,
+          and additionally carries a header block fragment. HEADERS frames can be sent on a stream
+          in the "open" or "half closed (remote)" states.
+        </t>
+        <figure title="HEADERS Frame Payload">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +-+-------------+-----------------------------------------------+
+ |E|                 Stream Dependency? (31)                     |
+ +-+-------------+-----------------------------------------------+
+ |  Weight? (8)  |
+ +-+-------------+-----------------------------------------------+
+ |                   Header Block Fragment (*)                 ...
+ +---------------------------------------------------------------+
+ |                           Padding (*)                       ...
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The HEADERS frame payload has the following fields:
+          <list style="hanging">
+            <t hangText="Pad Length:">
+              An 8-bit field containing the length of the frame padding in units of octets.  This
+              field is only present if the PADDED flag is set.
+            </t>
+            <t hangText="E:">
+              A single bit flag indicates that the stream dependency is exclusive, see <xref
+              target="StreamPriority"/>.  This field is only present if the PRIORITY flag is set.
+            </t>
+            <t hangText="Stream Dependency:">
+              A 31-bit stream identifier for the stream that this stream depends on, see <xref
+              target="StreamPriority"/>.  This field is only present if the PRIORITY flag is set.
+            </t>
+            <t hangText="Weight:">
+              An 8-bit weight for the stream, see <xref target="StreamPriority"/>.  Add one to the
+              value to obtain a weight between 1 and 256.  This field is only present if the
+              PRIORITY flag is set.
+            </t>
+            <t hangText="Header Block Fragment:">
+              A <xref target="HeaderBlock">header block fragment</xref>.
+            </t>
+            <t hangText="Padding:">
+              Padding octets that contain no application semantic value.  Padding octets MUST be set
+              to zero when sending and ignored when receiving.
+            </t>
+          </list>
+        </t>
+
+        <t>
+          The HEADERS frame defines the following flags:
+          <list style="hanging">
+            <x:lt hangText="END_STREAM (0x1):">
+              <t>
+                Bit 1 being set indicates that the <xref target="HeaderBlock">header block</xref> is
+                the last that the endpoint will send for the identified stream.  Setting this flag
+                causes the stream to enter one of <xref target="StreamStates">"half closed"
+                states</xref>.
+              </t>
+              <t>
+                A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
+                However, a HEADERS frame with the END_STREAM flag set can be followed by
+                <x:ref>CONTINUATION</x:ref> frames on the same stream.  Logically, the
+                <x:ref>CONTINUATION</x:ref> frames are part of the HEADERS frame.
+              </t>
+            </x:lt>
+            <x:lt hangText="END_HEADERS (0x4):">
+              <t>
+                Bit 3 being set indicates that this frame contains an entire <xref
+                target="HeaderBlock">header block</xref> and is not followed by any
+                <x:ref>CONTINUATION</x:ref> frames.
+              </t>
+              <t>
+                A HEADERS frame without the END_HEADERS flag set MUST be followed by a
+                <x:ref>CONTINUATION</x:ref> frame for the same stream.  A receiver MUST treat the
+                receipt of any other type of frame or a frame on a different stream as a <xref
+                target="ConnectionErrorHandler">connection error</xref> of type
+                <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+            </x:lt>
+            <x:lt hangText="PADDED (0x8):">
+              <t>
+                Bit 4 being set indicates that the Pad Length field and any padding that it
+                describes is present.
+              </t>
+            </x:lt>
+            <x:lt hangText="PRIORITY (0x20):">
+              <t>
+                Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
+                fields are present; see <xref target="StreamPriority"/>.
+              </t>
+            </x:lt>
+          </list>
+        </t>
+
+        <t>
+          The payload of a HEADERS frame contains a <xref target="HeaderBlock">header block
+          fragment</xref>.  A header block that does not fit within a HEADERS frame is continued in
+          a <xref target="CONTINUATION">CONTINUATION frame</xref>.
+        </t>
+
+        <t>
+          HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
+          stream identifier field is 0x0, the recipient MUST respond with a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+        <t>
+          The HEADERS frame changes the connection state as described in <xref
+          target="HeaderBlock"/>.
+        </t>
+
+        <t>
+          The HEADERS frame includes optional padding.  Padding fields and flags are identical to
+          those defined for <xref target="DATA">DATA frames</xref>.
+        </t>
+        <t>
+          Prioritization information in a HEADERS frame is logically equivalent to a separate
+          <x:ref>PRIORITY</x:ref> frame, but inclusion in HEADERS avoids the potential for churn in
+          stream prioritization when new streams are created.  Prioritization fields in HEADERS frames
+          subsequent to the first on a stream <xref target="reprioritize">reprioritize the
+          stream</xref>.
+        </t>
+      </section>
+
+      <section anchor="PRIORITY" title="PRIORITY">
+        <t>
+          The PRIORITY frame (type=0x2) specifies the <xref target="StreamPriority">sender-advised
+          priority of a stream</xref>.  It can be sent at any time for an existing stream, including
+          closed streams.  This enables reprioritization of existing streams.
+        </t>
+        <figure title="PRIORITY Frame Payload">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |E|                  Stream Dependency (31)                     |
+ +-+-------------+-----------------------------------------------+
+ |   Weight (8)  |
+ +-+-------------+
+]]></artwork>
+        </figure>
+        <t>
+          The payload of a PRIORITY frame contains the following fields:
+          <list style="hanging">
+            <t hangText="E:">
+              A single bit flag indicates that the stream dependency is exclusive, see <xref
+              target="StreamPriority"/>.
+            </t>
+            <t hangText="Stream Dependency:">
+              A 31-bit stream identifier for the stream that this stream depends on, see <xref
+              target="StreamPriority"/>.
+            </t>
+            <t hangText="Weight:">
+              An 8-bit weight for the identified stream dependency, see <xref
+              target="StreamPriority"/>.  Add one to the value to obtain a weight between 1 and 256.
+            </t>
+          </list>
+        </t>
+
+        <t>
+          The PRIORITY frame does not define any flags.
+        </t>
+
+        <t>
+          The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
+          with a stream identifier of 0x0, the recipient MUST respond with a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
+          "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
+          sent between consecutive frames that comprise a single <xref target="HeaderBlock">header
+          block</xref>.  Note that this frame could arrive after processing or frame sending has
+          completed, which would cause it to have no effect on the current stream.  For a stream
+          that is in the "half closed (remote)" or "closed" state, this frame can only affect
+          processing of the current stream and not frame transmission.
+        </t>
+        <t>
+          The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
+          This allows for the reprioritization of a group of dependent streams by altering the
+          priority of a parent stream, which might be closed.  However, a PRIORITY frame sent on a
+          closed stream risks being ignored due to the peer having discarded priority state
+          information for that stream.
+        </t>
+      </section>
+
+      <section anchor="RST_STREAM" title="RST_STREAM">
+        <t>
+          The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream.  When sent by
+          the initiator of a stream, it indicates that they wish to cancel the stream or that an
+          error condition has occurred.  When sent by the receiver of a stream, it indicates that
+          either the receiver is rejecting the stream, requesting that the stream be cancelled, or
+          that an error condition has occurred.
+        </t>
+        <figure title="RST_STREAM Frame Payload">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                        Error Code (32)                        |
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+
+        <t>
+          The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the <xref
+          target="ErrorCodes">error code</xref>.  The error code indicates why the stream is being
+          terminated.
+        </t>
+
+        <t>
+          The RST_STREAM frame does not define any flags.
+        </t>
+
+        <t>
+          The RST_STREAM frame fully terminates the referenced stream and causes it to enter the
+          closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send
+          additional frames for that stream, with the exception of <x:ref>PRIORITY</x:ref>. However,
+          after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process
+          additional frames sent on the stream that might have been sent by the peer prior to the
+          arrival of the RST_STREAM.
+        </t>
+
+        <t>
+          RST_STREAM frames MUST be associated with a stream.  If a RST_STREAM frame is received
+          with a stream identifier of 0x0, the recipient MUST treat this as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+        <t>
+          RST_STREAM frames MUST NOT be sent for a stream in the "idle" state.  If a RST_STREAM
+          frame identifying an idle stream is received, the recipient MUST treat this as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+      </section>
+
+      <section anchor="SETTINGS" title="SETTINGS">
+        <t>
+          The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+          communicate, such as preferences and constraints on peer behavior.  The SETTINGS frame is
+          also used to acknowledge the receipt of those parameters.  Individually, a SETTINGS
+          parameter can also be referred to as a "setting".
+        </t>
+        <t>
+          SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
+          which are used by the receiving peer. Different values for the same parameter can be
+          advertised by each peer. For example, a client might set a high initial flow control
+          window, whereas a server might set a lower value to conserve resources.
+        </t>
+
+        <t>
+          A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
+          sent at any other time by either endpoint over the lifetime of the connection.
+          Implementations MUST support all of the parameters defined by this specification.
+        </t>
+
+        <t>
+          Each parameter in a SETTINGS frame replaces any existing value for that parameter.
+          Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
+          frame does not need to maintain any state other than the current value of its
+          parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
+          a receiver.
+        </t>
+        <t>
+          SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
+          frame defines the following flag:
+          <list style="hanging">
+            <t hangText="ACK (0x1):">
+              Bit 1 being set indicates that this frame acknowledges receipt and application of the
+              peer's SETTINGS frame.  When this bit is set, the payload of the SETTINGS frame MUST
+              be empty.  Receipt of a SETTINGS frame with the ACK flag set and a length field value
+              other than 0 MUST be treated as a <xref target="ConnectionErrorHandler">connection
+              error</xref> of type <x:ref>FRAME_SIZE_ERROR</x:ref>.  For more information, see <xref
+              target="SettingsSync">Settings Synchronization</xref>.
+            </t>
+          </list>
+        </t>
+        <t>
+          SETTINGS frames always apply to a connection, never a single stream.  The stream
+          identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
+          frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
+          with a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          The SETTINGS frame affects connection state.  A badly formed or incomplete SETTINGS frame
+          MUST be treated as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+        <section title="SETTINGS Format" anchor="SettingFormat">
+          <t>
+            The payload of a SETTINGS frame consists of zero or more parameters, each consisting of
+            an unsigned 16-bit setting identifier and an unsigned 32-bit value.
+          </t>
+
+          <figure title="Setting Format">
+            <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |       Identifier (16)         |
+ +-------------------------------+-------------------------------+
+ |                        Value (32)                             |
+ +---------------------------------------------------------------+
+]]></artwork>
+          </figure>
+        </section>
+
+        <section anchor="SettingValues" title="Defined SETTINGS Parameters">
+          <t>
+            The following parameters are defined:
+            <list style="hanging">
+              <x:lt hangText="SETTINGS_HEADER_TABLE_SIZE (0x1):"
+                    anchor="SETTINGS_HEADER_TABLE_SIZE">
+                <t>
+                  Allows the sender to inform the remote endpoint of the maximum size of the header
+                  compression table used to decode header blocks, in octets. The encoder can select
+                  any size equal to or less than this value by using signaling specific to the
+                  header compression format inside a header block. The initial value is 4,096
+                  octets.
+                </t>
+              </x:lt>
+              <x:lt hangText="SETTINGS_ENABLE_PUSH (0x2):"
+                    anchor="SETTINGS_ENABLE_PUSH">
+                <t>
+                  This setting can be used to disable <xref target="PushResources">server
+                  push</xref>. An endpoint MUST NOT send a <x:ref>PUSH_PROMISE</x:ref> frame if it
+                  receives this parameter set to a value of 0. An endpoint that has both set this
+                  parameter to 0 and had it acknowledged MUST treat the receipt of a
+                  <x:ref>PUSH_PROMISE</x:ref> frame as a <xref
+                  target="ConnectionErrorHandler">connection error</xref> of type
+                  <x:ref>PROTOCOL_ERROR</x:ref>.
+                </t>
+                <t>
+                  The initial value is 1, which indicates that server push is permitted.  Any value
+                  other than 0 or 1 MUST be treated as a <xref
+                  target="ConnectionErrorHandler">connection error</xref> of type
+                  <x:ref>PROTOCOL_ERROR</x:ref>.
+                </t>
+              </x:lt>
+              <x:lt hangText="SETTINGS_MAX_CONCURRENT_STREAMS (0x3):"
+                    anchor="SETTINGS_MAX_CONCURRENT_STREAMS">
+                <t>
+                  Indicates the maximum number of concurrent streams that the sender will allow.
+                  This limit is directional: it applies to the number of streams that the sender
+                  permits the receiver to create. Initially there is no limit to this value.  It is
+                  recommended that this value be no smaller than 100, so as to not unnecessarily
+                  limit parallelism.
+                </t>
+                <t>
+                  A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+                  by endpoints.  A zero value does prevent the creation of new streams, however this
+                  can also happen for any limit that is exhausted with active streams.  Servers
+                  SHOULD only set a zero value for short durations; if a server does not wish to
+                  accept requests, closing the connection could be preferable.
+                </t>
+              </x:lt>
+              <x:lt hangText="SETTINGS_INITIAL_WINDOW_SIZE (0x4):"
+                    anchor="SETTINGS_INITIAL_WINDOW_SIZE">
+                <t>
+                  Indicates the sender's initial window size (in octets) for stream level flow
+                  control.  The initial value is 2<x:sup>16</x:sup>-1 (65,535) octets.
+                </t>
+                <t>
+                  This setting affects the window size of all streams, including existing streams,
+                  see <xref target="InitialWindowSize"/>.
+                </t>
+                <t>
+                  Values above the maximum flow control window size of 2<x:sup>31</x:sup>-1 MUST
+                  be treated as a <xref target="ConnectionErrorHandler">connection error</xref> of
+                  type <x:ref>FLOW_CONTROL_ERROR</x:ref>.
+                </t>
+              </x:lt>
+              <x:lt hangText="SETTINGS_MAX_FRAME_SIZE (0x5):"
+                    anchor="SETTINGS_MAX_FRAME_SIZE">
+                <t>
+                  Indicates the size of the largest frame payload that the sender is willing to
+                  receive, in octets.
+                </t>
+                <t>
+                  The initial value is 2<x:sup>14</x:sup> (16,384) octets.  The value advertised by
+                  an endpoint MUST be between this initial value and the maximum allowed frame size
+                  (2<x:sup>24</x:sup>-1 or 16,777,215 octets), inclusive.  Values outside this range
+                  MUST be treated as a <xref target="ConnectionErrorHandler">connection error</xref>
+                  of type <x:ref>PROTOCOL_ERROR</x:ref>.
+                </t>
+              </x:lt>
+              <x:lt hangText="SETTINGS_MAX_HEADER_LIST_SIZE (0x6):"
+                    anchor="SETTINGS_MAX_HEADER_LIST_SIZE">
+                <t>
+                  This advisory setting informs a peer of the maximum size of header list that the
+                  sender is prepared to accept, in octets. The value is based on the uncompressed
+                  size of header fields, including the length of the name and value in octets plus
+                  an overhead of 32 octets for each header field.
+                </t>
+                <t>
+                  For any given request, a lower limit than what is advertised MAY be enforced.  The
+                  initial value of this setting is unlimited.
+                </t>
+              </x:lt>
+            </list>
+          </t>
+          <t>
+            An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+            MUST ignore that setting.
+          </t>
+        </section>
+
+        <section anchor="SettingsSync" title="Settings Synchronization">
+          <t>
+            Most values in SETTINGS benefit from or require an understanding of when the peer has
+            received and applied the changed parameter values. In order to provide
+            such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag
+            is not set MUST apply the updated parameters as soon as possible upon receipt.
+          </t>
+          <t>
+            The values in the SETTINGS frame MUST be processed in the order they appear, with no
+            other frame processing between values.  Unsupported parameters MUST be ignored.  Once
+            all values have been processed, the recipient MUST immediately emit a SETTINGS frame
+            with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender
+            of the altered parameters can rely on the setting having been applied.
+          </t>
+          <t>
+            If the sender of a SETTINGS frame does not receive an acknowledgement within a
+            reasonable amount of time, it MAY issue a <xref
+            target="ConnectionErrorHandler">connection error</xref> of type
+            <x:ref>SETTINGS_TIMEOUT</x:ref>.
+          </t>
+        </section>
+      </section>
+
+      <section anchor="PUSH_PROMISE" title="PUSH_PROMISE">
+        <t>
+          The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
+          streams the sender intends to initiate.  The PUSH_PROMISE frame includes the unsigned
+          31-bit identifier of the stream the endpoint plans to create along with a set of headers
+          that provide additional context for the stream.  <xref target="PushResources"/> contains a
+          thorough description of the use of PUSH_PROMISE frames.
+        </t>
+
+        <figure title="PUSH_PROMISE Payload Format">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Pad Length? (8)|
+ +-+-------------+-----------------------------------------------+
+ |R|                  Promised Stream ID (31)                    |
+ +-+-----------------------------+-------------------------------+
+ |                   Header Block Fragment (*)                 ...
+ +---------------------------------------------------------------+
+ |                           Padding (*)                       ...
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The PUSH_PROMISE frame payload has the following fields:
+          <list style="hanging">
+            <t hangText="Pad Length:">
+              An 8-bit field containing the length of the frame padding in units of octets.  This
+              field is only present if the PADDED flag is set.
+            </t>
+            <t hangText="R:">
+              A single reserved bit.
+            </t>
+            <t hangText="Promised Stream ID:">
+              An unsigned 31-bit integer that identifies the stream that is reserved by the
+              PUSH_PROMISE.  The promised stream identifier MUST be a valid choice for the next
+              stream sent by the sender (see <xref target="StreamIdentifiers">new stream
+              identifier</xref>).
+            </t>
+            <t hangText="Header Block Fragment:">
+              A <xref target="HeaderBlock">header block fragment</xref> containing request header
+              fields.
+            </t>
+            <t hangText="Padding:">
+              Padding octets.
+            </t>
+          </list>
+        </t>
+
+        <t>
+          The PUSH_PROMISE frame defines the following flags:
+          <list style="hanging">
+            <x:lt hangText="END_HEADERS (0x4):">
+              <t>
+                Bit 3 being set indicates that this frame contains an entire <xref
+                target="HeaderBlock">header block</xref> and is not followed by any
+                <x:ref>CONTINUATION</x:ref> frames.
+              </t>
+              <t>
+                A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a
+                CONTINUATION frame for the same stream.  A receiver MUST treat the receipt of any
+                other type of frame or a frame on a different stream as a <xref
+                target="ConnectionErrorHandler">connection error</xref> of type
+                <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+            </x:lt>
+            <x:lt hangText="PADDED (0x8):">
+              <t>
+                Bit 4 being set indicates that the Pad Length field and any padding that it
+                describes is present.
+              </t>
+            </x:lt>
+          </list>
+        </t>
+
+        <t>
+          PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream
+          identifier of a PUSH_PROMISE frame indicates the stream it is associated with.  If the
+          stream identifier field specifies the value 0x0, a recipient MUST respond with a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+        <t>
+          Promised streams are not required to be used in the order they are promised.  The
+          PUSH_PROMISE only reserves stream identifiers for later use.
+        </t>
+
+        <t>
+          PUSH_PROMISE MUST NOT be sent if the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting of the
+          peer endpoint is set to 0.  An endpoint that has set this setting and has received
+          acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a
+          <x:ref>RST_STREAM</x:ref> referencing the promised stream identifier back to the sender of
+          the PUSH_PROMISE.
+        </t>
+
+       <t>
+          A PUSH_PROMISE frame modifies the connection state in two ways.  The inclusion of a <xref
+          target="HeaderBlock">header block</xref> potentially modifies the state maintained for
+          header compression.  PUSH_PROMISE also reserves a stream for later use, causing the
+          promised stream to enter the "reserved" state.  A sender MUST NOT send a PUSH_PROMISE on a
+          stream unless that stream is either "open" or "half closed (remote)"; the sender MUST
+          ensure that the promised stream is a valid choice for a <xref
+          target="StreamIdentifiers">new stream identifier</xref> (that is, the promised stream MUST
+          be in the "idle" state).
+        </t>
+        <t>
+          Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream
+          state to become indeterminate.  A receiver MUST treat the receipt of a PUSH_PROMISE on a
+          stream that is neither "open" nor "half closed (local)" as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.  However, an endpoint that has sent
+          <x:ref>RST_STREAM</x:ref> on the associated stream MUST handle PUSH_PROMISE frames that
+          might have been created before the <x:ref>RST_STREAM</x:ref> frame is received and
+          processed.
+        </t>
+        <t>
+          A receiver MUST treat the receipt of a PUSH_PROMISE that promises an <xref
+          target="StreamIdentifiers">illegal stream identifier</xref> (that is, an identifier for a
+          stream that is not currently in the "idle" state) as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+        <t>
+          The PUSH_PROMISE frame includes optional padding.  Padding fields and flags are identical
+          to those defined for <xref target="DATA">DATA frames</xref>.
+        </t>
+      </section>
+
+      <section anchor="PING" title="PING">
+        <t>
+          The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the
+          sender, as well as determining whether an idle connection is still functional.  PING
+          frames can be sent from any endpoint.
+        </t>
+        <figure title="PING Payload Format">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                                                               |
+ |                      Opaque Data (64)                         |
+ |                                                               |
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+
+        <t>
+          In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.
+          A sender can include any value it chooses and use those bytes in any fashion.
+        </t>
+        <t>
+          Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with
+          the ACK flag set in response, with an identical payload.  PING responses SHOULD be given
+          higher priority than any other frame.
+        </t>
+
+        <t>
+          The PING frame defines the following flags:
+          <list style="hanging">
+            <t hangText="ACK (0x1):">
+              Bit 1 being set indicates that this PING frame is a PING response.  An endpoint MUST
+              set this flag in PING responses.  An endpoint MUST NOT respond to PING frames
+              containing this flag.
+            </t>
+          </list>
+        </t>
+        <t>
+          PING frames are not associated with any individual stream. If a PING frame is received
+          with a stream identifier field value other than 0x0, the recipient MUST respond with a
+          <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          Receipt of a PING frame with a length field value other than 8 MUST be treated as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>FRAME_SIZE_ERROR</x:ref>.
+        </t>
+
+      </section>
+
+      <section anchor="GOAWAY" title="GOAWAY">
+        <t>
+          The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+          connection.  GOAWAY can be sent by either the client or the server.  Once sent, the sender
+          will ignore frames sent on any new streams with identifiers higher than the included last
+          stream identifier.  Receivers of a GOAWAY frame MUST NOT open additional streams on the
+          connection, although a new connection can be established for new streams.
+        </t>
+        <t>
+          The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+          streams, while still finishing processing of previously established streams.  This enables
+          administrative actions, like server maintenance.
+        </t>
+        <t>
+          There is an inherent race condition between an endpoint starting new streams and the
+          remote sending a GOAWAY frame.  To deal with this case, the GOAWAY contains the stream
+          identifier of the last peer-initiated stream which was or might be processed on the
+          sending endpoint in this connection.  For instance, if the server sends a GOAWAY frame,
+          the identified stream is the highest numbered stream initiated by the client.
+        </t>
+        <t>
+          If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+          than what is indicated in the GOAWAY frame, those streams are not or will not be
+          processed.  The receiver of the GOAWAY frame can treat the streams as though they had
+          never been created at all, thereby allowing those streams to be retried later on a new
+          connection.
+        </t>
+        <t>
+          Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+          can know whether a stream has been partially processed or not.  For example, if an HTTP
+          client sends a POST at the same time that a server closes a connection, the client cannot
+          know if the server started to process that POST request if the server does not send a
+          GOAWAY frame to indicate what streams it might have acted on.
+        </t>
+        <t>
+          An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+          peers.
+        </t>
+
+        <figure title="GOAWAY Payload Format">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|                  Last-Stream-ID (31)                        |
+ +-+-------------------------------------------------------------+
+ |                      Error Code (32)                          |
+ +---------------------------------------------------------------+
+ |                  Additional Debug Data (*)                    |
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The GOAWAY frame does not define any flags.
+        </t>
+        <t>
+          The GOAWAY frame applies to the connection, not a specific stream.  An endpoint MUST treat
+          a <x:ref>GOAWAY</x:ref> frame with a stream identifier other than 0x0 as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          The last stream identifier in the GOAWAY frame contains the highest numbered stream
+          identifier for which the sender of the GOAWAY frame might have taken some action on, or
+          might yet take action on.  All streams up to and including the identified stream might
+          have been processed in some way.  The last stream identifier can be set to 0 if no streams
+          were processed.
+          <list style="hanging">
+            <t hangText="Note:">
+              In this context, "processed" means that some data from the stream was passed to some
+              higher layer of software that might have taken some action as a result.
+            </t>
+          </list>
+          If a connection terminates without a GOAWAY frame, the last stream identifier is
+          effectively the highest possible stream identifier.
+        </t>
+        <t>
+          On streams with lower or equal numbered identifiers that were not closed completely prior
+          to the connection being closed, re-attempting requests, transactions, or any protocol
+          activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
+          DELETE.  Any protocol activity that uses higher numbered streams can be safely retried
+          using a new connection.
+        </t>
+        <t>
+          Activity on streams numbered lower or equal to the last stream identifier might still
+          complete successfully.  The sender of a GOAWAY frame might gracefully shut down a
+          connection by sending a GOAWAY frame, maintaining the connection in an open state until
+          all in-progress streams complete.
+        </t>
+        <t>
+          An endpoint MAY send multiple GOAWAY frames if circumstances change.  For instance, an
+          endpoint that sends GOAWAY with <x:ref>NO_ERROR</x:ref> during graceful shutdown could
+          subsequently encounter a condition that requires immediate termination of the connection.
+          The last stream identifier from the last GOAWAY frame received indicates which streams
+          could have been acted upon.  Endpoints MUST NOT increase the value they send in the last
+          stream identifier, since the peers might already have retried unprocessed requests on
+          another connection.
+        </t>
+        <t>
+          A client that is unable to retry requests loses all requests that are in flight when the
+          server closes the connection.  This is especially true for intermediaries that might
+          not be serving clients using HTTP/2.  A server that is attempting to gracefully shut down
+          a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
+          2<x:sup>31</x:sup>-1 and a <x:ref>NO_ERROR</x:ref> code.  This signals to the client that
+          a shutdown is imminent and that no further requests can be initiated.  After waiting at
+          least one round trip time, the server can send another GOAWAY frame with an updated last
+          stream identifier.  This ensures that a connection can be cleanly shut down without losing
+          requests.
+        </t>
+
+        <t>
+          After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
+          higher than the identified last stream.  However, any frames that alter connection state
+          cannot be completely ignored.  For instance, <x:ref>HEADERS</x:ref>,
+          <x:ref>PUSH_PROMISE</x:ref> and <x:ref>CONTINUATION</x:ref> frames MUST be minimally
+          processed to ensure the state maintained for header compression is consistent (see <xref
+          target="HeaderBlock"/>); similarly DATA frames MUST be counted toward the connection flow
+          control window.  Failure to process these frames can cause flow control or header
+          compression state to become unsynchronized.
+        </t>
+
+        <t>
+          The GOAWAY frame also contains a 32-bit <xref target="ErrorCodes">error code</xref> that
+          contains the reason for closing the connection.
+        </t>
+        <t>
+          Endpoints MAY append opaque data to the payload of any GOAWAY frame.  Additional debug
+          data is intended for diagnostic purposes only and carries no semantic value.  Debug
+          information could contain security- or privacy-sensitive data.  Logged or otherwise
+          persistently stored debug data MUST have adequate safeguards to prevent unauthorized
+          access.
+        </t>
+      </section>
+
+      <section anchor="WINDOW_UPDATE" title="WINDOW_UPDATE">
+        <t>
+          The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see <xref
+          target="FlowControl"/> for an overview.
+        </t>
+        <t>
+          Flow control operates at two levels: on each individual stream and on the entire
+          connection.
+        </t>
+        <t>
+          Both types of flow control are hop-by-hop; that is, only between the two endpoints.
+          Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
+          However, throttling of data transfer by any receiver can indirectly cause the propagation
+          of flow control information toward the original sender.
+        </t>
+        <t>
+          Flow control only applies to frames that are identified as being subject to flow control.
+          Of the frame types defined in this document, this includes only <x:ref>DATA</x:ref> frames.
+          Frames that are exempt from flow control MUST be accepted and processed, unless the
+          receiver is unable to assign resources to handling the frame.  A receiver MAY respond with
+          a <xref target="StreamErrorHandler">stream error</xref> or <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>FLOW_CONTROL_ERROR</x:ref> if it is unable to accept a frame.
+        </t>
+        <figure title="WINDOW_UPDATE Payload Format">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|              Window Size Increment (31)                     |
+ +-+-------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+          indicating the number of octets that the sender can transmit in addition to the existing
+          flow control window.  The legal range for the increment to the flow control window is 1 to
+          2<x:sup>31</x:sup>-1 (0x7fffffff) octets.
+        </t>
+        <t>
+          The WINDOW_UPDATE frame does not define any flags.
+        </t>
+        <t>
+          The WINDOW_UPDATE frame can be specific to a stream or to the entire connection.  In the
+          former case, the frame's stream identifier indicates the affected stream; in the latter,
+          the value "0" indicates that the entire connection is the subject of the frame.
+        </t>
+        <t>
+          A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+          increment of 0 as a <xref target="StreamErrorHandler">stream error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>; errors on the connection flow control window MUST be
+          treated as a <xref target="ConnectionErrorHandler">connection error</xref>.
+        </t>
+        <t>
+          WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+          This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+          or "closed" stream.  A receiver MUST NOT treat this as an error, see <xref
+          target="StreamStates"/>.
+        </t>
+        <t>
+          A receiver that receives a flow controlled frame MUST always account for its contribution
+          against the connection flow control window, unless the receiver treats this as a <xref
+          target="ConnectionErrorHandler">connection error</xref>.  This is necessary even if the
+          frame is in error.  Since the sender counts the frame toward the flow control window, if
+          the receiver does not, the flow control window at sender and receiver can become
+          different.
+        </t>
+
+        <section title="The Flow Control Window">
+          <t>
+            Flow control in HTTP/2 is implemented using a window kept by each sender on every
+            stream. The flow control window is a simple integer value that indicates how many octets
+            of data the sender is permitted to transmit; as such, its size is a measure of the
+            buffering capacity of the receiver.
+          </t>
+          <t>
+            Two flow control windows are applicable: the stream flow control window and the
+            connection flow control window.  The sender MUST NOT send a flow controlled frame with a
+            length that exceeds the space available in either of the flow control windows advertised
+            by the receiver.  Frames with zero length with the END_STREAM flag set (that is, an
+            empty <x:ref>DATA</x:ref> frame) MAY be sent if there is no available space in either
+            flow control window.
+          </t>
+          <t>
+            For flow control calculations, the 9 octet frame header is not counted.
+          </t>
+          <t>
+            After sending a flow controlled frame, the sender reduces the space available in both
+            windows by the length of the transmitted frame.
+          </t>
+          <t>
+            The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+            space in flow control windows.  Separate WINDOW_UPDATE frames are sent for the stream
+            and connection level flow control windows.
+          </t>
+          <t>
+            A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+            amount specified in the frame.
+          </t>
+          <t>
+            A sender MUST NOT allow a flow control window to exceed 2<x:sup>31</x:sup>-1 octets.
+            If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+            maximum it MUST terminate either the stream or the connection, as appropriate.  For
+            streams, the sender sends a <x:ref>RST_STREAM</x:ref> with the error code of
+            <x:ref>FLOW_CONTROL_ERROR</x:ref> code; for the connection, a <x:ref>GOAWAY</x:ref>
+            frame with a <x:ref>FLOW_CONTROL_ERROR</x:ref> code.
+          </t>
+          <t>
+            Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+            completely asynchronous with respect to each other. This property allows a receiver to
+            aggressively update the window size kept by the sender to prevent streams from stalling.
+          </t>
+        </section>
+
+        <section anchor="InitialWindowSize" title="Initial Flow Control Window Size">
+          <t>
+            When an HTTP/2 connection is first established, new streams are created with an initial
+            flow control window size of 65,535 octets. The connection flow control window is 65,535
+            octets. Both endpoints can adjust the initial window size for new streams by including
+            a value for <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> in the <x:ref>SETTINGS</x:ref>
+            frame that forms part of the connection preface. The connection flow control window can
+            only be changed using WINDOW_UPDATE frames.
+          </t>
+          <t>
+            Prior to receiving a <x:ref>SETTINGS</x:ref> frame that sets a value for
+            <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref>, an endpoint can only use the default
+            initial window size when sending flow controlled frames.  Similarly, the connection flow
+            control window is set to the default initial window size until a WINDOW_UPDATE frame is
+            received.
+          </t>
+          <t>
+            A <x:ref>SETTINGS</x:ref> frame can alter the initial flow control window size for all
+            current streams. When the value of <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> changes,
+            a receiver MUST adjust the size of all stream flow control windows that it maintains by
+            the difference between the new value and the old value.
+          </t>
+          <t>
+            A change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> can cause the available space in
+            a flow control window to become negative.  A sender MUST track the negative flow control
+            window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE
+            frames that cause the flow control window to become positive.
+          </t>
+          <t>
+            For example, if the client sends 60KB immediately on connection establishment, and the
+            server sets the initial window size to be 16KB, the client will recalculate the
+            available flow control window to be -44KB on receipt of the <x:ref>SETTINGS</x:ref>
+            frame.  The client retains a negative flow control window until WINDOW_UPDATE frames
+            restore the window to being positive, after which the client can resume sending.
+          </t>
+          <t>
+            A <x:ref>SETTINGS</x:ref> frame cannot alter the connection flow control window.
+          </t>
+          <t>
+            An endpoint MUST treat a change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> that
+            causes any flow control window to exceed the maximum size as a <xref
+            target="ConnectionErrorHandler">connection error</xref> of type
+            <x:ref>FLOW_CONTROL_ERROR</x:ref>.
+          </t>
+        </section>
+
+        <section title="Reducing the Stream Window Size">
+          <t>
+            A receiver that wishes to use a smaller flow control window than the current size can
+            send a new <x:ref>SETTINGS</x:ref> frame.  However, the receiver MUST be prepared to
+            receive data that exceeds this window size, since the sender might send data that
+            exceeds the lower limit prior to processing the <x:ref>SETTINGS</x:ref> frame.
+          </t>
+          <t>
+            After sending a SETTINGS frame that reduces the initial flow control window size, a
+            receiver has two options for handling streams that exceed flow control limits:
+            <list style="numbers">
+              <t>
+                The receiver can immediately send <x:ref>RST_STREAM</x:ref> with
+                <x:ref>FLOW_CONTROL_ERROR</x:ref> error code for the affected streams.
+              </t>
+              <t>
+                The receiver can accept the streams and tolerate the resulting head of line
+                blocking, sending WINDOW_UPDATE frames as it consumes data.
+              </t>
+            </list>
+          </t>
+        </section>
+      </section>
+
+      <section anchor="CONTINUATION" title="CONTINUATION">
+        <t>
+          The CONTINUATION frame (type=0x9) is used to continue a sequence of <xref
+          target="HeaderBlock">header block fragments</xref>.  Any number of CONTINUATION frames can
+          be sent on an existing stream, as long as the preceding frame is on the same stream and is
+          a <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the
+          END_HEADERS flag set.
+        </t>
+
+        <figure title="CONTINUATION Frame Payload">
+          <artwork type="inline"><![CDATA[
+  0                   1                   2                   3
+  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                   Header Block Fragment (*)                 ...
+ +---------------------------------------------------------------+
+]]></artwork>
+        </figure>
+        <t>
+          The CONTINUATION frame payload contains a <xref target="HeaderBlock">header block
+          fragment</xref>.
+        </t>
+
+        <t>
+          The CONTINUATION frame defines the following flag:
+          <list style="hanging">
+            <x:lt hangText="END_HEADERS (0x4):">
+              <t>
+                Bit 3 being set indicates that this frame ends a <xref target="HeaderBlock">header
+                block</xref>.
+              </t>
+              <t>
+                If the END_HEADERS bit is not set, this frame MUST be followed by another
+                CONTINUATION frame.  A receiver MUST treat the receipt of any other type of frame or
+                a frame on a different stream as a <xref target="ConnectionErrorHandler">connection
+                error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+              </t>
+            </x:lt>
+          </list>
+        </t>
+
+        <t>
+          The CONTINUATION frame changes the connection state as defined in <xref
+          target="HeaderBlock" />.
+        </t>
+
+        <t>
+          CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received
+          whose stream identifier field is 0x0, the recipient MUST respond with a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type PROTOCOL_ERROR.
+        </t>
+
+        <t>
+          A CONTINUATION frame MUST be preceded by a <x:ref>HEADERS</x:ref>,
+          <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the END_HEADERS flag set.  A
+          recipient that observes violation of this rule MUST respond with a <xref
+          target="ConnectionErrorHandler"> connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="ErrorCodes" title="Error Codes">
+      <t>
+        Error codes are 32-bit fields that are used in <x:ref>RST_STREAM</x:ref> and
+        <x:ref>GOAWAY</x:ref> frames to convey the reasons for the stream or connection error.
+      </t>
+
+      <t>
+        Error codes share a common code space.  Some error codes apply only to either streams or the
+        entire connection and have no defined semantics in the other context.
+      </t>
+
+      <t>
+        The following error codes are defined:
+        <list style="hanging">
+          <t hangText="NO_ERROR (0x0):" anchor="NO_ERROR">
+            The associated condition is not as a result of an error.  For example, a
+            <x:ref>GOAWAY</x:ref> might include this code to indicate graceful shutdown of a
+            connection.
+          </t>
+          <t hangText="PROTOCOL_ERROR (0x1):" anchor="PROTOCOL_ERROR">
+            The endpoint detected an unspecific protocol error.  This error is for use when a more
+            specific error code is not available.
+          </t>
+          <t hangText="INTERNAL_ERROR (0x2):" anchor="INTERNAL_ERROR">
+            The endpoint encountered an unexpected internal error.
+          </t>
+          <t hangText="FLOW_CONTROL_ERROR (0x3):" anchor="FLOW_CONTROL_ERROR">
+            The endpoint detected that its peer violated the flow control protocol.
+          </t>
+          <t hangText="SETTINGS_TIMEOUT (0x4):" anchor="SETTINGS_TIMEOUT">
+            The endpoint sent a <x:ref>SETTINGS</x:ref> frame, but did not receive a response in a
+            timely manner.  See <xref target="SettingsSync">Settings Synchronization</xref>.
+          </t>
+          <t hangText="STREAM_CLOSED (0x5):" anchor="STREAM_CLOSED">
+            The endpoint received a frame after a stream was half closed.
+          </t>
+          <t hangText="FRAME_SIZE_ERROR (0x6):" anchor="FRAME_SIZE_ERROR">
+            The endpoint received a frame with an invalid size.
+          </t>
+          <t hangText="REFUSED_STREAM (0x7):" anchor="REFUSED_STREAM">
+            The endpoint refuses the stream prior to performing any application processing, see
+            <xref target="Reliability"/> for details.
+          </t>
+          <t hangText="CANCEL (0x8):" anchor="CANCEL">
+            Used by the endpoint to indicate that the stream is no longer needed.
+          </t>
+          <t hangText="COMPRESSION_ERROR (0x9):" anchor="COMPRESSION_ERROR">
+            The endpoint is unable to maintain the header compression context for the connection.
+          </t>
+          <t hangText="CONNECT_ERROR (0xa):" anchor="CONNECT_ERROR">
+            The connection established in response to a <xref target="CONNECT">CONNECT
+            request</xref> was reset or abnormally closed.
+          </t>
+          <t hangText="ENHANCE_YOUR_CALM (0xb):" anchor="ENHANCE_YOUR_CALM">
+            The endpoint detected that its peer is exhibiting a behavior that might be generating
+            excessive load.
+          </t>
+          <t hangText="INADEQUATE_SECURITY (0xc):" anchor="INADEQUATE_SECURITY">
+            The underlying transport has properties that do not meet minimum security
+            requirements (see <xref target="TLSUsage"/>).
+          </t>
+        </list>
+      </t>
+      <t>
+        Unknown or unsupported error codes MUST NOT trigger any special behavior.  These MAY be
+        treated by an implementation as being equivalent to <x:ref>INTERNAL_ERROR</x:ref>.
+      </t>
+    </section>
+
+    <section anchor="HTTPLayer" title="HTTP Message Exchanges">
+      <t>
+        HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
+        that, from the application perspective, the features of the protocol are largely
+        unchanged. To achieve this, all request and response semantics are preserved, although the
+        syntax of conveying those semantics has changed.
+      </t>
+      <t>
+        Thus, the specification and requirements of HTTP/1.1 Semantics and Content <xref
+        target="RFC7231"/>, Conditional Requests <xref target="RFC7232"/>, Range Requests <xref
+        target="RFC7233"/>, Caching <xref target="RFC7234"/> and Authentication <xref
+        target="RFC7235"/> are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax
+        and Routing <xref target="RFC7230"/>, such as the HTTP and HTTPS URI schemes, are also
+        applicable in HTTP/2, but the expression of those semantics for this protocol are defined
+        in the sections below.
+      </t>
+
+      <section anchor="HttpSequence" title="HTTP Request/Response Exchange">
+        <t>
+          A client sends an HTTP request on a new stream, using a previously unused <xref
+          target="StreamIdentifiers">stream identifier</xref>.  A server sends an HTTP response on
+          the same stream as the request.
+        </t>
+        <t>
+          An HTTP message (request or response) consists of:
+          <list style="numbers">
+            <t>
+              for a response only, zero or more <x:ref>HEADERS</x:ref> frames (each followed by zero
+              or more <x:ref>CONTINUATION</x:ref> frames) containing the message headers of
+              informational (1xx) HTTP responses (see <xref target="RFC7230" x:fmt=","
+              x:rel="#header.fields"/> and <xref target="RFC7231" x:fmt="," x:rel="#status.1xx"/>),
+              and
+            </t>
+            <t>
+              one <x:ref>HEADERS</x:ref> frame (followed by zero or more <x:ref>CONTINUATION</x:ref>
+              frames) containing the message headers (see <xref target="RFC7230" x:fmt=","
+              x:rel="#header.fields"/>), and
+            </t>
+            <t>
+              zero or more <x:ref>DATA</x:ref> frames containing the message payload (see <xref
+              target="RFC7230" x:fmt="," x:rel="#message.body"/>), and
+            </t>
+            <t>
+              optionally, one <x:ref>HEADERS</x:ref> frame, followed by zero or more
+              <x:ref>CONTINUATION</x:ref> frames containing the trailer-part, if present (see <xref
+              target="RFC7230" x:fmt="," x:rel="#chunked.trailer.part"/>).
+            </t>
+          </list>
+          The last frame in the sequence bears an END_STREAM flag, noting that a
+          <x:ref>HEADERS</x:ref> frame bearing the END_STREAM flag can be followed by
+          <x:ref>CONTINUATION</x:ref> frames that carry any remaining portions of the header block.
+        </t>
+        <t>
+          Other frames (from any stream) MUST NOT occur between either <x:ref>HEADERS</x:ref> frame
+          and any <x:ref>CONTINUATION</x:ref> frames that might follow.
+        </t>
+
+        <t>
+          Trailing header fields are carried in a header block that also terminates the stream.
+          That is, a sequence starting with a <x:ref>HEADERS</x:ref> frame, followed by zero or more
+          <x:ref>CONTINUATION</x:ref> frames, where the <x:ref>HEADERS</x:ref> frame bears an
+          END_STREAM flag.  Header blocks after the first that do not terminate the stream are not
+          part of an HTTP request or response.
+        </t>
+        <t>
+          A <x:ref>HEADERS</x:ref> frame (and associated <x:ref>CONTINUATION</x:ref> frames) can
+          only appear at the start or end of a stream.  An endpoint that receives a
+          <x:ref>HEADERS</x:ref> frame without the END_STREAM flag set after receiving a final
+          (non-informational) status code MUST treat the corresponding request or response as <xref
+          target="malformed">malformed</xref>.
+        </t>
+
+        <t>
+          An HTTP request/response exchange fully consumes a single stream.  A request starts with
+          the <x:ref>HEADERS</x:ref> frame that puts the stream into an "open" state. The request
+          ends with a frame bearing END_STREAM, which causes the stream to become "half closed
+          (local)" for the client and "half closed (remote)" for the server.  A response starts with
+          a <x:ref>HEADERS</x:ref> frame and ends with a frame bearing END_STREAM, which places the
+          stream in the "closed" state.
+          <!-- Yes, the response might complete before the request does, but that's not a detail
+               we need to expand upon.  It's complicated enough explaining this as it is.  -->
+        </t>
+
+        <section anchor="informational-responses" title="Upgrading From HTTP/2">
+          <t>
+            HTTP/2 removes support for the 101 (Switching Protocols) informational status code
+            (<xref target="RFC7231" x:fmt="," x:rel="#status.101"/>).
+          </t>
+          <t>
+            The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
+            Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
+            their use (see <xref target="starting"/>).
+          </t>
+        </section>
+
+        <section anchor="HttpHeaders" title="HTTP Header Fields">
+          <t>
+            HTTP header fields carry information as a series of key-value pairs. For a listing of
+            registered HTTP headers, see the Message Header Field Registry maintained at <eref
+            target="https://www.iana.org/assignments/message-headers"/>.
+          </t>
+
+          <section anchor="PseudoHeaderFields" title="Pseudo-Header Fields">
+            <t>
+              While HTTP/1.x used the message start-line (see <xref target="RFC7230" x:fmt=","
+              x:rel="#start.line"/>) to convey the target URI and method of the request, and the
+              status code for the response, HTTP/2 uses special pseudo-header fields beginning with
+              ':' character (ASCII 0x3a) for this purpose.
+            </t>
+            <t>
+              Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
+              pseudo-header fields other than those defined in this document.
+            </t>
+            <t>
+              Pseudo-header fields are only valid in the context in which they are defined.
+              Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
+              fields defined for responses MUST NOT appear in requests.  Pseudo-header fields MUST
+              NOT appear in trailers.  Endpoints MUST treat a request or response that contains
+              undefined or invalid pseudo-header fields as <xref
+              target="malformed">malformed</xref>.
+            </t>
+            <t>
+              Just as in HTTP/1.x, header field names are strings of ASCII characters that are
+              compared in a case-insensitive fashion. However, header field names MUST be converted
+              to lowercase prior to their encoding in HTTP/2. A request or response containing
+              uppercase header field names MUST be treated as <xref
+              target="malformed">malformed</xref>.
+            </t>
+            <t>
+              All pseudo-header fields MUST appear in the header block before regular header fields.
+              Any request or response that contains a pseudo-header field that appears in a header
+              block after a regular header field MUST be treated as <xref
+              target="malformed">malformed</xref>.
+            </t>
+          </section>
+
+          <section title="Connection-Specific Header Fields">
+            <t>
+              HTTP/2 does not use the <spanx style="verb">Connection</spanx> header field to
+              indicate connection-specific header fields; in this protocol, connection-specific
+              metadata is conveyed by other means.  An endpoint MUST NOT generate an HTTP/2 message
+              containing connection-specific header fields; any message containing
+              connection-specific header fields MUST be treated as <xref
+              target="malformed">malformed</xref>.
+            </t>
+            <t>
+              This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
+              to remove any header fields nominated by the Connection header field, along with the
+              Connection header field itself. Such intermediaries SHOULD also remove other
+              connection-specific header fields, such as Keep-Alive, Proxy-Connection,
+              Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
+            </t>
+            <t>
+              One exception to this is the TE header field, which MAY be present in an HTTP/2
+              request, but when it is MUST NOT contain any value other than "trailers".
+            </t>
+            <t>
+              <list style="hanging">
+                <t hangText="Note:">
+                  HTTP/2 purposefully does not support upgrade to another protocol.  The handshake
+                  methods described in <xref target="starting"/> are believed sufficient to
+                  negotiate the use of alternative protocols.
+                </t>
+              </list>
+            </t>
+          </section>
+
+          <section anchor="HttpRequest" title="Request Pseudo-Header Fields">
+            <t>
+              The following pseudo-header fields are defined for HTTP/2 requests:
+              <list style="symbols">
+                <x:lt>
+                  <t>
+                    The <spanx style="verb">:method</spanx> pseudo-header field includes the HTTP
+                    method (<xref target="RFC7231" x:fmt="," x:rel="#methods"/>).
+                  </t>
+                </x:lt>
+                <x:lt>
+                  <t>
+                    The <spanx style="verb">:scheme</spanx> pseudo-header field includes the scheme
+                    portion of the target URI (<xref target="RFC3986" x:fmt="," x:sec="3.1"/>).
+                  </t>
+                  <t>
+                    <spanx style="verb">:scheme</spanx> is not restricted to <spanx
+                    style="verb">http</spanx> and <spanx style="verb">https</spanx> schemed URIs.  A
+                    proxy or gateway can translate requests for non-HTTP schemes, enabling the use
+                    of HTTP to interact with non-HTTP services.
+                  </t>
+                </x:lt>
+                <x:lt>
+                  <t>
+                    The <spanx style="verb">:authority</spanx> pseudo-header field includes the
+                    authority portion of the target URI (<xref target="RFC3986" x:fmt=","
+                    x:sec="3.2"/>). The authority MUST NOT include the deprecated <spanx
+                    style="verb">userinfo</spanx> subcomponent for <spanx style="verb">http</spanx>
+                    or <spanx style="verb">https</spanx> schemed URIs.
+                  </t>
+                  <t>
+                    To ensure that the HTTP/1.1 request line can be reproduced accurately, this
+                    pseudo-header field MUST be omitted when translating from an HTTP/1.1 request
+                    that has a request target in origin or asterisk form (see <xref
+                    target="RFC7230" x:fmt="," x:rel="#request-target"/>). Clients that generate
+                    HTTP/2 requests directly SHOULD use the <spanx style="verb">:authority</spanx> pseudo-header
+                    field instead of the <spanx style="verb">Host</spanx> header field. An
+                    intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a <spanx
+                    style="verb">Host</spanx> header field if one is not present in a request by
+                    copying the value of the <spanx style="verb">:authority</spanx> pseudo-header
+                    field.
+                  </t>
+                </x:lt>
+                <x:lt>
+                  <t>
+                    The <spanx style="verb">:path</spanx> pseudo-header field includes the path and
+                    query parts of the target URI (the <spanx style="verb">path-absolute</spanx>
+                    production from <xref target="RFC3986"/> and optionally a '?' character
+                    followed by the <spanx style="verb">query</spanx> production, see <xref
+                    target="RFC3986" x:fmt="," x:sec="3.3"/> and <xref target="RFC3986" x:fmt=","
+                    x:sec="3.4"/>). A request in asterisk form includes the value '*' for the
+                    <spanx style="verb">:path</spanx> pseudo-header field.
+                  </t>
+                  <t>
+                    This pseudo-header field MUST NOT be empty for <spanx style="verb">http</spanx>
+                    or <spanx style="verb">https</spanx> URIs; <spanx style="verb">http</spanx> or
+                    <spanx style="verb">https</spanx> URIs that do not contain a path component
+                    MUST include a value of '/'. The exception to this rule is an OPTIONS request
+                    for an <spanx style="verb">http</spanx> or <spanx style="verb">https</spanx>
+                    URI that does not include a path component; these MUST include a <spanx
+                    style="verb">:path</spanx> pseudo-header field with a value of '*' (see <xref
+                    target="RFC7230" x:fmt="," x:rel="#asterisk-form"/>).
+                  </t>
+                </x:lt>
+              </list>
+            </t>
+            <t>
+              All HTTP/2 requests MUST include exactly one valid value for the <spanx
+              style="verb">:method</spanx>, <spanx style="verb">:scheme</spanx>, and <spanx
+              style="verb">:path</spanx> pseudo-header fields, unless it is a <xref
+              target="CONNECT">CONNECT request</xref>. An HTTP request that omits mandatory
+              pseudo-header fields is <xref target="malformed">malformed</xref>.
+            </t>
+            <t>
+              HTTP/2 does not define a way to carry the version identifier that is included in the
+              HTTP/1.1 request line.
+            </t>
+          </section>
+
+          <section anchor="HttpResponse" title="Response Pseudo-Header Fields">
+            <t>
+              For HTTP/2 responses, a single <spanx style="verb">:status</spanx> pseudo-header
+              field is defined that carries the HTTP status code field (see <xref target="RFC7231"
+              x:fmt="," x:rel="#status.codes"/>). This pseudo-header field MUST be included in all
+              responses, otherwise the response is <xref target="malformed">malformed</xref>.
+            </t>
+            <t>
+              HTTP/2 does not define a way to carry the version or reason phrase that is included in
+              an HTTP/1.1 status line.
+            </t>
+          </section>
+
+         <section anchor="CompressCookie" title="Compressing the Cookie Header Field">
+            <t>
+              The <xref target="COOKIE">Cookie header field</xref> can carry a significant amount of
+              redundant data.
+            </t>
+            <t>
+              The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs").
+              This header field doesn't follow the list construction rules in HTTP (see <xref
+              target="RFC7230" x:fmt="," x:rel="#field.order"/>), which prevents cookie-pairs from
+              being separated into different name-value pairs.  This can significantly reduce
+              compression efficiency as individual cookie-pairs are updated.
+            </t>
+            <t>
+              To allow for better compression efficiency, the Cookie header field MAY be split into
+              separate header fields, each with one or more cookie-pairs.  If there are multiple
+              Cookie header fields after decompression, these MUST be concatenated into a single
+              octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ")
+              before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a
+              generic HTTP server application.
+            </t>
+            <figure>
+              <preamble>
+                Therefore, the following two lists of Cookie header fields are semantically
+                equivalent.
+              </preamble>
+              <artwork type="inline"><![CDATA[
+  cookie: a=b; c=d; e=f
+
+  cookie: a=b
+  cookie: c=d
+  cookie: e=f
+]]></artwork>
+            </figure>
+          </section>
+
+          <section anchor="malformed" title="Malformed Requests and Responses">
+            <t>
+              A malformed request or response is one that is an otherwise valid sequence of HTTP/2
+              frames, but is otherwise invalid due to the presence of extraneous frames, prohibited
+              header fields, the absence of mandatory header fields, or the inclusion of uppercase
+              header field names.
+            </t>
+            <t>
+              A request or response that includes an entity body can include a <spanx
+              style="verb">content-length</spanx> header field.  A request or response is also
+              malformed if the value of a <spanx style="verb">content-length</spanx> header field
+              does not equal the sum of the <x:ref>DATA</x:ref> frame payload lengths that form the
+              body.  A response that is defined to have no payload, as described in <xref
+              target="RFC7230" x:fmt="," x:rel="#header.content-length"/>, can have a non-zero
+              <spanx style="verb">content-length</spanx> header field, even though no content is
+              included in <x:ref>DATA</x:ref> frames.
+            </t>
+            <t>
+              Intermediaries that process HTTP requests or responses (i.e., any intermediary not
+              acting as a tunnel) MUST NOT forward a malformed request or response.  Malformed
+              requests or responses that are detected MUST be treated as a <xref
+              target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+            </t>
+            <t>
+              For malformed requests, a server MAY send an HTTP response prior to closing or
+              resetting the stream.  Clients MUST NOT accept a malformed response. Note that these
+              requirements are intended to protect against several types of common attacks against
+              HTTP; they are deliberately strict, because being permissive can expose
+              implementations to these vulnerabilities.
+            </t>
+          </section>
+        </section>
+
+        <section title="Examples">
+          <t>
+            This section shows HTTP/1.1 requests and responses, with illustrations of equivalent
+            HTTP/2 requests and responses.
+          </t>
+          <t>
+            An HTTP GET request includes request header fields and no body and is therefore
+            transmitted as a single <x:ref>HEADERS</x:ref> frame, followed by zero or more
+            <x:ref>CONTINUATION</x:ref> frames containing the serialized block of request header
+            fields.  The <x:ref>HEADERS</x:ref> frame in the following has both the END_HEADERS and
+            END_STREAM flags set; no <x:ref>CONTINUATION</x:ref> frames are sent:
+          </t>
+
+          <figure>
+            <artwork type="inline"><![CDATA[
+  GET /resource HTTP/1.1           HEADERS
+  Host: example.org          ==>     + END_STREAM
+  Accept: image/jpeg                 + END_HEADERS
+                                       :method = GET
+                                       :scheme = https
+                                       :path = /resource
+                                       host = example.org
+                                       accept = image/jpeg
+]]></artwork>
+          </figure>
+
+          <t>
+            Similarly, a response that includes only response header fields is transmitted as a
+            <x:ref>HEADERS</x:ref> frame (again, followed by zero or more
+            <x:ref>CONTINUATION</x:ref> frames) containing the serialized block of response header
+            fields.
+          </t>
+
+          <figure>
+            <artwork type="inline"><![CDATA[
+  HTTP/1.1 304 Not Modified        HEADERS
+  ETag: "xyzzy"              ==>     + END_STREAM
+  Expires: Thu, 23 Jan ...           + END_HEADERS
+                                       :status = 304
+                                       etag = "xyzzy"
+                                       expires = Thu, 23 Jan ...
+]]></artwork>
+          </figure>
+
+          <t>
+            An HTTP POST request that includes request header fields and payload data is transmitted
+            as one <x:ref>HEADERS</x:ref> frame, followed by zero or more
+            <x:ref>CONTINUATION</x:ref> frames containing the request header fields, followed by one
+            or more <x:ref>DATA</x:ref> frames, with the last <x:ref>CONTINUATION</x:ref> (or
+            <x:ref>HEADERS</x:ref>) frame having the END_HEADERS flag set and the final
+            <x:ref>DATA</x:ref> frame having the END_STREAM flag set:
+          </t>
+
+          <figure>
+            <artwork type="inline"><![CDATA[
+  POST /resource HTTP/1.1          HEADERS
+  Host: example.org          ==>     - END_STREAM
+  Content-Type: image/jpeg           - END_HEADERS
+  Content-Length: 123                  :method = POST
+                                       :path = /resource
+  {binary data}                        :scheme = https
+
+                                   CONTINUATION
+                                     + END_HEADERS
+                                       content-type = image/jpeg
+                                       host = example.org
+                                       content-length = 123
+
+                                   DATA
+                                     + END_STREAM
+                                   {binary data}
+]]></artwork>
+            <postamble>
+              Note that data contributing to any given header field could be spread between header
+              block fragments.  The allocation of header fields to frames in this example is
+              illustrative only.
+            </postamble>
+          </figure>
+
+          <t>
+            A response that includes header fields and payload data is transmitted as a
+            <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>
+            frames, followed by one or more <x:ref>DATA</x:ref> frames, with the last
+            <x:ref>DATA</x:ref> frame in the sequence having the END_STREAM flag set:
+          </t>
+
+          <figure>
+            <artwork type="inline"><![CDATA[
+  HTTP/1.1 200 OK                  HEADERS
+  Content-Type: image/jpeg   ==>     - END_STREAM
+  Content-Length: 123                + END_HEADERS
+                                       :status = 200
+  {binary data}                        content-type = image/jpeg
+                                       content-length = 123
+
+                                   DATA
+                                     + END_STREAM
+                                   {binary data}
+]]></artwork>
+          </figure>
+
+          <t>
+            Trailing header fields are sent as a header block after both the request or response
+            header block and all the <x:ref>DATA</x:ref> frames have been sent.  The
+            <x:ref>HEADERS</x:ref> frame starting the trailers header block has the END_STREAM flag
+            set.
+          </t>
+
+          <figure>
+            <artwork type="inline"><![CDATA[
+  HTTP/1.1 200 OK                  HEADERS
+  Content-Type: image/jpeg   ==>     - END_STREAM
+  Transfer-Encoding: chunked         + END_HEADERS
+  Trailer: Foo                         :status = 200
+                                       content-length = 123
+  123                                  content-type = image/jpeg
+  {binary data}                        trailer = Foo
+  0
+  Foo: bar                         DATA
+                                     - END_STREAM
+                                   {binary data}
+
+                                   HEADERS
+                                     + END_STREAM
+                                     + END_HEADERS
+                                       foo = bar
+]]></artwork>
+          </figure>
+
+
+          <figure>
+           <preamble>
+             An informational response using a 1xx status code other than 101 is transmitted as a
+             <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>
+             frames:
+           </preamble>
+           <artwork type="inline"><![CDATA[
+  HTTP/1.1 103 BAR                 HEADERS
+  Extension-Field: bar       ==>     - END_STREAM
+                                     + END_HEADERS
+                                       :status = 103
+                                       extension-field = bar
+]]></artwork>
+          </figure>
+        </section>
+
+        <section anchor="Reliability" title="Request Reliability Mechanisms in HTTP/2">
+          <t>
+            In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error
+            occurs, because there is no means to determine the nature of the error.  It is possible
+            that some server processing occurred prior to the error, which could result in
+            undesirable effects if the request were reattempted.
+          </t>
+          <t>
+            HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has
+            not been processed:
+            <list style="symbols">
+              <t>
+                The <x:ref>GOAWAY</x:ref> frame indicates the highest stream number that might have
+                been processed.  Requests on streams with higher numbers are therefore guaranteed to
+                be safe to retry.
+              </t>
+              <t>
+                The <x:ref>REFUSED_STREAM</x:ref> error code can be included in a
+                <x:ref>RST_STREAM</x:ref> frame to indicate that the stream is being closed prior to
+                any processing having occurred.  Any request that was sent on the reset stream can
+                be safely retried.
+              </t>
+            </list>
+          </t>
+          <t>
+            Requests that have not been processed have not failed; clients MAY automatically retry
+            them, even those with non-idempotent methods.
+          </t>
+          <t>
+            A server MUST NOT indicate that a stream has not been processed unless it can guarantee
+            that fact.  If frames that are on a stream are passed to the application layer for any
+            stream, then <x:ref>REFUSED_STREAM</x:ref> MUST NOT be used for that stream, and a
+            <x:ref>GOAWAY</x:ref> frame MUST include a stream identifier that is greater than or
+            equal to the given stream identifier.
+          </t>
+          <t>
+            In addition to these mechanisms, the <x:ref>PING</x:ref> frame provides a way for a
+            client to easily test a connection.  Connections that remain idle can become broken as
+            some middleboxes (for instance, network address translators, or load balancers) silently
+            discard connection bindings.  The <x:ref>PING</x:ref> frame allows a client to safely
+            test whether a connection is still active without sending a request.
+          </t>
+        </section>
+      </section>
+
+      <section anchor="PushResources" title="Server Push">
+        <t>
+          HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
+          corresponding "promised" requests) to a client in association with a previous
+          client-initiated request. This can be useful when the server knows the client will need
+          to have those responses available in order to fully process the response to the original
+          request.
+        </t>
+
+        <t>
+          Pushing additional message exchanges in this fashion is optional, and is negotiated
+          between individual endpoints. The <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting can be set
+          to 0 to indicate that server push is disabled.
+        </t>
+        <t>
+          Promised requests MUST be cacheable (see <xref target="RFC7231" x:fmt=","
+          x:rel="#cacheable.methods"/>), MUST be safe (see <xref target="RFC7231" x:fmt=","
+          x:rel="#safe.methods"/>) and MUST NOT include a request body. Clients that receive a
+          promised request that is not cacheable, unsafe or that includes a request body MUST
+          reset the stream with a <xref target="StreamErrorHandler">stream error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+        <t>
+          Pushed responses that are cacheable (see <xref target="RFC7234" x:fmt=","
+          x:rel="#response.cacheability"/>) can be stored by the client, if it implements an HTTP
+          cache.  Pushed responses are considered successfully validated on the origin server (e.g.,
+          if the "no-cache" cache response directive <xref target="RFC7234" x:fmt=","
+          x:rel="#cache-response-directive"/> is present) while the stream identified by the
+          promised stream ID is still open.
+        </t>
+        <t>
+          Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
+          be made available to the application separately.
+        </t>
+        <t>
+          An intermediary can receive pushes from the server and choose not to forward them on to
+          the client. In other words, how to make use of the pushed information is up to that
+          intermediary. Equally, the intermediary might choose to make additional pushes to the
+          client, without any action taken by the server.
+        </t>
+        <t>
+          A client cannot push. Thus, servers MUST treat the receipt of a
+          <x:ref>PUSH_PROMISE</x:ref> frame as a <xref target="ConnectionErrorHandler">connection
+          error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>. Clients MUST reject any attempt to
+          change the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting to a value other than 0 by treating
+          the message as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>PROTOCOL_ERROR</x:ref>.
+        </t>
+
+        <section anchor="PushRequests" title="Push Requests">
+          <t>
+            Server push is semantically equivalent to a server responding to a request; however, in
+            this case that request is also sent by the server, as a <x:ref>PUSH_PROMISE</x:ref>
+            frame.
+          </t>
+          <t>
+            The <x:ref>PUSH_PROMISE</x:ref> frame includes a header block that contains a complete
+            set of request header fields that the server attributes to the request. It is not
+            possible to push a response to a request that includes a request body.
+          </t>
+
+          <t>
+            Pushed responses are always associated with an explicit request from the client. The
+            <x:ref>PUSH_PROMISE</x:ref> frames sent by the server are sent on that explicit
+            request's stream. The <x:ref>PUSH_PROMISE</x:ref> frame also includes a promised stream
+            identifier, chosen from the stream identifiers available to the server (see <xref
+            target="StreamIdentifiers"/>).
+          </t>
+
+          <t>
+            The header fields in <x:ref>PUSH_PROMISE</x:ref> and any subsequent
+            <x:ref>CONTINUATION</x:ref> frames MUST be a valid and complete set of <xref
+            target="HttpRequest">request header fields</xref>.  The server MUST include a method in
+            the <spanx style="verb">:method</spanx> header field that is safe and cacheable.  If a
+            client receives a <x:ref>PUSH_PROMISE</x:ref> that does not include a complete and valid
+            set of header fields, or the <spanx style="verb">:method</spanx> header field identifies
+            a method that is not safe, it MUST respond with a <xref
+            target="StreamErrorHandler">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.
+          </t>
+
+          <t>
+            The server SHOULD send <x:ref>PUSH_PROMISE</x:ref> (<xref target="PUSH_PROMISE"/>)
+            frames prior to sending any frames that reference the promised responses. This avoids a
+            race where clients issue requests prior to receiving any <x:ref>PUSH_PROMISE</x:ref>
+            frames.
+          </t>
+          <t>
+            For example, if the server receives a request for a document containing embedded links
+            to multiple image files, and the server chooses to push those additional images to the
+            client, sending push promises before the <x:ref>DATA</x:ref> frames that contain the
+            image links ensures that the client is able to see the promises before discovering
+            embedded links. Similarly, if the server pushes responses referenced by the header block
+            (for instance, in Link header fields), sending the push promises before sending the
+            header block ensures that clients do not request them.
+          </t>
+
+          <t>
+            <x:ref>PUSH_PROMISE</x:ref> frames MUST NOT be sent by the client.
+          </t>
+          <t>
+            <x:ref>PUSH_PROMISE</x:ref> frames can be sent by the server in response to any
+            client-initiated stream, but the stream MUST be in either the "open" or "half closed
+            (remote)" state with respect to the server.  <x:ref>PUSH_PROMISE</x:ref> frames are
+            interspersed with the frames that comprise a response, though they cannot be
+            interspersed with <x:ref>HEADERS</x:ref> and <x:ref>CONTINUATION</x:ref> frames that
+            comprise a single header block.
+          </t>
+          <t>
+            Sending a <x:ref>PUSH_PROMISE</x:ref> frame creates a new stream and puts the stream
+            into the “reserved (local)” state for the server and the “reserved (remote)” state for
+            the client.
+          </t>
+        </section>
+
+        <section anchor="PushResponses" title="Push Responses">
+          <t>
+            After sending the <x:ref>PUSH_PROMISE</x:ref> frame, the server can begin delivering the
+            pushed response as a <xref target="HttpResponse">response</xref> on a server-initiated
+            stream that uses the promised stream identifier.  The server uses this stream to
+            transmit an HTTP response, using the same sequence of frames as defined in <xref
+            target="HttpSequence"/>.  This stream becomes <xref target="StreamStates">"half closed"
+            to the client</xref> after the initial <x:ref>HEADERS</x:ref> frame is sent.
+          </t>
+
+          <t>
+            Once a client receives a <x:ref>PUSH_PROMISE</x:ref> frame and chooses to accept the
+            pushed response, the client SHOULD NOT issue any requests for the promised response
+            until after the promised stream has closed.
+          </t>
+
+          <t>
+            If the client determines, for any reason, that it does not wish to receive the pushed
+            response from the server, or if the server takes too long to begin sending the promised
+            response, the client can send an <x:ref>RST_STREAM</x:ref> frame, using either the
+            <x:ref>CANCEL</x:ref> or <x:ref>REFUSED_STREAM</x:ref> codes, and referencing the pushed
+            stream's identifier.
+          </t>
+          <t>
+            A client can use the <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting to limit the
+            number of responses that can be concurrently pushed by a server.  Advertising a
+            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> value of zero disables server push by
+            preventing the server from creating the necessary streams.  This does not prohibit a
+            server from sending <x:ref>PUSH_PROMISE</x:ref> frames; clients need to reset any
+            promised streams that are not wanted.
+          </t>
+
+          <t>
+            Clients receiving a pushed response MUST validate that either the server is
+            authoritative (see <xref target="authority"/>), or the proxy that provided the pushed
+            response is configured for the corresponding request. For example, a server that offers
+            a certificate for only the <spanx style="verb">example.com</spanx> DNS-ID or Common Name
+            is not permitted to push a response for <spanx
+            style="verb">https://www.example.org/doc</spanx>.
+          </t>
+          <t>
+            The response for a <x:ref>PUSH_PROMISE</x:ref> stream begins with a
+            <x:ref>HEADERS</x:ref> frame, which immediately puts the stream into the “half closed
+            (remote)” state for the server and “half closed (local)” state for the client, and ends
+            with a frame bearing END_STREAM, which places the stream in the "closed" state.
+            <list style="hanging">
+              <t hangText="Note:">
+                The client never sends a frame with the END_STREAM flag for a server push.
+              </t>
+            </list>
+          </t>
+        </section>
+
+      </section>
+
+      <section anchor="CONNECT" title="The CONNECT Method">
+        <t>
+          In HTTP/1.x, the pseudo-method CONNECT (<xref target="RFC7231" x:fmt=","
+          x:rel="#CONNECT"/>) is used to convert an HTTP connection into a tunnel to a remote host.
+          CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin
+          server for the purposes of interacting with <spanx style="verb">https</spanx> resources.
+        </t>
+        <t>
+          In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to
+          a remote host, for similar purposes. The HTTP header field mapping works as defined in
+          <xref target="HttpRequest">Request Header Fields</xref>, with a few
+          differences. Specifically:
+          <list style="symbols">
+            <t>
+              The <spanx style="verb">:method</spanx> header field is set to <spanx
+              style="verb">CONNECT</spanx>.
+            </t>
+            <t>
+              The <spanx style="verb">:scheme</spanx> and <spanx style="verb">:path</spanx> header
+              fields MUST be omitted.
+            </t>
+            <t>
+              The <spanx style="verb">:authority</spanx> header field contains the host and port to
+              connect to (equivalent to the authority-form of the request-target of CONNECT
+              requests, see <xref target="RFC7230" x:fmt="," x:rel="#request-target"/>).
+            </t>
+          </list>
+        </t>
+        <t>
+          A proxy that supports CONNECT establishes a <xref target="TCP">TCP connection</xref> to
+          the server identified in the <spanx style="verb">:authority</spanx> header field. Once
+          this connection is successfully established, the proxy sends a <x:ref>HEADERS</x:ref>
+          frame containing a 2xx series status code to the client, as defined in <xref
+          target="RFC7231" x:fmt="," x:rel="#CONNECT"/>.
+        </t>
+        <t>
+          After the initial <x:ref>HEADERS</x:ref> frame sent by each peer, all subsequent
+          <x:ref>DATA</x:ref> frames correspond to data sent on the TCP connection.  The payload of
+          any <x:ref>DATA</x:ref> frames sent by the client is transmitted by the proxy to the TCP
+          server; data received from the TCP server is assembled into <x:ref>DATA</x:ref> frames by
+          the proxy.  Frame types other than <x:ref>DATA</x:ref> or stream management frames
+          (<x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PRIORITY</x:ref>)
+          MUST NOT be sent on a connected stream, and MUST be treated as a <xref
+          target="StreamErrorHandler">stream error</xref> if received.
+        </t>
+        <t>
+          The TCP connection can be closed by either peer.  The END_STREAM flag on a
+          <x:ref>DATA</x:ref> frame is treated as being equivalent to the TCP FIN bit.  A client is
+          expected to send a <x:ref>DATA</x:ref> frame with the END_STREAM flag set after receiving
+          a frame bearing the END_STREAM flag.  A proxy that receives a <x:ref>DATA</x:ref> frame
+          with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP
+          segment.  A proxy that receives a TCP segment with the FIN bit set sends a
+          <x:ref>DATA</x:ref> frame with the END_STREAM flag set.  Note that the final TCP segment
+          or <x:ref>DATA</x:ref> frame could be empty.
+        </t>
+        <t>
+          A TCP connection error is signaled with <x:ref>RST_STREAM</x:ref>.  A proxy treats any
+          error in the TCP connection, which includes receiving a TCP segment with the RST bit set,
+          as a <xref target="StreamErrorHandler">stream error</xref> of type
+          <x:ref>CONNECT_ERROR</x:ref>.  Correspondingly, a proxy MUST send a TCP segment with the
+          RST bit set if it detects an error with the stream or the HTTP/2 connection.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="HttpExtra" title="Additional HTTP Requirements/Considerations">
+      <t>
+        This section outlines attributes of the HTTP protocol that improve interoperability, reduce
+        exposure to known security vulnerabilities, or reduce the potential for implementation
+        variation.
+      </t>
+
+      <section title="Connection Management">
+        <t>
+          HTTP/2 connections are persistent.  For best performance, it is expected clients will not
+          close connections until it is determined that no further communication with a server is
+          necessary (for example, when a user navigates away from a particular web page), or until
+          the server closes the connection.
+        </t>
+        <t>
+          Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
+          where host is derived from a URI, a selected <xref target="ALT-SVC">alternative
+          service</xref>, or a configured proxy.
+        </t>
+        <t>
+          A client can create additional connections as replacements, either to replace connections
+          that are near to exhausting the available <xref target="StreamIdentifiers">stream
+          identifier space</xref>, to refresh the keying material for a TLS connection, or to
+          replace connections that have encountered <xref
+          target="ConnectionErrorHandler">errors</xref>.
+        </t>
+        <t>
+          A client MAY open multiple connections to the same IP address and TCP port using different
+          <xref target="TLS-EXT">Server Name Indication</xref> values or to provide different TLS
+          client certificates, but SHOULD avoid creating multiple connections with the same
+          configuration.
+        </t>
+        <t>
+          Servers are encouraged to maintain open connections for as long as possible, but are
+          permitted to terminate idle connections if necessary.  When either endpoint chooses to
+          close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
+          <x:ref>GOAWAY</x:ref> (<xref target="GOAWAY"/>) frame so that both endpoints can reliably
+          determine whether previously sent frames have been processed and gracefully complete or
+          terminate any necessary remaining tasks.
+        </t>
+
+        <section anchor="reuse" title="Connection Reuse">
+          <t>
+            Connections that are made to an origin server, either directly or through a tunnel
+            created using the <xref target="CONNECT">CONNECT method</xref>, MAY be reused for
+            requests with multiple different URI authority components.  A connection can be reused
+            as long as the origin server is <xref target="authority">authoritative</xref>.  For
+            <spanx style="verb">http</spanx> resources, this depends on the host having resolved to
+            the same IP address.
+          </t>
+          <t>
+            For <spanx style="verb">https</spanx> resources, connection reuse additionally depends
+            on having a certificate that is valid for the host in the URI.  An origin server might
+            offer a certificate with multiple <spanx style="verb">subjectAltName</spanx> attributes,
+            or names with wildcards, one of which is valid for the authority in the URI.  For
+            example, a certificate with a <spanx style="verb">subjectAltName</spanx> of <spanx
+            style="verb">*.example.com</spanx> might permit the use of the same connection for
+            requests to URIs starting with <spanx style="verb">https://a.example.com/</spanx> and
+            <spanx style="verb">https://b.example.com/</spanx>.
+          </t>
+          <t>
+            In some deployments, reusing a connection for multiple origins can result in requests
+            being directed to the wrong origin server.  For example, TLS termination might be
+            performed by a middlebox that uses the TLS <xref target="TLS-EXT">Server Name Indication
+            (SNI)</xref> extension to select an origin server.  This means that it is possible
+            for clients to send confidential information to servers that might not be the intended
+            target for the request, even though the server is otherwise authoritative.
+          </t>
+          <t>
+            A server that does not wish clients to reuse connections can indicate that it is not
+            authoritative for a request by sending a 421 (Misdirected Request) status code in response
+            to the request (see <xref target="MisdirectedRequest"/>).
+          </t>
+          <t>
+            A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
+            through a single connection.  That is, all requests sent via a proxy reuse the
+            connection to the proxy.
+          </t>
+        </section>
+
+        <section anchor="MisdirectedRequest" title="The 421 (Misdirected Request) Status Code">
+          <t>
+            The 421 (Misdirected Request) status code indicates that the request was directed at a
+            server that is not able to produce a response.  This can be sent by a server that is not
+            configured to produce responses for the combination of scheme and authority that are
+            included in the request URI.
+          </t>
+          <t>
+            Clients receiving a 421 (Misdirected Request) response from a server MAY retry the
+            request - whether the request method is idempotent or not - over a different connection.
+            This is possible if a connection is reused (<xref target="reuse"/>) or if an alternative
+            service is selected (<xref target="ALT-SVC"/>).
+          </t>
+          <t>
+            This status code MUST NOT be generated by proxies.
+          </t>
+          <t>
+            A 421 response is cacheable by default; i.e., unless otherwise indicated by the method
+            definition or explicit cache controls (see <xref target="RFC7234"
+            x:rel="#heuristic.freshness" x:fmt="of"/>).
+          </t>
+        </section>
+      </section>
+
+      <section title="Use of TLS Features" anchor="TLSUsage">
+        <t>
+          Implementations of HTTP/2 MUST support <xref target="TLS12">TLS 1.2</xref> for HTTP/2 over
+          TLS.  The general TLS usage guidance in <xref target="TLSBCP"/> SHOULD be followed, with
+          some additional restrictions that are specific to HTTP/2.
+        </t>
+
+        <t>
+          An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on
+          feature set and cipher suite described in this section.  Due to implementation
+          limitations, it might not be possible to fail TLS negotiation.  An endpoint MUST
+          immediately terminate an HTTP/2 connection that does not meet these minimum requirements
+          with a <xref target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>INADEQUATE_SECURITY</x:ref>.
+        </t>
+
+        <section anchor="TLSFeatures" title="TLS Features">
+          <t>
+            The TLS implementation MUST support the <xref target="TLS-EXT">Server Name Indication
+            (SNI)</xref> extension to TLS. HTTP/2 clients MUST indicate the target domain name when
+            negotiating TLS.
+          </t>
+          <t>
+            The TLS implementation MUST disable compression.  TLS compression can lead to the
+            exposure of information that would not otherwise be revealed <xref target="RFC3749"/>.
+            Generic compression is unnecessary since HTTP/2 provides compression features that are
+            more aware of context and therefore likely to be more appropriate for use for
+            performance, security or other reasons.
+          </t>
+          <t>
+            The TLS implementation MUST disable renegotiation.  An endpoint MUST treat a TLS
+            renegotiation as a <xref target="ConnectionErrorHandler">connection error</xref> of type
+            <x:ref>PROTOCOL_ERROR</x:ref>.  Note that disabling renegotiation can result in
+            long-lived connections becoming unusable due to limits on the number of messages the
+            underlying cipher suite can encipher.
+          </t>
+          <t>
+            A client MAY use renegotiation to provide confidentiality protection for client
+            credentials offered in the handshake, but any renegotiation MUST occur prior to sending
+            the connection preface.  A server SHOULD request a client certificate if it sees a
+            renegotiation request immediately after establishing a connection.
+          </t>
+          <t>
+            This effectively prevents the use of renegotiation in response to a request for a
+            specific protected resource.  A future specification might provide a way to support this
+            use case. <!-- <cref> We are tracking this in a non-blocking fashion in issue #496 and
+            with a new draft. -->
+          </t>
+        </section>
+
+        <section title="TLS Cipher Suites">
+          <t>
+            The set of TLS cipher suites that are permitted in HTTP/2 is restricted.  HTTP/2 MUST
+            only be used with cipher suites that have ephemeral key exchange, such as the <xref
+            target="TLS12">ephemeral Diffie-Hellman (DHE)</xref> or the <xref
+            target="RFC4492">elliptic curve variant (ECDHE)</xref>.  Ephemeral key exchange MUST
+            have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
+            Clients MUST accept DHE sizes of up to 4096 bits.  HTTP MUST NOT be used with cipher
+            suites that use stream or block ciphers.  Authenticated Encryption with Additional Data
+            (AEAD) modes, such as the <xref target="RFC5288">Galois/Counter Mode (GCM) for
+            AES</xref> are acceptable.
+          </t>
+          <t>
+            The effect of these restrictions is that TLS 1.2 implementations could have
+            non-intersecting sets of available cipher suites, since these prevent the use of the
+            cipher suite that TLS 1.2 makes mandatory.  To avoid this problem, implementations of
+            HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 <xref
+            target="TLS-ECDHE"/> with P256 <xref target="FIPS186"/>.
+          </t>
+          <t>
+            Clients MAY advertise support of cipher suites that are prohibited by the above
+            restrictions in order to allow for connection to servers that do not support HTTP/2.
+            This enables a fallback to protocols without these constraints without the additional
+            latency imposed by using a separate connection for fallback.
+          </t>
+        </section>
+      </section>
+    </section>
+
+    <section anchor="security" title="Security Considerations">
+      <section title="Server Authority" anchor="authority">
+        <t>
+          HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is
+          authoritative in providing a given response, see <xref target="RFC7230" x:fmt=","
+          x:rel="#establishing.authority"/>.  This relies on local name resolution for the "http"
+          URI scheme, and the authenticated server identity for the "https" scheme (see <xref
+          target="RFC2818" x:fmt="," x:sec="3"/>).
+        </t>
+      </section>
+
+      <section title="Cross-Protocol Attacks">
+        <t>
+          In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
+          protocol toward a server that understands a different protocol.  An attacker might be able
+          to cause the transaction to appear as a valid transaction in the second protocol.  In
+          combination with the capabilities of the web context, this can be used to interact with
+          poorly protected servers in private networks.
+        </t>
+        <t>
+          Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient
+          protection against cross protocol attacks.  ALPN provides a positive indication that a
+          server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based
+          protocols.
+        </t>
+        <t>
+          The encryption in TLS makes it difficult for attackers to control the data which could be
+          used in a cross-protocol attack on a cleartext protocol.
+        </t>
+        <t>
+          The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
+          The <xref target="ConnectionHeader">connection preface</xref> contains a string that is
+          designed to confuse HTTP/1.1 servers, but no special protection is offered for other
+          protocols.  A server that is willing to ignore parts of an HTTP/1.1 request containing an
+          Upgrade header field in addition to the client connection preface could be exposed to a
+          cross-protocol attack.
+        </t>
+      </section>
+
+      <section title="Intermediary Encapsulation Attacks">
+        <t>
+          HTTP/2 header field names and values are encoded as sequences of octets with a length
+          prefix.  This enables HTTP/2 to carry any string of octets as the name or value of a
+          header field.  An intermediary that translates HTTP/2 requests or responses into HTTP/1.1
+          directly could permit the creation of corrupted HTTP/1.1 messages.  An attacker might
+          exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal
+          header fields, extra header fields, or even new messages that are entirely falsified.
+        </t>
+        <t>
+          Header field names or values that contain characters not permitted by HTTP/1.1, including
+          carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an
+          intermediary, as stipulated in <xref target="RFC7230" x:rel="#field.parsing" x:fmt=","/>.
+        </t>
+        <t>
+          Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker.
+          Intermediaries that perform translation to HTTP/2 MUST remove any instances of the <spanx
+          style="verb">obs-fold</spanx> production from header field values.
+        </t>
+      </section>
+
+      <section title="Cacheability of Pushed Responses">
+        <t>
+          Pushed responses do not have an explicit request from the client; the request
+          is provided by the server in the <x:ref>PUSH_PROMISE</x:ref> frame.
+        </t>
+        <t>
+          Caching responses that are pushed is possible based on the guidance provided by the origin
+          server in the Cache-Control header field.  However, this can cause issues if a single
+          server hosts more than one tenant.  For example, a server might offer multiple users each
+          a small portion of its URI space.
+        </t>
+        <t>
+          Where multiple tenants share space on the same server, that server MUST ensure that
+          tenants are not able to push representations of resources that they do not have authority
+          over.  Failure to enforce this would allow a tenant to provide a representation that would
+          be served out of cache, overriding the actual representation that the authoritative tenant
+          provides.
+        </t>
+        <t>
+          Pushed responses for which an origin server is not authoritative (see
+          <xref target="authority"/>) are never cached or used.
+        </t>
+      </section>
+
+      <section anchor="dos" title="Denial of Service Considerations">
+        <t>
+          An HTTP/2 connection can demand a greater commitment of resources to operate than an
+          HTTP/1.1 connection.  The use of header compression and flow control depends on a
+          commitment of resources for storing a greater amount of state.  Settings for these
+          features ensure that memory commitments for these features are strictly bounded.
+        </t>
+        <t>
+          The number of <x:ref>PUSH_PROMISE</x:ref> frames is not constrained in the same fashion.
+          A client that accepts server push SHOULD limit the number of streams it allows to be in
+          the "reserved (remote)" state.  An excessive number of server push streams can be treated as
+          a <xref target="StreamErrorHandler">stream error</xref> of type
+          <x:ref>ENHANCE_YOUR_CALM</x:ref>.
+        </t>
+        <t>
+          Processing capacity cannot be guarded as effectively as state capacity.
+        </t>
+        <t>
+          The <x:ref>SETTINGS</x:ref> frame can be abused to cause a peer to expend additional
+          processing time. This might be done by pointlessly changing SETTINGS parameters, setting
+          multiple undefined parameters, or changing the same setting multiple times in the same
+          frame.  <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>PRIORITY</x:ref> frames can be abused to
+          cause an unnecessary waste of resources.
+        </t>
+        <t>
+          Large numbers of small or empty frames can be abused to cause a peer to expend time
+          processing frame headers.  Note however that some uses are entirely legitimate, such as
+          the sending of an empty <x:ref>DATA</x:ref> frame to end a stream.
+        </t>
+        <t>
+          Header compression also offers some opportunities to waste processing resources; see <xref
+          target="COMPRESSION" x:fmt="of" x:rel="#Security"/> for more details on potential abuses.
+        </t>
+        <t>
+          Limits in <x:ref>SETTINGS</x:ref> parameters cannot be reduced instantaneously, which
+          leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In
+          particular, immediately after establishing a connection, limits set by a server are not
+          known to clients and could be exceeded without being an obvious protocol violation.
+        </t>
+        <t>
+          All these features - i.e., <x:ref>SETTINGS</x:ref> changes, small frames, header
+          compression - have legitimate uses.  These features become a burden only when they are
+          used unnecessarily or to excess.
+        </t>
+        <t>
+          An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of
+          service attack.  Implementations SHOULD track the use of these features and set limits on
+          their use.  An endpoint MAY treat activity that is suspicious as a <xref
+          target="ConnectionErrorHandler">connection error</xref> of type
+          <x:ref>ENHANCE_YOUR_CALM</x:ref>.
+        </t>
+
+        <section anchor="MaxHeaderBlock" title="Limits on Header Block Size">
+          <t>
+            A large <xref target="HeaderBlock">header block</xref> can cause an implementation to
+            commit a large amount of state.  Header fields that are critical for routing can appear
+            toward the end of a header block, which prevents streaming of header fields to their
+            ultimate destination. For this and other reasons, such as ensuring cache correctness,
+            an endpoint might need to buffer the entire header block.  Since there is no
+            hard limit to the size of a header block, some endpoints could be forced to commit a large
+            amount of available memory for header fields.
+          </t>
+          <t>
+            An endpoint can use the <x:ref>SETTINGS_MAX_HEADER_LIST_SIZE</x:ref> to advise peers of
+            limits that might apply on the size of header blocks.  This setting is only advisory, so
+            endpoints MAY choose to send header blocks that exceed this limit and risk having the
+            request or response being treated as malformed.  This setting is specific to a connection,
+            so any request or response could encounter a hop with a lower, unknown limit.  An
+            intermediary can attempt to avoid this problem by passing on values presented by
+            different peers, but they are not obligated to do so.
+          </t>
+          <t>
+            A server that receives a larger header block than it is willing to handle can send an
+            HTTP 431 (Request Header Fields Too Large) status code <xref target="RFC6585"/>.  A
+            client can discard responses that it cannot process.  The header block MUST be processed
+            to ensure a consistent connection state, unless the connection is closed.
+          </t>
+        </section>
+      </section>
+
+      <section title="Use of Compression">
+        <t>
+          HTTP/2 enables greater use of compression for both header fields (<xref
+          target="HeaderBlock"/>) and entity bodies.  Compression can allow an attacker to recover
+          secret data when it is compressed in the same context as data under attacker control.
+        </t>
+        <t>
+          There are demonstrable attacks on compression that exploit the characteristics of the web
+          (e.g., <xref target="BREACH"/>).  The attacker induces multiple requests containing
+          varying plaintext, observing the length of the resulting ciphertext in each, which
+          reveals a shorter length when a guess about the secret is correct.
+        </t>
+        <t>
+          Implementations communicating on a secure channel MUST NOT compress content that includes
+          both confidential and attacker-controlled data unless separate compression dictionaries
+          are used for each source of data.  Compression MUST NOT be used if the source of data
+          cannot be reliably determined.  Generic stream compression, such as that provided by TLS
+          MUST NOT be used with HTTP/2 (<xref target="TLSFeatures"/>).
+        </t>
+        <t>
+          Further considerations regarding the compression of header fields are described in <xref
+          target="COMPRESSION"/>.
+        </t>
+      </section>
+
+      <section title="Use of Padding" anchor="padding">
+        <t>
+          Padding within HTTP/2 is not intended as a replacement for general purpose padding, such
+          as might be provided by <xref target="TLS12">TLS</xref>.  Redundant padding could even be
+          counterproductive.  Correct application can depend on having specific knowledge of the
+          data that is being padded.
+        </t>
+        <t>
+          To mitigate attacks that rely on compression, disabling or limiting compression might be
+          preferable to padding as a countermeasure.
+        </t>
+        <t>
+          Padding can be used to obscure the exact size of frame content, and is provided to
+          mitigate specific attacks within HTTP.  For example, attacks where compressed content
+          includes both attacker-controlled plaintext and secret data (see for example, <xref
+          target="BREACH"/>).
+        </t>
+        <t>
+          Use of padding can result in less protection than might seem immediately obvious.  At
+          best, padding only makes it more difficult for an attacker to infer length information by
+          increasing the number of frames an attacker has to observe.  Incorrectly implemented
+          padding schemes can be easily defeated.  In particular, randomized padding with a
+          predictable distribution provides very little protection; similarly, padding payloads to a
+          fixed size exposes information as payload sizes cross the fixed size boundary, which could
+          be possible if an attacker can control plaintext.
+        </t>
+        <t>
+          Intermediaries SHOULD retain padding for <x:ref>DATA</x:ref> frames, but MAY drop padding
+          for <x:ref>HEADERS</x:ref> and <x:ref>PUSH_PROMISE</x:ref> frames.  A valid reason for an
+          intermediary to change the amount of padding of frames is to improve the protections that
+          padding provides.
+        </t>
+      </section>
+
+      <section title="Privacy Considerations">
+        <t>
+          Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions
+          of a single client or server over time.  This includes the value of settings, the manner
+          in which flow control windows are managed, the way priorities are allocated to streams,
+          timing of reactions to stimulus, and handling of any optional features.
+        </t>
+        <t>
+          As far as this creates observable differences in behavior, they could be used as a basis
+          for fingerprinting a specific client, as defined in <xref target="HTML5" x:fmt="of"
+          x:sec="1.8" x:rel="introduction.html#fingerprint"/>.
+        </t>
+      </section>
+    </section>
+
+    <section anchor="iana" title="IANA Considerations">
+      <t>
+        A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation
+        (ALPN) Protocol IDs" registry established in <xref target="TLS-ALPN"/>.
+      </t>
+      <t>
+        This document establishes a registry for frame types, settings, and error codes.  These new
+        registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section.
+      </t>
+      <t>
+        This document registers the <spanx style="verb">HTTP2-Settings</spanx> header field for
+        use in HTTP; and the 421 (Misdirected Request) status code.
+      </t>
+      <t>
+        This document registers the <spanx style="verb">PRI</spanx> method for use in HTTP, to avoid
+        collisions with the <xref target="ConnectionHeader">connection preface</xref>.
+      </t>
+
+      <section anchor="iana-alpn" title="Registration of HTTP/2 Identification Strings">
+        <t>
+          This document creates two registrations for the identification of HTTP/2 in the
+          "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in <xref
+          target="TLS-ALPN"/>.
+        </t>
+        <t>
+          The "h2" string identifies HTTP/2 when used over TLS:
+          <list style="hanging">
+            <t hangText="Protocol:">HTTP/2 over TLS</t>
+            <t hangText="Identification Sequence:">0x68 0x32 ("h2")</t>
+            <t hangText="Specification:">This document</t>
+          </list>
+        </t>
+        <t>
+          The "h2c" string identifies HTTP/2 when used over cleartext TCP:
+          <list style="hanging">
+            <t hangText="Protocol:">HTTP/2 over TCP</t>
+            <t hangText="Identification Sequence:">0x68 0x32 0x63 ("h2c")</t>
+            <t hangText="Specification:">This document</t>
+          </list>
+        </t>
+      </section>
+
+      <section anchor="iana-frames" title="Frame Type Registry">
+        <t>
+          This document establishes a registry for HTTP/2 frame type codes.  The "HTTP/2 Frame
+          Type" registry manages an 8-bit space.  The "HTTP/2 Frame Type" registry operates under
+          either of the <xref target="RFC5226">"IETF Review" or "IESG Approval" policies</xref> for
+          values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for
+          experimental use.
+        </t>
+        <t>
+          New entries in this registry require the following information:
+          <list style="hanging">
+            <t hangText="Frame Type:">
+              A name or label for the frame type.
+            </t>
+            <t hangText="Code:">
+              The 8-bit code assigned to the frame type.
+            </t>
+            <t hangText="Specification:">
+              A reference to a specification that includes a description of the frame layout,
+              its semantics and flags that the frame type uses, including any parts of the frame
+              that are conditionally present based on the value of flags.
+            </t>
+          </list>
+        </t>
+        <t>
+          The entries in the following table are registered by this document.
+        </t>
+        <texttable align="left" suppress-title="true">
+          <ttcol>Frame Type</ttcol>
+          <ttcol>Code</ttcol>
+          <ttcol>Section</ttcol>
+          <c>DATA</c><c>0x0</c><c><xref target="DATA"/></c>
+          <c>HEADERS</c><c>0x1</c><c><xref target="HEADERS"/></c>
+          <c>PRIORITY</c><c>0x2</c><c><xref target="PRIORITY"/></c>
+          <c>RST_STREAM</c><c>0x3</c><c><xref target="RST_STREAM"/></c>
+          <c>SETTINGS</c><c>0x4</c><c><xref target="SETTINGS"/></c>
+          <c>PUSH_PROMISE</c><c>0x5</c><c><xref target="PUSH_PROMISE"/></c>
+          <c>PING</c><c>0x6</c><c><xref target="PING"/></c>
+          <c>GOAWAY</c><c>0x7</c><c><xref target="GOAWAY"/></c>
+          <c>WINDOW_UPDATE</c><c>0x8</c><c><xref target="WINDOW_UPDATE"/></c>
+          <c>CONTINUATION</c><c>0x9</c><c><xref target="CONTINUATION"/></c>
+        </texttable>
+      </section>
+
+      <section anchor="iana-settings" title="Settings Registry">
+        <t>
+          This document establishes a registry for HTTP/2 settings.  The "HTTP/2 Settings" registry
+          manages a 16-bit space.  The "HTTP/2 Settings" registry operates under the <xref
+          target="RFC5226">"Expert Review" policy</xref> for values in the range from 0x0000 to
+          0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.
+        </t>
+        <t>
+          New registrations are advised to provide the following information:
+          <list style="hanging">
+            <t hangText="Name:">
+              A symbolic name for the setting.  Specifying a setting name is optional.
+            </t>
+            <t hangText="Code:">
+              The 16-bit code assigned to the setting.
+            </t>
+            <t hangText="Initial Value:">
+              An initial value for the setting.
+            </t>
+            <t hangText="Specification:">
+              An optional reference to a specification that describes the use of the setting.
+            </t>
+          </list>
+        </t>
+        <t>
+          An initial set of setting registrations can be found in <xref target="SettingValues"/>.
+        </t>
+        <texttable align="left" suppress-title="true">
+          <ttcol>Name</ttcol>
+          <ttcol>Code</ttcol>
+          <ttcol>Initial Value</ttcol>
+          <ttcol>Specification</ttcol>
+          <c>HEADER_TABLE_SIZE</c>
+          <c>0x1</c><c>4096</c><c><xref target="SettingValues"/></c>
+          <c>ENABLE_PUSH</c>
+          <c>0x2</c><c>1</c><c><xref target="SettingValues"/></c>
+          <c>MAX_CONCURRENT_STREAMS</c>
+          <c>0x3</c><c>(infinite)</c><c><xref target="SettingValues"/></c>
+          <c>INITIAL_WINDOW_SIZE</c>
+          <c>0x4</c><c>65535</c><c><xref target="SettingValues"/></c>
+          <c>MAX_FRAME_SIZE</c>
+          <c>0x5</c><c>16384</c><c><xref target="SettingValues"/></c>
+          <c>MAX_HEADER_LIST_SIZE</c>
+          <c>0x6</c><c>(infinite)</c><c><xref target="SettingValues"/></c>
+        </texttable>
+
+      </section>
+
+      <section anchor="iana-errors" title="Error Code Registry">
+        <t>
+          This document establishes a registry for HTTP/2 error codes.  The "HTTP/2 Error Code"
+          registry manages a 32-bit space.  The "HTTP/2 Error Code" registry operates under the
+          <xref target="RFC5226">"Expert Review" policy</xref>.
+        </t>
+        <t>
+          Registrations for error codes are required to include a description of the error code.  An
+          expert reviewer is advised to examine new registrations for possible duplication with
+          existing error codes.  Use of existing registrations is to be encouraged, but not
+          mandated.
+        </t>
+        <t>
+          New registrations are advised to provide the following information:
+          <list style="hanging">
+            <t hangText="Name:">
+              A name for the error code.  Specifying an error code name is optional.
+            </t>
+            <t hangText="Code:">
+              The 32-bit error code value.
+            </t>
+            <t hangText="Description:">
+              A brief description of the error code semantics, longer if no detailed specification
+              is provided.
+            </t>
+            <t hangText="Specification:">
+              An optional reference for a specification that defines the error code.
+            </t>
+          </list>
+        </t>
+        <t>
+          The entries in the following table are registered by this document.
+        </t>
+        <texttable align="left" suppress-title="true">
+          <ttcol>Name</ttcol>
+          <ttcol>Code</ttcol>
+          <ttcol>Description</ttcol>
+          <ttcol>Specification</ttcol>
+          <c>NO_ERROR</c><c>0x0</c>
+          <c>Graceful shutdown</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>PROTOCOL_ERROR</c><c>0x1</c>
+          <c>Protocol error detected</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>INTERNAL_ERROR</c><c>0x2</c>
+          <c>Implementation fault</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>FLOW_CONTROL_ERROR</c><c>0x3</c>
+          <c>Flow control limits exceeded</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>SETTINGS_TIMEOUT</c><c>0x4</c>
+          <c>Settings not acknowledged</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>STREAM_CLOSED</c><c>0x5</c>
+          <c>Frame received for closed stream</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>FRAME_SIZE_ERROR</c><c>0x6</c>
+          <c>Frame size incorrect</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>REFUSED_STREAM</c><c>0x7</c>
+          <c>Stream not processed</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>CANCEL</c><c>0x8</c>
+          <c>Stream cancelled</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>COMPRESSION_ERROR</c><c>0x9</c>
+          <c>Compression state not updated</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>CONNECT_ERROR</c><c>0xa</c>
+          <c>TCP connection error for CONNECT method</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>ENHANCE_YOUR_CALM</c><c>0xb</c>
+          <c>Processing capacity exceeded</c>
+          <c><xref target="ErrorCodes"/></c>
+          <c>INADEQUATE_SECURITY</c><c>0xc</c>
+          <c>Negotiated TLS parameters not acceptable</c>
+          <c><xref target="ErrorCodes"/></c>
+        </texttable>
+
+      </section>
+
+      <section title="HTTP2-Settings Header Field Registration">
+        <t>
+          This section registers the <spanx style="verb">HTTP2-Settings</spanx> header field in the
+          <xref target="BCP90">Permanent Message Header Field Registry</xref>.
+          <list style="hanging">
+            <t hangText="Header field name:">
+              HTTP2-Settings
+            </t>
+            <t hangText="Applicable protocol:">
+              http
+            </t>
+            <t hangText="Status:">
+              standard
+            </t>
+            <t hangText="Author/Change controller:">
+              IETF
+            </t>
+            <t hangText="Specification document(s):">
+              <xref target="Http2SettingsHeader"/> of this document
+            </t>
+            <t hangText="Related information:">
+              This header field is only used by an HTTP/2 client for Upgrade-based negotiation.
+            </t>
+          </list>
+        </t>
+      </section>
+
+      <section title="PRI Method Registration">
+        <t>
+          This section registers the <spanx style="verb">PRI</spanx> method in the HTTP Method
+          Registry (<xref target="RFC7231" x:fmt="," x:rel="#method.registry"/>).
+          <list style="hanging">
+            <t hangText="Method Name:">
+              PRI
+            </t>
+            <t hangText="Safe:">
+              No
+            </t>
+            <t hangText="Idempotent:">
+              No
+            </t>
+            <t hangText="Specification document(s):">
+              <xref target="ConnectionHeader"/> of this document
+            </t>
+            <t hangText="Related information:">
+              This method is never used by an actual client. This method will appear to be used
+              when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection
+              preface.
+            </t>
+          </list>
+        </t>
+      </section>
+
+      <section title="The 421 (Misdirected Request) HTTP Status Code"
+               anchor="iana-MisdirectedRequest">
+        <t>
+          This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext
+          Transfer Protocol (HTTP) Status Code Registry (<xref target="RFC7231" x:fmt=","
+          x:rel="#status.code.registry"/>).
+        </t>
+        <t>
+          <list style="hanging">
+            <t hangText="Status Code:">
+              421
+            </t>
+            <t hangText="Short Description:">
+              Misdirected Request
+            </t>
+            <t hangText="Specification:">
+              <xref target="MisdirectedRequest"/> of this document
+            </t>
+          </list>
+        </t>
+      </section>
+
+    </section>
+
+    <section title="Acknowledgements">
+      <t>
+        This document includes substantial input from the following individuals:
+        <list style="symbols">
+          <t>
+            Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin
+            Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin
+            Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY
+            contributors).
+          </t>
+          <t>
+            Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).
+          </t>
+          <t>
+            William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto
+            Peon, Rob Trace (Flow control).
+          </t>
+          <t>
+            Mike Bishop (Extensibility).
+          </t>
+          <t>
+            Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan
+            (Substantial editorial contributions).
+          </t>
+          <t>
+            Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.
+          </t>
+          <t>
+            Alexey Melnikov was an editor of this document during 2013.
+          </t>
+          <t>
+            A substantial proportion of Martin's contribution was supported by Microsoft during his
+            employment there.
+          </t>
+        </list>
+      </t>
+    </section>
+  </middle>
+
+  <back>
+    <references title="Normative References">
+      <reference anchor="COMPRESSION">
+        <front>
+          <title>HPACK - Header Compression for HTTP/2</title>
+          <author initials="H." surname="Ruellan" fullname="Herve Ruellan"/>
+          <author initials="R." surname="Peon" fullname="Roberto Peon"/>
+          <date month="July" year="2014" />
+        </front>
+        <seriesInfo name="Internet-Draft" value="draft-ietf-httpbis-header-compression-09" />
+        <x:source href="refs/draft-ietf-httpbis-header-compression-09.xml"/>
+      </reference>
+
+      <reference anchor="TCP">
+        <front>
+          <title abbrev="Transmission Control Protocol">
+            Transmission Control Protocol
+          </title>
+          <author initials="J." surname="Postel" fullname="Jon Postel">
+            <organization>University of Southern California (USC)/Information Sciences
+            Institute</organization>
+          </author>
+          <date year="1981" month="September" />
+        </front>
+        <seriesInfo name="STD" value="7" />
+        <seriesInfo name="RFC" value="793" />
+      </reference>
+
+      <reference anchor="RFC2119">
+        <front>
+          <title>
+            Key words for use in RFCs to Indicate Requirement Levels
+          </title>
+          <author initials="S." surname="Bradner" fullname="Scott Bradner">
+            <organization>Harvard University</organization>
+            <address><email>sob@harvard.edu</email></address>
+          </author>
+          <date month="March" year="1997"/>
+        </front>
+        <seriesInfo name="BCP" value="14"/>
+        <seriesInfo name="RFC" value="2119"/>
+      </reference>
+
+     <reference anchor="RFC2818">
+        <front>
+          <title>
+            HTTP Over TLS
+          </title>
+          <author initials="E." surname="Rescorla" fullname="Eric Rescorla"/>
+          <date month="May" year="2000"/>
+        </front>
+        <seriesInfo name="RFC" value="2818"/>
+      </reference>
+
+      <reference anchor="RFC3986">
+        <front>
+          <title abbrev="URI Generic Syntax">Uniform Resource Identifier (URI): Generic
+          Syntax</title>
+          <author initials="T." surname="Berners-Lee" fullname="Tim Berners-Lee"></author>
+          <author initials="R." surname="Fielding" fullname="Roy T. Fielding"></author>
+          <author initials="L." surname="Masinter" fullname="Larry Masinter"></author>
+          <date year="2005" month="January" />
+        </front>
+        <seriesInfo name="STD" value="66" />
+        <seriesInfo name="RFC" value="3986" />
+      </reference>
+
+      <reference anchor="RFC4648">
+        <front>
+          <title>The Base16, Base32, and Base64 Data Encodings</title>
+          <author fullname="S. Josefsson" initials="S." surname="Josefsson"/>
+          <date year="2006" month="October"/>
+        </front>
+        <seriesInfo value="4648" name="RFC"/>
+      </reference>
+
+      <reference anchor="RFC5226">
+        <front>
+          <title>Guidelines for Writing an IANA Considerations Section in RFCs</title>
+          <author initials="T." surname="Narten" fullname="T. Narten"/>
+          <author initials="H." surname="Alvestrand" fullname="H. Alvestrand"/>
+          <date year="2008" month="May" />
+        </front>
+        <seriesInfo name="BCP" value="26" />
+        <seriesInfo name="RFC" value="5226" />
+      </reference>
+
+      <reference anchor="RFC5234">
+        <front>
+          <title>Augmented BNF for Syntax Specifications: ABNF</title>
+          <author initials="D." surname="Crocker" fullname="D. Crocker"/>
+          <author initials="P." surname="Overell" fullname="P. Overell"/>
+          <date year="2008" month="January" />
+        </front>
+        <seriesInfo name="STD" value="68" />
+        <seriesInfo name="RFC" value="5234" />
+      </reference>
+
+      <reference anchor="TLS12">
+        <front>
+          <title>The Transport Layer Security (TLS) Protocol Version 1.2</title>
+          <author initials="T." surname="Dierks" fullname="Tim Dierks"/>
+          <author initials="E." surname="Rescorla" fullname="Eric Rescorla"/>
+          <date year="2008" month="August" />
+        </front>
+        <seriesInfo name="RFC" value="5246" />
+      </reference>
+
+      <reference anchor="TLS-EXT">
+        <front>
+          <title>
+            Transport Layer Security (TLS) Extensions: Extension Definitions
+          </title>
+          <author initials="D." surname="Eastlake" fullname="D. Eastlake"/>
+          <date year="2011" month="January"/>
+        </front>
+        <seriesInfo name="RFC" value="6066"/>
+      </reference>
+
+      <reference anchor="TLS-ALPN">
+        <front>
+          <title>Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension</title>
+          <author initials="S." surname="Friedl" fullname="Stephan Friedl"></author>
+          <author initials="A." surname="Popov" fullname="Andrei Popov"></author>
+          <author initials="A." surname="Langley" fullname="Adam Langley"></author>
+          <author initials="E." surname="Stephan" fullname="Emile Stephan"></author>
+          <date month="July" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7301" />
+      </reference>
+
+      <reference anchor="TLS-ECDHE">
+        <front>
+          <title>
+            TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois
+            Counter Mode (GCM)
+          </title>
+          <author initials="E." surname="Rescorla" fullname="E. Rescorla"/>
+          <date year="2008" month="August" />
+        </front>
+        <seriesInfo name="RFC" value="5289" />
+      </reference>
+
+      <reference anchor="FIPS186">
+        <front>
+          <title>
+            Digital Signature Standard (DSS)
+          </title>
+          <author><organization>NIST</organization></author>
+          <date year="2013" month="July" />
+        </front>
+        <seriesInfo name="FIPS" value="PUB 186-4" />
+      </reference>
+
+      <reference anchor="RFC7230">
+        <front>
+          <title>
+          Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing</title>
+          <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+            <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+            <address><email>fielding@gbiv.com</email></address>
+          </author>
+          <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+            <organization abbrev="greenbytes">greenbytes GmbH</organization>
+            <address><email>julian.reschke@greenbytes.de</email></address>
+          </author>
+          <date month="June" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7230" />
+        <x:source href="refs/rfc7230.xml"
+                  basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230"/>
+      </reference>
+      <reference anchor="RFC7231">
+        <front>
+          <title>
+          Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content</title>
+          <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+            <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+            <address><email>fielding@gbiv.com</email></address>
+          </author>
+          <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+            <organization abbrev="greenbytes">greenbytes GmbH</organization>
+            <address><email>julian.reschke@greenbytes.de</email></address>
+          </author>
+          <date month="June" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7231" />
+        <x:source href="refs/rfc7231.xml"
+                  basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7231"/>
+      </reference>
+      <reference anchor="RFC7232">
+        <front>
+          <title>Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests</title>
+          <author fullname="Roy T. Fielding" initials="R." role="editor" surname="Fielding">
+            <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+            <address><email>fielding@gbiv.com</email></address>
+          </author>
+          <author fullname="Julian F. Reschke" initials="J. F." role="editor" surname="Reschke">
+            <organization abbrev="greenbytes">greenbytes GmbH</organization>
+            <address><email>julian.reschke@greenbytes.de</email></address>
+          </author>
+          <date month="June" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7232" />
+      </reference>
+      <reference anchor="RFC7233">
+        <front>
+          <title>Hypertext Transfer Protocol (HTTP/1.1): Range Requests</title>
+          <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+            <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+            <address><email>fielding@gbiv.com</email></address>
+          </author>
+          <author initials="Y." surname="Lafon" fullname="Yves Lafon" role="editor">
+            <organization abbrev="W3C">World Wide Web Consortium</organization>
+            <address><email>ylafon@w3.org</email></address>
+          </author>
+          <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+            <organization abbrev="greenbytes">greenbytes GmbH</organization>
+            <address><email>julian.reschke@greenbytes.de</email></address>
+          </author>
+          <date month="June" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7233" />
+      </reference>
+      <reference anchor="RFC7234">
+        <front>
+          <title>Hypertext Transfer Protocol (HTTP/1.1): Caching</title>
+          <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+            <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+            <address><email>fielding@gbiv.com</email></address>
+          </author>
+          <author fullname="Mark Nottingham" initials="M." role="editor" surname="Nottingham">
+            <organization>Akamai</organization>
+            <address><email>mnot@mnot.net</email></address>
+          </author>
+          <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+            <organization abbrev="greenbytes">greenbytes GmbH</organization>
+            <address><email>julian.reschke@greenbytes.de</email></address>
+          </author>
+          <date month="June" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7234"/>
+        <x:source href="refs/rfc7234.xml"
+                  basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7234"/>
+      </reference>
+      <reference anchor="RFC7235">
+        <front>
+          <title>Hypertext Transfer Protocol (HTTP/1.1): Authentication</title>
+          <author initials="R." surname="Fielding" fullname="Roy T. Fielding" role="editor">
+            <organization abbrev="Adobe">Adobe Systems Incorporated</organization>
+            <address><email>fielding@gbiv.com</email></address>
+          </author>
+          <author initials="J. F." surname="Reschke" fullname="Julian F. Reschke" role="editor">
+            <organization abbrev="greenbytes">greenbytes GmbH</organization>
+            <address><email>julian.reschke@greenbytes.de</email></address>
+          </author>
+          <date month="June" year="2014" />
+        </front>
+        <seriesInfo name="RFC" value="7235"/>
+        <x:source href="refs/rfc7235.xml"
+                  basename="https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7235"/>
+      </reference>
+
+      <reference anchor="COOKIE">
+        <front>
+          <title>HTTP State Management Mechanism</title>
+          <author initials="A." surname="Barth" fullname="A. Barth"/>
+          <date year="2011" month="April" />
+        </front>
+        <seriesInfo name="RFC" value="6265" />
+      </reference>
+    </references>
+
+    <references title="Informative References">
+      <reference anchor="RFC1323">
+        <front>
+          <title>
+            TCP Extensions for High Performance
+          </title>
+          <author initials="V." surname="Jacobson" fullname="Van Jacobson"></author>
+          <author initials="B." surname="Braden" fullname="Bob Braden"></author>
+          <author initials="D." surname="Borman" fullname="Dave Borman"></author>
+          <date year="1992" month="May" />
+        </front>
+        <seriesInfo name="RFC" value="1323" />
+      </reference>
+
+      <reference anchor="RFC3749">
+        <front>
+          <title>Transport Layer Security Protocol Compression Methods</title>
+          <author initials="S." surname="Hollenbeck" fullname="S. Hollenbeck"/>
+          <date year="2004" month="May" />
+        </front>
+        <seriesInfo name="RFC" value="3749" />
+      </reference>
+
+      <reference anchor="RFC6585">
+        <front>
+          <title>Additional HTTP Status Codes</title>
+          <author initials="M." surname="Nottingham" fullname="Mark Nottingham"/>
+          <author initials="R." surname="Fielding" fullname="Roy Fielding"/>
+          <date year="2012" month="April" />
+        </front>
+        <seriesInfo name="RFC" value="6585" />
+      </reference>
+
+      <reference anchor="RFC4492">
+        <front>
+          <title>
+            Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
+          </title>
+          <author initials="S." surname="Blake-Wilson" fullname="S. Blake-Wilson"/>
+          <author initials="N." surname="Bolyard" fullname="N. Bolyard"/>
+          <author initials="V." surname="Gupta" fullname="V. Gupta"/>
+          <author initials="C." surname="Hawk" fullname="C. Hawk"/>
+          <author initials="B." surname="Moeller" fullname="B. Moeller"/>
+          <date year="2006" month="May" />
+        </front>
+        <seriesInfo name="RFC" value="4492" />
+      </reference>
+
+      <reference anchor="RFC5288">
+        <front>
+          <title>
+            AES Galois Counter Mode (GCM) Cipher Suites for TLS
+          </title>
+          <author initials="J." surname="Salowey" fullname="J. Salowey"/>
+          <author initials="A." surname="Choudhury" fullname="A. Choudhury"/>
+          <author initials="D." surname="McGrew" fullname="D. McGrew"/>
+          <date year="2008" month="August" />
+        </front>
+        <seriesInfo name="RFC" value="5288" />
+      </reference>
+
+      <reference anchor='HTML5'
+           target='http://www.w3.org/TR/2014/CR-html5-20140731/'>
+        <front>
+          <title>HTML5</title>
+          <author fullname='Robin Berjon' surname='Berjon' initials='R.'/>
+          <author fullname='Steve Faulkner' surname='Faulkner' initials='S.'/>
+          <author fullname='Travis Leithead' surname='Leithead' initials='T.'/>
+          <author fullname='Erika Doyle Navara' surname='Doyle Navara' initials='E.'/>
+          <author fullname='Edward O&apos;Connor' surname='O&apos;Connor' initials='E.'/>
+          <author fullname='Silvia Pfeiffer' surname='Pfeiffer' initials='S.'/>
+          <date year='2014' month='July' day='31'/>
+        </front>
+        <seriesInfo name='W3C Candidate Recommendation' value='CR-html5-20140731'/>
+        <annotation>
+          Latest version available at
+          <eref target='http://www.w3.org/TR/html5/'/>.
+        </annotation>
+      </reference>
+
+      <reference anchor="TALKING" target="http://w2spconf.com/2011/papers/websocket.pdf">
+        <front>
+          <title>
+            Talking to Yourself for Fun and Profit
+          </title>
+          <author initials="L-S." surname="Huang"/>
+          <author initials="E." surname="Chen"/>
+          <author initials="A." surname="Barth"/>
+          <author initials="E." surname="Rescorla"/>
+          <author initials="C." surname="Jackson"/>
+          <date year="2011" />
+        </front>
+      </reference>
+
+      <reference anchor="BREACH"
+                 target="http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf">
+        <front>
+          <title>
+            BREACH: Reviving the CRIME Attack
+          </title>
+          <author initials="Y." surname="Gluck"/>
+          <author initials="N." surname="Harris"/>
+          <author initials="A." surname="Prado"/>
+          <date year="2013" month="July" day="12"/>
+        </front>
+      </reference>
+
+      <reference anchor="BCP90">
+        <front>
+          <title>Registration Procedures for Message Header Fields</title>
+          <author initials="G." surname="Klyne" fullname="G. Klyne">
+            <organization>Nine by Nine</organization>
+            <address><email>GK-IETF@ninebynine.org</email></address>
+          </author>
+          <author initials="M." surname="Nottingham" fullname="M. Nottingham">
+            <organization>BEA Systems</organization>
+            <address><email>mnot@pobox.com</email></address>
+          </author>
+          <author initials="J." surname="Mogul" fullname="J. Mogul">
+            <organization>HP Labs</organization>
+            <address><email>JeffMogul@acm.org</email></address>
+          </author>
+          <date year="2004" month="September" />
+        </front>
+        <seriesInfo name="BCP" value="90" />
+        <seriesInfo name="RFC" value="3864" />
+      </reference>
+
+      <reference anchor="TLSBCP">
+        <front>
+          <title>Recommendations for Secure Use of TLS and DTLS</title>
+          <author initials="Y" surname="Sheffer" fullname="Yaron Sheffer">
+            <organization />
+          </author>
+          <author initials="R" surname="Holz" fullname="Ralph Holz">
+            <organization />
+          </author>
+          <author initials="P" surname="Saint-Andre" fullname="Peter Saint-Andre">
+            <organization />
+          </author>
+          <date month="June" day="23" year="2014" />
+        </front>
+        <seriesInfo name="Internet-Draft" value="draft-ietf-uta-tls-bcp-01" />
+      </reference>
+
+      <reference anchor="ALT-SVC">
+        <front>
+          <title>
+            HTTP Alternative Services
+          </title>
+          <author initials="M." surname="Nottingham" fullname="Mark Nottingham">
+            <organization>Akamai</organization>
+          </author>
+          <author initials="P." surname="McManus" fullname="Patrick McManus">
+            <organization>Mozilla</organization>
+          </author>
+          <author initials="J." surname="Reschke" fullname="Julian Reschke">
+            <organization>greenbytes</organization>
+          </author>
+          <date year="2014" month="April"/>
+        </front>
+        <seriesInfo name="Internet-Draft" value="draft-ietf-httpbis-alt-svc-02"/>
+        <x:source href="refs/draft-ietf-httpbis-alt-svc-02.xml"/>
+      </reference>
+    </references>
+
+    <section title="Change Log" anchor="change.log">
+      <t>
+        This section is to be removed by RFC Editor before publication.
+      </t>
+
+      <section title="Since draft-ietf-httpbis-http2-14" anchor="changes.since.draft-ietf-httpbis-http2-14">
+        <t>
+          Renamed Not Authoritative status code to Misdirected Request.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-13" anchor="changes.since.draft-ietf-httpbis-http2-13">
+        <t>
+          Pseudo-header fields are now required to appear strictly before regular ones.
+        </t>
+        <t>
+          Restored 1xx series status codes, except 101.
+        </t>
+        <t>
+          Changed the frame length field to 24 bits.  Expanded the frame header to 9 octets.  Added a setting
+          to limit the damage.
+        </t>
+        <t>
+          Added a setting to advise peers of header set size limits.
+        </t>
+        <t>
+          Removed segments.
+        </t>
+        <t>
+          Made non-semantic-bearing <x:ref>HEADERS</x:ref> frames illegal in the HTTP mapping.
+        </t>
+      </section>
+
+       <section title="Since draft-ietf-httpbis-http2-12" anchor="changes.since.draft-ietf-httpbis-http2-12">
+         <t>
+           Restored extensibility options.
+         </t>
+         <t>
+           Restricting TLS cipher suites to AEAD only.
+         </t>
+         <t>
+           Removing Content-Encoding requirements.
+         </t>
+         <t>
+           Permitting the use of <x:ref>PRIORITY</x:ref> after stream close.
+         </t>
+         <t>
+           Removed ALTSVC frame.
+         </t>
+         <t>
+           Removed BLOCKED frame.
+         </t>
+         <t>
+           Reducing the maximum padding size to 256 octets; removing padding from
+           <x:ref>CONTINUATION</x:ref> frames.
+         </t>
+         <t>
+           Removed per-frame GZIP compression.
+         </t>
+       </section>
+
+       <section title="Since draft-ietf-httpbis-http2-11" anchor="changes.since.draft-ietf-httpbis-http2-11">
+         <t>
+           Added BLOCKED frame (at risk).
+         </t>
+         <t>
+           Simplified priority scheme.
+         </t>
+         <t>
+           Added <x:ref>DATA</x:ref> per-frame GZIP compression.
+         </t>
+       </section>
+
+       <section title="Since draft-ietf-httpbis-http2-10" anchor="changes.since.draft-ietf-httpbis-http2-10">
+        <t>
+          Changed "connection header" to "connection preface" to avoid confusion.
+        </t>
+        <t>
+          Added dependency-based stream prioritization.
+        </t>
+        <t>
+          Added "h2c" identifier to distinguish between cleartext and secured HTTP/2.
+        </t>
+        <t>
+          Adding missing padding to <x:ref>PUSH_PROMISE</x:ref>.
+        </t>
+        <t>
+          Integrate ALTSVC frame and supporting text.
+        </t>
+        <t>
+          Dropping requirement on "deflate" Content-Encoding.
+        </t>
+        <t>
+          Improving security considerations around use of compression.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-09" anchor="changes.since.draft-ietf-httpbis-http2-09">
+        <t>
+          Adding padding for data frames.
+        </t>
+        <t>
+          Renumbering frame types, error codes, and settings.
+        </t>
+        <t>
+          Adding INADEQUATE_SECURITY error code.
+        </t>
+        <t>
+          Updating TLS usage requirements to 1.2; forbidding TLS compression.
+        </t>
+        <t>
+          Removing extensibility for frames and settings.
+        </t>
+        <t>
+          Changing setting identifier size.
+        </t>
+        <t>
+          Removing the ability to disable flow control.
+        </t>
+        <t>
+          Changing the protocol identification token to "h2".
+        </t>
+        <t>
+          Changing the use of :authority to make it optional and to allow userinfo in non-HTTP
+          cases.
+        </t>
+        <t>
+          Allowing split on 0x0 for Cookie.
+        </t>
+        <t>
+          Reserved PRI method in HTTP/1.1 to avoid possible future collisions.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-08" anchor="changes.since.draft-ietf-httpbis-http2-08">
+        <t>
+          Added cookie crumbling for more efficient header compression.
+        </t>
+        <t>
+          Added header field ordering with the value-concatenation mechanism.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-07" anchor="changes.since.draft-ietf-httpbis-http2-07">
+        <t>
+          Marked draft for implementation.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-06" anchor="changes.since.draft-ietf-httpbis-http2-06">
+        <t>
+          Adding definition for CONNECT method.
+        </t>
+        <t>
+          Constraining the use of push to safe, cacheable methods with no request body.
+        </t>
+        <t>
+          Changing from :host to :authority to remove any potential confusion.
+        </t>
+        <t>
+          Adding setting for header compression table size.
+        </t>
+        <t>
+          Adding settings acknowledgement.
+        </t>
+        <t>
+          Removing unnecessary and potentially problematic flags from CONTINUATION.
+        </t>
+        <t>
+          Added denial of service considerations.
+        </t>
+      </section>
+      <section title="Since draft-ietf-httpbis-http2-05" anchor="changes.since.draft-ietf-httpbis-http2-05">
+        <t>
+          Marking the draft ready for implementation.
+        </t>
+        <t>
+          Renumbering END_PUSH_PROMISE flag.
+        </t>
+        <t>
+          Editorial clarifications and changes.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-04" anchor="changes.since.draft-ietf-httpbis-http2-04">
+        <t>
+          Added CONTINUATION frame for HEADERS and PUSH_PROMISE.
+        </t>
+        <t>
+          PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is
+          zero.
+        </t>
+        <t>
+          Push expanded to allow all safe methods without a request body.
+        </t>
+        <t>
+          Clarified the use of HTTP header fields in requests and responses.  Prohibited HTTP/1.1
+          hop-by-hop header fields.
+        </t>
+        <t>
+          Requiring that intermediaries not forward requests with missing or illegal routing
+          :-headers.
+        </t>
+        <t>
+          Clarified requirements around handling different frames after stream close, stream reset
+          and <x:ref>GOAWAY</x:ref>.
+        </t>
+        <t>
+          Added more specific prohibitions for sending of different frame types in various stream
+          states.
+        </t>
+        <t>
+          Making the last received setting value the effective value.
+        </t>
+        <t>
+          Clarified requirements on TLS version, extension and ciphers.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-03" anchor="changes.since.draft-ietf-httpbis-http2-03">
+        <t>
+          Committed major restructuring atrocities.
+        </t>
+        <t>
+          Added reference to first header compression draft.
+        </t>
+        <t>
+          Added more formal description of frame lifecycle.
+        </t>
+        <t>
+          Moved END_STREAM (renamed from FINAL) back to <x:ref>HEADERS</x:ref>/<x:ref>DATA</x:ref>.
+        </t>
+        <t>
+          Removed HEADERS+PRIORITY, added optional priority to <x:ref>HEADERS</x:ref> frame.
+        </t>
+        <t>
+          Added <x:ref>PRIORITY</x:ref> frame.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-02" anchor="changes.since.draft-ietf-httpbis-http2-02">
+        <t>
+          Added continuations to frames carrying header blocks.
+        </t>
+        <t>
+          Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful
+          concepts, like cookies.
+        </t>
+        <t>
+          Removed "message".
+        </t>
+        <t>
+          Switched to TLS ALPN from NPN.
+        </t>
+        <t>
+          Editorial changes.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-01" anchor="changes.since.draft-ietf-httpbis-http2-01">
+        <t>
+          Added IANA considerations section for frame types, error codes and settings.
+        </t>
+        <t>
+          Removed data frame compression.
+        </t>
+        <t>
+          Added <x:ref>PUSH_PROMISE</x:ref>.
+        </t>
+        <t>
+          Added globally applicable flags to framing.
+        </t>
+        <t>
+          Removed zlib-based header compression mechanism.
+        </t>
+        <t>
+          Updated references.
+        </t>
+        <t>
+          Clarified stream identifier reuse.
+        </t>
+        <t>
+          Removed CREDENTIALS frame and associated mechanisms.
+        </t>
+        <t>
+          Added advice against naive implementation of flow control.
+        </t>
+        <t>
+          Added session header section.
+        </t>
+        <t>
+          Restructured frame header.  Removed distinction between data and control frames.
+        </t>
+        <t>
+          Altered flow control properties to include session-level limits.
+        </t>
+        <t>
+          Added note on cacheability of pushed resources and multiple tenant servers.
+        </t>
+        <t>
+          Changed protocol label form based on discussions.
+        </t>
+      </section>
+
+      <section title="Since draft-ietf-httpbis-http2-00" anchor="changes.since.draft-ietf-httpbis-http2-00">
+        <t>
+          Changed title throughout.
+        </t>
+        <t>
+          Removed section on Incompatibilities with SPDY draft#2.
+        </t>
+        <t>
+          Changed <x:ref>INTERNAL_ERROR</x:ref> on <x:ref>GOAWAY</x:ref> to have a value of 2 <eref
+          target="https://groups.google.com/forum/?fromgroups#!topic/spdy-dev/cfUef2gL3iU"/>.
+        </t>
+        <t>
+          Replaced abstract and introduction.
+        </t>
+        <t>
+          Added section on starting HTTP/2.0, including upgrade mechanism.
+        </t>
+        <t>
+          Removed unused references.
+        </t>
+        <t>
+          Added <xref target="fc-principles">flow control principles</xref> based on <eref
+          target="https://tools.ietf.org/html/draft-montenegro-httpbis-http2-fc-principles-01"/>.
+        </t>
+      </section>
+
+      <section title="Since draft-mbelshe-httpbis-spdy-00" anchor="changes.since.draft-mbelshe-httpbis-spdy-00">
+        <t>
+          Adopted as base for draft-ietf-httpbis-http2.
+        </t>
+        <t>
+          Updated authors/editors list.
+        </t>
+        <t>
+          Added status note.
+        </t>
+      </section>
+    </section>
+
+  </back>
+</rfc>
+<!--
+  vim:et:tw=100:sw=2:
+  -->

+ 553 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/transport.go

@@ -0,0 +1,553 @@
+// Copyright 2015 The Go Authors.
+// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://go.googlesource.com/go/+/master/LICENSE
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bradfitz/http2/hpack"
+)
+
+// Transport is an HTTP/2-only http.RoundTripper.  Requests whose URL
+// scheme is not "https" are delegated to Fallback (see RoundTrip).
+type Transport struct {
+	// Fallback handles any request this transport cannot serve
+	// itself.  If nil, such requests fail with an error.
+	Fallback http.RoundTripper
+
+	// TODO: remove this and make more general with a TLS dial hook, like http
+	InsecureTLSDial bool
+
+	connMu sync.Mutex
+	conns  map[string][]*clientConn // key is host:port
+}
+
+// clientConn is the state of a single client HTTP/2 connection,
+// multiplexing zero or more in-flight streams.
+type clientConn struct {
+	t        *Transport
+	tconn    *tls.Conn
+	tlsState *tls.ConnectionState
+	connKey  []string // key(s) this connection is cached in, in t.conns
+
+	// Owned by the readLoop goroutine:
+	readerDone chan struct{} // closed on error
+	readerErr  error         // set before readerDone is closed
+	hdec       *hpack.Decoder
+	nextRes    *http.Response // response currently being assembled from header frames
+
+	mu           sync.Mutex // guards the fields below
+	closed       bool
+	goAway       *GoAwayFrame // if non-nil, the GoAwayFrame we received
+	streams      map[uint32]*clientStream // in-flight streams, keyed by stream ID
+	nextStreamID uint32 // always odd: clients initiate odd-numbered streams
+	bw           *bufio.Writer
+	werr         error // first write error that has occurred
+	br           *bufio.Reader
+	fr           *Framer
+	// Settings from peer:
+	maxFrameSize         uint32
+	maxConcurrentStreams uint32
+	initialWindowSize    uint32
+	hbuf                 bytes.Buffer // HPACK encoder writes into this
+	henc                 *hpack.Encoder
+}
+
+// clientStream tracks one client-initiated stream: its ID, the channel
+// on which the response (or error) is delivered, and the pipe carrying
+// the response body from readLoop to the caller.
+type clientStream struct {
+	ID   uint32
+	resc chan resAndError
+	pw   *io.PipeWriter // readLoop writes DATA frame payloads here
+	pr   *io.PipeReader // handed to the caller as the response body
+}
+
+// stickyErrWriter wraps an io.Writer and latches the first write
+// error; once an error occurs, all later Writes fail fast with it.
+type stickyErrWriter struct {
+	w   io.Writer
+	err *error // points at the owning clientConn's werr field
+}
+
+// Write implements io.Writer, short-circuiting after the first error.
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+	if *sew.err != nil {
+		return 0, *sew.err
+	}
+	n, err = sew.w.Write(p)
+	*sew.err = err
+	return
+}
+
+// RoundTrip implements http.RoundTripper.  Non-"https" requests are
+// passed to t.Fallback (or rejected when no Fallback is set).  If a
+// cached connection turns out to be closed, the request is retried on
+// a fresh connection.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if req.URL.Scheme != "https" {
+		if t.Fallback == nil {
+			return nil, errors.New("http2: unsupported scheme and no Fallback")
+		}
+		return t.Fallback.RoundTrip(req)
+	}
+
+	host, port, err := net.SplitHostPort(req.URL.Host)
+	if err != nil {
+		// No explicit port in the URL; assume the default HTTPS port.
+		host = req.URL.Host
+		port = "443"
+	}
+
+	for {
+		cc, err := t.getClientConn(host, port)
+		if err != nil {
+			return nil, err
+		}
+		res, err := cc.roundTrip(req)
+		if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)?
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return res, nil
+	}
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+	t.connMu.Lock()
+	defer t.connMu.Unlock()
+	// Each connection decides for itself whether it is idle.
+	for _, vv := range t.conns {
+		for _, cc := range vv {
+			cc.closeIfIdle()
+		}
+	}
+}
+
+// errClientConnClosed is returned by roundTrip when the cached
+// connection was already closed; RoundTrip retries on it.
+var errClientConnClosed = errors.New("http2: client conn is closed")
+
+// shouldRetryRequest reports whether err means the request may safely
+// be retried on a new connection.
+func shouldRetryRequest(err error) bool {
+	// TODO: or GOAWAY graceful shutdown stuff
+	return err == errClientConnClosed
+}
+
+// removeClientConn drops cc from the connection cache under every key
+// it was registered with, deleting map entries that become empty.
+func (t *Transport) removeClientConn(cc *clientConn) {
+	t.connMu.Lock()
+	defer t.connMu.Unlock()
+	for _, key := range cc.connKey {
+		vv, ok := t.conns[key]
+		if !ok {
+			continue
+		}
+		newList := filterOutClientConn(vv, cc)
+		if len(newList) > 0 {
+			t.conns[key] = newList
+		} else {
+			delete(t.conns, key)
+		}
+	}
+}
+
+// filterOutClientConn returns in with every occurrence of exclude
+// removed.  It filters in place, reusing in's backing array, so the
+// caller must not use in afterwards.
+func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn {
+	out := in[:0]
+	for _, v := range in {
+		if v != exclude {
+			out = append(out, v)
+		}
+	}
+	return out
+}
+
+// getClientConn returns a cached connection to host:port that can take
+// a new request, dialing a new one (and caching it) if none exists.
+func (t *Transport) getClientConn(host, port string) (*clientConn, error) {
+	t.connMu.Lock()
+	defer t.connMu.Unlock()
+
+	key := net.JoinHostPort(host, port)
+
+	for _, cc := range t.conns[key] {
+		if cc.canTakeNewRequest() {
+			return cc, nil
+		}
+	}
+	// Lazily allocate the cache map on first use.
+	if t.conns == nil {
+		t.conns = make(map[string][]*clientConn)
+	}
+	// NOTE(review): the dial below happens while holding connMu, so a
+	// slow handshake blocks all other getClientConn callers.
+	cc, err := t.newClientConn(host, port, key)
+	if err != nil {
+		return nil, err
+	}
+	t.conns[key] = append(t.conns[key], cc)
+	return cc, nil
+}
+
+// newClientConn dials host:port over TLS (requiring the HTTP/2 ALPN
+// protocol), performs the HTTP/2 client preface and SETTINGS exchange,
+// and starts the connection's readLoop goroutine.  key is the cache
+// key the caller will store the connection under.
+func (t *Transport) newClientConn(host, port, key string) (*clientConn, error) {
+	cfg := &tls.Config{
+		ServerName:         host,
+		NextProtos:         []string{NextProtoTLS},
+		InsecureSkipVerify: t.InsecureTLSDial,
+	}
+	tconn, err := tls.Dial("tcp", host+":"+port, cfg)
+	if err != nil {
+		return nil, err
+	}
+	// NOTE(review): tls.Dial has already completed the handshake, so
+	// this extra Handshake call is presumably a no-op — confirm.
+	if err := tconn.Handshake(); err != nil {
+		return nil, err
+	}
+	if !t.InsecureTLSDial {
+		if err := tconn.VerifyHostname(cfg.ServerName); err != nil {
+			return nil, err
+		}
+	}
+	state := tconn.ConnectionState()
+	if p := state.NegotiatedProtocol; p != NextProtoTLS {
+		// TODO(bradfitz): fall back to Fallback
+		return nil, fmt.Errorf("bad protocol: %v", p)
+	}
+	if !state.NegotiatedProtocolIsMutual {
+		return nil, errors.New("could not negotiate protocol mutually")
+	}
+	// Client connection preface: the magic string, then SETTINGS.
+	if _, err := tconn.Write(clientPreface); err != nil {
+		return nil, err
+	}
+
+	cc := &clientConn{
+		t:                    t,
+		tconn:                tconn,
+		connKey:              []string{key}, // TODO: cert's validated hostnames too
+		tlsState:             &state,
+		readerDone:           make(chan struct{}),
+		nextStreamID:         1,
+		maxFrameSize:         16 << 10, // spec default
+		initialWindowSize:    65535,    // spec default
+		maxConcurrentStreams: 1000,     // "infinite", per spec. 1000 seems good enough.
+		streams:              make(map[uint32]*clientStream),
+	}
+	cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr})
+	cc.br = bufio.NewReader(tconn)
+	cc.fr = NewFramer(cc.bw, cc.br)
+	cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+	cc.fr.WriteSettings()
+	// TODO: re-send more conn-level flow control tokens when server uses all these.
+	cc.fr.WriteWindowUpdate(0, 1<<30) // um, 0x7fffffff doesn't work to Google? it hangs?
+	cc.bw.Flush()
+	if cc.werr != nil {
+		return nil, cc.werr
+	}
+
+	// Read the obligatory SETTINGS frame
+	f, err := cc.fr.ReadFrame()
+	if err != nil {
+		return nil, err
+	}
+	sf, ok := f.(*SettingsFrame)
+	if !ok {
+		return nil, fmt.Errorf("expected settings frame, got: %T", f)
+	}
+	cc.fr.WriteSettingsAck()
+	cc.bw.Flush()
+
+	// Record the peer's advertised limits.
+	sf.ForeachSetting(func(s Setting) error {
+		switch s.ID {
+		case SettingMaxFrameSize:
+			cc.maxFrameSize = s.Val
+		case SettingMaxConcurrentStreams:
+			cc.maxConcurrentStreams = s.Val
+		case SettingInitialWindowSize:
+			cc.initialWindowSize = s.Val
+		default:
+			// TODO(bradfitz): handle more
+			log.Printf("Unhandled Setting: %v", s)
+		}
+		return nil
+	})
+	// TODO: figure out henc size
+	cc.hdec = hpack.NewDecoder(initialHeaderTableSize, cc.onNewHeaderField)
+
+	go cc.readLoop()
+	return cc, nil
+}
+
+// setGoAway records the GOAWAY frame received from the peer; a non-nil
+// goAway stops canTakeNewRequest from handing out this connection.
+func (cc *clientConn) setGoAway(f *GoAwayFrame) {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cc.goAway = f
+}
+
+// canTakeNewRequest reports whether this connection may carry another
+// request: no GOAWAY received, the peer's concurrent-stream limit not
+// reached, and odd stream IDs not yet exhausted (max ID is 2^31-1).
+func (cc *clientConn) canTakeNewRequest() bool {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cc.goAway == nil &&
+		int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+		cc.nextStreamID < 2147483647
+}
+
+// closeIfIdle closes the underlying TLS connection, but only when no
+// streams are in flight.  Used by Transport.CloseIdleConnections.
+func (cc *clientConn) closeIfIdle() {
+	cc.mu.Lock()
+	if len(cc.streams) > 0 {
+		cc.mu.Unlock()
+		return
+	}
+	cc.closed = true
+	// TODO: do clients send GOAWAY too? maybe? Just Close:
+	cc.mu.Unlock()
+
+	cc.tconn.Close()
+}
+
+// roundTrip sends req on this connection as HEADERS (plus any needed
+// CONTINUATION frames) and blocks until readLoop delivers the response
+// on the stream's resc channel.
+func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) {
+	cc.mu.Lock()
+
+	if cc.closed {
+		cc.mu.Unlock()
+		return nil, errClientConnClosed
+	}
+
+	cs := cc.newStream()
+	hasBody := false // TODO
+
+	// we send: HEADERS[+CONTINUATION] + (DATA?)
+	hdrs := cc.encodeHeaders(req)
+	first := true
+	// Split the HPACK block into frame-sized chunks: the first goes in
+	// a HEADERS frame, the rest in CONTINUATION frames.
+	for len(hdrs) > 0 {
+		chunk := hdrs
+		if len(chunk) > int(cc.maxFrameSize) {
+			chunk = chunk[:cc.maxFrameSize]
+		}
+		hdrs = hdrs[len(chunk):]
+		endHeaders := len(hdrs) == 0
+		if first {
+			cc.fr.WriteHeaders(HeadersFrameParam{
+				StreamID:      cs.ID,
+				BlockFragment: chunk,
+				EndStream:     !hasBody,
+				EndHeaders:    endHeaders,
+			})
+			first = false
+		} else {
+			cc.fr.WriteContinuation(cs.ID, endHeaders, chunk)
+		}
+	}
+	cc.bw.Flush()
+	werr := cc.werr
+	cc.mu.Unlock()
+
+	if hasBody {
+		// TODO: write data. and it should probably be interleaved:
+		//   go ... io.Copy(dataFrameWriter{cc, cs, ...}, req.Body) ... etc
+	}
+
+	// NOTE(review): on a write error the stream created above is never
+	// removed from cc.streams, so it leaks — confirm and fix upstream.
+	if werr != nil {
+		return nil, werr
+	}
+
+	re := <-cs.resc
+	if re.err != nil {
+		return nil, re.err
+	}
+	res := re.res
+	res.Request = req
+	res.TLS = cc.tlsState
+	return res, nil
+}
+
+// encodeHeaders HPACK-encodes req's pseudo-header fields followed by
+// its regular header fields into cc.hbuf and returns the encoded
+// bytes.  The returned slice aliases cc.hbuf and is only valid until
+// the next call.  requires cc.mu be held.
+func (cc *clientConn) encodeHeaders(req *http.Request) []byte {
+	cc.hbuf.Reset()
+
+	// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
+	host := req.Host
+	if host == "" {
+		host = req.URL.Host
+	}
+
+	path := req.URL.Path
+	if path == "" {
+		path = "/"
+	}
+
+	cc.writeHeader(":authority", host) // probably not right for all sites
+	cc.writeHeader(":method", req.Method)
+	cc.writeHeader(":path", path)
+	cc.writeHeader(":scheme", "https")
+
+	// Regular headers go after the pseudo-headers, lowercased per the
+	// HTTP/2 requirement; Host is carried in :authority instead.
+	for k, vv := range req.Header {
+		lowKey := strings.ToLower(k)
+		if lowKey == "host" {
+			continue
+		}
+		for _, v := range vv {
+			cc.writeHeader(lowKey, v)
+		}
+	}
+	return cc.hbuf.Bytes()
+}
+
+// writeHeader HPACK-encodes one name/value pair into cc.hbuf.
+// NOTE(review): the log.Printf is leftover debug output.
+func (cc *clientConn) writeHeader(name, value string) {
+	log.Printf("sending %q = %q", name, value)
+	cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+// resAndError is what readLoop delivers on a stream's resc channel:
+// either a response or an error, not both.
+type resAndError struct {
+	res *http.Response
+	err error
+}
+
+// newStream allocates the next odd stream ID, registers a new
+// clientStream for it, and returns it.  requires cc.mu be held.
+func (cc *clientConn) newStream() *clientStream {
+	cs := &clientStream{
+		ID:   cc.nextStreamID,
+		resc: make(chan resAndError, 1),
+	}
+	cc.nextStreamID += 2 // stay odd: client-initiated streams only
+	cc.streams[cs.ID] = cs
+	return cs
+}
+
+// streamByID looks up the stream with the given ID, optionally
+// removing it from the connection's stream table in the same locked
+// section.  Returns nil if the stream is not tracked.
+func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cs := cc.streams[id]
+	if andRemove {
+		delete(cc.streams, id)
+	}
+	return cs
+}
+
+// readLoop reads frames from the server until an error occurs,
+// demultiplexing them onto the connection's streams: header frames are
+// fed to the HPACK decoder, DATA payloads go into the stream's body
+// pipe, and completed responses are delivered on the stream's resc
+// channel.  On exit it records the error, unblocks any in-flight body
+// readers, and removes the connection from the transport's cache.
+// runs in its own goroutine.
+func (cc *clientConn) readLoop() {
+	defer cc.t.removeClientConn(cc)
+	defer close(cc.readerDone)
+
+	activeRes := map[uint32]*clientStream{} // keyed by streamID
+	// Close any response bodies if the server closes prematurely.
+	// TODO: also do this if we've written the headers but not
+	// gotten a response yet.
+	defer func() {
+		err := cc.readerErr
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		for _, cs := range activeRes {
+			cs.pw.CloseWithError(err)
+		}
+	}()
+
+	// continueStreamID is the stream ID we're waiting for
+	// continuation frames for.
+	var continueStreamID uint32
+
+	for {
+		f, err := cc.fr.ReadFrame()
+		if err != nil {
+			cc.readerErr = err
+			return
+		}
+		log.Printf("Transport received %v: %#v", f.Header(), f)
+
+		streamID := f.Header().StreamID
+
+		// CONTINUATION frames must immediately follow the HEADERS (or
+		// prior CONTINUATION) on the same stream; anything else while a
+		// header block is open is a connection-level protocol error.
+		_, isContinue := f.(*ContinuationFrame)
+		if isContinue {
+			if streamID != continueStreamID {
+				log.Printf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, continueStreamID)
+				cc.readerErr = ConnectionError(ErrCodeProtocol)
+				return
+			}
+		} else if continueStreamID != 0 {
+			// Continue frames need to be adjacent in the stream
+			// and we were in the middle of headers.
+			log.Printf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, continueStreamID)
+			cc.readerErr = ConnectionError(ErrCodeProtocol)
+			return
+		}
+
+		if streamID%2 == 0 {
+			// Ignore streams pushed from the server for now.
+			// These always have an even stream id.
+			continue
+		}
+		streamEnded := false
+		if ff, ok := f.(streamEnder); ok {
+			streamEnded = ff.StreamEnded()
+		}
+
+		// Remove the stream from the table as soon as its last frame
+		// arrives (streamEnded), in the same lookup.
+		cs := cc.streamByID(streamID, streamEnded)
+		if cs == nil {
+			log.Printf("Received frame for untracked stream ID %d", streamID)
+			continue
+		}
+
+		switch f := f.(type) {
+		case *HeadersFrame:
+			// Start assembling a new response; header fields arrive via
+			// the hpack decoder's onNewHeaderField callback.
+			cc.nextRes = &http.Response{
+				Proto:      "HTTP/2.0",
+				ProtoMajor: 2,
+				Header:     make(http.Header),
+			}
+			cs.pr, cs.pw = io.Pipe()
+			cc.hdec.Write(f.HeaderBlockFragment())
+		case *ContinuationFrame:
+			cc.hdec.Write(f.HeaderBlockFragment())
+		case *DataFrame:
+			log.Printf("DATA: %q", f.Data())
+			cs.pw.Write(f.Data())
+		case *GoAwayFrame:
+			cc.t.removeClientConn(cc)
+			if f.ErrCode != 0 {
+				// TODO: deal with GOAWAY more. particularly the error code
+				log.Printf("transport got GOAWAY with error code = %v", f.ErrCode)
+			}
+			cc.setGoAway(f)
+		default:
+			log.Printf("Transport: unhandled response frame type %T", f)
+		}
+		headersEnded := false
+		if he, ok := f.(headersEnder); ok {
+			headersEnded = he.HeadersEnded()
+			if headersEnded {
+				continueStreamID = 0
+			} else {
+				continueStreamID = streamID
+			}
+		}
+
+		if streamEnded {
+			cs.pw.Close()
+			delete(activeRes, streamID)
+		}
+		if headersEnded {
+			// NOTE(review): cs cannot be nil here — the loop already
+			// continued above when the lookup failed — so this check is
+			// dead code.
+			if cs == nil {
+				panic("couldn't find stream") // TODO be graceful
+			}
+			// TODO: set the Body to one which notes the
+			// Close and also sends the server a
+			// RST_STREAM
+			cc.nextRes.Body = cs.pr
+			res := cc.nextRes
+			activeRes[streamID] = cs
+			cs.resc <- resAndError{res: res}
+		}
+	}
+}
+
+// onNewHeaderField is the hpack.Decoder callback, invoked once per
+// decoded header field while readLoop feeds the decoder header block
+// fragments.  It populates cc.nextRes.
+func (cc *clientConn) onNewHeaderField(f hpack.HeaderField) {
+	// TODO: verify pseudo headers come before non-pseudo headers
+	// TODO: verify the status is set
+	log.Printf("Header field: %+v", f)
+	if f.Name == ":status" {
+		code, err := strconv.Atoi(f.Value)
+		if err != nil {
+			panic("TODO: be graceful")
+		}
+		// Reconstruct a Go-style Status line, e.g. "200 OK".
+		cc.nextRes.Status = f.Value + " " + http.StatusText(code)
+		cc.nextRes.StatusCode = code
+		return
+	}
+	if strings.HasPrefix(f.Name, ":") {
+		// "Endpoints MUST NOT generate pseudo-header fields other than those defined in this document."
+		// TODO: treat as invalid?
+		return
+	}
+	cc.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value)
+}

+ 168 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/transport_test.go

@@ -0,0 +1,168 @@
+// Copyright 2015 The Go Authors.
+// See https://go.googlesource.com/go/+/master/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://go.googlesource.com/go/+/master/LICENSE
+
+package http2
+
+import (
+	"flag"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+)
+
+// Command-line flags controlling the external-network transport tests.
+var (
+	extNet        = flag.Bool("extnet", false, "do external network tests")
+	transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport")
+	insecure      = flag.Bool("insecure", false, "insecure TLS dials")
+)
+
+// TestTransportExternal exercises the Transport against a real remote
+// HTTP/2 server; it only runs when the -extnet flag is set.
+func TestTransportExternal(t *testing.T) {
+	if !*extNet {
+		t.Skip("skipping external network test")
+	}
+	req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil)
+	rt := &Transport{
+		InsecureTLSDial: *insecure,
+	}
+	res, err := rt.RoundTrip(req)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	res.Write(os.Stdout)
+}
+
+// TestTransport performs one round trip against a local test server
+// and checks the status, headers, body, and TLS/Request plumbing of
+// the resulting *http.Response.
+func TestTransport(t *testing.T) {
+	const body = "sup"
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, body)
+	})
+	defer st.Close()
+
+	tr := &Transport{InsecureTLSDial: true}
+	defer tr.CloseIdleConnections()
+
+	req, err := http.NewRequest("GET", st.ts.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := tr.RoundTrip(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+
+	t.Logf("Got res: %+v", res)
+	if g, w := res.StatusCode, 200; g != w {
+		t.Errorf("StatusCode = %v; want %v", g, w)
+	}
+	if g, w := res.Status, "200 OK"; g != w {
+		t.Errorf("Status = %q; want %q", g, w)
+	}
+	wantHeader := http.Header{
+		"Content-Length": []string{"3"},
+		"Content-Type":   []string{"text/plain; charset=utf-8"},
+	}
+	if !reflect.DeepEqual(res.Header, wantHeader) {
+		t.Errorf("res Header = %v; want %v", res.Header, wantHeader)
+	}
+	if res.Request != req {
+		t.Errorf("Response.Request = %p; want %p", res.Request, req)
+	}
+	if res.TLS == nil {
+		// Fixed: the format string had no verb for the res.TLS argument
+		// (a go vet printf error), so the stray argument is dropped.
+		t.Errorf("Response.TLS = nil; want non-nil")
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		// Fixed: t.Error does not format its arguments; use t.Errorf
+		// so the %v verb is actually applied to err.
+		t.Errorf("Body read: %v", err)
+	} else if string(slurp) != body {
+		t.Errorf("Body = %q; want %q", slurp, body)
+	}
+}
+
+// TestTransportReusesConns verifies that two sequential requests to
+// the same server are served over the same underlying connection, by
+// comparing the RemoteAddr the server observed for each.
+func TestTransportReusesConns(t *testing.T) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		io.WriteString(w, r.RemoteAddr)
+	}, optOnlyServer)
+	defer st.Close()
+	tr := &Transport{InsecureTLSDial: true}
+	defer tr.CloseIdleConnections()
+	// get performs one request and returns the server-observed client
+	// address from the response body.
+	get := func() string {
+		req, err := http.NewRequest("GET", st.ts.URL, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res, err := tr.RoundTrip(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+		slurp, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatalf("Body read: %v", err)
+		}
+		addr := strings.TrimSpace(string(slurp))
+		if addr == "" {
+			t.Fatalf("didn't get an addr in response")
+		}
+		return addr
+	}
+	first := get()
+	second := get()
+	if first != second {
+		t.Errorf("first and second responses were on different connections: %q vs %q", first, second)
+	}
+}
+
+// TestTransportAbortClosesPipes checks that when the server connection
+// dies mid-response, a blocked res.Body.Read is unblocked with an
+// error rather than deadlocking.
+func TestTransportAbortClosesPipes(t *testing.T) {
+	shutdown := make(chan struct{})
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			// Flush headers, then park until the test finishes.
+			w.(http.Flusher).Flush()
+			<-shutdown
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+	defer close(shutdown) // we must shutdown before st.Close() to avoid hanging
+
+	done := make(chan struct{})
+	requestMade := make(chan struct{})
+	go func() {
+		defer close(done)
+		tr := &Transport{
+			InsecureTLSDial: true,
+		}
+		req, err := http.NewRequest("GET", st.ts.URL, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		res, err := tr.RoundTrip(req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer res.Body.Close()
+		close(requestMade)
+		// This read should fail once the connection is torn down below.
+		_, err = ioutil.ReadAll(res.Body)
+		if err == nil {
+			t.Error("expected error from res.Body.Read")
+		}
+	}()
+
+	<-requestMade
+	// Now force the serve loop to end, via closing the connection.
+	st.closeConn()
+	// deadlock? that's a bug.
+	select {
+	case <-done:
+	case <-time.After(3 * time.Second):
+		t.Fatal("timeout")
+	}
+}

+ 204 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/write.go

@@ -0,0 +1,204 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bradfitz/http2/hpack"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+	writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+// TODO: use it from the client code too, once it exists.
+type writeContext interface {
+	// Framer returns the connection's shared Framer, used to emit frames.
+	Framer() *Framer
+	// Flush flushes any buffered output to the underlying connection.
+	Flush() error
+	// CloseConn closes the underlying transport connection.
+	CloseConn() error
+	// HeaderEncoder returns an HPACK encoder that writes to the
+	// returned buffer.
+	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+	switch v := w.(type) {
+	case *writeData:
+		return v.endStream
+	case *writeResHeaders:
+		return v.endStream
+	}
+	// No other frame writer type carries an END_STREAM flag.
+	return false
+}
+
+// flushFrameWriter writes no frame; scheduling it merely flushes the
+// connection's buffered output.
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+	return ctx.Flush()
+}
+
+// writeSettings writes a SETTINGS frame carrying the listed settings.
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+// writeGoAway writes a GOAWAY frame and, when code is an error, tears
+// the connection down shortly afterward.
+type writeGoAway struct {
+	maxStreamID uint32 // last stream ID this side processed
+	code        ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+	err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+	if p.code != 0 {
+		ctx.Flush() // ignore error: we're hanging up on them anyway
+		// Best-effort delay so the GOAWAY has a chance to reach the
+		// peer before we close the connection out from under it.
+		time.Sleep(50 * time.Millisecond)
+		ctx.CloseConn()
+	}
+	return err
+}
+
+// writeData writes a DATA frame on the given stream.
+type writeData struct {
+	streamID  uint32
+	p         []byte // payload; may be empty
+	endStream bool   // whether to set END_STREAM
+}
+
+// String implements fmt.Stringer for scheduler debug output.
+func (w *writeData) String() string {
+	return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+// StreamError writes itself as an RST_STREAM frame carrying its code.
+func (se StreamError) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+// writePingAck acknowledges a received PING by echoing its payload.
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+// writeSettingsAck acknowledges the peer's SETTINGS frame.
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers from a server handler.
+type writeResHeaders struct {
+	streamID    uint32
+	httpResCode int
+	h           http.Header // may be nil
+	endStream   bool
+
+	// contentType and contentLength, when non-empty, are appended after
+	// the handler-supplied headers.
+	contentType   string
+	contentLength string
+}
+
+// writeFrame HPACK-encodes the status line and headers, then emits one
+// HEADERS frame followed by CONTINUATION frames for any overflow.
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+	// :status pseudo-header must come first.
+	enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)})
+	for k, vv := range w.h {
+		k = lowerHeader(k)
+		for _, v := range vv {
+			// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+			if k == "transfer-encoding" && v != "trailers" {
+				continue
+			}
+			enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+		}
+	}
+	if w.contentType != "" {
+		enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType})
+	}
+	if w.contentLength != "" {
+		enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength})
+	}
+
+	headerBlock := buf.Bytes()
+	if len(headerBlock) == 0 {
+		// :status is always written above, so an empty block is a bug.
+		panic("unexpected empty hpack")
+	}
+
+	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+	// that all peers must support (16KB). Later we could care
+	// more and send larger frames if the peer advertised it, but
+	// there's little point. Most headers are small anyway (so we
+	// generally won't have CONTINUATION frames), and extra frames
+	// only waste 9 bytes anyway.
+	const maxFrameSize = 16384
+
+	first := true
+	for len(headerBlock) > 0 {
+		// Carve off at most maxFrameSize bytes per frame.
+		frag := headerBlock
+		if len(frag) > maxFrameSize {
+			frag = frag[:maxFrameSize]
+		}
+		headerBlock = headerBlock[len(frag):]
+		endHeaders := len(headerBlock) == 0
+		var err error
+		if first {
+			// Only the first frame is HEADERS (and carries END_STREAM);
+			// the rest are CONTINUATION.
+			first = false
+			err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+				StreamID:      w.streamID,
+				BlockFragment: frag,
+				EndStream:     w.endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// write100ContinueHeadersFrame writes an informational "100 Continue"
+// HEADERS frame on the given stream, leaving the stream open.
+type write100ContinueHeadersFrame struct {
+	streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
+	// A lone :status always fits in one frame, so no CONTINUATIONs needed.
+	return ctx.Framer().WriteHeaders(HeadersFrameParam{
+		StreamID:      w.streamID,
+		BlockFragment: buf.Bytes(),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+}
+
+// writeWindowUpdate writes a WINDOW_UPDATE frame granting n bytes of
+// flow-control credit on a stream (or the connection, when streamID is 0).
+type writeWindowUpdate struct {
+	streamID uint32 // or 0 for conn-level
+	n        uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}

+ 286 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/writesched.go

@@ -0,0 +1,286 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+	// write is the interface value that does the writing, once the
+	// writeScheduler (below) has decided to select this frame
+	// to write. The write functions are all defined in write.go.
+	write writeFramer
+
+	stream *stream // used for prioritization. nil for non-stream frames.
+
+	// done, if non-nil, must be a buffered channel with space for
+	// 1 message and is sent the return value from write (or an
+	// earlier error) when the frame has been written.
+	done chan error
+}
+
+// String renders the message for debugging only.
+func (wm frameWriteMsg) String() string {
+	var streamID uint32
+	if wm.stream != nil {
+		streamID = wm.stream.id
+	}
+	var des string
+	if s, ok := wm.write.(fmt.Stringer); ok {
+		des = s.String()
+	} else {
+		// Fall back to the writer's type name when it has no String method.
+		des = fmt.Sprintf("%T", wm.write)
+	}
+	return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+	// zero are frames not associated with a specific stream.
+	// They're sent before any stream-specific frames.
+	zero writeQueue
+
+	// maxFrameSize is the maximum size of a DATA frame
+	// we'll write. Must be non-zero and between 16K-16M.
+	maxFrameSize uint32
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// when a stream is idle, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// canSend is a slice of memory that's reused between frame
+	// scheduling decisions to hold the list of writeQueues (from sq)
+	// which have enough flow control data to send. After canSend is
+	// built, the best is selected.
+	canSend []*writeQueue
+
+	// pool of empty queues for reuse.
+	queuePool []*writeQueue
+}
+
+// putEmptyQueue returns a drained queue to the pool for later reuse.
+// It panics if the queue still holds messages.
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+	if len(q.s) != 0 {
+		panic("queue must be empty")
+	}
+	ws.queuePool = append(ws.queuePool, q)
+}
+
+// getEmptyQueue returns a queue from the pool, allocating a new one
+// only when the pool is empty.
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+	ln := len(ws.queuePool)
+	if ln == 0 {
+		return new(writeQueue)
+	}
+	q := ws.queuePool[ln-1]
+	ws.queuePool = ws.queuePool[:ln-1]
+	return q
+}
+
+// empty reports whether no frames are pending anywhere in the scheduler.
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+// add enqueues a frame write request, routing it to the connection-level
+// queue or the owning stream's queue.
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+	st := wm.stream
+	if st == nil {
+		ws.zero.push(wm)
+	} else {
+		ws.streamQueue(st.id).push(wm)
+	}
+}
+
+// streamQueue returns the queue for streamID, creating it (and the sq
+// map, on first use) as needed.
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+	if q, ok := ws.sq[streamID]; ok {
+		return q
+	}
+	if ws.sq == nil {
+		ws.sq = make(map[uint32]*writeQueue)
+	}
+	q := ws.getEmptyQueue()
+	ws.sq[streamID] = q
+	return q
+}
+
+// take returns the most important frame to write and removes it from the
+// scheduler. ok is false when nothing is currently writable (the
+// scheduler is empty, or all pending DATA frames lack flow-control quota).
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+	if ws.maxFrameSize == 0 {
+		panic("internal error: ws.maxFrameSize not initialized or invalid")
+	}
+
+	// If there any frames not associated with streams, prefer those first.
+	// These are usually SETTINGS, etc.
+	if !ws.zero.empty() {
+		return ws.zero.shift(), true
+	}
+	if len(ws.sq) == 0 {
+		return
+	}
+
+	// Next, prioritize frames on streams that aren't DATA frames (no cost).
+	for id, q := range ws.sq {
+		if q.firstIsNoCost() {
+			return ws.takeFrom(id, q)
+		}
+	}
+
+	// Now, all that remains are DATA frames with non-zero bytes to
+	// send. So pick the best one.
+	if len(ws.canSend) != 0 {
+		panic("should be empty")
+	}
+	for _, q := range ws.sq {
+		if n := ws.streamWritableBytes(q); n > 0 {
+			ws.canSend = append(ws.canSend, q)
+		}
+	}
+	if len(ws.canSend) == 0 {
+		return
+	}
+	defer ws.zeroCanSend()
+
+	// TODO: find the best queue
+	q := ws.canSend[0]
+
+	return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take. It nils out the scratch slice's
+// entries (so queues aren't pinned) and truncates it for reuse.
+func (ws *writeScheduler) zeroCanSend() {
+	for i := range ws.canSend {
+		ws.canSend[i] = nil
+	}
+	ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+	wm := q.head()
+	ret := wm.stream.flow.available() // max we can write
+	if ret == 0 {
+		return 0
+	}
+	// Cap by the negotiated max frame size.
+	if int32(ws.maxFrameSize) < ret {
+		ret = int32(ws.maxFrameSize)
+	}
+	if ret == 0 {
+		panic("internal error: ws.maxFrameSize not initialized or invalid")
+	}
+	wd := wm.write.(*writeData)
+	// Never report more than the payload actually remaining.
+	if len(wd.p) < int(ret) {
+		ret = int32(len(wd.p))
+	}
+	return ret
+}
+
+// takeFrom removes and returns the head of queue q (for stream id),
+// splitting an oversized DATA frame into a flow-control-sized chunk
+// when necessary. ok is false when the stream has no quota at all.
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+	wm = q.head()
+	// If the first item in this queue costs flow control tokens
+	// and we don't have enough, write as much as we can.
+	if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+		allowed := wm.stream.flow.available() // max we can write
+		if allowed == 0 {
+			// No quota available. Caller can try the next stream.
+			return frameWriteMsg{}, false
+		}
+		if int32(ws.maxFrameSize) < allowed {
+			allowed = int32(ws.maxFrameSize)
+		}
+		// TODO: further restrict the allowed size, because even if
+		// the peer says it's okay to write 16MB data frames, we might
+		// want to write smaller ones to properly weight competing
+		// streams' priorities.
+
+		if len(wd.p) > int(allowed) {
+			wm.stream.flow.take(allowed)
+			chunk := wd.p[:allowed]
+			wd.p = wd.p[allowed:]
+			// Make up a new write message of a valid size, rather
+			// than shifting one off the queue.
+			return frameWriteMsg{
+				stream: wm.stream,
+				write: &writeData{
+					streamID: wd.streamID,
+					p:        chunk,
+					// even if the original had endStream set, there
+					// are bytes remaining because len(wd.p) > allowed,
+					// so we know endStream is false:
+					endStream: false,
+				},
+				// our caller is blocking on the final DATA frame, not
+				// these intermediates, so no need to wait:
+				done: nil,
+			}, true
+		}
+		// The whole payload fits within quota; charge it all.
+		wm.stream.flow.take(int32(len(wd.p)))
+	}
+
+	q.shift()
+	if q.empty() {
+		// Retire the now-idle stream's queue back to the pool.
+		ws.putEmptyQueue(q)
+		delete(ws.sq, id)
+	}
+	return wm, true
+}
+
+// forgetStream drops all pending frames for stream id (e.g. after a
+// reset) and recycles its queue.
+func (ws *writeScheduler) forgetStream(id uint32) {
+	q, ok := ws.sq[id]
+	if !ok {
+		return
+	}
+	delete(ws.sq, id)
+
+	// But keep it for others later.
+	// Zero the entries first so pooled queues don't pin streams/buffers.
+	for i := range q.s {
+		q.s[i] = frameWriteMsg{}
+	}
+	q.s = q.s[:0]
+	ws.putEmptyQueue(q)
+}
+
+// writeQueue is a FIFO of pending frame write requests.
+type writeQueue struct {
+	s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+	q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+// It panics on an empty queue.
+func (q *writeQueue) head() frameWriteMsg {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	return q.s[0]
+}
+
+// shift removes and returns the queue's head. It panics on an empty queue.
+func (q *writeQueue) shift() frameWriteMsg {
+	if len(q.s) == 0 {
+		panic("invalid use of queue")
+	}
+	wm := q.s[0]
+	// TODO: less copy-happy queue.
+	copy(q.s, q.s[1:])
+	// Zero the vacated tail slot so it doesn't pin the old message.
+	q.s[len(q.s)-1] = frameWriteMsg{}
+	q.s = q.s[:len(q.s)-1]
+	return wm
+}
+
+// firstIsNoCost reports whether writing the head consumes no flow-control
+// tokens (anything but a DATA frame with a non-empty payload).
+func (q *writeQueue) firstIsNoCost() bool {
+	if df, ok := q.s[0].write.(*writeData); ok {
+		return len(df.p) == 0
+	}
+	return true
+}

+ 357 - 0
Godeps/_workspace/src/github.com/bradfitz/http2/z_spec_test.go

@@ -0,0 +1,357 @@
+// Copyright 2014 The Go Authors.
+// See https://code.google.com/p/go/source/browse/CONTRIBUTORS
+// Licensed under the same terms as Go itself:
+// https://code.google.com/p/go/source/browse/LICENSE
+
+package http2
+
+import (
+	"bytes"
+	"encoding/xml"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+)
+
+var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests")
+
+// The global map of sentence coverage for the http2 spec.
+var defaultSpecCoverage specCoverage
+
+// loadSpecOnce guards the lazy one-time parse of the spec XML.
+var loadSpecOnce sync.Once
+
+// loadSpec parses the bundled HTTP/2 draft into defaultSpecCoverage.
+// It panics on any error, since the coverage tests cannot run without it.
+func loadSpec() {
+	if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil {
+		panic(err)
+	} else {
+		defaultSpecCoverage = readSpecCov(f)
+		f.Close()
+	}
+}
+
+// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not
+// "covered" will be included in the report output by TestSpecCoverage.
+func covers(sec, sentences string) {
+	loadSpecOnce.Do(loadSpec)
+	defaultSpecCoverage.cover(sec, sentences)
+}
+
+// specPart identifies one sentence within a numbered spec section.
+type specPart struct {
+	section  string // dotted section number, e.g. "6.2.1"
+	sentence string
+}
+
+// Less orders specParts numerically by section components, so "6.10"
+// sorts after "6.2" (component-wise numeric, not lexicographic, order).
+func (ss specPart) Less(oo specPart) bool {
+	atoi := func(s string) int {
+		n, err := strconv.Atoi(s)
+		if err != nil {
+			panic(err)
+		}
+		return n
+	}
+	a := strings.Split(ss.section, ".")
+	b := strings.Split(oo.section, ".")
+	for len(a) > 0 {
+		if len(b) == 0 {
+			// oo is a proper prefix of ss, so ss sorts after it.
+			return false
+		}
+		x, y := atoi(a[0]), atoi(b[0])
+		if x == y {
+			a, b = a[1:], b[1:]
+			continue
+		}
+		return x < y
+	}
+	if len(b) > 0 {
+		// ss is a proper prefix of oo, so ss sorts first.
+		return true
+	}
+	return false
+}
+
+// bySpecSection sorts specParts in spec-section order.
+type bySpecSection []specPart
+
+func (a bySpecSection) Len() int           { return len(a) }
+func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }
+func (a bySpecSection) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// specCoverage accumulates, per spec sentence, whether a test has
+// declared coverage of it (via covers).
+type specCoverage struct {
+	coverage map[specPart]bool // sentence -> covered?
+	d        *xml.Decoder
+}
+
+// joinSection renders a section number slice as a dotted string,
+// e.g. []int{6, 2, 1} -> "6.2.1".
+func joinSection(sec []int) string {
+	s := fmt.Sprintf("%d", sec[0])
+	for _, n := range sec[1:] {
+		s = fmt.Sprintf("%s.%d", s, n)
+	}
+	return s
+}
+
+// readSection recursively consumes the XML tokens of the section
+// numbered sec, collecting its prose into sentences and descending into
+// numbered subsections. A nil sec denotes the document root.
+func (sc specCoverage) readSection(sec []int) {
+	var (
+		buf = new(bytes.Buffer)
+		sub = 0 // count of subsections seen, for numbering
+	)
+	for {
+		tk, err := sc.d.Token()
+		if err != nil {
+			if err == io.EOF {
+				return
+			}
+			panic(err)
+		}
+		switch v := tk.(type) {
+		case xml.StartElement:
+			if skipElement(v) {
+				if err := sc.d.Skip(); err != nil {
+					panic(err)
+				}
+				// Skipped sections still consume a section number.
+				if v.Name.Local == "section" {
+					sub++
+				}
+				break
+			}
+			switch v.Name.Local {
+			case "section":
+				sub++
+				sc.readSection(append(sec, sub))
+			case "xref":
+				// Inline cross-references become bracketed text.
+				buf.Write(sc.readXRef(v))
+			}
+		case xml.CharData:
+			if len(sec) == 0 {
+				// Ignore prose outside any numbered section.
+				break
+			}
+			buf.Write(v)
+		case xml.EndElement:
+			if v.Name.Local == "section" {
+				sc.addSentences(joinSection(sec), buf.String())
+				return
+			}
+		}
+	}
+}
+
+// readXRef consumes an <xref> element and returns its rendered text:
+// either the element's own character data, or a bracketed citation
+// synthesized from its attributes.
+func (sc specCoverage) readXRef(se xml.StartElement) []byte {
+	var b []byte
+	for {
+		tk, err := sc.d.Token()
+		if err != nil {
+			panic(err)
+		}
+		switch v := tk.(type) {
+		case xml.CharData:
+			if b != nil {
+				panic("unexpected CharData")
+			}
+			// Copy: the decoder may reuse the CharData buffer.
+			b = []byte(string(v))
+		case xml.EndElement:
+			if v.Name.Local != "xref" {
+				panic("expected </xref>")
+			}
+			if b != nil {
+				return b
+			}
+			// No inline text: render from the attribute signature.
+			sig := attrSig(se)
+			switch sig {
+			case "target":
+				return []byte(fmt.Sprintf("[%s]", attrValue(se, "target")))
+			case "fmt-of,rel,target", "fmt-,,rel,target":
+				return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel")))
+			case "fmt-of,sec,target", "fmt-,,sec,target":
+				return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target")))
+			case "fmt-of,rel,sec,target":
+				return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel")))
+			default:
+				panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se)))
+			}
+		default:
+			panic(fmt.Sprintf("unexpected tag %q", v))
+		}
+	}
+}
+
+// skipAnchor lists section anchors excluded from coverage accounting.
+var skipAnchor = map[string]bool{
+	"intro":    true,
+	"Overview": true,
+}
+
+// skipTitle lists section titles excluded from coverage accounting.
+var skipTitle = map[string]bool{
+	"Acknowledgements":            true,
+	"Change Log":                  true,
+	"Document Organization":       true,
+	"Conventions and Terminology": true,
+}
+
+// skipElement reports whether an XML element carries no testable spec
+// prose: artwork, and boilerplate/changelog sections by anchor or title.
+func skipElement(s xml.StartElement) bool {
+	switch s.Name.Local {
+	case "artwork":
+		return true
+	case "section":
+		for _, attr := range s.Attr {
+			switch attr.Name.Local {
+			case "anchor":
+				if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") {
+					return true
+				}
+			case "title":
+				if skipTitle[attr.Value] {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// readSpecCov parses the spec XML from r into a fresh specCoverage with
+// every sentence initially uncovered.
+func readSpecCov(r io.Reader) specCoverage {
+	sc := specCoverage{
+		coverage: map[specPart]bool{},
+		d:        xml.NewDecoder(r)}
+	sc.readSection(nil)
+	return sc
+}
+
+// addSentences records each sentence of the given section as uncovered.
+func (sc specCoverage) addSentences(sec string, sentence string) {
+	for _, s := range parseSentences(sentence) {
+		sc.coverage[specPart{sec, s}] = false
+	}
+}
+
+// cover marks the given sentences of section sec as covered, panicking
+// if any sentence does not exist verbatim in the parsed spec.
+func (sc specCoverage) cover(sec string, sentence string) {
+	for _, s := range parseSentences(sentence) {
+		p := specPart{sec, s}
+		if _, ok := sc.coverage[p]; !ok {
+			panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s))
+		}
+		sc.coverage[specPart{sec, s}] = true
+	}
+
+}
+
+// whitespaceRx collapses runs of whitespace (including newlines) to one space.
+var whitespaceRx = regexp.MustCompile(`\s+`)
+
+// parseSentences normalizes whitespace in sens and splits it into
+// period-terminated sentences; it returns nil for blank input.
+func parseSentences(sens string) []string {
+	sens = strings.TrimSpace(sens)
+	if sens == "" {
+		return nil
+	}
+	ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ")
+	for i, s := range ss {
+		s = strings.TrimSpace(s)
+		// Splitting on ". " strips the period from all but the last
+		// sentence; restore it so every entry is uniformly terminated.
+		if !strings.HasSuffix(s, ".") {
+			s += "."
+		}
+		ss[i] = s
+	}
+	return ss
+}
+
+// TestSpecParseSentences checks sentence splitting across spaces,
+// newlines, and tabs.
+func TestSpecParseSentences(t *testing.T) {
+	tests := []struct {
+		ss   string
+		want []string
+	}{
+		{"Sentence 1. Sentence 2.",
+			[]string{
+				"Sentence 1.",
+				"Sentence 2.",
+			}},
+		{"Sentence 1.  \nSentence 2.\tSentence 3.",
+			[]string{
+				"Sentence 1.",
+				"Sentence 2.",
+				"Sentence 3.",
+			}},
+	}
+
+	for i, tt := range tests {
+		got := parseSentences(tt.ss)
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%d: got = %q, want %q", i, got, tt.want)
+		}
+	}
+}
+
+// TestSpecCoverage reports, when -coverspec is set, every spec sentence
+// not yet marked covered, plus an overall percentage.
+func TestSpecCoverage(t *testing.T) {
+	if !*coverSpec {
+		t.Skip()
+	}
+
+	loadSpecOnce.Do(loadSpec)
+
+	var (
+		list     []specPart
+		cv       = defaultSpecCoverage.coverage
+		total    = len(cv)
+		complete = 0
+	)
+
+	for sp, touched := range defaultSpecCoverage.coverage {
+		if touched {
+			complete++
+		} else {
+			list = append(list, sp)
+		}
+	}
+	sort.Stable(bySpecSection(list))
+
+	if testing.Short() && len(list) > 5 {
+		list = list[:5]
+	}
+
+	for _, p := range list {
+		t.Errorf("\tSECTION %s: %s", p.section, p.sentence)
+	}
+
+	// BUG FIX: the original (complete/total)*100 performed truncating
+	// integer division first, so it printed 0%% for anything short of
+	// full coverage. Multiply before dividing, and guard total == 0.
+	pct := 0
+	if total > 0 {
+		pct = complete * 100 / total
+	}
+	t.Logf("%d/%d (%d%%) sentences covered", complete, total, pct)
+}
+
+// attrSig returns a canonical signature for se's attributes: the sorted,
+// comma-joined attribute names, with "fmt" replaced by "fmt-<value>".
+// readXRef switches on this signature to pick a rendering.
+func attrSig(se xml.StartElement) string {
+	var names []string
+	for _, attr := range se.Attr {
+		if attr.Name.Local == "fmt" {
+			names = append(names, "fmt-"+attr.Value)
+		} else {
+			names = append(names, attr.Name.Local)
+		}
+	}
+	sort.Strings(names)
+	return strings.Join(names, ",")
+}
+
+// attrValue returns the value of the named attribute on se, panicking
+// if the attribute is absent.
+func attrValue(se xml.StartElement, attr string) string {
+	for _, a := range se.Attr {
+		if a.Name.Local == attr {
+			return a.Value
+		}
+	}
+	panic("unknown attribute " + attr)
+}
+
+// TestSpecPartLess exercises the numeric (not lexicographic) section
+// ordering of specPart.Less.
+func TestSpecPartLess(t *testing.T) {
+	tests := []struct {
+		sec1, sec2 string
+		want       bool
+	}{
+		{"6.2.1", "6.2", false},
+		{"6.2", "6.2.1", true},
+		{"6.10", "6.10.1", true},
+		{"6.10", "6.1.1", false}, // 10, not 1
+		{"6.1", "6.1", false},    // equal, so not less
+	}
+	for _, tt := range tests {
+		got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"})
+		if got != tt.want {
+			t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want)
+		}
+	}
+}

+ 191 - 0
Godeps/_workspace/src/github.com/golang/glog/LICENSE

@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 44 - 0
Godeps/_workspace/src/github.com/golang/glog/README

@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+	http://code.google.com/p/google-glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+	Package glog implements logging analogous to the Google-internal
+	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
+	Error, Fatal, plus formatting variants such as Infof. It
+	also provides V-style logging controlled by the -v and
+	-vmodule=file=2 flags.
+	
+	Basic examples:
+	
+		glog.Info("Prepare to repel boarders")
+	
+		glog.Fatalf("Initialization failed: %s", err)
+	
+	See the documentation for the V function for an explanation
+	of these examples:
+	
+		if glog.V(2) {
+			glog.Info("Starting transaction...")
+		}
+	
+		glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.

+ 1177 - 0
Godeps/_workspace/src/github.com/golang/glog/glog.go

@@ -0,0 +1,1177 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+//	glog.Info("Prepare to repel boarders")
+//
+//	glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+//	if glog.V(2) {
+//		glog.Info("Starting transaction...")
+//	}
+//
+//	glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+//	-logtostderr=false
+//		Logs are written to standard error instead of to files.
+//	-alsologtostderr=false
+//		Logs are written to standard error as well as to files.
+//	-stderrthreshold=ERROR
+//		Log events at or above this severity are logged to standard
+//		error as well as to files.
+//	-log_dir=""
+//		Log files will be written to this directory instead of the
+//		default temporary directory.
+//
+//	Other flags provide aids to debugging.
+//
+//	-log_backtrace_at=""
+//		When set to a file and line number holding a logging statement,
+//		such as
+//			-log_backtrace_at=gopherflakes.go:234
+//		a stack trace will be written to the Info log whenever execution
+//		hits that statement. (Unlike with -vmodule, the ".go" must be
+//		present.)
+//	-v=0
+//		Enable V-leveled logging at the specified level.
+//	-vmodule=""
+//		The syntax of the argument is a comma-separated list of pattern=N,
+//		where pattern is a literal file name (minus the ".go" suffix) or
+//		"glob" pattern and N is a V level. For instance,
+//			-vmodule=gopher*=3
+//		sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	stdLog "log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
// severity identifies the sort of log: info, warning etc. It also implements
// the flag.Value interface. The -stderrthreshold flag is of type severity and
// should be modified only through the flag.Value interface. The values match
// the corresponding constants in C++.
type severity int32 // sync/atomic int32

// These constants identify the log levels in order of increasing severity.
// A message written to a high-severity log file is also written to each
// lower-severity log file.
const (
	infoLog severity = iota
	warningLog
	errorLog
	fatalLog
	numSeverity = 4
)

// severityChar maps a severity to the single-character tag ('I', 'W', 'E',
// 'F') written at the start of each log-line header; indexed by the
// severity constants above.
const severityChar = "IWEF"

// severityName maps a severity to its upper-case name, used for flag
// parsing (severityByName) and in log file headers.
var severityName = []string{
	infoLog:    "INFO",
	warningLog: "WARNING",
	errorLog:   "ERROR",
	fatalLog:   "FATAL",
}

// get returns the value of the severity, loaded atomically.
func (s *severity) get() severity {
	return severity(atomic.LoadInt32((*int32)(s)))
}

// set sets the value of the severity, stored atomically.
func (s *severity) set(val severity) {
	atomic.StoreInt32((*int32)(s), int32(val))
}

// String is part of the flag.Value interface.
// Note: reads *s directly (not atomically); flag values are normally only
// read/written during flag parsing.
func (s *severity) String() string {
	return strconv.FormatInt(int64(*s), 10)
}

// Get is part of the flag.Value interface.
func (s *severity) Get() interface{} {
	return *s
}

// Set is part of the flag.Value interface. It accepts either a severity
// name ("ERROR") or a numeric value.
// NOTE(review): this updates logging.stderrThreshold rather than *s. That
// is correct today because -stderrthreshold is the only flag of this type,
// but it would misbehave for any other severity-typed flag.
func (s *severity) Set(value string) error {
	var threshold severity
	// Is it a known name?
	if v, ok := severityByName(value); ok {
		threshold = v
	} else {
		v, err := strconv.Atoi(value)
		if err != nil {
			return err
		}
		threshold = severity(v)
	}
	logging.stderrThreshold.set(threshold)
	return nil
}

// severityByName maps a case-insensitive severity name ("info", "FATAL")
// to its severity constant; ok is false if the name is unknown.
func severityByName(s string) (severity, bool) {
	s = strings.ToUpper(s)
	for i, name := range severityName {
		if name == s {
			return severity(i), true
		}
	}
	return 0, false
}
+
// OutputStats tracks the number of output lines and bytes written.
// Counters are updated with atomic adds in output() and must be read
// through the atomic accessors below.
type OutputStats struct {
	lines int64
	bytes int64
}

// Lines returns the number of lines written.
func (s *OutputStats) Lines() int64 {
	return atomic.LoadInt64(&s.lines)
}

// Bytes returns the number of bytes written.
func (s *OutputStats) Bytes() int64 {
	return atomic.LoadInt64(&s.bytes)
}

// Stats tracks the number of lines of output and number of bytes
// per severity level. Values must be read with atomic.LoadInt64.
// There is deliberately no Fatal entry: a fatal log exits the process.
var Stats struct {
	Info, Warning, Error OutputStats
}

// severityStats routes per-severity accounting to the exported Stats
// struct; the fatalLog slot is nil, so fatal output is not counted.
var severityStats = [numSeverity]*OutputStats{
	infoLog:    &Stats.Info,
	warningLog: &Stats.Warning,
	errorLog:   &Stats.Error,
}
+
// Level is exported because it appears in the arguments to V and is
// the type of the v flag, which can be set programmatically.
// It's a distinct type because we want to discriminate it from logType.
// Variables of type level are only changed under logging.mu.
// The -v flag is read only with atomic ops, so the state of the logging
// module is consistent.

// Level is treated as a sync/atomic int32.

// Level specifies a level of verbosity for V logs. *Level implements
// flag.Value; the -v flag is of type Level and should be modified
// only through the flag.Value interface.
type Level int32

// get returns the value of the Level, loaded atomically.
func (l *Level) get() Level {
	return Level(atomic.LoadInt32((*int32)(l)))
}

// set sets the value of the Level, stored atomically.
func (l *Level) set(val Level) {
	atomic.StoreInt32((*int32)(l), int32(val))
}

// String is part of the flag.Value interface.
func (l *Level) String() string {
	return strconv.FormatInt(int64(*l), 10)
}

// Get is part of the flag.Value interface.
func (l *Level) Get() interface{} {
	return *l
}

// Set is part of the flag.Value interface. It routes through setVState so
// verbosity and the vmodule filter stay mutually consistent.
func (l *Level) Set(value string) error {
	v, err := strconv.Atoi(value)
	if err != nil {
		return err
	}
	logging.mu.Lock()
	defer logging.mu.Unlock()
	logging.setVState(Level(v), logging.vmodule.filter, false)
	return nil
}
+
// moduleSpec represents the setting of the -vmodule flag.
type moduleSpec struct {
	filter []modulePat
}

// modulePat contains a filter for the -vmodule flag.
// It holds a verbosity level and a file pattern to match.
type modulePat struct {
	pattern string
	literal bool // The pattern is a literal string
	level   Level
}

// match reports whether the file matches the pattern. It uses a string
// comparison if the pattern contains no metacharacters.
func (m *modulePat) match(file string) bool {
	if m.literal {
		return file == m.pattern
	}
	// A malformed pattern error from filepath.Match is treated as no match.
	match, _ := filepath.Match(m.pattern, file)
	return match
}

// String renders the filter list back into -vmodule syntax
// ("pattern=N,pattern=N").
func (m *moduleSpec) String() string {
	// Lock because the type is not atomic. TODO: clean this up.
	logging.mu.Lock()
	defer logging.mu.Unlock()
	var b bytes.Buffer
	for i, f := range m.filter {
		if i > 0 {
			b.WriteRune(',')
		}
		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
	}
	return b.String()
}

// Get is part of the (Go 1.2)  flag.Getter interface. It always returns nil for this flag type since the
// struct is not exported.
func (m *moduleSpec) Get() interface{} {
	return nil
}

var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")

// Set parses the -vmodule flag value and installs the new filter via
// setVState (which also wipes the per-PC cache).
// Syntax: -vmodule=recordio=2,file=1,gfs*=3
func (m *moduleSpec) Set(value string) error {
	var filter []modulePat
	for _, pat := range strings.Split(value, ",") {
		if len(pat) == 0 {
			// Empty strings such as from a trailing comma can be ignored.
			continue
		}
		patLev := strings.Split(pat, "=")
		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
			return errVmoduleSyntax
		}
		pattern := patLev[0]
		v, err := strconv.Atoi(patLev[1])
		if err != nil {
			// NOTE(review): allocates an error identical in text to
			// errVmoduleSyntax; returning the sentinel would be consistent.
			return errors.New("syntax error: expect comma-separated list of filename=N")
		}
		if v < 0 {
			return errors.New("negative value for vmodule level")
		}
		if v == 0 {
			continue // Ignore. It's harmless but no point in paying the overhead.
		}
		// TODO: check syntax of filter?
		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
	}
	logging.mu.Lock()
	defer logging.mu.Unlock()
	logging.setVState(logging.verbosity, filter, true)
	return nil
}
+
// isLiteral reports whether the pattern is a literal string, that is, has no
// metacharacters that would require filepath.Match to be called to match it.
func isLiteral(pattern string) bool {
	for _, r := range pattern {
		switch r {
		case '\\', '*', '?', '[', ']':
			return false
		}
	}
	return true
}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		t.line = 0
+		t.file = ""
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
// flushSyncWriter is the interface satisfied by logging destinations.
// Flush drains the in-memory buffer; Sync pushes the data to stable storage.
type flushSyncWriter interface {
	Flush() error
	Sync() error
	io.Writer
}

// init registers the package's command-line flags and starts the background
// flush daemon. Because of these flags, flag.Parse must run before any
// logging is done.
func init() {
	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
	flag.Var(&logging.verbosity, "v", "log level for V logs")
	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")

	// Default stderrThreshold is ERROR.
	logging.stderrThreshold = errorLog

	logging.setVState(0, nil, false)
	go logging.flushDaemon()
}

// Flush flushes all pending log I/O.
func Flush() {
	logging.lockAndFlushAll()
}
+
// loggingT collects all the global state of the logging setup.
type loggingT struct {
	// Boolean flags. Not handled atomically because the flag.Value interface
	// does not let us avoid the =true, and that shorthand is necessary for
	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
	toStderr     bool // The -logtostderr flag.
	alsoToStderr bool // The -alsologtostderr flag.

	// Level flag. Handled atomically.
	stderrThreshold severity // The -stderrthreshold flag.

	// freeList is a list of byte buffers, maintained under freeListMu.
	freeList *buffer
	// freeListMu maintains the free list. It is separate from the main mutex
	// so buffers can be grabbed and printed to without holding the main lock,
	// for better parallelization.
	freeListMu sync.Mutex

	// mu protects the remaining elements of this structure and is
	// used to synchronize logging.
	mu sync.Mutex
	// file holds writer for each of the log types.
	file [numSeverity]flushSyncWriter
	// pcs is used in V to avoid an allocation when computing the caller's PC.
	pcs [1]uintptr
	// vmap is a cache of the V Level for each V() call site, identified by PC.
	// It is wiped whenever the vmodule flag changes state.
	vmap map[uintptr]Level
	// filterLength stores the length of the vmodule filter chain. If greater
	// than zero, it means vmodule is enabled. It may be read safely
	// using atomic.LoadInt32, but is only modified under mu.
	filterLength int32
	// traceLocation is the state of the -log_backtrace_at flag.
	traceLocation traceLocation
	// These flags are modified only under lock, although verbosity may be fetched
	// safely using atomic.LoadInt32.
	vmodule   moduleSpec // The state of the -vmodule flag.
	verbosity Level      // V logging level, the value of the -v flag.
}

// buffer holds a byte Buffer for reuse. The zero value is ready for use.
type buffer struct {
	bytes.Buffer
	tmp  [64]byte // temporary byte array for creating headers.
	next *buffer  // next free buffer in loggingT.freeList.
}

// logging is the single package-level instance of loggingT.
var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+	logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+	l.freeListMu.Lock()
+	b := l.freeList
+	if b != nil {
+		l.freeList = b.next
+	}
+	l.freeListMu.Unlock()
+	if b == nil {
+		b = new(buffer)
+	} else {
+		b.next = nil
+		b.Reset()
+	}
+	return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death.
+		return
+	}
+	l.freeListMu.Lock()
+	b.next = l.freeList
+	l.freeList = b
+	l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
/*
header formats a log header as defined by the C++ implementation.
It returns a buffer containing the formatted header and the user's file and line number.
The depth specifies how many stack frames above lives the source line to be identified in the log message.

Log lines have this form:
	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
where the fields are defined as follows:
	L                A single character, representing the log level (eg 'I' for INFO)
	mm               The month (zero padded; ie May is '05')
	dd               The day (zero padded)
	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
	threadid         The space-padded thread ID as returned by GetTID()
	file             The file name
	line             The line number
	msg              The user-supplied message
*/
func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
	// 3 + depth skips header, the print* wrapper, and the exported entry
	// point, landing on the user's call site.
	_, file, line, ok := runtime.Caller(3 + depth)
	if !ok {
		file = "???"
		line = 1
	} else {
		// Keep only the basename of the caller's file.
		slash := strings.LastIndex(file, "/")
		if slash >= 0 {
			file = file[slash+1:]
		}
	}
	return l.formatHeader(s, file, line), file, line
}

// formatHeader formats a log header using the provided file name and line number.
// The fixed-width fields are written by hand into buf.tmp at hard-coded
// offsets, so every offset below must stay in sync with the layout comment.
func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
	now := timeNow()
	if line < 0 {
		line = 0 // not a real line number, but acceptable to someDigits
	}
	if s > fatalLog {
		s = infoLog // for safety.
	}
	buf := l.getBuffer()

	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
	// It's worth about 3X. Fprintf is hard.
	_, month, day := now.Date()
	hour, minute, second := now.Clock()
	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
	buf.tmp[0] = severityChar[s]
	buf.twoDigits(1, int(month))
	buf.twoDigits(3, day)
	buf.tmp[5] = ' '
	buf.twoDigits(6, hour)
	buf.tmp[8] = ':'
	buf.twoDigits(9, minute)
	buf.tmp[11] = ':'
	buf.twoDigits(12, second)
	buf.tmp[14] = '.'
	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
	buf.tmp[21] = ' '
	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
	buf.tmp[29] = ' '
	buf.Write(buf.tmp[:30])
	buf.WriteString(file)
	// buf.tmp is reused for the ":line] " suffix.
	buf.tmp[0] = ':'
	n := buf.someDigits(1, line)
	buf.tmp[n+1] = ']'
	buf.tmp[n+2] = ' '
	buf.Write(buf.tmp[:n+3])
	return buf
}
+
// Some custom tiny helper functions to print the log header efficiently.

const digits = "0123456789"

// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
// It assumes 0 <= d < 100.
func (buf *buffer) twoDigits(i, d int) {
	buf.tmp[i+1] = digits[d%10]
	d /= 10
	buf.tmp[i] = digits[d%10]
}

// nDigits formats an n-digit integer at buf.tmp[i],
// padding with pad on the left.
// It assumes d >= 0.
func (buf *buffer) nDigits(n, i, d int, pad byte) {
	j := n - 1
	// Write digits right-to-left, then pad whatever remains.
	for ; j >= 0 && d > 0; j-- {
		buf.tmp[i+j] = digits[d%10]
		d /= 10
	}
	for ; j >= 0; j-- {
		buf.tmp[i+j] = pad
	}
}

// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]
// and returns the number of bytes written. It assumes d >= 0.
func (buf *buffer) someDigits(i, d int) int {
	// Print into the top, then copy down. We know there's space for at least
	// a 10-digit number.
	j := len(buf.tmp)
	for {
		j--
		buf.tmp[j] = digits[d%10]
		d /= 10
		if d == 0 {
			break
		}
	}
	return copy(buf.tmp[i:], buf.tmp[j:])
}
+
// println formats with fmt.Fprintln semantics and emits at severity s.
func (l *loggingT) println(s severity, args ...interface{}) {
	buf, file, line := l.header(s, 0)
	fmt.Fprintln(buf, args...)
	l.output(s, buf, file, line, false)
}

// print formats with fmt.Fprint semantics and emits at severity s.
func (l *loggingT) print(s severity, args ...interface{}) {
	l.printDepth(s, 1, args...)
}

// printDepth is like print but skips depth extra stack frames when
// determining the caller's file and line.
func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
	buf, file, line := l.header(s, depth)
	fmt.Fprint(buf, args...)
	// header always writes at least the fixed prefix, so buf is non-empty.
	if buf.Bytes()[buf.Len()-1] != '\n' {
		buf.WriteByte('\n')
	}
	l.output(s, buf, file, line, false)
}

// printf formats with fmt.Fprintf semantics and emits at severity s.
func (l *loggingT) printf(s severity, format string, args ...interface{}) {
	buf, file, line := l.header(s, 0)
	fmt.Fprintf(buf, format, args...)
	if buf.Bytes()[buf.Len()-1] != '\n' {
		buf.WriteByte('\n')
	}
	l.output(s, buf, file, line, false)
}

// printWithFileLine behaves like print but uses the provided file and line number.  If
// alsoLogToStderr is true, the log message always appears on standard error; it
// will also appear in the log file unless --logtostderr is set.
func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
	buf := l.formatHeader(s, file, line)
	fmt.Fprint(buf, args...)
	if buf.Bytes()[buf.Len()-1] != '\n' {
		buf.WriteByte('\n')
	}
	l.output(s, buf, file, line, alsoToStderr)
}
+
// output writes the data to the log files and releases the buffer.
// It takes l.mu; on a fatal log it never returns (the process exits), and
// the mutex is explicitly unlocked on each exit path rather than deferred.
func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
	l.mu.Lock()
	if l.traceLocation.isSet() {
		if l.traceLocation.match(file, line) {
			buf.Write(stacks(false))
		}
	}
	data := buf.Bytes()
	if l.toStderr {
		os.Stderr.Write(data)
	} else {
		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
			os.Stderr.Write(data)
		}
		// Lazily create log files on first use for this severity.
		if l.file[s] == nil {
			if err := l.createFiles(s); err != nil {
				os.Stderr.Write(data) // Make sure the message appears somewhere.
				l.exit(err)
			}
		}
		// A message written at severity s also goes to every lower-severity
		// file; the fallthroughs cascade from s down to infoLog.
		switch s {
		case fatalLog:
			l.file[fatalLog].Write(data)
			fallthrough
		case errorLog:
			l.file[errorLog].Write(data)
			fallthrough
		case warningLog:
			l.file[warningLog].Write(data)
			fallthrough
		case infoLog:
			l.file[infoLog].Write(data)
		}
	}
	if s == fatalLog {
		// If we got here via Exit rather than Fatal, print no stacks.
		if atomic.LoadUint32(&fatalNoStacks) > 0 {
			l.mu.Unlock()
			timeoutFlush(10 * time.Second)
			os.Exit(1)
		}
		// Dump all goroutine stacks before exiting.
		// First, make sure we see the trace for the current goroutine on standard error.
		// If -logtostderr has been specified, the loop below will do that anyway
		// as the first stack in the full dump.
		if !l.toStderr {
			os.Stderr.Write(stacks(false))
		}
		// Write the stack trace for all goroutines to the files.
		trace := stacks(true)
		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
		for log := fatalLog; log >= infoLog; log-- {
			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
				f.Write(trace)
			}
		}
		l.mu.Unlock()
		timeoutFlush(10 * time.Second)
		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
	}
	l.putBuffer(buf)
	l.mu.Unlock()
	// Update exported counters outside the lock; fatalLog has no stats slot.
	if stats := severityStats[s]; stats != nil {
		atomic.AddInt64(&stats.lines, 1)
		atomic.AddInt64(&stats.bytes, int64(len(data)))
	}
}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first.  This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+	}
+}
+
// stacks is a wrapper for runtime.Stack that attempts to recover the data
// for all goroutines. It retries with a doubled buffer until the trace fits
// (up to five attempts); after the last attempt the possibly-truncated
// buffer is returned as-is.
func stacks(all bool) []byte {
	// We don't know how big the traces are; start large and grow as needed.
	size := 10000
	if all {
		size = 100000
	}
	var buf []byte
	for attempt := 0; attempt < 5; attempt++ {
		buf = make([]byte, size)
		if n := runtime.Stack(buf, all); n < len(buf) {
			return buf[:n]
		}
		size *= 2
	}
	return buf
}
+
// logExitFunc provides a simple mechanism to override the default behavior
// of exiting on error. Used in testing and to guarantee we reach a required exit
// for fatal logs. Instead, exit could be a function rather than a method but that
// would make its use clumsier.
var logExitFunc func(error)

// exit is called if there is trouble creating or writing log files.
// It flushes the logs and exits the program; there's no point in hanging around.
// l.mu is held.
func (l *loggingT) exit(err error) {
	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
	// If logExitFunc is set, we do that instead of exiting.
	if logExitFunc != nil {
		logExitFunc(err)
		return
	}
	l.flushAll()
	os.Exit(2)
}
+
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
// file's Sync method and providing a wrapper for the Write method that provides log
// file rotation. There are conflicting methods, so the file cannot be embedded.
// l.mu is held for all its methods.
type syncBuffer struct {
	logger *loggingT
	*bufio.Writer
	file   *os.File
	sev    severity
	nbytes uint64 // The number of bytes written to this file
}

// Sync flushes the underlying file to stable storage.
func (sb *syncBuffer) Sync() error {
	return sb.file.Sync()
}

// Write appends p to the buffered file, rotating to a fresh file first if
// the write would push the file past MaxSize (defined elsewhere in this
// package). Any write or rotation error terminates the process via exit.
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
	if sb.nbytes+uint64(len(p)) >= MaxSize {
		if err := sb.rotateFile(time.Now()); err != nil {
			sb.logger.exit(err)
		}
	}
	n, err = sb.Writer.Write(p)
	sb.nbytes += uint64(n)
	if err != nil {
		sb.logger.exit(err)
	}
	return
}

// rotateFile closes the syncBuffer's file and starts a new one.
// It also writes a small preamble identifying the machine and binary.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		sb.Flush()
		sb.file.Close()
	}
	var err error
	sb.file, _, err = create(severityName[sb.sev], now)
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	// Write header.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
	n, err := sb.file.Write(buf.Bytes())
	sb.nbytes += uint64(n)
	return err
}
+
// bufferSize sizes the buffer associated with each log file. It's large
// so that log records can accumulate without the logging thread blocking
// on disk I/O. The flushDaemon will block instead.
const bufferSize = 256 * 1024

// createFiles creates all the log files for severity from sev down to infoLog.
// l.mu is held.
func (l *loggingT) createFiles(sev severity) error {
	now := time.Now()
	// Files are created in decreasing severity order, so as soon as we find one
	// has already been created, we can stop.
	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
		sb := &syncBuffer{
			logger: l,
			sev:    s,
		}
		if err := sb.rotateFile(now); err != nil {
			return err
		}
		l.file[s] = sb
	}
	return nil
}
+
// flushInterval is how often the background daemon flushes log buffers.
const flushInterval = 30 * time.Second

// flushDaemon periodically flushes the log file buffers.
// It runs for the life of the process; the ticker is intentionally never
// stopped.
func (l *loggingT) flushDaemon() {
	for _ = range time.NewTicker(flushInterval).C {
		l.lockAndFlushAll()
	}
}

// lockAndFlushAll is like flushAll but locks l.mu first.
func (l *loggingT) lockAndFlushAll() {
	l.mu.Lock()
	l.flushAll()
	l.mu.Unlock()
}

// flushAll flushes all the logs and attempts to "sync" their data to disk.
// l.mu is held.
func (l *loggingT) flushAll() {
	// Flush from fatal down, in case there's trouble flushing.
	for s := fatalLog; s >= infoLog; s-- {
		file := l.file[s]
		if file != nil {
			file.Flush() // ignore error
			file.Sync()  // ignore error
		}
	}
}
+
// CopyStandardLogTo arranges for messages written to the Go "log" package's
// default logs to also appear in the Google logs for the named and lower
// severities.  Subsequent changes to the standard log's default output location
// or format may break this behavior.
//
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
// recognized, CopyStandardLogTo panics.
func CopyStandardLogTo(name string) {
	sev, ok := severityByName(name)
	if !ok {
		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
	}
	// Set a log format that captures the user's file and line:
	//   d.go:23: message
	stdLog.SetFlags(stdLog.Lshortfile)
	stdLog.SetOutput(logBridge(sev))
}

// logBridge provides the Write method that enables CopyStandardLogTo to connect
// Go's standard logs to the logs provided by this package.
type logBridge severity

// Write parses the standard logging line and passes its components to the
// logger for severity(lb).
// A line that does not match the expected "file:line: message" shape is
// logged whole with a "bad log format" prefix rather than dropped.
func (lb logBridge) Write(b []byte) (n int, err error) {
	var (
		file = "???"
		line = 1
		text string
	)
	// Split "d.go:23: message" into "d.go", "23", and "message".
	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
		text = fmt.Sprintf("bad log format: %s", b)
	} else {
		file = string(parts[0])
		text = string(parts[2][1:]) // skip leading space
		line, err = strconv.Atoi(string(parts[1]))
		if err != nil {
			text = fmt.Sprintf("bad line number: %s", b)
			line = 1
		}
	}
	// printWithFileLine with alsoToStderr=true, so standard log messages
	// always appear on standard error.
	logging.printWithFileLine(severity(lb), file, line, true, text)
	return len(b), nil
}
+
// setV computes and remembers the V level for a given PC
// when vmodule is enabled.
// File pattern matching takes the basename of the file, stripped
// of its .go suffix, and uses filepath.Match, which is a little more
// general than the *? matching used in C++.
// l.mu is held.
func (l *loggingT) setV(pc uintptr) Level {
	// NOTE(review): assumes pc maps to a known function; runtime.FuncForPC
	// returning nil would panic here. The PCs come from runtime.Callers in
	// V, so in practice they are valid.
	fn := runtime.FuncForPC(pc)
	file, _ := fn.FileLine(pc)
	// The file is something like /a/b/c/d.go. We want just the d.
	if strings.HasSuffix(file, ".go") {
		file = file[:len(file)-3]
	}
	if slash := strings.LastIndex(file, "/"); slash >= 0 {
		file = file[slash+1:]
	}
	// First matching filter wins; cache the result per call site.
	for _, filter := range l.vmodule.filter {
		if filter.match(file) {
			l.vmap[pc] = filter.level
			return filter.level
		}
	}
	l.vmap[pc] = 0
	return 0
}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if glog.V(2) { glog.Info("log this") }
+// or
+//	glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if logging.verbosity.get() >= level {
+		return Verbose(true)
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&logging.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it. This is fairly expensive,
+		// but if V logging is enabled we're slow anyway.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		if runtime.Callers(2, logging.pcs[:]) == 0 {
+			return Verbose(false)
+		}
+		v, ok := logging.vmap[logging.pcs[0]]
+		if !ok {
+			v = logging.setV(logging.pcs[0])
+		}
+		return Verbose(v >= level)
+	}
+	return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+	if v {
+		logging.print(infoLog, args...)
+	}
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+	if v {
+		logging.println(infoLog, args...)
+	}
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+	if v {
+		logging.printf(infoLog, format, args...)
+	}
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+	logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+	logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+	logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+	logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+	logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+	logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+	logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+	logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+	logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+	logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+	logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+	logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+	logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+	logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+	logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printf(fatalLog, format, args...)
+}

+ 124 - 0
Godeps/_workspace/src/github.com/golang/glog/glog_file.go

@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+	if *logDir != "" {
+		logDirs = append(logDirs, *logDir)
+	}
+	logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+	pid      = os.Getpid()
+	program  = filepath.Base(os.Args[0])
+	host     = "unknownhost"
+	userName = "unknownuser"
+)
+
+func init() {
+	h, err := os.Hostname()
+	if err == nil {
+		host = shortHostname(h)
+	}
+
+	current, err := user.Current()
+	if err == nil {
+		userName = current.Username
+	}
+
+	// Sanitize userName since it may contain filepath separators on Windows.
+	userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+	if i := strings.Index(hostname, "."); i >= 0 {
+		return hostname[:i]
+	}
+	return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+		program,
+		host,
+		userName,
+		tag,
+		t.Year(),
+		t.Month(),
+		t.Day(),
+		t.Hour(),
+		t.Minute(),
+		t.Second(),
+		pid)
+	return name, program + "." + tag
+}
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+	onceLogDirs.Do(createLogDirs)
+	if len(logDirs) == 0 {
+		return nil, "", errors.New("log: no log dirs")
+	}
+	name, link := logName(tag, t)
+	var lastErr error
+	for _, dir := range logDirs {
+		fname := filepath.Join(dir, name)
+		f, err := os.Create(fname)
+		if err == nil {
+			symlink := filepath.Join(dir, link)
+			os.Remove(symlink)        // ignore err
+			os.Symlink(name, symlink) // ignore err
+			return f, fname, nil
+		}
+		lastErr = err
+	}
+	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}

+ 415 - 0
Godeps/_workspace/src/github.com/golang/glog/glog_test.go

@@ -0,0 +1,415 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package glog
+
+import (
+	"bytes"
+	"fmt"
+	stdLog "log"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+)
+
+// Test that shortHostname works as advertised.
+func TestShortHostname(t *testing.T) {
+	for hostname, expect := range map[string]string{
+		"":                "",
+		"host":            "host",
+		"host.google.com": "host",
+	} {
+		if got := shortHostname(hostname); expect != got {
+			t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
+		}
+	}
+}
+
+// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
+type flushBuffer struct {
+	bytes.Buffer
+}
+
+func (f *flushBuffer) Flush() error {
+	return nil
+}
+
+func (f *flushBuffer) Sync() error {
+	return nil
+}
+
+// swap sets the log writers and returns the old array.
+func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	old = l.file
+	for i, w := range writers {
+		logging.file[i] = w
+	}
+	return
+}
+
+// newBuffers sets the log writers to all new byte buffers and returns the old array.
+func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
+	return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
+}
+
+// contents returns the specified log value as a string.
+func contents(s severity) string {
+	return logging.file[s].(*flushBuffer).String()
+}
+
+// contains reports whether the string is contained in the log.
+func contains(s severity, str string, t *testing.T) bool {
+	return strings.Contains(contents(s), str)
+}
+
+// setFlags configures the logging flags how the test expects them.
+func setFlags() {
+	logging.toStderr = false
+}
+
+// Test that Info works as advertised.
+func TestInfo(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+func TestInfoDepth(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+
+	f := func() { InfoDepth(1, "depth-test1") }
+
+	// The next three lines must stay together
+	_, _, wantLine, _ := runtime.Caller(0)
+	InfoDepth(0, "depth-test0")
+	f()
+
+	msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
+	if len(msgs) != 2 {
+		t.Fatalf("Got %d lines, expected 2", len(msgs))
+	}
+
+	for i, m := range msgs {
+		if !strings.HasPrefix(m, "I") {
+			t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
+		}
+		w := fmt.Sprintf("depth-test%d", i)
+		if !strings.Contains(m, w) {
+			t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
+		}
+
+		// pull out the line number (between : and ])
+		msg := m[strings.LastIndex(m, ":")+1:]
+		x := strings.Index(msg, "]")
+		if x < 0 {
+			t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
+			continue
+		}
+		line, err := strconv.Atoi(msg[:x])
+		if err != nil {
+			t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
+			continue
+		}
+		wantLine++
+		if wantLine != line {
+			t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
+		}
+	}
+}
+
+func init() {
+	CopyStandardLogTo("INFO")
+}
+
+// Test that CopyStandardLogTo panics on bad input.
+func TestCopyStandardLogToPanic(t *testing.T) {
+	defer func() {
+		if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
+			t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
+		}
+	}()
+	CopyStandardLogTo("LOG")
+}
+
+// Test that using the standard log package logs to INFO.
+func TestStandardLog(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	stdLog.Print("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that the header has the correct format.
+func TestHeader(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	pid = 1234
+	Info("test")
+	var line int
+	format := "I0102 15:04:05.067890    1234 glog_test.go:%d] test\n"
+	n, err := fmt.Sscanf(contents(infoLog), format, &line)
+	if n != 1 || err != nil {
+		t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
+	}
+	// Scanf treats multiple spaces as equivalent to a single space,
+	// so check for correct space-padding also.
+	want := fmt.Sprintf(format, line)
+	if contents(infoLog) != want {
+		t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
+	}
+}
+
+// Test that an Error log goes to Warning and Info.
+// Even in the Info log, the source character will be E, so the data should
+// all be identical.
+func TestError(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	Error("test")
+	if !contains(errorLog, "E", t) {
+		t.Errorf("Error has wrong character: %q", contents(errorLog))
+	}
+	if !contains(errorLog, "test", t) {
+		t.Error("Error failed")
+	}
+	str := contents(errorLog)
+	if !contains(warningLog, str, t) {
+		t.Error("Warning failed")
+	}
+	if !contains(infoLog, str, t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a Warning log goes to Info.
+// Even in the Info log, the source character will be W, so the data should
+// all be identical.
+func TestWarning(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	Warning("test")
+	if !contains(warningLog, "W", t) {
+		t.Errorf("Warning has wrong character: %q", contents(warningLog))
+	}
+	if !contains(warningLog, "test", t) {
+		t.Error("Warning failed")
+	}
+	str := contents(warningLog)
+	if !contains(infoLog, str, t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a V log goes to Info.
+func TestV(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.verbosity.Set("2")
+	defer logging.verbosity.Set("0")
+	V(2).Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a vmodule enables a log in this file.
+func TestVmoduleOn(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.vmodule.Set("glog_test=2")
+	defer logging.vmodule.Set("")
+	if !V(1) {
+		t.Error("V not enabled for 1")
+	}
+	if !V(2) {
+		t.Error("V not enabled for 2")
+	}
+	if V(3) {
+		t.Error("V enabled for 3")
+	}
+	V(2).Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a vmodule of another file does not enable a log in this file.
+func TestVmoduleOff(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.vmodule.Set("notthisfile=2")
+	defer logging.vmodule.Set("")
+	for i := 1; i <= 3; i++ {
+		if V(Level(i)) {
+			t.Errorf("V enabled for %d", i)
+		}
+	}
+	V(2).Info("test")
+	if contents(infoLog) != "" {
+		t.Error("V logged incorrectly")
+	}
+}
+
+// vGlobs are patterns that match/don't match this file at V=2.
+var vGlobs = map[string]bool{
+	// Easy to test the numeric match here.
+	"glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail.
+	"glog_test=2": true,
+	"glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed.
+	// These all use 2 and check the patterns. All are true.
+	"*=2":           true,
+	"?l*=2":         true,
+	"????_*=2":      true,
+	"??[mno]?_*t=2": true,
+	// These all use 2 and check the patterns. All are false.
+	"*x=2":         false,
+	"m*=2":         false,
+	"??_*=2":       false,
+	"?[abc]?_*t=2": false,
+}
+
+// Test that vmodule globbing works as advertised.
+func testVmoduleGlob(pat string, match bool, t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	defer logging.vmodule.Set("")
+	logging.vmodule.Set(pat)
+	if V(2) != Verbose(match) {
+		t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
+	}
+}
+
+// Test that a vmodule globbing works as advertised.
+func TestVmoduleGlob(t *testing.T) {
+	for glob, match := range vGlobs {
+		testVmoduleGlob(glob, match, t)
+	}
+}
+
+func TestRollover(t *testing.T) {
+	setFlags()
+	var err error
+	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
+	logExitFunc = func(e error) {
+		err = e
+	}
+	defer func(previous uint64) { MaxSize = previous }(MaxSize)
+	MaxSize = 512
+
+	Info("x") // Be sure we have a file.
+	info, ok := logging.file[infoLog].(*syncBuffer)
+	if !ok {
+		t.Fatal("info wasn't created")
+	}
+	if err != nil {
+		t.Fatalf("info has initial error: %v", err)
+	}
+	fname0 := info.file.Name()
+	Info(strings.Repeat("x", int(MaxSize))) // force a rollover
+	if err != nil {
+		t.Fatalf("info has error after big write: %v", err)
+	}
+
+	// Make sure the next log file gets a file name with a different
+	// time stamp.
+	//
+	// TODO: determine whether we need to support subsecond log
+	// rotation.  C++ does not appear to handle this case (nor does it
+	// handle Daylight Savings Time properly).
+	time.Sleep(1 * time.Second)
+
+	Info("x") // create a new file
+	if err != nil {
+		t.Fatalf("error after rotation: %v", err)
+	}
+	fname1 := info.file.Name()
+	if fname0 == fname1 {
+		t.Errorf("info.f.Name did not change: %v", fname0)
+	}
+	if info.nbytes >= MaxSize {
+		t.Errorf("file size was not reset: %d", info.nbytes)
+	}
+}
+
+func TestLogBacktraceAt(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	// The peculiar style of this code simplifies line counting and maintenance of the
+	// tracing block below.
+	var infoLine string
+	setTraceLocation := func(file string, line int, ok bool, delta int) {
+		if !ok {
+			t.Fatal("could not get file:line")
+		}
+		_, file = filepath.Split(file)
+		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
+		err := logging.traceLocation.Set(infoLine)
+		if err != nil {
+			t.Fatal("error setting log_backtrace_at: ", err)
+		}
+	}
+	{
+		// Start of tracing block. These lines know about each other's relative position.
+		_, file, line, ok := runtime.Caller(0)
+		setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
+		Info("we want a stack trace here")
+	}
+	numAppearances := strings.Count(contents(infoLog), infoLine)
+	if numAppearances < 2 {
+		// Need 2 appearances, one in the log header and one in the trace:
+		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
+		//   ...
+		//   github.com/glog/glog_test.go:280 (0x41ba91)
+		//   ...
+		// We could be more precise but that would require knowing the details
+		// of the traceback format, which may not be dependable.
+		t.Fatal("got no trace back; log is ", contents(infoLog))
+	}
+}
+
+func BenchmarkHeader(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		buf, _, _ := logging.header(infoLog, 0)
+		logging.putBuffer(buf)
+	}
+}

+ 14 - 0
Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml

@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.3
+  - 1.4
+
+install:
+  - export GOPATH="$HOME/gopath"
+  - mkdir -p "$GOPATH/src/golang.org/x"
+  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+  - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+  - go test -v golang.org/x/oauth2/...

+ 3 - 0
Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS

@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.

+ 31 - 0
Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md

@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+

+ 3 - 0
Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS

@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.

+ 27 - 0
Godeps/_workspace/src/golang.org/x/oauth2/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 64 - 0
Godeps/_workspace/src/golang.org/x/oauth2/README.md

@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+oauth2 package contains a client implementation for OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+	func handler(w http.ResponseWriter, r *http.Request) {
+		var c appengine.Context = appengine.NewContext(r)
+		c.Infof("Logging a message with the old package")
+
+		var ctx context.Context = newappengine.NewContext(r)
+		client := &http.Client{
+			Transport: &oauth2.Transport{
+				Source: google.AppEngineTokenSource(ctx, "scope"),
+				Base:   &newurlfetch.Transport{Context: ctx},
+			},
+		}
+		client.Get("...")
+	}
+

+ 24 - 0
Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go

@@ -0,0 +1,24 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	registerContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}

+ 45 - 0
Godeps/_workspace/src/golang.org/x/oauth2/example_test.go

@@ -0,0 +1,45 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2_test
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+func ExampleConfig() {
+	conf := &oauth2.Config{
+		ClientID:     "YOUR_CLIENT_ID",
+		ClientSecret: "YOUR_CLIENT_SECRET",
+		Scopes:       []string{"SCOPE1", "SCOPE2"},
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://provider.com/o/oauth2/auth",
+			TokenURL: "https://provider.com/o/oauth2/token",
+		},
+	}
+
+	// Redirect user to consent page to ask for permission
+	// for the scopes specified above.
+	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+	fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+	// Use the authorization code that is pushed to the redirect URL.
+	// NewTransportWithCode will do the handshake to retrieve
+	// an access token and initiate a Transport that is
+	// authorized and authenticated by the retrieved token.
+	var code string
+	if _, err := fmt.Scan(&code); err != nil {
+		log.Fatal(err)
+	}
+	tok, err := conf.Exchange(oauth2.NoContext, code)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	client := conf.Client(oauth2.NoContext, tok)
+	client.Get("...")
+}

+ 16 - 0
Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go

@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package facebook provides constants for using OAuth2 to access Facebook.
+package facebook
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Endpoint is Facebook's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.facebook.com/dialog/oauth",
+	TokenURL: "https://graph.facebook.com/oauth/access_token",
+}

+ 16 - 0
Godeps/_workspace/src/golang.org/x/oauth2/github/github.go

@@ -0,0 +1,16 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package github provides constants for using OAuth2 to access Github.
+package github
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Endpoint is Github's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://github.com/login/oauth/authorize",
+	TokenURL: "https://github.com/login/oauth/access_token",
+}

+ 83 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go

@@ -0,0 +1,83 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &appEngineTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
+
+// aeTokens helps the fetched tokens to be reused until their expiration.
+var (
+	aeTokensMu sync.Mutex
+	aeTokens   = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+	mu sync.Mutex // guards t; held while fetching or updating t
+	t  *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+	ctx    context.Context
+	scopes []string
+	key    string // to aeTokens map; space-separated scopes
+}
+
+// Token returns an access token for ts.scopes issued to the current
+// App Engine app's service account, reusing a previously fetched token
+// until it expires. It panics if called outside App Engine (the hook
+// in appengine_hook.go never ran).
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+
+	// Look up (or create) the per-scope-set token entry under the global
+	// map lock, then drop it before doing any slow token fetch so other
+	// scope sets aren't blocked.
+	aeTokensMu.Lock()
+	tok, ok := aeTokens[ts.key]
+	if !ok {
+		tok = &tokenLock{}
+		aeTokens[ts.key] = tok
+	}
+	aeTokensMu.Unlock()
+
+	// tok.mu serializes fetch/refresh for this scope set only.
+	tok.mu.Lock()
+	defer tok.mu.Unlock()
+	if tok.t.Valid() {
+		return tok.t, nil
+	}
+	access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+	if err != nil {
+		return nil, err
+	}
+	tok.t = &oauth2.Token{
+		AccessToken: access,
+		Expiry:      exp,
+	}
+	return tok.t, nil
+}

+ 13 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go

@@ -0,0 +1,13 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+	appengineTokenFunc = appengine.AccessToken
+}

+ 154 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/default.go

@@ -0,0 +1,154 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/jwt"
+	"github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata"
+)
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+//
+// This client should be used when developing services
+// that run on Google App Engine or Google Compute Engine
+// and use "Application Default Credentials."
+//
+// For more details, see:
+// https://developers.google.com/accounts/application-default-credentials
+//
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+	ts, err := DefaultTokenSource(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return oauth2.NewClient(ctx, ts), nil
+}
+
+// DefaultTokenSource is a token source that uses
+// "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+//   1. A JSON file whose path is specified by the
+//      GOOGLE_APPLICATION_CREDENTIALS environment variable.
+//   2. A JSON file in a location known to the gcloud command-line tool.
+//      On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+//      On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+//   3. On Google App Engine it uses the appengine.AccessToken function.
+//   4. On Google Compute Engine, it fetches credentials from the metadata server.
+//      (In this final case any provided scopes are ignored.)
+//
+// For more details, see:
+// https://developers.google.com/accounts/application-default-credentials
+//
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+	// First, try the environment variable.
+	const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+	if filename := os.Getenv(envVar); filename != "" {
+		ts, err := tokenSourceFromFile(ctx, filename, scope)
+		if err != nil {
+			return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+		}
+		return ts, nil
+	}
+
+	// Second, try a well-known file.
+	filename := wellKnownFile()
+	_, err := os.Stat(filename)
+	if err == nil {
+		ts, err2 := tokenSourceFromFile(ctx, filename, scope)
+		if err2 == nil {
+			return ts, nil
+		}
+		err = err2
+	} else if os.IsNotExist(err) {
+		err = nil // ignore this error
+	}
+	if err != nil {
+		// A stat error other than not-exist, or a parse failure of an
+		// existing file, is reported rather than silently falling through
+		// to the App Engine / GCE checks below.
+		return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+	}
+
+	// Third, if we're on Google App Engine use those credentials.
+	if appengineTokenFunc != nil {
+		return AppEngineTokenSource(ctx, scope...), nil
+	}
+
+	// Fourth, if we're on Google Compute Engine use the metadata server.
+	if metadata.OnGCE() {
+		return ComputeTokenSource(""), nil
+	}
+
+	// None are found; return helpful error.
+	const url = "https://developers.google.com/accounts/application-default-credentials"
+	return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
+
+// wellKnownFile returns the platform-specific path of the gcloud
+// "application default credentials" JSON file (step 2 of
+// DefaultTokenSource's lookup order).
+func wellKnownFile() string {
+	const f = "application_default_credentials.json"
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+// tokenSourceFromFile builds a TokenSource from the JSON credentials
+// file at filename. It supports the "authorized_user" type (3-legged
+// user credentials with a refresh token) and the "service_account"
+// type (JWT/2-legged flow).
+func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	// Superset of the fields used by both credential types; Type
+	// selects which ones are meaningful.
+	var d struct {
+		// Common fields
+		Type     string
+		ClientID string `json:"client_id"`
+
+		// User Credential fields
+		ClientSecret string `json:"client_secret"`
+		RefreshToken string `json:"refresh_token"`
+
+		// Service Account fields
+		ClientEmail  string `json:"client_email"`
+		PrivateKeyID string `json:"private_key_id"`
+		PrivateKey   string `json:"private_key"`
+	}
+	if err := json.Unmarshal(b, &d); err != nil {
+		return nil, err
+	}
+	switch d.Type {
+	case "authorized_user":
+		cfg := &oauth2.Config{
+			ClientID:     d.ClientID,
+			ClientSecret: d.ClientSecret,
+			Scopes:       append([]string{}, scopes...), // copy
+			Endpoint:     Endpoint,
+		}
+		tok := &oauth2.Token{RefreshToken: d.RefreshToken}
+		return cfg.TokenSource(ctx, tok), nil
+	case "service_account":
+		cfg := &jwt.Config{
+			Email:      d.ClientEmail,
+			PrivateKey: []byte(d.PrivateKey),
+			Scopes:     append([]string{}, scopes...), // copy
+			TokenURL:   JWTTokenURL,
+		}
+		return cfg.TokenSource(ctx), nil
+	case "":
+		return nil, errors.New("missing 'type' field in credentials")
+	default:
+		return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+	}
+}

+ 150 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go

@@ -0,0 +1,150 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm !appengine
+
+package google_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/google"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/jwt"
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func ExampleDefaultClient() {
+	client, err := google.DefaultClient(oauth2.NoContext,
+		"https://www.googleapis.com/auth/devstorage.full_control")
+	if err != nil {
+		log.Fatal(err)
+	}
+	client.Get("...")
+}
+
+func Example_webServer() {
+	// Your credentials should be obtained from the Google
+	// Developer Console (https://console.developers.google.com).
+	conf := &oauth2.Config{
+		ClientID:     "YOUR_CLIENT_ID",
+		ClientSecret: "YOUR_CLIENT_SECRET",
+		RedirectURL:  "YOUR_REDIRECT_URL",
+		Scopes: []string{
+			"https://www.googleapis.com/auth/bigquery",
+			"https://www.googleapis.com/auth/blogger",
+		},
+		Endpoint: google.Endpoint,
+	}
+	// Redirect user to Google's consent page to ask for permission
+	// for the scopes specified above.
+	url := conf.AuthCodeURL("state")
+	fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+	// Handle the exchange code to initiate a transport.
+	tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := conf.Client(oauth2.NoContext, tok)
+	client.Get("...")
+}
+
+func ExampleJWTConfigFromJSON() {
+	// Your credentials should be obtained from the Google
+	// Developer Console (https://console.developers.google.com).
+	// Navigate to your project, then see the "Credentials" page
+	// under "APIs & Auth".
+	// To create a service account client, click "Create new Client ID",
+	// select "Service Account", and click "Create Client ID". A JSON
+	// key file will then be downloaded to your computer.
+	data, err := ioutil.ReadFile("/path/to/your-project-key.json")
+	if err != nil {
+		log.Fatal(err)
+	}
+	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Initiate an http.Client. The following GET request will be
+	// authorized and authenticated on the behalf of
+	// your service account.
+	client := conf.Client(oauth2.NoContext)
+	client.Get("...")
+}
+
+func ExampleSDKConfig() {
+	// The credentials will be obtained from the first account that
+	// has been authorized with `gcloud auth login`.
+	conf, err := google.NewSDKConfig("")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Initiate an http.Client. The following GET request will be
+	// authorized and authenticated on the behalf of the SDK user.
+	client := conf.Client(oauth2.NoContext)
+	client.Get("...")
+}
+
+func Example_serviceAccount() {
+	// Your credentials should be obtained from the Google
+	// Developer Console (https://console.developers.google.com).
+	conf := &jwt.Config{
+		Email: "xxx@developer.gserviceaccount.com",
+		// The contents of your RSA private key or your PEM file
+		// that contains a private key.
+		// If you have a p12 file instead, you
+		// can use `openssl` to export the private key into a pem file.
+		//
+		//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+		//
+		// The field only supports PEM containers with no passphrase.
+		// The openssl command will convert p12 keys to passphrase-less PEM containers.
+		PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+		Scopes: []string{
+			"https://www.googleapis.com/auth/bigquery",
+			"https://www.googleapis.com/auth/blogger",
+		},
+		TokenURL: google.JWTTokenURL,
+		// If you would like to impersonate a user, you can
+		// create a transport with a subject. The following GET
+		// request will be made on the behalf of user@example.com.
+		// Optional.
+		Subject: "user@example.com",
+	}
+	// Initiate an http.Client, the following GET request will be
+	// authorized and authenticated on the behalf of user@example.com.
+	client := conf.Client(oauth2.NoContext)
+	client.Get("...")
+}
+
+func ExampleAppEngineTokenSource() {
+	var req *http.Request // from the ServeHTTP handler
+	ctx := appengine.NewContext(req)
+	client := &http.Client{
+		Transport: &oauth2.Transport{
+			Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"),
+			Base: &urlfetch.Transport{
+				Context: ctx,
+			},
+		},
+	}
+	client.Get("...")
+}
+
+func ExampleComputeTokenSource() {
+	client := &http.Client{
+		Transport: &oauth2.Transport{
+			// Fetch from Google Compute Engine's metadata server to retrieve
+			// an access token for the provided account.
+			// If no account is specified, "default" is used.
+			Source: google.ComputeTokenSource(""),
+		},
+	}
+	client.Get("...")
+}

+ 145 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/google.go

@@ -0,0 +1,145 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/application-default-credentials.
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/jwt"
+	"github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://accounts.google.com/o/oauth2/auth",
+	TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
+// JSON format and provide the contents of the file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+	type cred struct {
+		ClientID     string   `json:"client_id"`
+		ClientSecret string   `json:"client_secret"`
+		RedirectURIs []string `json:"redirect_uris"`
+		AuthURI      string   `json:"auth_uri"`
+		TokenURI     string   `json:"token_uri"`
+	}
+	var j struct {
+		Web       *cred `json:"web"`
+		Installed *cred `json:"installed"`
+	}
+	if err := json.Unmarshal(jsonKey, &j); err != nil {
+		return nil, err
+	}
+	// Prefer web-application credentials; fall back to installed-app
+	// credentials when only those are present.
+	var c *cred
+	switch {
+	case j.Web != nil:
+		c = j.Web
+	case j.Installed != nil:
+		c = j.Installed
+	default:
+		return nil, fmt.Errorf("oauth2/google: no credentials found")
+	}
+	if len(c.RedirectURIs) < 1 {
+		return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+	}
+	return &oauth2.Config{
+		ClientID:     c.ClientID,
+		ClientSecret: c.ClientSecret,
+		RedirectURL:  c.RedirectURIs[0],
+		Scopes:       scope,
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  c.AuthURI,
+			TokenURL: c.TokenURI,
+		},
+	}, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+	var key struct {
+		Email      string `json:"client_email"`
+		PrivateKey string `json:"private_key"`
+	}
+	if err := json.Unmarshal(jsonKey, &key); err != nil {
+		return nil, err
+	}
+	return &jwt.Config{
+		Email:      key.Email,
+		PrivateKey: []byte(key.PrivateKey),
+		Scopes:     scope,
+		TokenURL:   JWTTokenURL,
+	}, nil
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+	account string
+}
+
+// Token fetches a fresh access token for the configured service
+// account from the GCE metadata server. It fails if the process is
+// not running on GCE. Callers get caching via the ReuseTokenSource
+// wrapper returned by ComputeTokenSource.
+func (cs computeSource) Token() (*oauth2.Token, error) {
+	if !metadata.OnGCE() {
+		return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+	}
+	acct := cs.account
+	if acct == "" {
+		acct = "default"
+	}
+	tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+	if err != nil {
+		return nil, err
+	}
+	var res struct {
+		AccessToken  string `json:"access_token"`
+		ExpiresInSec int    `json:"expires_in"`
+		TokenType    string `json:"token_type"`
+	}
+	err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+	}
+	if res.ExpiresInSec == 0 || res.AccessToken == "" {
+		return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+	}
+	return &oauth2.Token{
+		AccessToken: res.AccessToken,
+		TokenType:   res.TokenType,
+		// The metadata server reports a relative lifetime; convert it
+		// to an absolute expiry.
+		Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+	}, nil
+}

+ 67 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go

@@ -0,0 +1,67 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"strings"
+	"testing"
+)
+
+var webJSONKey = []byte(`
+{
+    "web": {
+        "auth_uri": "https://google.com/o/oauth2/auth",
+        "client_secret": "3Oknc4jS_wA2r9i",
+        "token_uri": "https://google.com/o/oauth2/token",
+        "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+        "redirect_uris": ["https://www.example.com/oauth2callback"],
+        "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
+        "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
+        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+        "javascript_origins": ["https://www.example.com"]
+    }
+}`)
+
+var installedJSONKey = []byte(`{
+  "installed": {
+      "client_id": "222-installed.apps.googleusercontent.com",
+      "redirect_uris": ["https://www.example.com/oauth2callback"]
+    }
+}`)
+
+// TestConfigFromJSON checks that every field of a "web" credentials
+// JSON key is mapped onto the resulting oauth2.Config.
+func TestConfigFromJSON(t *testing.T) {
+	conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
+	if err != nil {
+		t.Error(err)
+	}
+	if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
+		t.Errorf("ClientID = %q; want %q", got, want)
+	}
+	if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
+		t.Errorf("ClientSecret = %q; want %q", got, want)
+	}
+	if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
+		// Field name was previously misspelled "RedictURL" in this message.
+		t.Errorf("RedirectURL = %q; want %q", got, want)
+	}
+	if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
+		t.Errorf("Scopes = %q; want %q", got, want)
+	}
+	if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
+		t.Errorf("AuthURL = %q; want %q", got, want)
+	}
+	if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
+		t.Errorf("TokenURL = %q; want %q", got, want)
+	}
+}
+
+func TestConfigFromJSON_Installed(t *testing.T) {
+	conf, err := ConfigFromJSON(installedJSONKey)
+	if err != nil {
+		t.Error(err)
+	}
+	if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
+		t.Errorf("ClientID = %q; want %q", got, want)
+	}
+}

+ 168 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go

@@ -0,0 +1,168 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+	Data []struct {
+		Credential struct {
+			ClientID     string     `json:"client_id"`
+			ClientSecret string     `json:"client_secret"`
+			AccessToken  string     `json:"access_token"`
+			RefreshToken string     `json:"refresh_token"`
+			TokenExpiry  *time.Time `json:"token_expiry"`
+		} `json:"credential"`
+		Key struct {
+			Account string `json:"account"`
+			Scope   string `json:"scope"`
+		} `json:"key"`
+	}
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+	conf         oauth2.Config
+	initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		// No account requested: resolve the active one from the SDK's
+		// INI-style properties file ("[core] account = ...").
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := internal.ParseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	// Find the credentials entry matching the resolved account. Entries
+	// without any access or refresh token (e.g. service accounts whose
+	// key the SDK holds) cannot be used here and are rejected.
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	usr, err := user.Current()
+	if err == nil {
+		return usr.HomeDir
+	}
+	return os.Getenv("HOME")
+}

+ 46 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go

@@ -0,0 +1,46 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import "testing"
+
+// TestSDKConfig verifies that NewSDKConfig loads the expected token for
+// each account in the testdata gcloud credentials file, and that a
+// service-account entry (no access/refresh token) yields an error.
+func TestSDKConfig(t *testing.T) {
+	sdkConfigPath = func() (string, error) {
+		return "testdata/gcloud", nil
+	}
+
+	tests := []struct {
+		account     string
+		accessToken string
+		err         bool
+	}{
+		{"", "bar_access_token", false},
+		{"foo@example.com", "foo_access_token", false},
+		{"bar@example.com", "bar_access_token", false},
+		{"baz@serviceaccount.example.com", "", true},
+	}
+	for _, tt := range tests {
+		c, err := NewSDKConfig(tt.account)
+		if got, want := err != nil, tt.err; got != want {
+			if !tt.err {
+				// This Errorf previously passed tt.err as an extra
+				// argument with no matching verb (a go vet printf error).
+				t.Errorf("expected no error, got error: %v", err)
+			} else {
+				t.Errorf("expected error, got none")
+			}
+			continue
+		}
+		if err != nil {
+			continue
+		}
+		tok := c.initialToken
+		if tok == nil {
+			t.Errorf("expected token %q, got: nil", tt.accessToken)
+			continue
+		}
+		if tok.AccessToken != tt.accessToken {
+			t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken)
+		}
+	}
+}

+ 122 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials

@@ -0,0 +1,122 @@
+{
+  "data": [
+    {
+      "credential": {
+        "_class": "OAuth2Credentials",
+        "_module": "oauth2client.client",
+        "access_token": "foo_access_token",
+        "client_id": "foo_client_id",
+        "client_secret": "foo_client_secret",
+        "id_token": {
+          "at_hash": "foo_at_hash",
+          "aud": "foo_aud",
+          "azp": "foo_azp",
+          "cid": "foo_cid",
+          "email": "foo@example.com",
+          "email_verified": true,
+          "exp": 1420573614,
+          "iat": 1420569714,
+          "id": "1337",
+          "iss": "accounts.google.com",
+          "sub": "1337",
+          "token_hash": "foo_token_hash",
+          "verified_email": true
+        },
+        "invalid": false,
+        "refresh_token": "foo_refresh_token",
+        "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+        "token_expiry": "2015-01-09T00:51:51Z",
+        "token_response": {
+          "access_token": "foo_access_token",
+          "expires_in": 3600,
+          "id_token": "foo_id_token",
+          "token_type": "Bearer"
+        },
+        "token_uri": "https://accounts.google.com/o/oauth2/token",
+        "user_agent": "Cloud SDK Command Line Tool"
+      },
+      "key": {
+        "account": "foo@example.com",
+        "clientId": "foo_client_id",
+        "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+        "type": "google-cloud-sdk"
+      }
+    },
+    {
+      "credential": {
+        "_class": "OAuth2Credentials",
+        "_module": "oauth2client.client",
+        "access_token": "bar_access_token",
+        "client_id": "bar_client_id",
+        "client_secret": "bar_client_secret",
+        "id_token": {
+          "at_hash": "bar_at_hash",
+          "aud": "bar_aud",
+          "azp": "bar_azp",
+          "cid": "bar_cid",
+          "email": "bar@example.com",
+          "email_verified": true,
+          "exp": 1420573614,
+          "iat": 1420569714,
+          "id": "1337",
+          "iss": "accounts.google.com",
+          "sub": "1337",
+          "token_hash": "bar_token_hash",
+          "verified_email": true
+        },
+        "invalid": false,
+        "refresh_token": "bar_refresh_token",
+        "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+        "token_expiry": "2015-01-09T00:51:51Z",
+        "token_response": {
+          "access_token": "bar_access_token",
+          "expires_in": 3600,
+          "id_token": "bar_id_token",
+          "token_type": "Bearer"
+        },
+        "token_uri": "https://accounts.google.com/o/oauth2/token",
+        "user_agent": "Cloud SDK Command Line Tool"
+      },
+      "key": {
+        "account": "bar@example.com",
+        "clientId": "bar_client_id",
+        "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+        "type": "google-cloud-sdk"
+      }
+    },
+    {
+      "credential": {
+        "_class": "ServiceAccountCredentials",
+        "_kwargs": {},
+        "_module": "oauth2client.client",
+        "_private_key_id": "00000000000000000000000000000000",
+        "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n",
+        "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+        "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+        "_service_account_email": "baz@serviceaccount.example.com",
+        "_service_account_id": "baz.serviceaccount.example.com",
+        "_token_uri": "https://accounts.google.com/o/oauth2/token",
+        "_user_agent": "Cloud SDK Command Line Tool",
+        "access_token": null,
+        "assertion_type": null,
+        "client_id": null,
+        "client_secret": null,
+        "id_token": null,
+        "invalid": false,
+        "refresh_token": null,
+        "revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
+        "service_account_name": "baz@serviceaccount.example.com",
+        "token_expiry": null,
+        "token_response": null,
+        "user_agent": "Cloud SDK Command Line Tool"
+      },
+      "key": {
+        "account": "baz@serviceaccount.example.com",
+        "clientId": "baz_client_id",
+        "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
+        "type": "google-cloud-sdk"
+      }
+    }
+  ],
+  "file_version": 1
+}

+ 2 - 0
Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties

@@ -0,0 +1,2 @@
+[core]
+account = bar@example.com

+ 69 - 0
Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go

@@ -0,0 +1,69 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": map[string]string{}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}

+ 62 - 0
Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go

@@ -0,0 +1,62 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestParseINI(t *testing.T) {
+	tests := []struct {
+		ini  string
+		want map[string]map[string]string
+	}{
+		{
+			`root = toor
+[foo]  
+bar = hop
+ini = nin
+`,
+			map[string]map[string]string{
+				"":    map[string]string{"root": "toor"},
+				"foo": map[string]string{"bar": "hop", "ini": "nin"},
+			},
+		},
+		{
+			`[empty]
+[section]
+empty=
+`,
+			map[string]map[string]string{
+				"":        map[string]string{},
+				"empty":   map[string]string{},
+				"section": map[string]string{"empty": ""},
+			},
+		},
+		{
+			`ignore
+[invalid
+=stuff
+;comment=true
+`,
+			map[string]map[string]string{
+				"": map[string]string{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		result, err := ParseINI(strings.NewReader(tt.ini))
+		if err != nil {
+			t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err)
+			continue
+		}
+		if !reflect.DeepEqual(result, tt.want) {
+			t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want)
+		}
+	}
+}

+ 160 - 0
Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go

@@ -0,0 +1,160 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides encoding and decoding utilities for
+// signed JWS messages.
+package jws
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion
+	Iat   int64  `json:"iat"`             // the time the assertion was issued.
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This array is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+
+	exp time.Time
+	iat time.Time
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	if c.exp.IsZero() || c.iat.IsZero() {
+		// Reverting time back for machines whose time is not perfectly in sync.
+		// If client machine's time is in the future according
+		// to Google servers, an access token will not be issued.
+		now := time.Now().Add(-10 * time.Second)
+		c.iat = now
+		c.exp = now.Add(time.Hour)
+	}
+
+	c.Exp = c.exp.Unix()
+	c.Iat = c.iat.Unix()
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64Encode(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
+		return nil, errors.New("jws: invalid token received")
+	}
+	decoded, err := base64Decode(s[1])
+	if err != nil {
+		return nil, err
+	}
+	c := &ClaimSet{}
+	err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+	return c, err
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
+	head, err := header.encode()
+	if err != nil {
+		return "", err
+	}
+	cs, err := c.encode()
+	if err != nil {
+		return "", err
+	}
+	ss := fmt.Sprintf("%s.%s", head, cs)
+	h := sha256.New()
+	h.Write([]byte(ss))
+	b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))
+	if err != nil {
+		return "", err
+	}
+	sig := base64Encode(b)
+	return fmt.Sprintf("%s.%s", ss, sig), nil
+}
+
+// base64Encode returns a Base64url encoded version of the input string with any
+// trailing "=" stripped.
+func base64Encode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// base64Decode decodes the Base64url encoded string
+func base64Decode(s string) ([]byte, error) {
+	// add back missing padding
+	switch len(s) % 4 {
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	}
+	return base64.URLEncoding.DecodeString(s)
+}

+ 31 - 0
Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go

@@ -0,0 +1,31 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt_test
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/jwt"
+)
+
+func ExampleJWTConfig() {
+	conf := &jwt.Config{
+		Email: "xxx@developer.com",
+		// The contents of your RSA private key or your PEM file
+		// that contains a private key.
+		// If you have a p12 file instead, you
+		// can use `openssl` to export the private key into a pem file.
+		//
+		//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+		//
+		// It only supports PEM containers with no passphrase.
+		PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+		Subject:    "user@example.com",
+		TokenURL:   "https://provider.com/o/oauth2/token",
+	}
+	// Initiate an http.Client, the following GET request will be
+	// authorized and authenticated on the behalf of user@example.com.
+	client := conf.Client(oauth2.NoContext)
+	client.Get("...")
+}

+ 147 - 0
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go

@@ -0,0 +1,147 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/internal"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/jws"
+)
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+	// Email is the OAuth client identifier used when communicating with
+	// the configured OAuth provider.
+	Email string
+
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. The provided
+	// private key is used to sign JWT payloads.
+	// PEM containers with a passphrase are not supported.
+	// Use the following command to convert a PKCS 12 file into a PEM.
+	//
+	//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+	//
+	PrivateKey []byte
+
+	// Subject is the optional user to impersonate.
+	Subject string
+
+	// Scopes optionally specifies a list of requested permission scopes.
+	Scopes []string
+
+	// TokenURL is the endpoint required to complete the 2-legged JWT flow.
+	TokenURL string
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+	pk, err := internal.ParseKey(js.conf.PrivateKey)
+	if err != nil {
+		return nil, err
+	}
+	hc := oauth2.NewClient(js.ctx, nil)
+	claimSet := &jws.ClaimSet{
+		Iss:   js.conf.Email,
+		Scope: strings.Join(js.conf.Scopes, " "),
+		Aud:   js.conf.TokenURL,
+	}
+	if subject := js.conf.Subject; subject != "" {
+		claimSet.Sub = subject
+		// prn is the old name of sub. Keep setting it
+		// to be compatible with legacy OAuth 2.0 providers.
+		claimSet.Prn = subject
+	}
+	payload, err := jws.Encode(defaultHeader, claimSet, pk)
+	if err != nil {
+		return nil, err
+	}
+	v := url.Values{}
+	v.Set("grant_type", defaultGrantType)
+	v.Set("assertion", payload)
+	resp, err := hc.PostForm(js.conf.TokenURL, v)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+	}
+	// tokenRes is the JSON response body.
+	var tokenRes struct {
+		AccessToken string `json:"access_token"`
+		TokenType   string `json:"token_type"`
+		IDToken     string `json:"id_token"`
+		ExpiresIn   int64  `json:"expires_in"` // relative seconds from now
+	}
+	if err := json.Unmarshal(body, &tokenRes); err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	token := &oauth2.Token{
+		AccessToken: tokenRes.AccessToken,
+		TokenType:   tokenRes.TokenType,
+	}
+	raw := make(map[string]interface{})
+	json.Unmarshal(body, &raw) // no error checks for optional fields
+	token = token.WithExtra(raw)
+
+	if secs := tokenRes.ExpiresIn; secs > 0 {
+		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+	}
+	if v := tokenRes.IDToken; v != "" {
+		// decode returned id token to get expiry
+		claimSet, err := jws.Decode(v)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+		}
+		token.Expiry = time.Unix(claimSet.Exp, 0)
+	}
+	return token, nil
+}

+ 134 - 0
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go

@@ -0,0 +1,134 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
+DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
+fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
+1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
+k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
+/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
+3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
+2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
+nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
+6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
+5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
+DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
+M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
+z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
+1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
+J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
+f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
+QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
+cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
+Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
+5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----`)
+
+func TestJWTFetch_JSONResponse(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{
+			"access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
+			"scope": "user",
+			"token_type": "bearer",
+			"expires_in": 3600
+		}`))
+	}))
+	defer ts.Close()
+
+	conf := &Config{
+		Email:      "aaa@xxx.com",
+		PrivateKey: dummyPrivateKey,
+		TokenURL:   ts.URL,
+	}
+	tok, err := conf.TokenSource(oauth2.NoContext).Token()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !tok.Valid() {
+		t.Errorf("Token invalid")
+	}
+	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+		t.Errorf("Unexpected access token, %#v", tok.AccessToken)
+	}
+	if tok.TokenType != "bearer" {
+		t.Errorf("Unexpected token type, %#v", tok.TokenType)
+	}
+	if tok.Expiry.IsZero() {
+		t.Errorf("Unexpected token expiry, %#v", tok.Expiry)
+	}
+	scope := tok.Extra("scope")
+	if scope != "user" {
+		t.Errorf("Unexpected value for scope: %v", scope)
+	}
+}
+
+func TestJWTFetch_BadResponse(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+
+	conf := &Config{
+		Email:      "aaa@xxx.com",
+		PrivateKey: dummyPrivateKey,
+		TokenURL:   ts.URL,
+	}
+	tok, err := conf.TokenSource(oauth2.NoContext).Token()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tok == nil {
+		t.Fatalf("token is nil")
+	}
+	if tok.Valid() {
+		t.Errorf("token is valid. want invalid.")
+	}
+	if tok.AccessToken != "" {
+		t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken)
+	}
+	if want := "bearer"; tok.TokenType != want {
+		t.Errorf("TokenType = %q; want %q", tok.TokenType, want)
+	}
+	scope := tok.Extra("scope")
+	if want := "user"; scope != want {
+		t.Errorf("token scope = %q; want %q", scope, want)
+	}
+}
+
+func TestJWTFetch_BadResponseType(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := &Config{
+		Email:      "aaa@xxx.com",
+		PrivateKey: dummyPrivateKey,
+		TokenURL:   ts.URL,
+	}
+	tok, err := conf.TokenSource(oauth2.NoContext).Token()
+	if err == nil {
+		t.Error("got a token; expected error")
+		if tok.AccessToken != "" {
+			t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+		}
+	}
+}

+ 16 - 0
Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go

@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package linkedin provides constants for using OAuth2 to access LinkedIn.
+package linkedin
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Endpoint is LinkedIn's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.linkedin.com/uas/oauth2/authorization",
+	TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken",
+}

+ 523 - 0
Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go

@@ -0,0 +1,523 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+var NoContext = context.TODO()
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// Endpoint contains the resource server's token endpoint
+	// URLs. These are constants specific to each server and are
+	// often available via site-specific packages, such as
+	// google.Endpoint or github.Endpoint.
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to redirect users going through
+	// the OAuth flow, after the resource owner's URLs.
+	RedirectURL string
+
+	// Scope specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Options.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  condVal(c.RedirectURL),
+		"scope":         condVal(strings.Join(c.Scopes, " ")),
+		"state":         condVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"password"},
+		"username":   {username},
+		"password":   {password},
+		"scope":      condVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type":   {"authorization_code"},
+		"code":         {code},
+		"redirect_uri": condVal(c.RedirectURL),
+		"scope":        condVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// contextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error.  If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type contextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []contextClientFunc
+
+func registerContextClientFunc(fn contextClientFunc) {
+	contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func contextClient(ctx context.Context) (*http.Client, error) {
+	for _, fn := range contextClientFuncs {
+		c, err := fn(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			return c, nil
+		}
+	}
+	if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+		return hc, nil
+	}
+	return http.DefaultClient, nil
+}
+
+func contextTransport(ctx context.Context) http.RoundTripper {
+	hc, err := contextClient(ctx)
+	if err != nil {
+		// This is a rare error case (somebody using nil on App Engine),
+		// so I'd rather not everybody do an error check on this Client
+		// method. They can get the error that they're doing it wrong
+		// later, at client.Get/PostForm time.
+		return errorTransport{err}
+	}
+	return hc.Transport
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+	return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+	tkr := &tokenRefresher{
+		ctx:  ctx,
+		conf: c,
+	}
+	if t != nil {
+		tkr.refreshToken = t.RefreshToken
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: tkr,
+	}
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+	ctx          context.Context // used to get HTTP requests
+	conf         *Config
+	refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using r.Context for HTTP client
+// information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	hc, err := contextClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+	v.Set("client_id", c.ClientID)
+	bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL)
+	if bustedAuth && c.ClientSecret != "" {
+		v.Set("client_secret", c.ClientSecret)
+	}
+	req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	if !bustedAuth {
+		req.SetBasicAuth(c.ClientID, c.ClientSecret)
+	}
+	r, err := hc.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if code := r.StatusCode; code < 200 || code > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+	}
+
+	var token *Token
+	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+	switch content {
+	case "application/x-www-form-urlencoded", "text/plain":
+		vals, err := url.ParseQuery(string(body))
+		if err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  vals.Get("access_token"),
+			TokenType:    vals.Get("token_type"),
+			RefreshToken: vals.Get("refresh_token"),
+			raw:          vals,
+		}
+		e := vals.Get("expires_in")
+		if e == "" {
+			// TODO(jbd): Facebook's OAuth2 implementation is broken and
+			// returns expires_in field in expires. Remove the fallback to expires,
+			// when Facebook fixes their implementation.
+			e = vals.Get("expires")
+		}
+		expires, _ := strconv.Atoi(e)
+		if expires != 0 {
+			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+		}
+	default:
+		var tj tokenJSON
+		if err = json.Unmarshal(body, &tj); err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  tj.AccessToken,
+			TokenType:    tj.TokenType,
+			RefreshToken: tj.RefreshToken,
+			Expiry:       tj.expiry(),
+			raw:          make(map[string]interface{}),
+		}
+		json.Unmarshal(body, &token.raw) // no error checks for optional fields
+	}
+	// Don't overwrite `RefreshToken` with an empty value
+	// if this was a token refreshing request.
+	if token.RefreshToken == "" {
+		token.RefreshToken = v.Get("refresh_token")
+	}
+	return token, nil
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+// expiry converts the relative expires_in (or, as a fallback, the
+// non-standard expires) second count into an absolute time. The zero
+// time is returned when neither field is set, meaning "no expiry".
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+// expirationTime is a token lifetime in seconds that tolerates sloppy
+// providers: it decodes from either a JSON number or a numeric JSON
+// string (PayPal returns expires_in as a string; see tokenJSON).
+type expirationTime int32
+
+// UnmarshalJSON implements json.Unmarshaler. Decoding through
+// json.Number accepts both quoted and unquoted numbers (json.Number's
+// underlying type is string), then Int64 parses the digits.
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+// condVal wraps v in a single-element slice for use as a url.Values
+// entry, or returns nil when v is empty so the parameter is omitted
+// from the encoded form entirely.
+func condVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
+
+// brokenAuthHeaderProviders lists token-endpoint URL prefixes of OAuth2
+// servers that do not handle HTTP Basic auth correctly; for these the
+// client secret is sent as a POST parameter instead (see retrieveToken).
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://www.googleapis.com/",
+	"https://github.com/",
+	"https://api.instagram.com/",
+	"https://www.douban.com/",
+	"https://api.dropbox.com/",
+	"https://api.soundcloud.com/",
+	"https://www.linkedin.com/",
+	"https://api.twitch.tv/",
+	"https://oauth.vk.com/",
+	"https://api.odnoklassniki.ru/",
+	"https://connect.stripe.com/",
+	"https://api.pushbullet.com/",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://www.strava.com/oauth/",
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it either in URL param or Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+	// Prefix match: any token URL under a broken provider's root is
+	// treated as broken.
+	for _, s := range brokenAuthHeaderProviders {
+		if strings.HasPrefix(tokenURL, s) {
+			// Some sites fail to implement the OAuth2 spec fully.
+			return false
+		}
+	}
+
+	// Assume the provider implements the spec properly
+	// otherwise. We can add more exceptions as they're
+	// discovered. We will _not_ be adding configurable hooks
+	// to this package to let users select server bugs.
+	return true
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient contextKey
+
+// contextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a contextKey, being unexported.
+type contextKey struct{}
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		c, err := contextClient(ctx)
+		if err != nil {
+			// Surface the bad-context error lazily, on the first
+			// request the caller makes with the returned client.
+			return &http.Client{Transport: errorTransport{err}}
+		}
+		return c
+	}
+	// Wrap the context's base transport so every request carries an
+	// Authorization header from the (cached) token source.
+	return &http.Client{
+		Transport: &Transport{
+			Base:   contextTransport(ctx),
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		// Unwrap so the new cache sits directly on the underlying
+		// source instead of stacking caches.
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}

+ 435 - 0
Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go

@@ -0,0 +1,435 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+// mockTransport is an http.RoundTripper whose behavior is supplied by
+// the rt callback, letting tests intercept outgoing requests.
+type mockTransport struct {
+	rt func(req *http.Request) (resp *http.Response, err error)
+}
+
+func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+	return t.rt(req)
+}
+
+// mockCache is a trivial token cache stub returning fixed values.
+type mockCache struct {
+	token   *Token
+	readErr error
+}
+
+func (c *mockCache) ReadToken() (*Token, error) {
+	return c.token, c.readErr
+}
+
+func (c *mockCache) WriteToken(*Token) {
+	// do nothing
+}
+
+// newConf builds a test Config whose auth and token endpoints live
+// under the given base URL (typically an httptest server's URL).
+func newConf(url string) *Config {
+	return &Config{
+		ClientID:     "CLIENT_ID",
+		ClientSecret: "CLIENT_SECRET",
+		RedirectURL:  "REDIRECT_URL",
+		Scopes:       []string{"scope1", "scope2"},
+		Endpoint: Endpoint{
+			AuthURL:  url + "/auth",
+			TokenURL: url + "/token",
+		},
+	}
+}
+
+// TestAuthCodeURL checks the full authorization URL, including the
+// AccessTypeOffline and ApprovalForce option parameters.
+func TestAuthCodeURL(t *testing.T) {
+	conf := newConf("server")
+	url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce)
+	if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" {
+		t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
+	}
+}
+
+// TestAuthCodeURL_CustomParam checks that SetParam injects arbitrary
+// key/value pairs into the authorization URL.
+func TestAuthCodeURL_CustomParam(t *testing.T) {
+	conf := newConf("server")
+	param := SetParam("foo", "bar")
+	url := conf.AuthCodeURL("baz", param)
+	if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" {
+		t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
+	}
+}
+
+// TestAuthCodeURL_Optional checks that empty optional fields (secret,
+// redirect, scopes, state) are omitted from the authorization URL.
+func TestAuthCodeURL_Optional(t *testing.T) {
+	conf := &Config{
+		ClientID: "CLIENT_ID",
+		Endpoint: Endpoint{
+			AuthURL:  "/auth-url",
+			TokenURL: "/token-url",
+		},
+	}
+	url := conf.AuthCodeURL("")
+	if url != "/auth-url?client_id=CLIENT_ID&response_type=code" {
+		t.Fatalf("Auth code URL doesn't match the expected, found: %v", url)
+	}
+}
+
+// TestExchangeRequest verifies the code-exchange request (Basic auth
+// header, form body) and the parsing of a form-encoded token response.
+func TestExchangeRequest(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.String() != "/token" {
+			t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+		}
+		headerAuth := r.Header.Get("Authorization")
+		if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+			t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		if headerContentType != "application/x-www-form-urlencoded" {
+			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+		}
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Errorf("Failed reading request body: %s.", err)
+		}
+		if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
+			t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+		}
+		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.Exchange(NoContext, "exchange-code")
+	if err != nil {
+		t.Error(err)
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. Got: %#v", tok)
+	}
+	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+	}
+	if tok.TokenType != "bearer" {
+		t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+	}
+	scope := tok.Extra("scope")
+	if scope != "user" {
+		t.Errorf("Unexpected value for scope: %v", scope)
+	}
+}
+
+// TestExchangeRequest_JSONResponse is the same exchange flow but with
+// a JSON-encoded token response (the spec-compliant content type).
+func TestExchangeRequest_JSONResponse(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.String() != "/token" {
+			t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+		}
+		headerAuth := r.Header.Get("Authorization")
+		if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+			t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		if headerContentType != "application/x-www-form-urlencoded" {
+			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+		}
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Errorf("Failed reading request body: %s.", err)
+		}
+		if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
+			t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+		}
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.Exchange(NoContext, "exchange-code")
+	if err != nil {
+		t.Error(err)
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. Got: %#v", tok)
+	}
+	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+	}
+	if tok.TokenType != "bearer" {
+		t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+	}
+	scope := tok.Extra("scope")
+	if scope != "user" {
+		t.Errorf("Unexpected value for scope: %v", scope)
+	}
+}
+
+const day = 24 * time.Hour
+
+// TestExchangeRequest_JSONResponse_Expiry drives the expiry-parsing
+// helper below through valid, provider-quirk, and malformed variants
+// of the expires_in / expires fields.
+func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) {
+	seconds := int32(day.Seconds())
+	jsonNumberType := reflect.TypeOf(json.Number("0"))
+	for _, c := range []struct {
+		expires string
+		expect  error
+	}{
+		{fmt.Sprintf(`"expires_in": %d`, seconds), nil},
+		{fmt.Sprintf(`"expires_in": "%d"`, seconds), nil},                             // PayPal case
+		{fmt.Sprintf(`"expires": %d`, seconds), nil},                                  // Facebook case
+		{`"expires": false`, &json.UnmarshalTypeError{"bool", jsonNumberType}},        // wrong type
+		{`"expires": {}`, &json.UnmarshalTypeError{"object", jsonNumberType}},         // wrong type
+		{`"expires": "zzz"`, &strconv.NumError{"ParseInt", "zzz", strconv.ErrSyntax}}, // wrong value
+	} {
+		testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect)
+	}
+}
+
+// testExchangeRequest_JSONResponse_expiry performs one exchange with
+// the given expiry JSON fragment and checks either the expected error
+// or that the resulting token expires roughly one day from now.
+func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp)))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	t1 := time.Now().Add(day)
+	tok, err := conf.Exchange(NoContext, "exchange-code")
+	t2 := time.Now().Add(day)
+	if err == nil && expect != nil {
+		t.Errorf("Incorrect state, conf.Exchange() should return an error: %v", expect)
+	} else if err != nil {
+		if reflect.DeepEqual(err, expect) {
+			t.Logf("Expected error: %v", err)
+			return
+		} else {
+			// Unexpected error: report it, then fall through.
+			// (tok may be nil here; Token.Valid is nil-safe.)
+			t.Error(err)
+		}
+
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. Got: %#v", tok)
+	}
+	expiry := tok.Expiry
+	// NOTE(review): "shold" typo in the message below; string left
+	// byte-identical since this is vendored upstream code.
+	if expiry.Before(t1) || expiry.After(t2) {
+		t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2)
+	}
+}
+
+// TestExchangeRequest_BadResponse checks that a response with no
+// access_token still parses (yielding an empty AccessToken).
+func TestExchangeRequest_BadResponse(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.Exchange(NoContext, "code")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tok.AccessToken != "" {
+		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+	}
+}
+
+// TestExchangeRequest_BadResponseType checks that a non-string
+// access_token in the JSON response surfaces as an error.
+func TestExchangeRequest_BadResponseType(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"access_token":123,  "scope": "user", "token_type": "bearer"}`))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	_, err := conf.Exchange(NoContext, "exchange-code")
+	if err == nil {
+		t.Error("expected error from invalid access_token type")
+	}
+}
+
+// TestExchangeRequest_NonBasicAuth checks that for a provider in
+// brokenAuthHeaderProviders (Google here) no Authorization header is
+// sent; the request is intercepted before hitting the network.
+func TestExchangeRequest_NonBasicAuth(t *testing.T) {
+	tr := &mockTransport{
+		rt: func(r *http.Request) (w *http.Response, err error) {
+			headerAuth := r.Header.Get("Authorization")
+			if headerAuth != "" {
+				t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+			}
+			return nil, errors.New("no response")
+		},
+	}
+	c := &http.Client{Transport: tr}
+	conf := &Config{
+		ClientID: "CLIENT_ID",
+		Endpoint: Endpoint{
+			AuthURL:  "https://accounts.google.com/auth",
+			TokenURL: "https://accounts.google.com/token",
+		},
+	}
+
+	ctx := context.WithValue(context.Background(), HTTPClient, c)
+	conf.Exchange(ctx, "code")
+}
+
+// TestPasswordCredentialsTokenRequest verifies the resource-owner
+// password grant: request shape and token parsing.
+func TestPasswordCredentialsTokenRequest(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+		expected := "/token"
+		if r.URL.String() != expected {
+			t.Errorf("URL = %q; want %q", r.URL, expected)
+		}
+		headerAuth := r.Header.Get("Authorization")
+		expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
+		if headerAuth != expected {
+			t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		expected = "application/x-www-form-urlencoded"
+		if headerContentType != expected {
+			t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
+		}
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Errorf("Failed reading request body: %s.", err)
+		}
+		expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1"
+		if string(body) != expected {
+			t.Errorf("res.Body = %q; want %q", string(body), expected)
+		}
+		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1")
+	if err != nil {
+		t.Error(err)
+	}
+	if !tok.Valid() {
+		t.Fatalf("Token invalid. Got: %#v", tok)
+	}
+	expected := "90d64460d14870c08c81352a05dedd3465940a7c"
+	if tok.AccessToken != expected {
+		t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected)
+	}
+	expected = "bearer"
+	if tok.TokenType != expected {
+		t.Errorf("TokenType = %q; want %q", tok.TokenType, expected)
+	}
+}
+
+// TestTokenRefreshRequest checks the refresh-grant payload emitted
+// when a client built with only a refresh token makes a request.
+func TestTokenRefreshRequest(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.String() == "/somethingelse" {
+			return
+		}
+		if r.URL.String() != "/token" {
+			t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		if headerContentType != "application/x-www-form-urlencoded" {
+			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+		}
+		body, _ := ioutil.ReadAll(r.Body)
+		if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+			t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+		}
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"})
+	c.Get(ts.URL + "/somethingelse")
+}
+
+// TestFetchWithNoRefreshToken checks that a client built with a nil
+// token fails rather than attempting a refresh.
+func TestFetchWithNoRefreshToken(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.String() == "/somethingelse" {
+			return
+		}
+		if r.URL.String() != "/token" {
+			t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+		}
+		headerContentType := r.Header.Get("Content-Type")
+		if headerContentType != "application/x-www-form-urlencoded" {
+			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+		}
+		body, _ := ioutil.ReadAll(r.Body)
+		if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+			t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+		}
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	c := conf.Client(NoContext, nil)
+	_, err := c.Get(ts.URL + "/somethingelse")
+	if err == nil {
+		t.Errorf("Fetch should return an error if no refresh token is set")
+	}
+}
+
+// TestRefreshToken_RefreshTokenReplacement checks that a refresh
+// response carrying a new refresh_token replaces the cached one.
+func TestRefreshToken_RefreshTokenReplacement(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write([]byte(`{"access_token":"ACCESS TOKEN",  "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`))
+		return
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+	tkr := tokenRefresher{
+		conf:         conf,
+		ctx:          NoContext,
+		refreshToken: "OLD REFRESH TOKEN",
+	}
+	tk, err := tkr.Token()
+	if err != nil {
+		t.Errorf("Unexpected refreshToken error returned: %v", err)
+		return
+	}
+	if tk.RefreshToken != tkr.refreshToken {
+		t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken)
+	}
+}
+
+// TestConfigClientWithToken checks that Config.Client attaches the
+// given token as a Bearer Authorization header.
+func TestConfigClientWithToken(t *testing.T) {
+	tok := &Token{
+		AccessToken: "abc123",
+	}
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want {
+			t.Errorf("Authorization header = %q; want %q", got, want)
+		}
+		return
+	}))
+	defer ts.Close()
+	conf := newConf(ts.URL)
+
+	c := conf.Client(NoContext, tok)
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	if err != nil {
+		t.Error(err)
+	}
+	_, err = c.Do(req)
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// Test_providerAuthHeaderWorks checks the prefix matching against the
+// broken-provider list: each listed URL (and any URL under it) must be
+// reported broken, and unlisted URLs must not.
+func Test_providerAuthHeaderWorks(t *testing.T) {
+	for _, p := range brokenAuthHeaderProviders {
+		if providerAuthHeaderWorks(p) {
+			t.Errorf("URL: %s not found in list", p)
+		}
+		// Suffixed variant exercises the prefix match.
+		p := fmt.Sprintf("%ssomesuffix", p)
+		if providerAuthHeaderWorks(p) {
+			t.Errorf("URL: %s not found in list", p)
+		}
+	}
+	p := "https://api.not-in-the-list-example.com/"
+	if !providerAuthHeaderWorks(p) {
+		t.Errorf("URL: %s found in list", p)
+	}
+
+}

+ 16 - 0
Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go

@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
+package odnoklassniki
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
+// Note: api.odnoklassniki.ru appears in oauth2's brokenAuthHeaderProviders
+// list, so the client secret is sent as a POST parameter, not Basic auth.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.odnoklassniki.ru/oauth/authorize",
+	TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
+}

+ 22 - 0
Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go

@@ -0,0 +1,22 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package paypal provides constants for using OAuth2 to access PayPal.
+package paypal
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.
+// (PayPal returns expires_in as a JSON string; the oauth2 package's
+// expirationTime type accommodates that.)
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+	TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
+}
+
+// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.
+var SandboxEndpoint = oauth2.Endpoint{
+	AuthURL:  "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+	TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
+}

+ 104 - 0
Godeps/_workspace/src/golang.org/x/oauth2/token.go

@@ -0,0 +1,104 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	// It holds either url.Values or map[string]interface{};
+	// see Extra.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	// raw is url.Values for form-encoded responses and
+	// map[string]interface{} for JSON responses.
+	if vals, ok := t.raw.(url.Values); ok {
+		// TODO(jbd): Cast numeric values to int64 or float64.
+		return vals.Get(key)
+	}
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+	return nil
+}
+
+// expired reports whether the token is expired.
+// A token is treated as expired expiryDelta early to absorb
+// client-server clock skew.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}

+ 50 - 0
Godeps/_workspace/src/golang.org/x/oauth2/token_test.go

@@ -0,0 +1,50 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"testing"
+	"time"
+)
+
+// TestTokenExtra checks Extra lookups against a raw metadata map,
+// including a miss returning nil.
+func TestTokenExtra(t *testing.T) {
+	type testCase struct {
+		key  string
+		val  interface{}
+		want interface{}
+	}
+	const key = "extra-key"
+	cases := []testCase{
+		{key: key, val: "abc", want: "abc"},
+		{key: key, val: 123, want: 123},
+		{key: key, val: "", want: ""},
+		{key: "other-key", val: "def", want: nil},
+	}
+	for _, tc := range cases {
+		extra := make(map[string]interface{})
+		extra[tc.key] = tc.val
+		tok := &Token{raw: extra}
+		if got, want := tok.Extra(key), tc.want; got != want {
+			t.Errorf("Extra(%q) = %q; want %q", key, got, want)
+		}
+	}
+}
+
+// TestTokenExpiry checks the expiryDelta skew window: a token exactly
+// expiryDelta away counts as expired, one comfortably beyond does not.
+func TestTokenExpiry(t *testing.T) {
+	now := time.Now()
+	cases := []struct {
+		name string
+		tok  *Token
+		want bool
+	}{
+		{name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false},
+		{name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true},
+	}
+	for _, tc := range cases {
+		if got, want := tc.tok.expired(), tc.want; got != want {
+			t.Errorf("expired (%q) = %v; want %v", tc.name, got, want)
+		}
+	}
+}

+ 138 - 0
Godeps/_workspace/src/golang.org/x/oauth2/transport.go

@@ -0,0 +1,138 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	// NOTE(review): on these early error returns req.Body is not
+	// closed; the RoundTripper contract suggests it should be —
+	// confirm against current golang.org/x/oauth2.
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	// Track original->modified so CancelRequest can cancel the clone.
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	// Drop the modReq entry once the body is fully read or closed.
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+// The cancellation is forwarded to the cloned request actually sent by
+// RoundTrip, looked up via the modReq map.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+// base returns the RoundTripper to delegate to: Base if set,
+// otherwise http.DefaultTransport.
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+// setModReq records (or, when mod is nil, clears) the mapping from an
+// original request to its authorized clone. The map is lazily created.
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+// (Other reference fields, such as Body, are shared with the original.)
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+	return r2
+}
+
+// onEOFReader wraps a ReadCloser and invokes fn exactly once, at the
+// first EOF or at Close, whichever comes first.
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+// runFunc calls fn once and clears it so repeated EOF/Close are no-ops.
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
+
+// errorTransport always fails with the wrapped error; used by NewClient
+// to defer a context error until the client is actually used.
+type errorTransport struct{ err error }
+
+func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	return nil, t.err
+}

+ 53 - 0
Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go

@@ -0,0 +1,53 @@
+package oauth2
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+)
+
+// tokenSource is a TokenSource stub returning a fixed token.
+type tokenSource struct{ token *Token }
+
+func (t *tokenSource) Token() (*Token, error) {
+	return t.token, nil
+}
+
+// TestTransportTokenSource checks that Transport stamps requests with
+// the token from its Source as a Bearer Authorization header.
+func TestTransportTokenSource(t *testing.T) {
+	ts := &tokenSource{
+		token: &Token{
+			AccessToken: "abc",
+		},
+	}
+	tr := &Transport{
+		Source: ts,
+	}
+	server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
+		if r.Header.Get("Authorization") != "Bearer abc" {
+			t.Errorf("Transport doesn't set the Authorization header from the fetched token")
+		}
+	})
+	defer server.Close()
+	client := http.Client{Transport: tr}
+	client.Get(server.URL)
+}
+
+// TestTokenValidNoAccessToken checks that a zero Token is invalid.
+func TestTokenValidNoAccessToken(t *testing.T) {
+	token := &Token{}
+	if token.Valid() {
+		t.Errorf("Token should not be valid with no access token")
+	}
+}
+
+// TestExpiredWithExpiry checks that a token past its Expiry is invalid.
+func TestExpiredWithExpiry(t *testing.T) {
+	token := &Token{
+		Expiry: time.Now().Add(-5 * time.Hour),
+	}
+	if token.Valid() {
+		t.Errorf("Token should not be valid if it expired in the past")
+	}
+}
+
+// newMockServer starts an httptest server for the given handler.
+func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
+	return httptest.NewServer(http.HandlerFunc(handler))
+}

+ 16 - 0
Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go

@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vk provides constants for using OAuth2 to access VK.com.
+package vk
+
+import (
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+)
+
+// Endpoint is VK's OAuth 2.0 endpoint.
+// Note: oauth.vk.com appears in oauth2's brokenAuthHeaderProviders
+// list, so the client secret is sent as a POST parameter, not Basic auth.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://oauth.vk.com/authorize",
+	TokenURL: "https://oauth.vk.com/access_token",
+}

+ 37 - 0
Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go

@@ -0,0 +1,37 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.3
+
+package metadata
+
+import (
+	"net"
+	"time"
+)
+
+// This is a workaround for https://github.com/golang/oauth2/issues/70, where
+// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of
+// Jan 2015 still runs).
+//
+// TODO(bradfitz,jbd,adg): remove this once App Engine supports Go
+// 1.3+.
+func init() {
+	go13Dialer = func() *net.Dialer {
+		return &net.Dialer{
+			Timeout:   750 * time.Millisecond,
+			KeepAlive: 30 * time.Second,
+		}
+	}
+}

+ 267 - 0
Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go

@@ -0,0 +1,267 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/cloud/internal"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var metaClient = &http.Client{
+	Transport: &internal.Transport{
+		Base: &http.Transport{
+			Dial: dialer().Dial,
+			ResponseHeaderTimeout: 750 * time.Millisecond,
+		},
+	},
+}
+
+// go13Dialer is nil until we're using Go 1.3+.
+// This is a workaround for https://github.com/golang/oauth2/issues/70, where
+// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of
+// Jan 2015 still runs).
+//
+// TODO(bradfitz,jbd,adg,dsymonds): remove this once App Engine supports Go
+// 1.3+ and go-app-builder also supports 1.3+, or when Go 1.2 is no longer an
+// option on App Engine.
+var go13Dialer func() *net.Dialer
+
+func dialer() *net.Dialer {
+	if fn := go13Dialer; fn != nil {
+		return fn()
+	}
+	return &net.Dialer{
+		Timeout: 750 * time.Millisecond,
+	}
+}
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://metadata/computeMetadata/v1/".
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	// Using 169.254.169.254 instead of "metadata" here because Go
+	// binaries built with the "netgo" tag and without cgo won't
+	// know the search suffix for "metadata" is
+	// ".google.internal", and this IP address is documented as
+	// being stable anyway.
+	url := "http://169.254.169.254/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", url, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	res, err := metaClient.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", NotDefinedError(suffix)
+	}
+	if res.StatusCode != 200 {
+		return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(all), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+	s, err = Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+	defer c.mu.Unlock()
+	c.mu.Lock()
+	if c.v != "" {
+		return c.v, nil
+	}
+	if c.trim {
+		v, err = getTrimmed(c.k)
+	} else {
+		v, err = Get(c.k)
+	}
+	if err == nil {
+		c.v = v
+	}
+	return
+}
+
+var onGCE struct {
+	sync.Mutex
+	set bool
+	v   bool
+}
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+	defer onGCE.Unlock()
+	onGCE.Lock()
+	if onGCE.set {
+		return onGCE.v
+	}
+	onGCE.set = true
+
+	// We use the DNS name of the metadata service here instead of the IP address
+	// because we expect that to fail faster in the not-on-GCE case.
+	res, err := metaClient.Get("http://metadata.google.internal")
+	if err != nil {
+		return false
+	}
+	onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
+	return onGCE.v
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will probably be of
+// the form "INSTANCENAME.c.PROJECT.internal" but that isn't
+// guaranteed.
+//
+// TODO: what is this defined to be? Docs say "The host name of the
+// instance."
+func Hostname() (string, error) {
+	return getTrimmed("network-interfaces/0/ip")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM.  The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}

+ 128 - 0
Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go

@@ -0,0 +1,128 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+)
+
+type contextKey struct{}
+
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+	if c == nil {
+		panic("nil *http.Client passed to WithContext")
+	}
+	if projID == "" {
+		panic("empty project ID passed to WithContext")
+	}
+	return context.WithValue(parent, contextKey{}, &cloudContext{
+		ProjectID:  projID,
+		HTTPClient: c,
+	})
+}
+
+const userAgent = "gcloud-golang/0.1"
+
+type cloudContext struct {
+	ProjectID  string
+	HTTPClient *http.Client
+
+	mu  sync.Mutex             // guards svc
+	svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
+}
+
+// Service returns the result of the fill function if it's never been
+// called before for the given name (which is assumed to be an API
+// service name, like "datastore"). If it has already been cached, the fill
+// func is not run.
+// It's safe for concurrent use by multiple goroutines.
+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
+	return cc(ctx).service(name, fill)
+}
+
+func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.svc == nil {
+		c.svc = make(map[string]interface{})
+	} else if v, ok := c.svc[name]; ok {
+		return v
+	}
+	v := fill(c.HTTPClient)
+	c.svc[name] = v
+	return v
+}
+
+// Transport is an http.RoundTripper that appends
+// Google Cloud client's user-agent to the original
+// request's user-agent header.
+type Transport struct {
+	// Base represents the actual http.RoundTripper
+	// the requests will be delegated to.
+	Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = cloneRequest(req)
+	ua := req.Header.Get("User-Agent")
+	if ua == "" {
+		ua = userAgent
+	} else {
+		ua = fmt.Sprintf("%s %s", ua, userAgent)
+	}
+	req.Header.Set("User-Agent", ua)
+	return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
+
+func ProjID(ctx context.Context) string {
+	return cc(ctx).ProjectID
+}
+
+func HTTPClient(ctx context.Context) *http.Client {
+	return cc(ctx).HTTPClient
+}
+
+// cc returns the internal *cloudContext (cc) state for a context.Context.
+// It panics if the user did it wrong.
+func cc(ctx context.Context) *cloudContext {
+	if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
+		return c
+	}
+	panic("invalid context.Context type; it should be created with cloud.NewContext")
+}

+ 1633 - 0
Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go

@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go.
+// source: datastore_v1.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+	datastore_v1.proto
+
+It has these top-level messages:
+	PartitionId
+	Key
+	Value
+	Property
+	Entity
+	EntityResult
+	Query
+	KindExpression
+	PropertyReference
+	PropertyExpression
+	PropertyOrder
+	Filter
+	CompositeFilter
+	PropertyFilter
+	GqlQuery
+	GqlQueryArg
+	QueryResultBatch
+	Mutation
+	MutationResult
+	ReadOptions
+	LookupRequest
+	LookupResponse
+	RunQueryRequest
+	RunQueryResponse
+	BeginTransactionRequest
+	BeginTransactionResponse
+	RollbackRequest
+	RollbackResponse
+	CommitRequest
+	CommitResponse
+	AllocateIdsRequest
+	AllocateIdsResponse
+*/
+package datastore
+
+import proto "github.com/coreos/etcd/Godeps/_workspace/src/github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+// Specifies what data the 'entity' field contains.
+// A ResultType is either implied (for example, in LookupResponse.found it
+// is always FULL) or specified by context (for example, in message
+// QueryResultBatch, field 'entity_result_type' specifies a ResultType
+// for all the values in field 'entity_result').
+type EntityResult_ResultType int32
+
+const (
+	EntityResult_FULL       EntityResult_ResultType = 1
+	EntityResult_PROJECTION EntityResult_ResultType = 2
+	// The entity may have no key.
+	// A property value may have meaning 18.
+	EntityResult_KEY_ONLY EntityResult_ResultType = 3
+)
+
+var EntityResult_ResultType_name = map[int32]string{
+	1: "FULL",
+	2: "PROJECTION",
+	3: "KEY_ONLY",
+}
+var EntityResult_ResultType_value = map[string]int32{
+	"FULL":       1,
+	"PROJECTION": 2,
+	"KEY_ONLY":   3,
+}
+
+func (x EntityResult_ResultType) Enum() *EntityResult_ResultType {
+	p := new(EntityResult_ResultType)
+	*p = x
+	return p
+}
+func (x EntityResult_ResultType) String() string {
+	return proto.EnumName(EntityResult_ResultType_name, int32(x))
+}
+func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType")
+	if err != nil {
+		return err
+	}
+	*x = EntityResult_ResultType(value)
+	return nil
+}
+
+type PropertyExpression_AggregationFunction int32
+
+const (
+	PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1
+)
+
+var PropertyExpression_AggregationFunction_name = map[int32]string{
+	1: "FIRST",
+}
+var PropertyExpression_AggregationFunction_value = map[string]int32{
+	"FIRST": 1,
+}
+
+func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction {
+	p := new(PropertyExpression_AggregationFunction)
+	*p = x
+	return p
+}
+func (x PropertyExpression_AggregationFunction) String() string {
+	return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x))
+}
+func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction")
+	if err != nil {
+		return err
+	}
+	*x = PropertyExpression_AggregationFunction(value)
+	return nil
+}
+
+type PropertyOrder_Direction int32
+
+const (
+	PropertyOrder_ASCENDING  PropertyOrder_Direction = 1
+	PropertyOrder_DESCENDING PropertyOrder_Direction = 2
+)
+
+var PropertyOrder_Direction_name = map[int32]string{
+	1: "ASCENDING",
+	2: "DESCENDING",
+}
+var PropertyOrder_Direction_value = map[string]int32{
+	"ASCENDING":  1,
+	"DESCENDING": 2,
+}
+
+func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction {
+	p := new(PropertyOrder_Direction)
+	*p = x
+	return p
+}
+func (x PropertyOrder_Direction) String() string {
+	return proto.EnumName(PropertyOrder_Direction_name, int32(x))
+}
+func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction")
+	if err != nil {
+		return err
+	}
+	*x = PropertyOrder_Direction(value)
+	return nil
+}
+
+type CompositeFilter_Operator int32
+
+const (
+	CompositeFilter_AND CompositeFilter_Operator = 1
+)
+
+var CompositeFilter_Operator_name = map[int32]string{
+	1: "AND",
+}
+var CompositeFilter_Operator_value = map[string]int32{
+	"AND": 1,
+}
+
+func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator {
+	p := new(CompositeFilter_Operator)
+	*p = x
+	return p
+}
+func (x CompositeFilter_Operator) String() string {
+	return proto.EnumName(CompositeFilter_Operator_name, int32(x))
+}
+func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator")
+	if err != nil {
+		return err
+	}
+	*x = CompositeFilter_Operator(value)
+	return nil
+}
+
+type PropertyFilter_Operator int32
+
+const (
+	PropertyFilter_LESS_THAN             PropertyFilter_Operator = 1
+	PropertyFilter_LESS_THAN_OR_EQUAL    PropertyFilter_Operator = 2
+	PropertyFilter_GREATER_THAN          PropertyFilter_Operator = 3
+	PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4
+	PropertyFilter_EQUAL                 PropertyFilter_Operator = 5
+	PropertyFilter_HAS_ANCESTOR          PropertyFilter_Operator = 11
+)
+
+var PropertyFilter_Operator_name = map[int32]string{
+	1:  "LESS_THAN",
+	2:  "LESS_THAN_OR_EQUAL",
+	3:  "GREATER_THAN",
+	4:  "GREATER_THAN_OR_EQUAL",
+	5:  "EQUAL",
+	11: "HAS_ANCESTOR",
+}
+var PropertyFilter_Operator_value = map[string]int32{
+	"LESS_THAN":             1,
+	"LESS_THAN_OR_EQUAL":    2,
+	"GREATER_THAN":          3,
+	"GREATER_THAN_OR_EQUAL": 4,
+	"EQUAL":                 5,
+	"HAS_ANCESTOR":          11,
+}
+
+func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator {
+	p := new(PropertyFilter_Operator)
+	*p = x
+	return p
+}
+func (x PropertyFilter_Operator) String() string {
+	return proto.EnumName(PropertyFilter_Operator_name, int32(x))
+}
+func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator")
+	if err != nil {
+		return err
+	}
+	*x = PropertyFilter_Operator(value)
+	return nil
+}
+
+// The possible values for the 'more_results' field.
+type QueryResultBatch_MoreResultsType int32
+
+const (
+	QueryResultBatch_NOT_FINISHED             QueryResultBatch_MoreResultsType = 1
+	QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2
+	// results after the limit.
+	QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3
+)
+
+var QueryResultBatch_MoreResultsType_name = map[int32]string{
+	1: "NOT_FINISHED",
+	2: "MORE_RESULTS_AFTER_LIMIT",
+	3: "NO_MORE_RESULTS",
+}
+var QueryResultBatch_MoreResultsType_value = map[string]int32{
+	"NOT_FINISHED":             1,
+	"MORE_RESULTS_AFTER_LIMIT": 2,
+	"NO_MORE_RESULTS":          3,
+}
+
+func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType {
+	p := new(QueryResultBatch_MoreResultsType)
+	*p = x
+	return p
+}
+func (x QueryResultBatch_MoreResultsType) String() string {
+	return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x))
+}
+func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType")
+	if err != nil {
+		return err
+	}
+	*x = QueryResultBatch_MoreResultsType(value)
+	return nil
+}
+
+type ReadOptions_ReadConsistency int32
+
+const (
+	ReadOptions_DEFAULT  ReadOptions_ReadConsistency = 0
+	ReadOptions_STRONG   ReadOptions_ReadConsistency = 1
+	ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2
+)
+
+var ReadOptions_ReadConsistency_name = map[int32]string{
+	0: "DEFAULT",
+	1: "STRONG",
+	2: "EVENTUAL",
+}
+var ReadOptions_ReadConsistency_value = map[string]int32{
+	"DEFAULT":  0,
+	"STRONG":   1,
+	"EVENTUAL": 2,
+}
+
+func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency {
+	p := new(ReadOptions_ReadConsistency)
+	*p = x
+	return p
+}
+func (x ReadOptions_ReadConsistency) String() string {
+	return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x))
+}
+func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency")
+	if err != nil {
+		return err
+	}
+	*x = ReadOptions_ReadConsistency(value)
+	return nil
+}
+
+type BeginTransactionRequest_IsolationLevel int32
+
+const (
+	BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0
+	// conflict if their mutations conflict. For example:
+	// Read(A),Write(B) may not conflict with Read(B),Write(A),
+	// but Read(B),Write(B) does conflict with Read(B),Write(B).
+	BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1
+)
+
+var BeginTransactionRequest_IsolationLevel_name = map[int32]string{
+	0: "SNAPSHOT",
+	1: "SERIALIZABLE",
+}
+var BeginTransactionRequest_IsolationLevel_value = map[string]int32{
+	"SNAPSHOT":     0,
+	"SERIALIZABLE": 1,
+}
+
+func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel {
+	p := new(BeginTransactionRequest_IsolationLevel)
+	*p = x
+	return p
+}
+func (x BeginTransactionRequest_IsolationLevel) String() string {
+	return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x))
+}
+func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel")
+	if err != nil {
+		return err
+	}
+	*x = BeginTransactionRequest_IsolationLevel(value)
+	return nil
+}
+
+type CommitRequest_Mode int32
+
+const (
+	CommitRequest_TRANSACTIONAL     CommitRequest_Mode = 1
+	CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2
+)
+
+var CommitRequest_Mode_name = map[int32]string{
+	1: "TRANSACTIONAL",
+	2: "NON_TRANSACTIONAL",
+}
+var CommitRequest_Mode_value = map[string]int32{
+	"TRANSACTIONAL":     1,
+	"NON_TRANSACTIONAL": 2,
+}
+
+func (x CommitRequest_Mode) Enum() *CommitRequest_Mode {
+	p := new(CommitRequest_Mode)
+	*p = x
+	return p
+}
+func (x CommitRequest_Mode) String() string {
+	return proto.EnumName(CommitRequest_Mode_name, int32(x))
+}
+func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode")
+	if err != nil {
+		return err
+	}
+	*x = CommitRequest_Mode(value)
+	return nil
+}
+
+// An identifier for a particular subset of entities.
+//
+// Entities are partitioned into various subsets, each used by different
+// datasets and different namespaces within a dataset and so forth.
+//
+// All input partition IDs are normalized before use.
+// A partition ID is normalized as follows:
+//   If the partition ID is unset or is set to an empty partition ID, replace it
+//       with the context partition ID.
+//   Otherwise, if the partition ID has no dataset ID, assign it the context
+//       partition ID's dataset ID.
+// Unless otherwise documented, the context partition ID has the dataset ID set
+// to the context dataset ID and no other partition dimension set.
+//
+// A partition ID is empty if all of its fields are unset.
+//
+// Partition dimension:
+// A dimension may be unset.
+// A dimension's value must never be "".
+// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
+// If the value of any dimension matches regex "__.*__",
+// the partition is reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented contexts.
+//
+// Dataset ID:
+// A dataset id's value must never be "".
+// A dataset id's value must match
+// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99}
+type PartitionId struct {
+	// The dataset ID.
+	DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"`
+	// The namespace.
+	Namespace        *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *PartitionId) Reset()         { *m = PartitionId{} }
+func (m *PartitionId) String() string { return proto.CompactTextString(m) }
+func (*PartitionId) ProtoMessage()    {}
+
+func (m *PartitionId) GetDatasetId() string {
+	if m != nil && m.DatasetId != nil {
+		return *m.DatasetId
+	}
+	return ""
+}
+
+func (m *PartitionId) GetNamespace() string {
+	if m != nil && m.Namespace != nil {
+		return *m.Namespace
+	}
+	return ""
+}
+
+// A unique identifier for an entity.
+// If a key's partition id or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+type Key struct {
+	// Entities are partitioned into subsets, currently identified by a dataset
+	// (usually implicitly specified by the project) and namespace ID.
+	// Queries are scoped to a single partition.
+	PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"`
+	// The entity path.
+	// An entity path consists of one or more elements composed of a kind and a
+	// string or numerical identifier, which identify entities. The first
+	// element identifies a <em>root entity</em>, the second element identifies
+	// a <em>child</em> of the root entity, the third element a child of the
+	// second entity, and so forth. The entities identified by all prefixes of
+	// the path are called the element's <em>ancestors</em>.
+	// An entity path is always fully complete: ALL of the entity's ancestors
+	// are required to be in the path along with the entity identifier itself.
+	// The only exception is that in some documented cases, the identifier in the
+	// last path element (for the entity) itself may be omitted. A path can never
+	// be empty.
+	PathElement      []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"`
+	XXX_unrecognized []byte             `json:"-"`
+}
+
+func (m *Key) Reset()         { *m = Key{} }
+func (m *Key) String() string { return proto.CompactTextString(m) }
+func (*Key) ProtoMessage()    {}
+
+func (m *Key) GetPartitionId() *PartitionId {
+	if m != nil {
+		return m.PartitionId
+	}
+	return nil
+}
+
+func (m *Key) GetPathElement() []*Key_PathElement {
+	if m != nil {
+		return m.PathElement
+	}
+	return nil
+}
+
+// A (kind, ID/name) pair used to construct a key path.
+//
+// At most one of name or ID may be set.
+// If either is set, the element is complete.
+// If neither is set, the element is incomplete.
+type Key_PathElement struct {
+	// The kind of the entity.
+	// A kind matching regex "__.*__" is reserved/read-only.
+	// A kind must not contain more than 500 characters.
+	// Cannot be "".
+	Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"`
+	// The ID of the entity.
+	// Never equal to zero. Values less than zero are discouraged and will not
+	// be supported in the future.
+	Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"`
+	// The name of the entity.
+	// A name matching regex "__.*__" is reserved/read-only.
+	// A name must not be more than 500 characters.
+	// Cannot be "".
+	Name             *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Key_PathElement) Reset()         { *m = Key_PathElement{} }
+func (m *Key_PathElement) String() string { return proto.CompactTextString(m) }
+func (*Key_PathElement) ProtoMessage()    {}
+
+func (m *Key_PathElement) GetKind() string {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return ""
+}
+
+func (m *Key_PathElement) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *Key_PathElement) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+//
+// At most one of the <type>Value fields may be set.
+// If none are set the value is "null".
+//
+type Value struct {
+	// A boolean value.
+	BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"`
+	// An integer value.
+	IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"`
+	// A double value.
+	DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"`
+	// A timestamp value.
+	TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"`
+	// A key value.
+	KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"`
+	// A blob key value.
+	BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"`
+	// A UTF-8 encoded string value.
+	StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"`
+	// A blob value.
+	BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"`
+	// An entity value.
+	// May have no key.
+	// May have a key with an incomplete key path.
+	// May have a reserved/read-only key.
+	EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"`
+	// A list value.
+	// Cannot contain another list value.
+	// Cannot also have a meaning and indexing set.
+	ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"`
+	// The <code>meaning</code> field is reserved and should not be used.
+	Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"`
+	// If the value should be indexed.
+	//
+	// The <code>indexed</code> property may be set for a
+	// <code>null</code> value.
+	// When <code>indexed</code> is <code>true</code>, <code>stringValue</code>
+	// is limited to 500 characters and the blob value is limited to 500 bytes.
+	// Exception: If meaning is set to 2, string_value is limited to 2038
+	// characters regardless of indexed.
+	// When indexed is true, meaning 15 and 22 are not allowed, and meaning 16
+	// will be ignored on input (and will never be set on output).
+	// Input values by default have <code>indexed</code> set to
+	// <code>true</code>; however, you can explicitly set <code>indexed</code> to
+	// <code>true</code> if you want. (An output value never has
+	// <code>indexed</code> explicitly set to <code>true</code>.) If a value is
+	// itself an entity, it cannot have <code>indexed</code> set to
+	// <code>true</code>.
+	// Exception: An entity value with meaning 9, 20 or 21 may be indexed.
+	Indexed          *bool  `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Value) Reset()         { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage()    {}
+
+const Default_Value_Indexed bool = true
+
+func (m *Value) GetBooleanValue() bool {
+	if m != nil && m.BooleanValue != nil {
+		return *m.BooleanValue
+	}
+	return false
+}
+
+func (m *Value) GetIntegerValue() int64 {
+	if m != nil && m.IntegerValue != nil {
+		return *m.IntegerValue
+	}
+	return 0
+}
+
+func (m *Value) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *Value) GetTimestampMicrosecondsValue() int64 {
+	if m != nil && m.TimestampMicrosecondsValue != nil {
+		return *m.TimestampMicrosecondsValue
+	}
+	return 0
+}
+
+func (m *Value) GetKeyValue() *Key {
+	if m != nil {
+		return m.KeyValue
+	}
+	return nil
+}
+
+func (m *Value) GetBlobKeyValue() string {
+	if m != nil && m.BlobKeyValue != nil {
+		return *m.BlobKeyValue
+	}
+	return ""
+}
+
+func (m *Value) GetStringValue() string {
+	if m != nil && m.StringValue != nil {
+		return *m.StringValue
+	}
+	return ""
+}
+
+func (m *Value) GetBlobValue() []byte {
+	if m != nil {
+		return m.BlobValue
+	}
+	return nil
+}
+
+func (m *Value) GetEntityValue() *Entity {
+	if m != nil {
+		return m.EntityValue
+	}
+	return nil
+}
+
+func (m *Value) GetListValue() []*Value {
+	if m != nil {
+		return m.ListValue
+	}
+	return nil
+}
+
+func (m *Value) GetMeaning() int32 {
+	if m != nil && m.Meaning != nil {
+		return *m.Meaning
+	}
+	return 0
+}
+
+func (m *Value) GetIndexed() bool {
+	if m != nil && m.Indexed != nil {
+		return *m.Indexed
+	}
+	return Default_Value_Indexed
+}
+
+// An entity property.
+type Property struct {
+	// The name of the property.
+	// A property name matching regex "__.*__" is reserved.
+	// A reserved property name is forbidden in certain documented contexts.
+	// The name must not contain more than 500 characters.
+	// Cannot be "".
+	Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	// The value(s) of the property.
+	// Each value can have only one value property populated. For example,
+	// you cannot have a values list of <code>{ value: { integerValue: 22,
+	// stringValue: "a" } }</code>, but you can have <code>{ value: { listValue:
+	// [ { integerValue: 22 }, { stringValue: "a" } ] }</code>.
+	Value            *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset()         { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage()    {}
+
+func (m *Property) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Property) GetValue() *Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// An entity.
+//
+// An entity is limited to 1 megabyte when stored. That <em>roughly</em>
+// corresponds to a limit of 1 megabyte for the serialized form of this
+// message.
+type Entity struct {
+	// The entity's key.
+	//
+	// An entity must have a key, unless otherwise documented (for example,
+	// an entity in <code>Value.entityValue</code> may have no key).
+	// An entity's kind is its key's path's last element's kind,
+	// or null if it has no key.
+	Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+	// The entity's properties.
+	// Each property's name must be unique for its entity.
+	Property         []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"`
+	XXX_unrecognized []byte      `json:"-"`
+}
+
+func (m *Entity) Reset()         { *m = Entity{} }
+func (m *Entity) String() string { return proto.CompactTextString(m) }
+func (*Entity) ProtoMessage()    {}
+
+func (m *Entity) GetKey() *Key {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *Entity) GetProperty() []*Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+// The result of fetching an entity from the datastore.
+type EntityResult struct {
+	// The resulting entity.
+	Entity           *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *EntityResult) Reset()         { *m = EntityResult{} }
+func (m *EntityResult) String() string { return proto.CompactTextString(m) }
+func (*EntityResult) ProtoMessage()    {}
+
+func (m *EntityResult) GetEntity() *Entity {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+// A query.
+type Query struct {
+	// The projection to return. If not set the entire entity is returned.
+	Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"`
+	// The kinds to query (if empty, returns entities from all kinds).
+	Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"`
+	// The filter to apply (optional).
+	Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"`
+	// The order to apply to the query results (if empty, order is unspecified).
+	Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"`
+	// The properties to group by (if empty, no grouping is applied to the
+	// result set).
+	GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"`
+	// A starting point for the query results. Optional. Query cursors are
+	// returned in query result batches.
+	StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"`
+	// An ending point for the query results. Optional. Query cursors are
+	// returned in query result batches.
+	EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"`
+	// The number of results to skip. Applies before limit, but after all other
+	// constraints (optional, defaults to 0).
+	Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+	// The maximum number of results to return. Applies after all other
+	// constraints. Optional.
+	Limit            *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset()         { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage()    {}
+
+const Default_Query_Offset int32 = 0
+
+func (m *Query) GetProjection() []*PropertyExpression {
+	if m != nil {
+		return m.Projection
+	}
+	return nil
+}
+
+func (m *Query) GetKind() []*KindExpression {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (m *Query) GetFilter() *Filter {
+	if m != nil {
+		return m.Filter
+	}
+	return nil
+}
+
+func (m *Query) GetOrder() []*PropertyOrder {
+	if m != nil {
+		return m.Order
+	}
+	return nil
+}
+
+func (m *Query) GetGroupBy() []*PropertyReference {
+	if m != nil {
+		return m.GroupBy
+	}
+	return nil
+}
+
+func (m *Query) GetStartCursor() []byte {
+	if m != nil {
+		return m.StartCursor
+	}
+	return nil
+}
+
+func (m *Query) GetEndCursor() []byte {
+	if m != nil {
+		return m.EndCursor
+	}
+	return nil
+}
+
+func (m *Query) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return 0
+}
+
+// A representation of a kind.
+type KindExpression struct {
+	// The name of the kind.
+	Name             *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *KindExpression) Reset()         { *m = KindExpression{} }
+func (m *KindExpression) String() string { return proto.CompactTextString(m) }
+func (*KindExpression) ProtoMessage()    {}
+
+func (m *KindExpression) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+// A reference to a property relative to the kind expressions.
+type PropertyReference struct {
+	// The name of the property.
+	Name             *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *PropertyReference) Reset()         { *m = PropertyReference{} }
+func (m *PropertyReference) String() string { return proto.CompactTextString(m) }
+func (*PropertyReference) ProtoMessage()    {}
+
+func (m *PropertyReference) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+// A representation of a property in a projection.
+type PropertyExpression struct {
+	// The property to project.
+	Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"`
+	// The aggregation function to apply to the property. Optional.
+	// Can only be used when grouping by at least one property. Must
+	// then be set on all properties in the projection that are not
+	// being grouped by.
+	AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=datastore.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"`
+	XXX_unrecognized    []byte                                  `json:"-"`
+}
+
+func (m *PropertyExpression) Reset()         { *m = PropertyExpression{} }
+func (m *PropertyExpression) String() string { return proto.CompactTextString(m) }
+func (*PropertyExpression) ProtoMessage()    {}
+
+func (m *PropertyExpression) GetProperty() *PropertyReference {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction {
+	if m != nil && m.AggregationFunction != nil {
+		return *m.AggregationFunction
+	}
+	return PropertyExpression_FIRST
+}
+
+// The desired order for a specific property.
+type PropertyOrder struct {
+	// The property to order by.
+	Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"`
+	// The direction to order by.
+	Direction        *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=datastore.PropertyOrder_Direction,def=1" json:"direction,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *PropertyOrder) Reset()         { *m = PropertyOrder{} }
+func (m *PropertyOrder) String() string { return proto.CompactTextString(m) }
+func (*PropertyOrder) ProtoMessage()    {}
+
+const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING
+
+func (m *PropertyOrder) GetProperty() *PropertyReference {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+func (m *PropertyOrder) GetDirection() PropertyOrder_Direction {
+	if m != nil && m.Direction != nil {
+		return *m.Direction
+	}
+	return Default_PropertyOrder_Direction
+}
+
+// A holder for any type of filter. Exactly one field should be specified.
+type Filter struct {
+	// A composite filter.
+	CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"`
+	// A filter on a property.
+	PropertyFilter   *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *Filter) Reset()         { *m = Filter{} }
+func (m *Filter) String() string { return proto.CompactTextString(m) }
+func (*Filter) ProtoMessage()    {}
+
+func (m *Filter) GetCompositeFilter() *CompositeFilter {
+	if m != nil {
+		return m.CompositeFilter
+	}
+	return nil
+}
+
+func (m *Filter) GetPropertyFilter() *PropertyFilter {
+	if m != nil {
+		return m.PropertyFilter
+	}
+	return nil
+}
+
+// A filter that merges the multiple other filters using the given operation.
+type CompositeFilter struct {
+	// The operator for combining multiple filters.
+	Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=datastore.CompositeFilter_Operator" json:"operator,omitempty"`
+	// The list of filters to combine.
+	// Must contain at least one filter.
+	Filter           []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *CompositeFilter) Reset()         { *m = CompositeFilter{} }
+func (m *CompositeFilter) String() string { return proto.CompactTextString(m) }
+func (*CompositeFilter) ProtoMessage()    {}
+
+func (m *CompositeFilter) GetOperator() CompositeFilter_Operator {
+	if m != nil && m.Operator != nil {
+		return *m.Operator
+	}
+	return CompositeFilter_AND
+}
+
+func (m *CompositeFilter) GetFilter() []*Filter {
+	if m != nil {
+		return m.Filter
+	}
+	return nil
+}
+
+// A filter on a specific property.
+type PropertyFilter struct {
+	// The property to filter by.
+	Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"`
+	// The operator to filter by.
+	Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=datastore.PropertyFilter_Operator" json:"operator,omitempty"`
+	// The value to compare the property to.
+	Value            *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyFilter) Reset()         { *m = PropertyFilter{} }
+func (m *PropertyFilter) String() string { return proto.CompactTextString(m) }
+func (*PropertyFilter) ProtoMessage()    {}
+
+func (m *PropertyFilter) GetProperty() *PropertyReference {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+func (m *PropertyFilter) GetOperator() PropertyFilter_Operator {
+	if m != nil && m.Operator != nil {
+		return *m.Operator
+	}
+	return PropertyFilter_LESS_THAN
+}
+
+func (m *PropertyFilter) GetValue() *Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// A GQL query.
+type GqlQuery struct {
+	QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"`
+	// When false, the query string must not contain a literal.
+	AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"`
+	// A named argument must set field GqlQueryArg.name.
+	// No two named arguments may have the same name.
+	// For each non-reserved named binding site in the query string,
+	// there must be a named argument with that name,
+	// but not necessarily the inverse.
+	NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"`
+	// Numbered binding site @1 references the first numbered argument,
+	// effectively using 1-based indexing, rather than the usual 0.
+	// A numbered argument must NOT set field GqlQueryArg.name.
+	// For each binding site numbered i in query_string,
+	// there must be an ith numbered argument.
+	// The inverse must also be true.
+	NumberArg        []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *GqlQuery) Reset()         { *m = GqlQuery{} }
+func (m *GqlQuery) String() string { return proto.CompactTextString(m) }
+func (*GqlQuery) ProtoMessage()    {}
+
+const Default_GqlQuery_AllowLiteral bool = false
+
+func (m *GqlQuery) GetQueryString() string {
+	if m != nil && m.QueryString != nil {
+		return *m.QueryString
+	}
+	return ""
+}
+
+func (m *GqlQuery) GetAllowLiteral() bool {
+	if m != nil && m.AllowLiteral != nil {
+		return *m.AllowLiteral
+	}
+	return Default_GqlQuery_AllowLiteral
+}
+
+func (m *GqlQuery) GetNameArg() []*GqlQueryArg {
+	if m != nil {
+		return m.NameArg
+	}
+	return nil
+}
+
+func (m *GqlQuery) GetNumberArg() []*GqlQueryArg {
+	if m != nil {
+		return m.NumberArg
+	}
+	return nil
+}
+
+// A binding argument for a GQL query.
+// Exactly one of fields value and cursor must be set.
+type GqlQueryArg struct {
+	// Must match regex "[A-Za-z_$][A-Za-z_$0-9]*".
+	// Must not match regex "__.*__".
+	// Must not be "".
+	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value            *Value  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Cursor           []byte  `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GqlQueryArg) Reset()         { *m = GqlQueryArg{} }
+func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) }
+func (*GqlQueryArg) ProtoMessage()    {}
+
+func (m *GqlQueryArg) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *GqlQueryArg) GetValue() *Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *GqlQueryArg) GetCursor() []byte {
+	if m != nil {
+		return m.Cursor
+	}
+	return nil
+}
+
+// A batch of results produced by a query.
+type QueryResultBatch struct {
+	// The result type for every entity in entityResults.
+	EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=datastore.EntityResult_ResultType" json:"entity_result_type,omitempty"`
+	// The results for this batch.
+	EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"`
+	// A cursor that points to the position after the last result in the batch.
+	// May be absent.
+	EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"`
+	// The state of the query after the current batch.
+	MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=datastore.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"`
+	// The number of results skipped because of <code>Query.offset</code>.
+	SkippedResults   *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResultBatch) Reset()         { *m = QueryResultBatch{} }
+func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) }
+func (*QueryResultBatch) ProtoMessage()    {}
+
+func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType {
+	if m != nil && m.EntityResultType != nil {
+		return *m.EntityResultType
+	}
+	return EntityResult_FULL
+}
+
+func (m *QueryResultBatch) GetEntityResult() []*EntityResult {
+	if m != nil {
+		return m.EntityResult
+	}
+	return nil
+}
+
+func (m *QueryResultBatch) GetEndCursor() []byte {
+	if m != nil {
+		return m.EndCursor
+	}
+	return nil
+}
+
+func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType {
+	if m != nil && m.MoreResults != nil {
+		return *m.MoreResults
+	}
+	return QueryResultBatch_NOT_FINISHED
+}
+
+func (m *QueryResultBatch) GetSkippedResults() int32 {
+	if m != nil && m.SkippedResults != nil {
+		return *m.SkippedResults
+	}
+	return 0
+}
+
+// A set of changes to apply.
+//
+// No entity in this message may have a reserved property name,
+// not even a property in an entity in a value.
+// No value in this message may have meaning 18,
+// not even a value in an entity in another value.
+//
+// If entities with duplicate keys are present, an arbitrary choice will
+// be made as to which is written.
+type Mutation struct {
+	// Entities to upsert.
+	// Each upserted entity's key must have a complete path and
+	// must not be reserved/read-only.
+	Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"`
+	// Entities to update.
+	// Each updated entity's key must have a complete path and
+	// must not be reserved/read-only.
+	Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"`
+	// Entities to insert.
+	// Each inserted entity's key must have a complete path and
+	// must not be reserved/read-only.
+	Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"`
+	// Insert entities with a newly allocated ID.
+	// Each inserted entity's key must omit the final identifier in its path and
+	// must not be reserved/read-only.
+	InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"`
+	// Keys of entities to delete.
+	// Each key must have a complete key path and must not be reserved/read-only.
+	Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"`
+	// Ignore a user specified read-only period. Optional.
+	Force            *bool  `protobuf:"varint,6,opt,name=force" json:"force,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Mutation) Reset()         { *m = Mutation{} }
+func (m *Mutation) String() string { return proto.CompactTextString(m) }
+func (*Mutation) ProtoMessage()    {}
+
+func (m *Mutation) GetUpsert() []*Entity {
+	if m != nil {
+		return m.Upsert
+	}
+	return nil
+}
+
+func (m *Mutation) GetUpdate() []*Entity {
+	if m != nil {
+		return m.Update
+	}
+	return nil
+}
+
+func (m *Mutation) GetInsert() []*Entity {
+	if m != nil {
+		return m.Insert
+	}
+	return nil
+}
+
+func (m *Mutation) GetInsertAutoId() []*Entity {
+	if m != nil {
+		return m.InsertAutoId
+	}
+	return nil
+}
+
+func (m *Mutation) GetDelete() []*Key {
+	if m != nil {
+		return m.Delete
+	}
+	return nil
+}
+
+func (m *Mutation) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return false
+}
+
+// The result of applying a mutation.
+type MutationResult struct {
+	// Number of index writes.
+	IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"`
+	// Keys for <code>insertAutoId</code> entities. One per entity from the
+	// request, in the same order.
+	InsertAutoIdKey  []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MutationResult) Reset()         { *m = MutationResult{} }
+func (m *MutationResult) String() string { return proto.CompactTextString(m) }
+func (*MutationResult) ProtoMessage()    {}
+
+func (m *MutationResult) GetIndexUpdates() int32 {
+	if m != nil && m.IndexUpdates != nil {
+		return *m.IndexUpdates
+	}
+	return 0
+}
+
+func (m *MutationResult) GetInsertAutoIdKey() []*Key {
+	if m != nil {
+		return m.InsertAutoIdKey
+	}
+	return nil
+}
+
+// Options shared by read requests.
+type ReadOptions struct {
+	// The read consistency to use.
+	// Cannot be set when transaction is set.
+	// Lookup and ancestor queries default to STRONG, global queries default to
+	// EVENTUAL and cannot be set to STRONG.
+	ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=datastore.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"`
+	// The transaction to use. Optional.
+	Transaction      []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReadOptions) Reset()         { *m = ReadOptions{} }
+func (m *ReadOptions) String() string { return proto.CompactTextString(m) }
+func (*ReadOptions) ProtoMessage()    {}
+
+const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT
+
+func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency {
+	if m != nil && m.ReadConsistency != nil {
+		return *m.ReadConsistency
+	}
+	return Default_ReadOptions_ReadConsistency
+}
+
+func (m *ReadOptions) GetTransaction() []byte {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+// The request for Lookup.
+type LookupRequest struct {
+	// Options for this lookup request. Optional.
+	ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"`
+	// Keys of entities to look up from the datastore.
+	Key              []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LookupRequest) Reset()         { *m = LookupRequest{} }
+func (m *LookupRequest) String() string { return proto.CompactTextString(m) }
+func (*LookupRequest) ProtoMessage()    {}
+
+func (m *LookupRequest) GetReadOptions() *ReadOptions {
+	if m != nil {
+		return m.ReadOptions
+	}
+	return nil
+}
+
+func (m *LookupRequest) GetKey() []*Key {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+// The response for Lookup.
+type LookupResponse struct {
+	// Entities found as ResultType.FULL entities.
+	Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
+	// Entities not found as ResultType.KEY_ONLY entities.
+	Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"`
+	// A list of keys that were not looked up due to resource constraints.
+	Deferred         []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LookupResponse) Reset()         { *m = LookupResponse{} }
+func (m *LookupResponse) String() string { return proto.CompactTextString(m) }
+func (*LookupResponse) ProtoMessage()    {}
+
+func (m *LookupResponse) GetFound() []*EntityResult {
+	if m != nil {
+		return m.Found
+	}
+	return nil
+}
+
+func (m *LookupResponse) GetMissing() []*EntityResult {
+	if m != nil {
+		return m.Missing
+	}
+	return nil
+}
+
+func (m *LookupResponse) GetDeferred() []*Key {
+	if m != nil {
+		return m.Deferred
+	}
+	return nil
+}
+
+// The request for RunQuery.
+type RunQueryRequest struct {
+	// The options for this query.
+	ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"`
+	// Entities are partitioned into subsets, identified by a dataset (usually
+	// implicitly specified by the project) and namespace ID. Queries are scoped
+	// to a single partition.
+	// This partition ID is normalized with the standard default context
+	// partition ID, but all other partition IDs in RunQueryRequest are
+	// normalized with this partition ID as the context partition ID.
+	PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"`
+	// The query to run.
+	// Either this field or field gql_query must be set, but not both.
+	Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"`
+	// The GQL query to run.
+	// Either this field or field query must be set, but not both.
+	GqlQuery         *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *RunQueryRequest) Reset()         { *m = RunQueryRequest{} }
+func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) }
+func (*RunQueryRequest) ProtoMessage()    {}
+
+func (m *RunQueryRequest) GetReadOptions() *ReadOptions {
+	if m != nil {
+		return m.ReadOptions
+	}
+	return nil
+}
+
+func (m *RunQueryRequest) GetPartitionId() *PartitionId {
+	if m != nil {
+		return m.PartitionId
+	}
+	return nil
+}
+
+func (m *RunQueryRequest) GetQuery() *Query {
+	if m != nil {
+		return m.Query
+	}
+	return nil
+}
+
+func (m *RunQueryRequest) GetGqlQuery() *GqlQuery {
+	if m != nil {
+		return m.GqlQuery
+	}
+	return nil
+}
+
+// The response for RunQuery.
+type RunQueryResponse struct {
+	// A batch of query results (always present).
+	Batch            *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *RunQueryResponse) Reset()         { *m = RunQueryResponse{} }
+func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) }
+func (*RunQueryResponse) ProtoMessage()    {}
+
+func (m *RunQueryResponse) GetBatch() *QueryResultBatch {
+	if m != nil {
+		return m.Batch
+	}
+	return nil
+}
+
+// The request for BeginTransaction.
+type BeginTransactionRequest struct {
+	// The transaction isolation level.
+	IsolationLevel   *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=datastore.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"`
+	XXX_unrecognized []byte                                  `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage()    {}
+
+const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT
+
+func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel {
+	if m != nil && m.IsolationLevel != nil {
+		return *m.IsolationLevel
+	}
+	return Default_BeginTransactionRequest_IsolationLevel
+}
+
+// The response for BeginTransaction.
+type BeginTransactionResponse struct {
+	// The transaction identifier (always present).
+	Transaction      []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionResponse) Reset()         { *m = BeginTransactionResponse{} }
+func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionResponse) ProtoMessage()    {}
+
+func (m *BeginTransactionResponse) GetTransaction() []byte {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+// The request for Rollback.
+type RollbackRequest struct {
+	// The transaction identifier, returned by a call to
+	// <code>beginTransaction</code>.
+	Transaction      []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RollbackRequest) Reset()         { *m = RollbackRequest{} }
+func (m *RollbackRequest) String() string { return proto.CompactTextString(m) }
+func (*RollbackRequest) ProtoMessage()    {}
+
+func (m *RollbackRequest) GetTransaction() []byte {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+// The response for Rollback.
+type RollbackResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RollbackResponse) Reset()         { *m = RollbackResponse{} }
+func (m *RollbackResponse) String() string { return proto.CompactTextString(m) }
+func (*RollbackResponse) ProtoMessage()    {}
+
+// The request for Commit.
+type CommitRequest struct {
+	// The transaction identifier, returned by a call to
+	// <code>beginTransaction</code>. Must be set when mode is TRANSACTIONAL.
+	Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"`
+	// The mutation to perform. Optional.
+	Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"`
+	// The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL.
+	Mode             *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=datastore.CommitRequest_Mode,def=1" json:"mode,omitempty"`
+	XXX_unrecognized []byte              `json:"-"`
+}
+
+func (m *CommitRequest) Reset()         { *m = CommitRequest{} }
+func (m *CommitRequest) String() string { return proto.CompactTextString(m) }
+func (*CommitRequest) ProtoMessage()    {}
+
+const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL
+
+func (m *CommitRequest) GetTransaction() []byte {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *CommitRequest) GetMutation() *Mutation {
+	if m != nil {
+		return m.Mutation
+	}
+	return nil
+}
+
+func (m *CommitRequest) GetMode() CommitRequest_Mode {
+	if m != nil && m.Mode != nil {
+		return *m.Mode
+	}
+	return Default_CommitRequest_Mode
+}
+
+// The response for Commit.
+type CommitResponse struct {
+	// The result of performing the mutation (if any).
+	MutationResult   *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *CommitResponse) Reset()         { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage()    {}
+
+func (m *CommitResponse) GetMutationResult() *MutationResult {
+	if m != nil {
+		return m.MutationResult
+	}
+	return nil
+}
+
+// The request for AllocateIds.
+type AllocateIdsRequest struct {
+	// A list of keys with incomplete key paths to allocate IDs for.
+	// No key may be reserved/read-only.
+	Key              []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage()    {}
+
+func (m *AllocateIdsRequest) GetKey() []*Key {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+// The response for AllocateIds.
+type AllocateIdsResponse struct {
+	// The keys specified in the request (in the same order), each with
+	// its key path completed with a newly allocated ID.
+	Key              []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage()    {}
+
+func (m *AllocateIdsResponse) GetKey() []*Key {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("datastore.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value)
+	proto.RegisterEnum("datastore.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value)
+	proto.RegisterEnum("datastore.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value)
+	proto.RegisterEnum("datastore.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value)
+	proto.RegisterEnum("datastore.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value)
+	proto.RegisterEnum("datastore.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value)
+	proto.RegisterEnum("datastore.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value)
+	proto.RegisterEnum("datastore.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value)
+	proto.RegisterEnum("datastore.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value)
+}

+ 594 - 0
Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto

@@ -0,0 +1,594 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// The datastore v1 service proto definitions
+
+syntax = "proto2";
+
+package datastore;
+option java_package = "com.google.api.services.datastore";
+
+
+// An identifier for a particular subset of entities.
+//
+// Entities are partitioned into various subsets, each used by different
+// datasets and different namespaces within a dataset and so forth.
+//
+// All input partition IDs are normalized before use.
+// A partition ID is normalized as follows:
+//   If the partition ID is unset or is set to an empty partition ID, replace it
+//       with the context partition ID.
+//   Otherwise, if the partition ID has no dataset ID, assign it the context
+//       partition ID's dataset ID.
+// Unless otherwise documented, the context partition ID has the dataset ID set
+// to the context dataset ID and no other partition dimension set.
+//
+// A partition ID is empty if all of its fields are unset.
+//
+// Partition dimension:
+// A dimension may be unset.
+// A dimension's value must never be "".
+// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
+// If the value of any dimension matches regex "__.*__",
+// the partition is reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented contexts.
+//
+// Dataset ID:
+// A dataset id's value must never be "".
+// A dataset id's value must match
+// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99})
+message PartitionId {
+  // The dataset ID.
+  optional string dataset_id = 3;
+  // The namespace.
+  optional string namespace = 4;
+}
+
+// A unique identifier for an entity.
+// If a key's partition id or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+message Key {
+  // Entities are partitioned into subsets, currently identified by a dataset
+  // (usually implicitly specified by the project) and namespace ID.
+  // Queries are scoped to a single partition.
+  optional PartitionId partition_id = 1;
+
+  // A (kind, ID/name) pair used to construct a key path.
+  //
+  // At most one of name or ID may be set.
+  // If either is set, the element is complete.
+  // If neither is set, the element is incomplete.
+  message PathElement {
+    // The kind of the entity.
+    // A kind matching regex "__.*__" is reserved/read-only.
+    // A kind must not contain more than 500 characters.
+    // Cannot be "".
+    required string kind = 1;
+    // The ID of the entity.
+    // Never equal to zero. Values less than zero are discouraged and will not
+    // be supported in the future.
+    optional int64 id = 2;
+    // The name of the entity.
+    // A name matching regex "__.*__" is reserved/read-only.
+    // A name must not be more than 500 characters.
+    // Cannot be "".
+    optional string name = 3;
+  }
+
+  // The entity path.
+  // An entity path consists of one or more elements composed of a kind and a
+  // string or numerical identifier, which identify entities. The first
+  // element identifies a <em>root entity</em>, the second element identifies
+  // a <em>child</em> of the root entity, the third element a child of the
+  // second entity, and so forth. The entities identified by all prefixes of
+  // the path are called the element's <em>ancestors</em>.
+  // An entity path is always fully complete: ALL of the entity's ancestors
+  // are required to be in the path along with the entity identifier itself.
+  // The only exception is that in some documented cases, the identifier in the
+  // last path element (for the entity) itself may be omitted. A path can never
+  // be empty.
+  repeated PathElement path_element = 2;
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+//
+// At most one of the <type>Value fields may be set.
+// If none are set the value is "null".
+//
+message Value {
+  // A boolean value.
+  optional bool boolean_value = 1;
+  // An integer value.
+  optional int64 integer_value = 2;
+  // A double value.
+  optional double double_value = 3;
+  // A timestamp value.
+  optional int64 timestamp_microseconds_value = 4;
+  // A key value.
+  optional Key key_value  = 5;
+  // A blob key value.
+  optional string blob_key_value = 16;
+  // A UTF-8 encoded string value.
+  optional string string_value = 17;
+  // A blob value.
+  optional bytes blob_value = 18;
+  // An entity value.
+  // May have no key.
+  // May have a key with an incomplete key path.
+  // May have a reserved/read-only key.
+  optional Entity entity_value = 6;
+  // A list value.
+  // Cannot contain another list value.
+  // Cannot also have a meaning and indexing set.
+  repeated Value list_value = 7;
+
+  // The <code>meaning</code> field is reserved and should not be used.
+  optional int32 meaning = 14;
+
+  // If the value should be indexed.
+  //
+  // The <code>indexed</code> property may be set for a
+  // <code>null</code> value.
+  // When <code>indexed</code> is <code>true</code>, <code>stringValue</code>
+  // is limited to 500 characters and the blob value is limited to 500 bytes.
+  // Exception: If meaning is set to 2, string_value is limited to 2038
+  // characters regardless of indexed.
+  // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16
+  // will be ignored on input (and will never be set on output).
+  // Input values by default have <code>indexed</code> set to
+  // <code>true</code>; however, you can explicitly set <code>indexed</code> to
+  // <code>true</code> if you want. (An output value never has
+  // <code>indexed</code> explicitly set to <code>true</code>.) If a value is
+  // itself an entity, it cannot have <code>indexed</code> set to
+  // <code>true</code>.
+  // Exception: An entity value with meaning 9, 20 or 21 may be indexed.
+  optional bool indexed = 15 [default = true];
+}
+
+// An entity property.
+message Property {
+  // The name of the property.
+  // A property name matching regex "__.*__" is reserved.
+  // A reserved property name is forbidden in certain documented contexts.
+  // The name must not contain more than 500 characters.
+  // Cannot be "".
+  required string name = 1;
+
+  // The value(s) of the property.
+  // Each value can have only one value property populated. For example,
+  // you cannot have a values list of <code>{ value: { integerValue: 22,
+  // stringValue: "a" } }</code>, but you can have <code>{ value: { listValue:
+  // [ { integerValue: 22 }, { stringValue: "a" } ] } }</code>.
+  required Value value = 4;
+}
+
+// An entity.
+//
+// An entity is limited to 1 megabyte when stored. That <em>roughly</em>
+// corresponds to a limit of 1 megabyte for the serialized form of this
+// message.
+message Entity {
+  // The entity's key.
+  //
+  // An entity must have a key, unless otherwise documented (for example,
+  // an entity in <code>Value.entityValue</code> may have no key).
+  // An entity's kind is its key's path's last element's kind,
+  // or null if it has no key.
+  optional Key key = 1;
+  // The entity's properties.
+  // Each property's name must be unique for its entity.
+  repeated Property property = 2;
+}
+
+// The result of fetching an entity from the datastore.
+message EntityResult {
+  // Specifies what data the 'entity' field contains.
+  // A ResultType is either implied (for example, in LookupResponse.found it
+  // is always FULL) or specified by context (for example, in message
+  // QueryResultBatch, field 'entity_result_type' specifies a ResultType
+  // for all the values in field 'entity_result').
+  enum ResultType {
+    FULL = 1;  // The entire entity.
+    PROJECTION = 2;  // A projected subset of properties.
+                     // The entity may have no key.
+                     // A property value may have meaning 18.
+    KEY_ONLY = 3;  // Only the key.
+  }
+
+  // The resulting entity.
+  required Entity entity = 1;
+}
+
+// A query.
+message Query {
+  // The projection to return. If not set the entire entity is returned.
+  repeated PropertyExpression projection = 2;
+
+  // The kinds to query (if empty, returns entities from all kinds).
+  repeated KindExpression kind = 3;
+
+  // The filter to apply (optional).
+  optional Filter filter = 4;
+
+  // The order to apply to the query results (if empty, order is unspecified).
+  repeated PropertyOrder order = 5;
+
+  // The properties to group by (if empty, no grouping is applied to the
+  // result set).
+  repeated PropertyReference group_by = 6;
+
+  // A starting point for the query results. Optional. Query cursors are
+  // returned in query result batches.
+  optional bytes /* serialized QueryCursor */ start_cursor = 7;
+
+  // An ending point for the query results. Optional. Query cursors are
+  // returned in query result batches.
+  optional bytes /* serialized QueryCursor */ end_cursor = 8;
+
+  // The number of results to skip. Applies before limit, but after all other
+  // constraints (optional, defaults to 0).
+  optional int32 offset = 10 [default=0];
+
+  // The maximum number of results to return. Applies after all other
+  // constraints. Optional.
+  optional int32 limit = 11;
+}
+
+// A representation of a kind.
+message KindExpression {
+  // The name of the kind.
+  required string name = 1;
+}
+
+// A reference to a property relative to the kind expressions.
+// The property name must match exactly.
+message PropertyReference {
+  // The name of the property.
+  required string name = 2;
+}
+
+// A representation of a property in a projection.
+message PropertyExpression {
+  enum AggregationFunction {
+    FIRST = 1;
+  }
+  // The property to project.
+  required PropertyReference property = 1;
+  // The aggregation function to apply to the property. Optional.
+  // Can only be used when grouping by at least one property. Must
+  // then be set on all properties in the projection that are not
+  // being grouped by.
+  optional AggregationFunction aggregation_function = 2;
+}
+
+// The desired order for a specific property.
+message PropertyOrder {
+  enum Direction {
+    ASCENDING = 1;
+    DESCENDING = 2;
+  }
+  // The property to order by.
+  required PropertyReference property = 1;
+  // The direction to order by.
+  optional Direction direction = 2 [default=ASCENDING];
+}
+
+// A holder for any type of filter. Exactly one field should be specified.
+message Filter {
+  // A composite filter.
+  optional CompositeFilter composite_filter = 1;
+  // A filter on a property.
+  optional PropertyFilter property_filter = 2;
+}
+
+// A filter that merges the multiple other filters using the given operation.
+message CompositeFilter {
+  enum Operator {
+    AND = 1;
+  }
+
+  // The operator for combining multiple filters.
+  required Operator operator = 1;
+  // The list of filters to combine.
+  // Must contain at least one filter.
+  repeated Filter filter = 2;
+}
+
+// A filter on a specific property.
+message PropertyFilter {
+  enum Operator {
+    LESS_THAN = 1;
+    LESS_THAN_OR_EQUAL = 2;
+    GREATER_THAN = 3;
+    GREATER_THAN_OR_EQUAL = 4;
+    EQUAL = 5;
+
+    HAS_ANCESTOR = 11;
+  }
+
+  // The property to filter by.
+  required PropertyReference property = 1;
+  // The operator to filter by.
+  required Operator operator = 2;
+  // The value to compare the property to.
+  required Value value = 3;
+}
+
+// A GQL query.
+message GqlQuery {
+  required string query_string = 1;
+  // When false, the query string must not contain a literal.
+  optional bool allow_literal = 2 [default = false];
+  // A named argument must set field GqlQueryArg.name.
+  // No two named arguments may have the same name.
+  // For each non-reserved named binding site in the query string,
+  // there must be a named argument with that name,
+  // but not necessarily the inverse.
+  repeated GqlQueryArg name_arg = 3;
+  // Numbered binding site @1 references the first numbered argument,
+  // effectively using 1-based indexing, rather than the usual 0.
+  // A numbered argument must NOT set field GqlQueryArg.name.
+  // For each binding site numbered i in query_string,
+  // there must be an ith numbered argument.
+  // The inverse must also be true.
+  repeated GqlQueryArg number_arg = 4;
+}
+
+// A binding argument for a GQL query.
+// Exactly one of fields value and cursor must be set.
+message GqlQueryArg {
+  // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*".
+  // Must not match regex "__.*__".
+  // Must not be "".
+  optional string name = 1;
+  optional Value value = 2;
+  optional bytes cursor = 3;
+}
+
+// A batch of results produced by a query.
+message QueryResultBatch {
+  // The possible values for the 'more_results' field.
+  enum MoreResultsType {
+    NOT_FINISHED = 1;  // There are additional batches to fetch from this query.
+    MORE_RESULTS_AFTER_LIMIT = 2;  // The query is finished, but there are more
+                                   // results after the limit.
+    NO_MORE_RESULTS = 3;  // The query has been exhausted.
+  }
+
+  // The result type for every entity in entityResults.
+  required EntityResult.ResultType entity_result_type = 1;
+  // The results for this batch.
+  repeated EntityResult entity_result = 2;
+
+  // A cursor that points to the position after the last result in the batch.
+  // May be absent.
+  optional bytes /* serialized QueryCursor */ end_cursor = 4;
+
+  // The state of the query after the current batch.
+  required MoreResultsType more_results = 5;
+
+  // The number of results skipped because of <code>Query.offset</code>.
+  optional int32 skipped_results = 6;
+}
+
+// A set of changes to apply.
+//
+// No entity in this message may have a reserved property name,
+// not even a property in an entity in a value.
+// No value in this message may have meaning 18,
+// not even a value in an entity in another value.
+//
+// If entities with duplicate keys are present, an arbitrary choice will
+// be made as to which is written.
+message Mutation {
+  // Entities to upsert.
+  // Each upserted entity's key must have a complete path and
+  // must not be reserved/read-only.
+  repeated Entity upsert = 1;
+  // Entities to update.
+  // Each updated entity's key must have a complete path and
+  // must not be reserved/read-only.
+  repeated Entity update = 2;
+  // Entities to insert.
+  // Each inserted entity's key must have a complete path and
+  // must not be reserved/read-only.
+  repeated Entity insert = 3;
+  // Insert entities with a newly allocated ID.
+  // Each inserted entity's key must omit the final identifier in its path and
+  // must not be reserved/read-only.
+  repeated Entity insert_auto_id = 4;
+  // Keys of entities to delete.
+  // Each key must have a complete key path and must not be reserved/read-only.
+  repeated Key delete = 5;
+  // Ignore a user specified read-only period. Optional.
+  optional bool force = 6;
+}
+
+// The result of applying a mutation.
+message MutationResult {
+  // Number of index writes.
+  required int32 index_updates = 1;
+  // Keys for <code>insertAutoId</code> entities. One per entity from the
+  // request, in the same order.
+  repeated Key insert_auto_id_key = 2;
+}
+
+// Options shared by read requests.
+message ReadOptions {
+  enum ReadConsistency {
+    DEFAULT = 0;
+    STRONG = 1;
+    EVENTUAL = 2;
+  }
+
+  // The read consistency to use.
+  // Cannot be set when transaction is set.
+  // Lookup and ancestor queries default to STRONG, global queries default to
+  // EVENTUAL and cannot be set to STRONG.
+  optional ReadConsistency read_consistency = 1 [default=DEFAULT];
+
+  // The transaction to use. Optional.
+  optional bytes /* serialized Transaction */ transaction = 2;
+}
+
+// The request for Lookup.
+message LookupRequest {
+
+  // Options for this lookup request. Optional.
+  optional ReadOptions read_options = 1;
+  // Keys of entities to look up from the datastore.
+  repeated Key key = 3;
+}
+
+// The response for Lookup.
+message LookupResponse {
+
+  // The order of results in these fields is undefined and has no relation to
+  // the order of the keys in the input.
+
+  // Entities found as ResultType.FULL entities.
+  repeated EntityResult found = 1;
+
+  // Entities not found as ResultType.KEY_ONLY entities.
+  repeated EntityResult missing = 2;
+
+  // A list of keys that were not looked up due to resource constraints.
+  repeated Key deferred = 3;
+}
+
+
+// The request for RunQuery.
+message RunQueryRequest {
+
+  // The options for this query.
+  optional ReadOptions read_options = 1;
+
+  // Entities are partitioned into subsets, identified by a dataset (usually
+  // implicitly specified by the project) and namespace ID. Queries are scoped
+  // to a single partition.
+  // This partition ID is normalized with the standard default context
+  // partition ID, but all other partition IDs in RunQueryRequest are
+  // normalized with this partition ID as the context partition ID.
+  optional PartitionId partition_id = 2;
+
+  // The query to run.
+  // Either this field or field gql_query must be set, but not both.
+  optional Query query = 3;
+  // The GQL query to run.
+  // Either this field or field query must be set, but not both.
+  optional GqlQuery gql_query = 7;
+}
+
+// The response for RunQuery.
+message RunQueryResponse {
+
+  // A batch of query results (always present).
+  optional QueryResultBatch batch = 1;
+
+}
+
+// The request for BeginTransaction.
+message BeginTransactionRequest {
+
+  enum IsolationLevel {
+    SNAPSHOT = 0;  // Read from a consistent snapshot. Concurrent transactions
+                   // conflict if their mutations conflict. For example:
+                   // Read(A),Write(B) may not conflict with Read(B),Write(A),
+                   // but Read(B),Write(B) does conflict with Read(B),Write(B).
+    SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent
+                      // transactions conflict if they cannot be serialized.
+                      // For example Read(A),Write(B) does conflict with
+                      // Read(B),Write(A) but Read(A) may not conflict with
+                      // Write(A).
+  }
+
+  // The transaction isolation level.
+  optional IsolationLevel isolation_level = 1 [default=SNAPSHOT];
+}
+
+// The response for BeginTransaction.
+message BeginTransactionResponse {
+
+  // The transaction identifier (always present).
+  optional bytes /* serialized Transaction */ transaction = 1;
+}
+
+// The request for Rollback.
+message RollbackRequest {
+
+  // The transaction identifier, returned by a call to
+  // <code>beginTransaction</code>.
+  required bytes /* serialized Transaction */ transaction = 1;
+}
+
+// The response for Rollback.
+message RollbackResponse {
+// Empty
+}
+
+// The request for Commit.
+message CommitRequest {
+
+  enum Mode {
+    TRANSACTIONAL = 1;
+    NON_TRANSACTIONAL = 2;
+  }
+
+  // The transaction identifier, returned by a call to
+  // <code>beginTransaction</code>. Must be set when mode is TRANSACTIONAL.
+  optional bytes /* serialized Transaction */ transaction = 1;
+  // The mutation to perform. Optional.
+  optional Mutation mutation = 2;
+  // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL.
+  optional Mode mode = 5 [default=TRANSACTIONAL];
+}
+
+// The response for Commit.
+message CommitResponse {
+
+  // The result of performing the mutation (if any).
+  optional MutationResult mutation_result = 1;
+}
+
+// The request for AllocateIds.
+message AllocateIdsRequest {
+
+  // A list of keys with incomplete key paths to allocate IDs for.
+  // No key may be reserved/read-only.
+  repeated Key key = 1;
+}
+
+// The response for AllocateIds.
+message AllocateIdsResponse {
+
+  // The keys specified in the request (in the same order), each with
+  // its key path completed with a newly allocated ID.
+  repeated Key key = 1;
+}
+
+// Each rpc normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// (Note that this applies to all entities, including entities in values.)
+service DatastoreService {
+  // Look up some entities by key.
+  rpc Lookup(LookupRequest) returns (LookupResponse) {
+  };
+  // Query for entities.
+  rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+  };
+  // Begin a new transaction.
+  rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+  };
+  // Commit a transaction, optionally creating, deleting or modifying some
+  // entities.
+  rpc Commit(CommitRequest) returns (CommitResponse) {
+  };
+  // Roll back a transaction.
+  rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+  };
+  // Allocate IDs for incomplete keys (useful for referencing an entity before
+  // it is inserted).
+  rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+  };
+}

+ 57 - 0
Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go

@@ -0,0 +1,57 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
+package testutil
+
+import (
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2"
+	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+)
+
+const (
+	envProjID     = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
+	envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
+)
+
+func Context(scopes ...string) context.Context {
+	key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID)
+	if key == "" || projID == "" {
+		log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
+	}
+	jsonKey, err := ioutil.ReadFile(key)
+	if err != nil {
+		log.Fatalf("Cannot read the JSON key file, err: %v", err)
+	}
+	conf, err := google.JWTConfigFromJSON(jsonKey, scopes...)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return cloud.NewContext(projID, conf.Client(oauth2.NoContext))
+}
+
+func NoAuthContext() context.Context {
+	projID := os.Getenv(envProjID)
+	if projID == "" {
+		log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
+	}
+	return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport})
+}

+ 10 - 0
Godeps/_workspace/src/google.golang.org/grpc/.travis.yml

@@ -0,0 +1,10 @@
+sudo: false
+
+language: go
+
+install:
+  - go get -v -t -d google.golang.org/grpc/...
+
+script:
+  - go test -v -cpu 1,4 google.golang.org/grpc/...
+  - go test -v -race -cpu 1,4 google.golang.org/grpc/...

+ 27 - 0
Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md

@@ -0,0 +1,27 @@
+# How to contribute
+
+We definitely welcome patches and contributions to grpc! Here are some guidelines
+and information about how to do so.
+
+## Getting started
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+### Filing Issues
+When filing an issue, make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+### Contributing code
+Please read the Contribution Guidelines before sending patches.
+
+We will not accept GitHub pull requests once Gerrit is set up (we will use Gerrit instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.

+ 28 - 0
Godeps/_workspace/src/google.golang.org/grpc/LICENSE

@@ -0,0 +1,28 @@
+Copyright 2014, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 22 - 0
Godeps/_workspace/src/google.golang.org/grpc/PATENTS

@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the GRPC project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of GRPC, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of GRPC.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of GRPC or any code incorporated within this
+implementation of GRPC constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of GRPC
+shall terminate as of the date such litigation is filed.

Some files were not shown because too many files changed in this diff