
Merge pull request #6741 from sinsharat/clientv3_add_client_side_metrics

clientv3: added client side metrics support
Xiang Li 9 years ago
parent commit 89107a49fa

+ 5 - 0
clientv3/client.go

@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
@@ -247,6 +248,10 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
 		opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
 	}
 
+	// add metrics options
+	opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
+	opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
+
 	conn, err := grpc.Dial(host, opts...)
 	if err != nil {
 		return nil, err

+ 197 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore

@@ -0,0 +1,197 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### Kate template
+# Swap Files #
+.*.kate-swp
+.swp.*
+### SublimeText template
+# cache files for sublime text
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+
+# workspace files are user-specific
+*.sublime-workspace
+
+# project files should be checked into the repository, unless a significant
+# proportion of contributors will probably not be using SublimeText
+# *.sublime-project
+
+# sftp configuration file
+sftp-config.json
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## Build generated
+build/
+DerivedData/
+
+## Various settings
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+xcuserdata/
+
+## Other
+*.moved-aside
+*.xccheckout
+*.xcscmblueprint
+### Eclipse template
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# Eclipse Core
+.project
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+

+ 13 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml

@@ -0,0 +1,13 @@
+language: go
+go:
+  - 1.6
+  - 1.7
+
+install:
+  - go get github.com/prometheus/client_golang/prometheus
+  - go get google.golang.org/grpc
+  - go get golang.org/x/net/context
+  - go get github.com/stretchr/testify
+
+script:
+ - go test -race -v ./...

+ 201 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE

@@ -0,0 +1,201 @@
+                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 245 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md

@@ -0,0 +1,245 @@
+# Go gRPC Interceptors for Prometheus monitoring 
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed on to the user's application logic. Interceptors are a perfect way to implement
+common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    // Initialize your gRPC server's interceptor.
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+    )
+    // Register your gRPC service implementations.
+    myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
+    // After all your registrations, make sure all of the Prometheus metrics are initialized.
+    grpc_prometheus.Register(myServer)
+    // Register Prometheus metrics handler.    
+    http.Handle("/metrics", prometheus.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+   clientConn, err = grpc.Dial(
+       address,
+       grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+       grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+   )
+   client = pb_testproto.NewTestServiceClient(clientConn)
+   resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
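The client-side interceptors record into the same default registry, so a client process can expose its metrics exactly as the server does. A minimal sketch, assuming an HTTP listener in the client process (the `:9092` port is an arbitrary example, not prescribed by this package):

```go
// Expose grpc_client_* metrics from the client process.
http.Handle("/metrics", prometheus.Handler())
go http.ListenAndServe(":9092", nil) // example port
```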
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as the Prometheus subsystem name; all client-side metrics start with `grpc_client`.
+The two are mirror concepts, and all methods carry the same rich labels:
+  
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of the protobuf
+    `package` and the `service` name. E.g. for `package = mwitkow.testproto` and
+    `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.  
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between these is important, especially for latency measurements.
+
+     - `unary` is a single-request, single-response RPC
+     - `client_stream` is a multi-request, single-response RPC
+     - `server_stream` is a single-request, multi-response RPC
+     - `bidi_stream` is a multi-request, multi-response RPC
+    
+
+Additionally, for completed RPCs the following label is used:
+
+  * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+    The full list of statuses is too long, but here are some common ones:
+
+      - `OK` - the RPC was successful
+      - `InvalidArgument` - the RPC contained bad values
+      - `Internal` - a server-side error not disclosed to the clients
+      
+## Counters
+
+The counters and their up-to-date documentation live in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
+their values are exported on the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss the `grpc_server` metrics; the `grpc_client` ones mirror them.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call, it increments
+`grpc_server_started_total` and starts the handling-time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request 
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on 
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or another [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default. To enable them, call the following
+in your server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. It contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method 
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for 
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
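The same opt-in exists on the client side: this package's `client_reporter.go` provides `EnableClientHandlingTimeHistogram`, which must be called during client initialization before latency histograms are recorded. A minimal sketch:

```go
// Opt in to grpc_client_handling_seconds before issuing any RPCs.
grpc_prometheus.EnableClientHandlingTimeHistogram()
```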
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
+let the aggregations be handled there. The verbosity of the above metrics makes that
+flexibility possible. Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the 
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
+this is a combination of the two above examples. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all `
+server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows 
+you to track when clients started to send "wide" queries that ret
+Note the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99th-percentile latency of unary requests
+```jsoniq
+histogram_quantile(0.99, 
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile
+estimation will take samples in a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system 
+(e.g. impact of caching).
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` 
+seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
+buckets, meaning that counting "fast" requests fractions is easier. However, simple maths helps.
+This is an example of a query you would like to alert on in your system for SLA violations, 
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring of *production* gRPC microservices at [Improbable](https://improbable.io).
+
+## License
+
+`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.

+ 72 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go

@@ -0,0 +1,72 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for client-side gRPC.
+
+package grpc_prometheus
+
+import (
+	"io"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	monitor := newClientReporter(Unary, method)
+	monitor.SentMessage()
+	err := invoker(ctx, method, req, reply, cc, opts...)
+	if err == nil { // a reply message is only received when the RPC succeeds
+		monitor.ReceivedMessage()
+	}
+	monitor.Handled(grpc.Code(err))
+	return err
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	monitor := newClientReporter(clientStreamType(desc), method)
+	clientStream, err := streamer(ctx, desc, cc, method, opts...)
+	if err != nil {
+		monitor.Handled(grpc.Code(err))
+		return nil, err
+	}
+	return &monitoredClientStream{clientStream, monitor}, nil
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+	grpc.ClientStream
+	monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+	err := s.ClientStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+	err := s.ClientStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	} else if err == io.EOF {
+		s.monitor.Handled(codes.OK)
+	} else {
+		s.monitor.Handled(grpc.Code(err))
+	}
+	return err
+}

+ 111 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go

@@ -0,0 +1,111 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	clientStartedCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "client",
+			Name:      "started_total",
+			Help:      "Total number of RPCs started on the client.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	clientHandledCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "client",
+			Name:      "handled_total",
+			Help:      "Total number of RPCs completed by the client, regardless of success or failure.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
+
+	clientStreamMsgReceived = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "client",
+			Name:      "msg_received_total",
+			Help:      "Total number of RPC stream messages received by the client.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	clientStreamMsgSent = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "client",
+			Name:      "msg_sent_total",
+			Help:      "Total number of gRPC stream messages sent by the client.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	clientHandledHistogramEnabled = false
+	clientHandledHistogramOpts    = prom.HistogramOpts{
+		Namespace: "grpc",
+		Subsystem: "client",
+		Name:      "handling_seconds",
+		Help:      "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
+		Buckets:   prom.DefBuckets,
+	}
+	clientHandledHistogram *prom.HistogramVec
+)
+
+func init() {
+	prom.MustRegister(clientStartedCounter)
+	prom.MustRegister(clientHandledCounter)
+	prom.MustRegister(clientStreamMsgReceived)
+	prom.MustRegister(clientStreamMsgSent)
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&clientHandledHistogramOpts)
+	}
+	if !clientHandledHistogramEnabled {
+		clientHandledHistogram = prom.NewHistogramVec(
+			clientHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+		prom.Register(clientHandledHistogram)
+	}
+	clientHandledHistogramEnabled = true
+}
+
+type clientReporter struct {
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{rpcType: rpcType}
+	if clientHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *clientReporter) ReceivedMessage() {
+	clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) SentMessage() {
+	clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) Handled(code codes.Code) {
+	clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if clientHandledHistogramEnabled {
+		clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
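`EnableClientHandlingTimeHistogram` accepts `HistogramOption` values; the type is defined elsewhere in this package and is not part of this diff. Assuming the package provides a bucket-override option — upstream names it `WithHistogramBuckets`, an assumption here, not shown in this commit — custom latency buckets could be set like this sketch:

```go
// Sketch: override the default buckets before any RPC is issued.
// WithHistogramBuckets is assumed from the package's util code (not in this diff).
grpc_prometheus.EnableClientHandlingTimeHistogram(
	grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 1.2, 5}),
)
```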

+ 212 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_test.go

@@ -0,0 +1,212 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"net"
+	"testing"
+
+	"time"
+
+	"io"
+
+	pb_testproto "github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+func TestClientInterceptorSuite(t *testing.T) {
+	suite.Run(t, &ClientInterceptorTestSuite{})
+}
+
+type ClientInterceptorTestSuite struct {
+	suite.Suite
+
+	serverListener net.Listener
+	server         *grpc.Server
+	clientConn     *grpc.ClientConn
+	testClient     pb_testproto.TestServiceClient
+	ctx            context.Context
+}
+
+func (s *ClientInterceptorTestSuite) SetupSuite() {
+	var err error
+
+	EnableClientHandlingTimeHistogram()
+
+	s.serverListener, err = net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(s.T(), err, "must be able to allocate a port for serverListener")
+
+	// This is the point where we hook up the interceptor
+	s.server = grpc.NewServer()
+	pb_testproto.RegisterTestServiceServer(s.server, &testService{t: s.T()})
+
+	go func() {
+		s.server.Serve(s.serverListener)
+	}()
+
+	s.clientConn, err = grpc.Dial(
+		s.serverListener.Addr().String(),
+		grpc.WithInsecure(),
+		grpc.WithBlock(),
+		grpc.WithUnaryInterceptor(UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(StreamClientInterceptor),
+		grpc.WithTimeout(2*time.Second))
+	require.NoError(s.T(), err, "must not error on client Dial")
+	s.testClient = pb_testproto.NewTestServiceClient(s.clientConn)
+}
+
+func (s *ClientInterceptorTestSuite) SetupTest() {
+	// Make all RPC calls last at most 2 sec, so that async issues or deadlocks do not hang the tests.
+	s.ctx, _ = context.WithTimeout(context.TODO(), 2*time.Second)
+}
+
+func (s *ClientInterceptorTestSuite) TearDownSuite() {
+	if s.serverListener != nil {
+		s.server.Stop()
+		s.T().Logf("stopped grpc.Server at: %v", s.serverListener.Addr().String())
+		s.serverListener.Close()
+
+	}
+	if s.clientConn != nil {
+		s.clientConn.Close()
+	}
+}
+
+func (s *ClientInterceptorTestSuite) TestUnaryIncrementsStarted() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_started_total", "PingEmpty", "unary")
+	s.testClient.PingEmpty(s.ctx, &pb_testproto.Empty{})
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_started_total", "PingEmpty", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_started_total should be incremented for PingEmpty")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_started_total", "PingError", "unary")
+	s.testClient.PingError(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.Unavailable)})
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_started_total", "PingError", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_started_total should be incremented for PingError")
+}
+
+func (s *ClientInterceptorTestSuite) TestUnaryIncrementsHandled() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingEmpty", "unary", "OK")
+	s.testClient.PingEmpty(s.ctx, &pb_testproto.Empty{}) // should return with code=OK
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingEmpty", "unary", "OK")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handled_count should be incremented for PingEmpty")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingError", "unary", "FailedPrecondition")
+	s.testClient.PingError(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingError", "unary", "FailedPrecondition")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handled_total should be incremented for PingError")
+}
+
+func (s *ClientInterceptorTestSuite) TestUnaryIncrementsHistograms() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingEmpty", "unary")
+	s.testClient.PingEmpty(s.ctx, &pb_testproto.Empty{}) // should return with code=OK
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingEmpty", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handled_count should be incremented for PingEmpty")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingError", "unary")
+	s.testClient.PingError(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingError", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handling_seconds_count should be incremented for PingError")
+}
+
+func (s *ClientInterceptorTestSuite) TestStreamingIncrementsStarted() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_started_total", "PingList", "server_stream")
+	s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{})
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_started_total", "PingList", "server_stream")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_started_total should be incremented for PingList")
+}
+
+func (s *ClientInterceptorTestSuite) TestStreamingIncrementsHistograms() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingList", "server_stream")
+	ss, _ := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{}) // should return with code=OK
+	// Do a read, just for kicks.
+	for {
+		_, err := ss.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(s.T(), err, "reading pingList shouldn't fail")
+	}
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingList", "server_stream")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handling_seconds_count should be incremented for PingList OK")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingList", "server_stream")
+	ss, err := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	require.NoError(s.T(), err, "PingList must not fail immedietely")
+	// Do a read, just to progate errors.
+	_, err = ss.Recv()
+	require.Equal(s.T(), codes.FailedPrecondition, grpc.Code(err), "Recv must return FailedPrecondition, otherwise the test is wrong")
+
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handling_seconds_count", "PingList", "server_stream")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handling_seconds_count should be incremented for PingList FailedPrecondition")
+}
+
+func (s *ClientInterceptorTestSuite) TestStreamingIncrementsHandled() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingList", "server_stream", "OK")
+	ss, _ := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{}) // should return with code=OK
+	// Do a read, just for kicks.
+	for {
+		_, err := ss.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(s.T(), err, "reading pingList shouldn't fail")
+	}
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingList", "server_stream", "OK")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handled_total should be incremented for PingList OK")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingList", "server_stream", "FailedPrecondition")
+	ss, err := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	require.NoError(s.T(), err, "PingList must not fail immedietely")
+	// Do a read, just to progate errors.
+	_, err = ss.Recv()
+	require.Equal(s.T(), codes.FailedPrecondition, grpc.Code(err), "Recv must return FailedPrecondition, otherwise the test is wrong")
+
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_client_handled_total", "PingList", "server_stream", "FailedPrecondition")
+	assert.EqualValues(s.T(), before+1, after, "grpc_client_handled_total should be incremented for PingList FailedPrecondition")
+}
+
+func (s *ClientInterceptorTestSuite) TestStreamingIncrementsMessageCounts() {
+	beforeRecv := sumCountersForMetricAndLabels(s.T(), "grpc_client_msg_received_total", "PingList", "server_stream")
+	beforeSent := sumCountersForMetricAndLabels(s.T(), "grpc_client_msg_sent_total", "PingList", "server_stream")
+	ss, _ := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{}) // should return with code=OK
+	// Do a read, just for kicks.
+	count := 0
+	for {
+		_, err := ss.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(s.T(), err, "reading pingList shouldn't fail")
+		count++
+	}
+	require.EqualValues(s.T(), countListResponses, count, "Number of received msg on the wire must match")
+	afterSent := sumCountersForMetricAndLabels(s.T(), "grpc_client_msg_sent_total", "PingList", "server_stream")
+	afterRecv := sumCountersForMetricAndLabels(s.T(), "grpc_client_msg_received_total", "PingList", "server_stream")
+
+	assert.EqualValues(s.T(), beforeSent+1, afterSent, "grpc_client_msg_sent_total should be incremented once (one request message) for PingList")
+	assert.EqualValues(s.T(), beforeRecv+countListResponses, afterRecv, "grpc_client_msg_received_total should be incremented countListResponses times for PingList")
+}
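The tests above rely on `sumCountersForMetricAndLabels` and `countListResponses`, which live in the package's shared test helpers and are not included in this diff. A minimal sketch of what such a helper could look like, assuming it scrapes the default registry's text exposition format (the real helper may differ, and `Contains` matching on label values is deliberately approximate):

```go
package grpc_prometheus

import (
	"net/http"
	"net/http/httptest"
	"strconv"
	"strings"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"
)

// sumCountersForMetricAndLabelsSketch scrapes the default registry's text
// exposition and sums every sample line that starts with metricName and
// contains all of the given label values.
func sumCountersForMetricAndLabelsSketch(t *testing.T, metricName string, labelValues ...string) int {
	req, err := http.NewRequest("GET", "/metrics", nil)
	require.NoError(t, err, "failed building scrape request")
	rec := httptest.NewRecorder()
	prometheus.Handler().ServeHTTP(rec, req)

	sum := 0
	for _, line := range strings.Split(rec.Body.String(), "\n") {
		if !strings.HasPrefix(line, metricName) {
			continue // skips # HELP/# TYPE comments and other metrics
		}
		matchesAll := true
		for _, v := range labelValues {
			if !strings.Contains(line, v) {
				matchesAll = false
				break
			}
		}
		if !matchesAll {
			continue
		}
		// The sample value is the last space-separated field on the line.
		value, err := strconv.ParseFloat(line[strings.LastIndex(line, " ")+1:], 64)
		require.NoError(t, err, "failed parsing sample value")
		sum += int(value)
	}
	return sum
}
```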

+ 10 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/Makefile

@@ -0,0 +1,10 @@
+all: test_go
+
+test_go: test.proto
+	PATH="${GOPATH}/bin:${PATH}" protoc \
+		-I. \
+		-I${GOPATH}/src \
+		--go_out=plugins=grpc:. \
+		test.proto
+
+

+ 294 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/test.pb.go

@@ -0,0 +1,294 @@
+// Code generated by protoc-gen-go.
+// source: test.proto
+// DO NOT EDIT!
+
+/*
+Package mwitkow_testproto is a generated protocol buffer package.
+
+It is generated from these files:
+	test.proto
+
+It has these top-level messages:
+	Empty
+	PingRequest
+	PingResponse
+*/
+package mwitkow_testproto
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Empty struct {
+}
+
+func (m *Empty) Reset()                    { *m = Empty{} }
+func (m *Empty) String() string            { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()               {}
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type PingRequest struct {
+	Value             string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+	SleepTimeMs       int32  `protobuf:"varint,2,opt,name=sleep_time_ms,json=sleepTimeMs" json:"sleep_time_ms,omitempty"`
+	ErrorCodeReturned uint32 `protobuf:"varint,3,opt,name=error_code_returned,json=errorCodeReturned" json:"error_code_returned,omitempty"`
+}
+
+func (m *PingRequest) Reset()                    { *m = PingRequest{} }
+func (m *PingRequest) String() string            { return proto.CompactTextString(m) }
+func (*PingRequest) ProtoMessage()               {}
+func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+type PingResponse struct {
+	Value   string `protobuf:"bytes,1,opt,name=Value,json=value" json:"Value,omitempty"`
+	Counter int32  `protobuf:"varint,2,opt,name=counter" json:"counter,omitempty"`
+}
+
+func (m *PingResponse) Reset()                    { *m = PingResponse{} }
+func (m *PingResponse) String() string            { return proto.CompactTextString(m) }
+func (*PingResponse) ProtoMessage()               {}
+func (*PingResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func init() {
+	proto.RegisterType((*Empty)(nil), "mwitkow.testproto.Empty")
+	proto.RegisterType((*PingRequest)(nil), "mwitkow.testproto.PingRequest")
+	proto.RegisterType((*PingResponse)(nil), "mwitkow.testproto.PingResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion3
+
+// Client API for TestService service
+
+type TestServiceClient interface {
+	PingEmpty(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PingResponse, error)
+	Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error)
+	PingError(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*Empty, error)
+	PingList(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (TestService_PingListClient, error)
+}
+
+type testServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient {
+	return &testServiceClient{cc}
+}
+
+func (c *testServiceClient) PingEmpty(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PingResponse, error) {
+	out := new(PingResponse)
+	err := grpc.Invoke(ctx, "/mwitkow.testproto.TestService/PingEmpty", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *testServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) {
+	out := new(PingResponse)
+	err := grpc.Invoke(ctx, "/mwitkow.testproto.TestService/Ping", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *testServiceClient) PingError(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := grpc.Invoke(ctx, "/mwitkow.testproto.TestService/PingError", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *testServiceClient) PingList(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (TestService_PingListClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/mwitkow.testproto.TestService/PingList", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &testServicePingListClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type TestService_PingListClient interface {
+	Recv() (*PingResponse, error)
+	grpc.ClientStream
+}
+
+type testServicePingListClient struct {
+	grpc.ClientStream
+}
+
+func (x *testServicePingListClient) Recv() (*PingResponse, error) {
+	m := new(PingResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for TestService service
+
+type TestServiceServer interface {
+	PingEmpty(context.Context, *Empty) (*PingResponse, error)
+	Ping(context.Context, *PingRequest) (*PingResponse, error)
+	PingError(context.Context, *PingRequest) (*Empty, error)
+	PingList(*PingRequest, TestService_PingListServer) error
+}
+
+func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) {
+	s.RegisterService(&_TestService_serviceDesc, srv)
+}
+
+func _TestService_PingEmpty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TestServiceServer).PingEmpty(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/mwitkow.testproto.TestService/PingEmpty",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TestServiceServer).PingEmpty(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _TestService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PingRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TestServiceServer).Ping(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/mwitkow.testproto.TestService/Ping",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TestServiceServer).Ping(ctx, req.(*PingRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _TestService_PingError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PingRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TestServiceServer).PingError(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/mwitkow.testproto.TestService/PingError",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TestServiceServer).PingError(ctx, req.(*PingRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _TestService_PingList_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(PingRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(TestServiceServer).PingList(m, &testServicePingListServer{stream})
+}
+
+type TestService_PingListServer interface {
+	Send(*PingResponse) error
+	grpc.ServerStream
+}
+
+type testServicePingListServer struct {
+	grpc.ServerStream
+}
+
+func (x *testServicePingListServer) Send(m *PingResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+var _TestService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "mwitkow.testproto.TestService",
+	HandlerType: (*TestServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "PingEmpty",
+			Handler:    _TestService_PingEmpty_Handler,
+		},
+		{
+			MethodName: "Ping",
+			Handler:    _TestService_Ping_Handler,
+		},
+		{
+			MethodName: "PingError",
+			Handler:    _TestService_PingError_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "PingList",
+			Handler:       _TestService_PingList_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: fileDescriptor0,
+}
+
+func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 273 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x90, 0xcf, 0x4b, 0xc3, 0x30,
+	0x14, 0xc7, 0xd7, 0x69, 0x9d, 0x7b, 0x75, 0x87, 0x45, 0x0f, 0xc5, 0x83, 0x96, 0x9c, 0x7a, 0x0a,
+	0xa2, 0x77, 0x2f, 0x22, 0x2a, 0x28, 0x4a, 0x1c, 0x5e, 0x8b, 0xb6, 0x0f, 0x09, 0x2e, 0x4d, 0x4d,
+	0x5e, 0x57, 0xfc, 0xdf, 0xfc, 0xe3, 0x24, 0x59, 0x05, 0x61, 0x0e, 0x3d, 0xec, 0x98, 0xcf, 0xf7,
+	0xf1, 0xfd, 0x11, 0x00, 0x42, 0x47, 0xa2, 0xb1, 0x86, 0x0c, 0x9b, 0xea, 0x4e, 0xd1, 0x9b, 0xe9,
+	0x84, 0x67, 0x01, 0xf1, 0x11, 0xc4, 0x97, 0xba, 0xa1, 0x0f, 0xde, 0x41, 0xf2, 0xa0, 0xea, 0x57,
+	0x89, 0xef, 0x2d, 0x3a, 0x62, 0x07, 0x10, 0x2f, 0x9e, 0xe7, 0x2d, 0xa6, 0x51, 0x16, 0xe5, 0x63,
+	0xb9, 0x7c, 0x30, 0x0e, 0x13, 0x37, 0x47, 0x6c, 0x0a, 0x52, 0x1a, 0x0b, 0xed, 0xd2, 0x61, 0x16,
+	0xe5, 0xb1, 0x4c, 0x02, 0x9c, 0x29, 0x8d, 0x77, 0x8e, 0x09, 0xd8, 0x47, 0x6b, 0x8d, 0x2d, 0x4a,
+	0x53, 0x61, 0x61, 0x91, 0x5a, 0x5b, 0x63, 0x95, 0x6e, 0x65, 0x51, 0x3e, 0x91, 0xd3, 0x20, 0x5d,
+	0x98, 0x0a, 0x65, 0x2f, 0xf0, 0x73, 0xd8, 0x5b, 0x06, 0xbb, 0xc6, 0xd4, 0x0e, 0x7d, 0xf2, 0xd3,
+	0x6a, 0x72, 0x0a, 0xa3, 0xd2, 0xb4, 0x35, 0xa1, 0xed, 0x33, 0xbf, 0x9f, 0xa7, 0x9f, 0x43, 0x48,
+	0x66, 0xe8, 0xe8, 0x11, 0xed, 0x42, 0x95, 0xc8, 0xae, 0x61, 0xec, 0xfd, 0xc2, 0x2a, 0x96, 0x8a,
+	0x95, 0xc9, 0x22, 0x28, 0x87, 0xc7, 0xbf, 0x28, 0x3f, 0x7b, 0xf0, 0x01, 0xbb, 0x81, 0x6d, 0x4f,
+	0xd8, 0xd1, 0xda, 0xd3, 0xf0, 0x57, 0xff, 0xb1, 0xba, 0xea, 0x4b, 0xf9, 0xf5, 0x7f, 0xfa, 0xad,
+	0x2d, 0xcd, 0x07, 0xec, 0x1e, 0x76, 0xfd, 0xe9, 0xad, 0x72, 0xb4, 0x81, 0x5e, 0x27, 0xd1, 0xcb,
+	0x4e, 0xe0, 0x67, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x38, 0x3e, 0x02, 0xe9, 0x28, 0x02, 0x00,
+	0x00,
+}

+ 28 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/test.proto

@@ -0,0 +1,28 @@
+syntax = "proto3";
+
+package mwitkow.testproto;
+
+
+message Empty {
+}
+
+message PingRequest {
+  string value = 1;
+  int32 sleep_time_ms = 2;
+  uint32 error_code_returned = 3;
+}
+
+message PingResponse {
+  string Value = 1;
+  int32 counter = 2;
+}
+
+service TestService {
+  rpc PingEmpty(Empty) returns (PingResponse) {}
+
+  rpc Ping(PingRequest) returns (PingResponse) {}
+
+  rpc PingError(PingRequest) returns (Empty) {}
+
+  rpc PingList(PingRequest) returns (stream PingResponse) {}
+}

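A consumer of these generated bindings dials a server that registers TestService and calls the typed client. A minimal sketch, assuming an illustrative address (NewTestServiceClient and the message types come from the generated test.pb.go above):

	package main

	import (
		"fmt"
		"time"

		pb "github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto"
		"golang.org/x/net/context"
		"google.golang.org/grpc"
	)

	func main() {
		// 127.0.0.1:9090 is illustrative; any server registering TestService works.
		conn, err := grpc.Dial("127.0.0.1:9090", grpc.WithInsecure(), grpc.WithTimeout(2*time.Second))
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		client := pb.NewTestServiceClient(conn)
		resp, err := client.Ping(context.Background(), &pb.PingRequest{Value: "hello"})
		if err != nil {
			panic(err)
		}
		fmt.Println(resp.Value, resp.Counter)
	}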
+ 74 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go

@@ -0,0 +1,74 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// Prometheus monitoring interceptors for server-side gRPC handlers.
+
+package grpc_prometheus
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics) and should be
+// called *after* all services have been registered with the server.
+func Register(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(serviceName, &mInfo)
+		}
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	monitor := newServerReporter(Unary, info.FullMethod)
+	monitor.ReceivedMessage()
+	resp, err := handler(ctx, req)
+	monitor.Handled(grpc.Code(err))
+	if err == nil {
+		monitor.SentMessage()
+	}
+	return resp, err
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	monitor := newServerReporter(streamRpcType(info), info.FullMethod)
+	err := handler(srv, &monitoredServerStream{ss, monitor})
+	monitor.Handled(grpc.Code(err))
+	return err
+}
+
+func streamRpcType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream so that each sent or received message increments the corresponding counter.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}

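Putting server.go together: the interceptors are supplied when the grpc.Server is constructed, and Register runs once every service is attached, as the doc comment requires. A minimal sketch, assuming illustrative addresses and an elided service implementation:

	package main

	import (
		"net"
		"net/http"

		grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
		"github.com/prometheus/client_golang/prometheus"
		"google.golang.org/grpc"
	)

	func main() {
		srv := grpc.NewServer(
			grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
			grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
		)
		// ... pb.RegisterTestServiceServer(srv, &impl{}) for each service ...
		grpc_prometheus.Register(srv) // pre-populate per-method series at 0

		lis, err := net.Listen("tcp", "127.0.0.1:9090") // illustrative address
		if err != nil {
			panic(err)
		}
		// Expose the Prometheus scrape endpoint on a separate, illustrative port.
		go http.ListenAndServe("127.0.0.1:8080", prometheus.Handler())
		srv.Serve(lis)
	}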
+ 157 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go

@@ -0,0 +1,157 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+var (
+	serverStartedCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "started_total",
+			Help:      "Total number of RPCs started on the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverHandledCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "handled_total",
+			Help:      "Total number of RPCs completed on the server, regardless of success or failure.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
+
+	serverStreamMsgReceived = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "msg_received_total",
+			Help:      "Total number of RPC stream messages received on the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverStreamMsgSent = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "msg_sent_total",
+			Help:      "Total number of gRPC stream messages sent by the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverHandledHistogramEnabled = false
+	serverHandledHistogramOpts    = prom.HistogramOpts{
+		Namespace: "grpc",
+		Subsystem: "server",
+		Name:      "handling_seconds",
+		Help:      "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+		Buckets:   prom.DefBuckets,
+	}
+	serverHandledHistogram *prom.HistogramVec
+)
+
+func init() {
+	prom.MustRegister(serverStartedCounter)
+	prom.MustRegister(serverHandledCounter)
+	prom.MustRegister(serverStreamMsgReceived)
+	prom.MustRegister(serverStreamMsgSent)
+}
+
+type HistogramOption func(*prom.HistogramOpts)
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
+}
+
+// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&serverHandledHistogramOpts)
+	}
+	if !serverHandledHistogramEnabled {
+		serverHandledHistogram = prom.NewHistogramVec(
+			serverHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+		prom.Register(serverHandledHistogram)
+	}
+	serverHandledHistogramEnabled = true
+}
+
+type serverReporter struct {
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter {
+	r := &serverReporter{rpcType: rpcType}
+	if serverHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *serverReporter) ReceivedMessage() {
+	serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) SentMessage() {
+	serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) Handled(code codes.Code) {
+	serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if serverHandledHistogramEnabled {
+		serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC service method labels to be pre-populated.
+func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) {
+	methodName := mInfo.Name
+	methodType := string(typeFromMethodInfo(mInfo))
+	// These are just references (no increments); referencing a label combination creates the series at 0 without setting a value.
+	serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	if serverHandledHistogramEnabled {
+		serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	}
+	for _, code := range allCodes {
+		serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+	}
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}

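Because serverHandledHistogramEnabled is checked on every RPC, EnableHandlingTimeHistogram is opt-in and only observes RPCs handled after it runs, so it belongs in server setup code. A minimal sketch with custom buckets (the boundaries are illustrative, not a recommendation):

	package main

	import (
		grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	)

	func main() {
		// Call before serving; latency series are only recorded from this point on.
		grpc_prometheus.EnableHandlingTimeHistogram(
			grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.5, 1, 5}),
		)
	}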
+ 307 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_test.go

@@ -0,0 +1,307 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	pb_testproto "github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+const (
+	pingDefaultValue   = "I like kittens."
+	countListResponses = 20
+)
+
+func TestServerInterceptorSuite(t *testing.T) {
+	suite.Run(t, &ServerInterceptorTestSuite{})
+}
+
+type ServerInterceptorTestSuite struct {
+	suite.Suite
+
+	serverListener net.Listener
+	server         *grpc.Server
+	clientConn     *grpc.ClientConn
+	testClient     pb_testproto.TestServiceClient
+	ctx            context.Context
+}
+
+func (s *ServerInterceptorTestSuite) SetupSuite() {
+	var err error
+
+	EnableHandlingTimeHistogram()
+
+	s.serverListener, err = net.Listen("tcp", "127.0.0.1:0")
+	require.NoError(s.T(), err, "must be able to allocate a port for serverListener")
+
+	// This is the point where we hook up the interceptor
+	s.server = grpc.NewServer(
+		grpc.StreamInterceptor(StreamServerInterceptor),
+		grpc.UnaryInterceptor(UnaryServerInterceptor),
+	)
+	pb_testproto.RegisterTestServiceServer(s.server, &testService{t: s.T()})
+
+	go func() {
+		s.server.Serve(s.serverListener)
+	}()
+
+	s.clientConn, err = grpc.Dial(s.serverListener.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(2*time.Second))
+	require.NoError(s.T(), err, "must not error on client Dial")
+	s.testClient = pb_testproto.NewTestServiceClient(s.clientConn)
+
+	// Important! Pre-register stuff here.
+	Register(s.server)
+}
+
+func (s *ServerInterceptorTestSuite) SetupTest() {
+	// Bound every RPC call at 2 seconds so that async issues or deadlocks cannot hang the tests.
+	s.ctx, _ = context.WithTimeout(context.TODO(), 2*time.Second)
+}
+
+func (s *ServerInterceptorTestSuite) TearDownSuite() {
+	if s.serverListener != nil {
+		s.server.Stop()
+		s.T().Logf("stopped grpc.Server at: %v", s.serverListener.Addr().String())
+		s.serverListener.Close()
+	}
+	if s.clientConn != nil {
+		s.clientConn.Close()
+	}
+}
+
+func (s *ServerInterceptorTestSuite) TestRegisterPresetsStuff() {
+	for testId, testCase := range []struct {
+		metricName     string
+		existingLabels []string
+	}{
+		{"grpc_server_started_total", []string{"mwitkow.testproto.TestService", "PingEmpty", "unary"}},
+		{"grpc_server_started_total", []string{"mwitkow.testproto.TestService", "PingList", "server_stream"}},
+		{"grpc_server_msg_received_total", []string{"mwitkow.testproto.TestService", "PingList", "server_stream"}},
+		{"grpc_server_msg_sent_total", []string{"mwitkow.testproto.TestService", "PingEmpty", "unary"}},
+		{"grpc_server_handling_seconds_sum", []string{"mwitkow.testproto.TestService", "PingEmpty", "unary"}},
+		{"grpc_server_handling_seconds_count", []string{"mwitkow.testproto.TestService", "PingList", "server_stream"}},
+		{"grpc_server_handled_total", []string{"mwitkow.testproto.TestService", "PingList", "server_stream", "OutOfRange"}},
+		{"grpc_server_handled_total", []string{"mwitkow.testproto.TestService", "PingList", "server_stream", "Aborted"}},
+		{"grpc_server_handled_total", []string{"mwitkow.testproto.TestService", "PingEmpty", "unary", "FailedPrecondition"}},
+		{"grpc_server_handled_total", []string{"mwitkow.testproto.TestService", "PingEmpty", "unary", "ResourceExhausted"}},
+	} {
+		lineCount := len(fetchPrometheusLines(s.T(), testCase.metricName, testCase.existingLabels...))
+		assert.NotEqual(s.T(), 0, lineCount, "metrics must exist for test case %d", testId)
+	}
+}
+
+func (s *ServerInterceptorTestSuite) TestUnaryIncrementsStarted() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_started_total", "PingEmpty", "unary")
+	s.testClient.PingEmpty(s.ctx, &pb_testproto.Empty{})
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_started_total", "PingEmpty", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_started_total should be incremented for PingEmpty")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_started_total", "PingError", "unary")
+	s.testClient.PingError(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.Unavailable)})
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_started_total", "PingError", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_started_total should be incremented for PingError")
+}
+
+func (s *ServerInterceptorTestSuite) TestUnaryIncrementsHandled() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingEmpty", "unary", "OK")
+	s.testClient.PingEmpty(s.ctx, &pb_testproto.Empty{}) // should return with code=OK
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingEmpty", "unary", "OK")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handled_count should be incremented for PingEmpty")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingError", "unary", "FailedPrecondition")
+	s.testClient.PingError(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingError", "unary", "FailedPrecondition")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handled_total should be incremented for PingError")
+}
+
+func (s *ServerInterceptorTestSuite) TestUnaryIncrementsHistograms() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingEmpty", "unary")
+	s.testClient.PingEmpty(s.ctx, &pb_testproto.Empty{}) // should return with code=OK
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingEmpty", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handled_count should be incremented for PingEmpty")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingError", "unary")
+	s.testClient.PingError(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingError", "unary")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handling_seconds_count should be incremented for PingError")
+}
+
+func (s *ServerInterceptorTestSuite) TestStreamingIncrementsStarted() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_started_total", "PingList", "server_stream")
+	s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{})
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_started_total", "PingList", "server_stream")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_started_total should be incremented for PingList")
+}
+
+func (s *ServerInterceptorTestSuite) TestStreamingIncrementsHistograms() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingList", "server_stream")
+	ss, _ := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{}) // should return with code=OK
+	// Drain the stream; the RPC is only counted as handled once fully read.
+	for {
+		_, err := ss.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(s.T(), err, "reading pingList shouldn't fail")
+	}
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingList", "server_stream")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handling_seconds_count should be incremented for PingList OK")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingList", "server_stream")
+	_, err := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	require.NoError(s.T(), err, "PingList must not fail immedietely")
+
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handling_seconds_count", "PingList", "server_stream")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handling_seconds_count should be incremented for PingList FailedPrecondition")
+}
+
+func (s *ServerInterceptorTestSuite) TestStreamingIncrementsHandled() {
+	var before int
+	var after int
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingList", "server_stream", "OK")
+	ss, _ := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{}) // should return with code=OK
+	// Drain the stream; the RPC is only counted as handled once fully read.
+	for {
+		_, err := ss.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(s.T(), err, "reading pingList shouldn't fail")
+	}
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingList", "server_stream", "OK")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handled_total should be incremented for PingList OK")
+
+	before = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingList", "server_stream", "FailedPrecondition")
+	_, err := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{ErrorCodeReturned: uint32(codes.FailedPrecondition)}) // should return with code=FailedPrecondition
+	require.NoError(s.T(), err, "PingList must not fail immedietely")
+
+	after = sumCountersForMetricAndLabels(s.T(), "grpc_server_handled_total", "PingList", "server_stream", "FailedPrecondition")
+	assert.EqualValues(s.T(), before+1, after, "grpc_server_handled_total should be incremented for PingList FailedPrecondition")
+}
+
+func (s *ServerInterceptorTestSuite) TestStreamingIncrementsMessageCounts() {
+	beforeRecv := sumCountersForMetricAndLabels(s.T(), "grpc_server_msg_received_total", "PingList", "server_stream")
+	beforeSent := sumCountersForMetricAndLabels(s.T(), "grpc_server_msg_sent_total", "PingList", "server_stream")
+	ss, _ := s.testClient.PingList(s.ctx, &pb_testproto.PingRequest{}) // should return with code=OK
+	// Drain the stream, counting every response received.
+	count := 0
+	for {
+		_, err := ss.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(s.T(), err, "reading pingList shouldn't fail")
+		count++
+	}
+	require.EqualValues(s.T(), countListResponses, count, "number of messages received on the wire must match countListResponses")
+	afterSent := sumCountersForMetricAndLabels(s.T(), "grpc_server_msg_sent_total", "PingList", "server_stream")
+	afterRecv := sumCountersForMetricAndLabels(s.T(), "grpc_server_msg_received_total", "PingList", "server_stream")
+
+	assert.EqualValues(s.T(), beforeSent+countListResponses, afterSent, "grpc_server_msg_sent_total should be incremented 20 times for PingList")
+	assert.EqualValues(s.T(), beforeRecv+1, afterRecv, "grpc_server_msg_received_total should be incremented once for PingList")
+}
+
+func fetchPrometheusLines(t *testing.T, metricName string, matchingLabelValues ...string) []string {
+	resp := httptest.NewRecorder()
+	req, err := http.NewRequest("GET", "/", nil)
+	require.NoError(t, err, "failed creating request for Prometheus handler")
+	prometheus.Handler().ServeHTTP(resp, req)
+	reader := bufio.NewReader(resp.Body)
+	ret := []string{}
+	for {
+		line, err := reader.ReadString('\n')
+		if err == io.EOF {
+			break
+		} else {
+			require.NoError(t, err, "error reading stuff")
+		}
+		if !strings.HasPrefix(line, metricName) {
+			continue
+		}
+		matches := true
+		for _, labelValue := range matchingLabelValues {
+			if !strings.Contains(line, `"`+labelValue+`"`) {
+				matches = false
+			}
+		}
+		if matches {
+			ret = append(ret, line)
+		}
+	}
+	return ret
+}
+
+func sumCountersForMetricAndLabels(t *testing.T, metricName string, matchingLabelValues ...string) int {
+	count := 0
+	for _, line := range fetchPrometheusLines(t, metricName, matchingLabelValues...) {
+		valueString := line[strings.LastIndex(line, " ")+1 : len(line)-1]
+		valueFloat, err := strconv.ParseFloat(valueString, 32)
+		require.NoError(t, err, "failed parsing value for line: %v", line)
+		count += int(valueFloat)
+	}
+	return count
+}
+
+type testService struct {
+	t *testing.T
+}
+
+func (s *testService) PingEmpty(ctx context.Context, _ *pb_testproto.Empty) (*pb_testproto.PingResponse, error) {
+	return &pb_testproto.PingResponse{Value: pingDefaultValue, Counter: 42}, nil
+}
+
+func (s *testService) Ping(ctx context.Context, ping *pb_testproto.PingRequest) (*pb_testproto.PingResponse, error) {
+	// Echo the request value back with a fixed counter.
+	return &pb_testproto.PingResponse{Value: ping.Value, Counter: 42}, nil
+}
+
+func (s *testService) PingError(ctx context.Context, ping *pb_testproto.PingRequest) (*pb_testproto.Empty, error) {
+	code := codes.Code(ping.ErrorCodeReturned)
+	return nil, grpc.Errorf(code, "Userspace error.")
+}
+
+func (s *testService) PingList(ping *pb_testproto.PingRequest, stream pb_testproto.TestService_PingListServer) error {
+	if ping.ErrorCodeReturned != 0 {
+		return grpc.Errorf(codes.Code(ping.ErrorCodeReturned), "foobar")
+	}
+	// Stream countListResponses responses back to the client, propagating send errors.
+	for i := 0; i < countListResponses; i++ {
+		if err := stream.Send(&pb_testproto.PingResponse{Value: ping.Value, Counter: int32(i)}); err != nil {
+			return err
+		}
+	}
+	return nil
+}

+ 27 - 0
cmd/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go

@@ -0,0 +1,27 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"strings"
+
+	"google.golang.org/grpc/codes"
+)
+
+var (
+	allCodes = []codes.Code{
+		codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
+		codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
+		codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
+		codes.Unavailable, codes.DataLoss,
+	}
+)
+
+func splitMethodName(fullMethodName string) (string, string) {
+	fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
+	if i := strings.Index(fullMethodName, "/"); i >= 0 {
+		return fullMethodName[:i], fullMethodName[i+1:]
+	}
+	return "unknown", "unknown"
+}

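splitMethodName assumes gRPC's "/package.Service/Method" wire format and falls back to "unknown" otherwise. Since it is unexported, a sketch of its behavior has to live inside the package, e.g. as a test (this test is illustrative, not part of the vendored code):

	package grpc_prometheus

	import "testing"

	func TestSplitMethodNameSketch(t *testing.T) {
		// Well-formed full method names split into service and method.
		service, method := splitMethodName("/mwitkow.testproto.TestService/Ping")
		if service != "mwitkow.testproto.TestService" || method != "Ping" {
			t.Fatalf("unexpected split: %q %q", service, method)
		}
		// Anything without a slash after the leading one falls back to unknown/unknown.
		if service, method = splitMethodName("not-a-method-name"); service != "unknown" || method != "unknown" {
			t.Fatalf("expected unknown fallback, got %q %q", service, method)
		}
	}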
+ 2 - 0
glide.lock

@@ -108,6 +108,8 @@ imports:
   version: 6011f165dc288c72abd8acd7722f837c5c64198d
 - name: github.com/xiang90/probing
   version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
+- name: github.com/grpc-ecosystem/go-grpc-prometheus
+  version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 - name: golang.org/x/crypto
   version: 1351f936d976c60a0a48d728281922cf63eafb8d
   subpackages:

+ 2 - 0
glide.yaml

@@ -106,6 +106,8 @@ import:
   version: 6011f165dc288c72abd8acd7722f837c5c64198d
 - package: github.com/xiang90/probing
   version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
+- package: github.com/grpc-ecosystem/go-grpc-prometheus
+  version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 - package: golang.org/x/crypto
   version: 1351f936d976c60a0a48d728281922cf63eafb8d
   subpackages: