
Merge branch 'samples'

Richard Crowley · 12 years ago · commit b7b6bfd002
21 changed files with 1,359 additions and 608 deletions
  1. counter.go (+52 -21)
  2. counter_test.go (+10 -0)
  3. ewma.go (+47 -20)
  4. gauge.go (+36 -15)
  5. gauge_test.go (+10 -0)
  6. graphite.go (+22 -19)
  7. healthcheck.go (+19 -21)
  8. histogram.go (+122 -157)
  9. histogram_test.go (+39 -25)
  10. json.go (+33 -32)
  11. librato/librato.go (+16 -17)
  12. log.go (+24 -21)
  13. meter.go (+85 -52)
  14. meter_test.go (+9 -1)
  15. metrics_test.go (+32 -32)
  16. sample.go (+393 -39)
  17. sample_test.go (+163 -4)
  18. stathat/stathat.go (+22 -19)
  19. syslog.go (+24 -21)
  20. timer.go (+177 -71)
  21. writer.go (+24 -21)

+ 52 - 21
counter.go

@@ -3,17 +3,16 @@ package metrics
 import "sync/atomic"
 
 // Counters hold an int64 value that can be incremented and decremented.
-//
-// This is an interface so as to encourage other structs to implement
-// the Counter API as appropriate.
 type Counter interface {
 	Clear()
 	Count() int64
 	Dec(int64)
 	Inc(int64)
+	Snapshot() Counter
 }
 
-// Get an existing or create and register a new Counter.
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
 func GetOrRegisterCounter(name string, r Registry) Counter {
 	if nil == r {
 		r = DefaultRegistry
@@ -21,7 +20,7 @@ func GetOrRegisterCounter(name string, r Registry) Counter {
 	return r.GetOrRegister(name, NewCounter()).(Counter)
 }
 
-// Create a new Counter.
+// NewCounter constructs a new StandardCounter.
 func NewCounter() Counter {
 	if UseNilMetrics {
 		return NilCounter{}
@@ -29,7 +28,7 @@ func NewCounter() Counter {
 	return &StandardCounter{0}
 }
 
-// Create and register a new Counter.
+// NewRegisteredCounter constructs and registers a new StandardCounter.
 func NewRegisteredCounter(name string, r Registry) Counter {
 	c := NewCounter()
 	if nil == r {
@@ -39,43 +38,75 @@ func NewRegisteredCounter(name string, r Registry) Counter {
 	return c
 }
 
-// No-op Counter.
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+	panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+	panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+	panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
 type NilCounter struct{}
 
-// No-op.
-func (c NilCounter) Clear() {}
+// Clear is a no-op.
+func (NilCounter) Clear() {}
 
-// No-op.
-func (c NilCounter) Count() int64 { return 0 }
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
 
-// No-op.
-func (c NilCounter) Dec(i int64) {}
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
 
-// No-op.
-func (c NilCounter) Inc(i int64) {}
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
 
-// The standard implementation of a Counter uses the sync/atomic package
-// to manage a single int64 value.
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
 type StandardCounter struct {
 	count int64
 }
 
-// Clear the counter: set it to zero.
+// Clear sets the counter to zero.
 func (c *StandardCounter) Clear() {
 	atomic.StoreInt64(&c.count, 0)
 }
 
-// Return the current count.
+// Count returns the current count.
 func (c *StandardCounter) Count() int64 {
 	return atomic.LoadInt64(&c.count)
 }
 
-// Decrement the counter by the given amount.
+// Dec decrements the counter by the given amount.
 func (c *StandardCounter) Dec(i int64) {
 	atomic.AddInt64(&c.count, -i)
 }
 
-// Increment the counter by the given amount.
+// Inc increments the counter by the given amount.
 func (c *StandardCounter) Inc(i int64) {
 	atomic.AddInt64(&c.count, i)
 }
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+	return CounterSnapshot(c.Count())
+}
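For reference, a minimal usage sketch of the new Counter.Snapshot method (not part of the commit; assumes UseNilMetrics is false and that the code sits alongside the package):

package metrics

import "fmt"

// counterSnapshotExample takes a read-only copy of a live counter.  Inc, Dec
// and Clear on the CounterSnapshot panic; only Count and Snapshot are usable.
func counterSnapshotExample() {
	c := NewCounter()
	c.Inc(47)
	snapshot := c.Snapshot() // frozen at 47
	c.Inc(1)
	fmt.Println(snapshot.Count(), c.Count()) // 47 48
}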

+ 10 - 0
counter_test.go

@@ -51,6 +51,16 @@ func TestCounterInc2(t *testing.T) {
 	}
 }
 
+func TestCounterSnapshot(t *testing.T) {
+	c := NewCounter()
+	c.Inc(1)
+	snapshot := c.Snapshot()
+	c.Inc(1)
+	if count := snapshot.Count(); 1 != count {
+		t.Errorf("c.Count(): 1 != %v\n", count)
+	}
+}
+
 func TestCounterZero(t *testing.T) {
 	c := NewCounter()
 	if count := c.Count(); 0 != count {

+ 47 - 20
ewma.go

@@ -8,16 +8,14 @@ import (
 
 // EWMAs continuously calculate an exponentially-weighted moving average
 // based on an outside source of clock ticks.
-//
-// This is an interface so as to encourage other structs to implement
-// the EWMA API as appropriate.
 type EWMA interface {
 	Rate() float64
+	Snapshot() EWMA
 	Tick()
 	Update(int64)
 }
 
-// Create a new EWMA with the given alpha.
+// NewEWMA constructs a new EWMA with the given alpha.
 func NewEWMA(alpha float64) EWMA {
 	if UseNilMetrics {
 		return NilEWMA{}
@@ -25,36 +23,59 @@ func NewEWMA(alpha float64) EWMA {
 	return &StandardEWMA{alpha: alpha}
 }
 
-// Create a new EWMA with alpha set for a one-minute moving average.
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
 func NewEWMA1() EWMA {
 	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
 }
 
-// Create a new EWMA with alpha set for a five-minute moving average.
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
 func NewEWMA5() EWMA {
 	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
 }
 
-// Create a new EWMA with alpha set for a fifteen-minute moving average.
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
 func NewEWMA15() EWMA {
 	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
 }
 
-// No-op EWMA.
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+	panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+	panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
 type NilEWMA struct{}
 
-// No-op.
-func (a NilEWMA) Rate() float64 { return 0.0 }
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
 
-// No-op.
-func (a NilEWMA) Tick() {}
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
 
-// No-op.
-func (a NilEWMA) Update(n int64) {}
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
 
-// The standard implementation of an EWMA tracks the number of uncounted
-// events and processes them on each tick.  It uses the sync/atomic package
-// to manage uncounted events.
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick.  It uses the
+// sync/atomic package to manage uncounted events.
 type StandardEWMA struct {
 	alpha     float64
 	init      bool
@@ -63,14 +84,20 @@ type StandardEWMA struct {
 	uncounted int64
 }
 
-// Return the moving average rate of events per second.
+// Rate returns the moving average rate of events per second.
 func (a *StandardEWMA) Rate() float64 {
 	a.mutex.Lock()
 	defer a.mutex.Unlock()
 	return a.rate * float64(1e9)
 }
 
-// Tick the clock to update the moving average.
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average.  It assumes it is called
+// every five seconds.
 func (a *StandardEWMA) Tick() {
 	count := atomic.LoadInt64(&a.uncounted)
 	atomic.AddInt64(&a.uncounted, -count)
@@ -85,7 +112,7 @@ func (a *StandardEWMA) Tick() {
 	}
 }
 
-// Add n uncounted events.
+// Update adds n uncounted events.
 func (a *StandardEWMA) Update(n int64) {
 	atomic.AddInt64(&a.uncounted, n)
 }
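A hand-driven sketch of the EWMA interface above (illustrative only; StandardEWMA assumes Tick is called every five seconds, which a Meter's internal ticker normally provides):

package metrics

import "fmt"

// ewmaExample records events and ticks the clock manually, then reads the
// rate through a read-only snapshot.
func ewmaExample() {
	a := NewEWMA1() // one-minute moving average
	a.Update(3)     // three uncounted events
	a.Tick()        // fold them into the rate, as if five seconds had passed
	fmt.Println(a.Snapshot().Rate()) // events per second
}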

+ 36 - 15
gauge.go

@@ -3,15 +3,14 @@ package metrics
 import "sync/atomic"
 
 // Gauges hold an int64 value that can be set arbitrarily.
-//
-// This is an interface so as to encourage other structs to implement
-// the Gauge API as appropriate.
 type Gauge interface {
+	Snapshot() Gauge
 	Update(int64)
 	Value() int64
 }
 
-// Get an existing or create and register a new Gauge.
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
 func GetOrRegisterGauge(name string, r Registry) Gauge {
 	if nil == r {
 		r = DefaultRegistry
@@ -19,7 +18,7 @@ func GetOrRegisterGauge(name string, r Registry) Gauge {
 	return r.GetOrRegister(name, NewGauge()).(Gauge)
 }
 
-// Create a new Gauge.
+// NewGauge constructs a new StandardGauge.
 func NewGauge() Gauge {
 	if UseNilMetrics {
 		return NilGauge{}
@@ -27,7 +26,7 @@ func NewGauge() Gauge {
 	return &StandardGauge{0}
 }
 
-// Create and register a new Gauge.
+// NewRegisteredGauge constructs and registers a new StandardGauge.
 func NewRegisteredGauge(name string, r Registry) Gauge {
 	c := NewGauge()
 	if nil == r {
@@ -37,27 +36,49 @@ func NewRegisteredGauge(name string, r Registry) Gauge {
 	return c
 }
 
-// No-op Gauge.
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
 type NilGauge struct{}
 
-// No-op.
-func (g NilGauge) Update(v int64) {}
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
 
-// No-op.
-func (g NilGauge) Value() int64 { return 0 }
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
 
-// The standard implementation of a Gauge uses the sync/atomic package
-// to manage a single int64 value.
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
 type StandardGauge struct {
 	value int64
 }
 
-// Update the gauge's value.
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
 func (g *StandardGauge) Update(v int64) {
 	atomic.StoreInt64(&g.value, v)
 }
 
-// Return the gauge's current value.
+// Value returns the gauge's current value.
 func (g *StandardGauge) Value() int64 {
 	return atomic.LoadInt64(&g.value)
 }
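A sketch mirroring TestGaugeSnapshot below (not part of the commit; assumes UseNilMetrics is false):

package metrics

import "fmt"

// gaugeSnapshotExample shows that a GaugeSnapshot keeps the value it was
// taken with while the live gauge moves on.
func gaugeSnapshotExample() {
	g := NewGauge()
	g.Update(47)
	snapshot := g.Snapshot()
	g.Update(0)
	fmt.Println(snapshot.Value(), g.Value()) // 47 0
}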

+ 10 - 0
gauge_test.go

@@ -18,6 +18,16 @@ func TestGauge(t *testing.T) {
 	}
 }
 
+func TestGaugeSnapshot(t *testing.T) {
+	g := NewGauge()
+	g.Update(int64(47))
+	snapshot := g.Snapshot()
+	g.Update(int64(0))
+	if v := snapshot.Value(); 47 != v {
+		t.Errorf("g.Value(): 47 != %v\n", v)
+	}
+}
+
 func TestGetOrRegisterGauge(t *testing.T) {
 	r := NewRegistry()
 	NewRegisteredGauge("foo", r).Update(47)

+ 22 - 19
graphite.go

@@ -26,45 +26,48 @@ func graphite(r Registry, prefix string, addr *net.TCPAddr) error {
 	defer conn.Close()
 	w := bufio.NewWriter(conn)
 	r.Each(func(name string, i interface{}) {
-		switch m := i.(type) {
+		switch metric := i.(type) {
 		case Counter:
-			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, m.Count(), now)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, metric.Count(), now)
 		case Gauge:
-			fmt.Fprintf(w, "%s.%s.value %d %d\n", prefix, name, m.Value(), now)
+			fmt.Fprintf(w, "%s.%s.value %d %d\n", prefix, name, metric.Value(), now)
 		case Histogram:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, m.Count(), now)
-			fmt.Fprintf(w, "%s.%s.min %d %d\n", prefix, name, m.Min(), now)
-			fmt.Fprintf(w, "%s.%s.max %d %d\n", prefix, name, m.Max(), now)
-			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", prefix, name, m.Mean(), now)
-			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", prefix, name, m.StdDev(), now)
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, h.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", prefix, name, h.Min(), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", prefix, name, h.Max(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", prefix, name, h.Mean(), now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", prefix, name, h.StdDev(), now)
 			fmt.Fprintf(w, "%s.%s.50-percentile %.2f %d\n", prefix, name, ps[0], now)
 			fmt.Fprintf(w, "%s.%s.75-percentile %.2f %d\n", prefix, name, ps[1], now)
 			fmt.Fprintf(w, "%s.%s.95-percentile %.2f %d\n", prefix, name, ps[2], now)
 			fmt.Fprintf(w, "%s.%s.99-percentile %.2f %d\n", prefix, name, ps[3], now)
 			fmt.Fprintf(w, "%s.%s.999-percentile %.2f %d\n", prefix, name, ps[4], now)
 		case Meter:
+			m := metric.Snapshot()
 			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, m.Count(), now)
 			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", prefix, name, m.Rate1(), now)
 			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", prefix, name, m.Rate5(), now)
 			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", prefix, name, m.Rate15(), now)
 			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", prefix, name, m.RateMean(), now)
 		case Timer:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, m.Count(), now)
-			fmt.Fprintf(w, "%s.%s.min %d %d\n", prefix, name, m.Min(), now)
-			fmt.Fprintf(w, "%s.%s.max %d %d\n", prefix, name, m.Max(), now)
-			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", prefix, name, m.Mean(), now)
-			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", prefix, name, m.StdDev(), now)
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", prefix, name, t.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", prefix, name, t.Min(), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", prefix, name, t.Max(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", prefix, name, t.Mean(), now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", prefix, name, t.StdDev(), now)
 			fmt.Fprintf(w, "%s.%s.50-percentile %.2f %d\n", prefix, name, ps[0], now)
 			fmt.Fprintf(w, "%s.%s.75-percentile %.2f %d\n", prefix, name, ps[1], now)
 			fmt.Fprintf(w, "%s.%s.95-percentile %.2f %d\n", prefix, name, ps[2], now)
 			fmt.Fprintf(w, "%s.%s.99-percentile %.2f %d\n", prefix, name, ps[3], now)
 			fmt.Fprintf(w, "%s.%s.999-percentile %.2f %d\n", prefix, name, ps[4], now)
-			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", prefix, name, m.Rate1(), now)
-			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", prefix, name, m.Rate5(), now)
-			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", prefix, name, m.Rate15(), now)
-			fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", prefix, name, m.RateMean(), now)
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", prefix, name, t.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", prefix, name, t.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", prefix, name, t.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", prefix, name, t.RateMean(), now)
 		}
 		w.Flush()
 	})
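The same snapshot-first pattern in a stripped-down custom reporter (a sketch, not the exporter's code; only Histogram and Meter are shown):

package metrics

import "fmt"

// reportOnce walks a registry once and reads every statistic from a single
// Snapshot per metric, so each line of output is internally consistent.
func reportOnce(r Registry) {
	r.Each(func(name string, i interface{}) {
		switch metric := i.(type) {
		case Histogram:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.95, 0.99})
			fmt.Printf("%s count=%d mean=%.2f p95=%.2f\n", name, h.Count(), h.Mean(), ps[1])
		case Meter:
			m := metric.Snapshot()
			fmt.Printf("%s count=%d 1m.rate=%.2f\n", name, m.Count(), m.Rate1())
		}
	})
}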

+ 19 - 21
healthcheck.go

@@ -1,9 +1,6 @@
 package metrics
 
-// Healthchecks hold an os.Error value describing an arbitrary up/down status.
-//
-// This is an interface so as to encourage other structs to implement
-// the Healthcheck API as appropriate.
+// Healthchecks hold an error value describing an arbitrary up/down status.
 type Healthcheck interface {
 	Check()
 	Error() error
@@ -11,8 +8,8 @@ type Healthcheck interface {
 	Unhealthy(error)
 }
 
-// Create a new Healthcheck, which will use the given function to update
-// its status.
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
 func NewHealthcheck(f func(Healthcheck)) Healthcheck {
 	if UseNilMetrics {
 		return NilHealthcheck{}
@@ -20,44 +17,45 @@ func NewHealthcheck(f func(Healthcheck)) Healthcheck {
 	return &StandardHealthcheck{nil, f}
 }
 
-// No-op Healthcheck.
+// NilHealthcheck is a no-op Healthcheck.
 type NilHealthcheck struct{}
 
-// No-op.
-func (h NilHealthcheck) Check() {}
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
 
-// No-op.
-func (h NilHealthcheck) Error() error { return nil }
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
 
-// No-op.
-func (h NilHealthcheck) Healthy() {}
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
 
-// No-op.
-func (h NilHealthcheck) Unhealthy(err error) {}
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
 
-// The standard implementation of a Healthcheck stores the status and a
-// function to call to update the status.
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
 type StandardHealthcheck struct {
 	err error
 	f   func(Healthcheck)
 }
 
-// Update the healthcheck's status.
+// Check runs the healthcheck function to update the healthcheck's status.
 func (h *StandardHealthcheck) Check() {
 	h.f(h)
 }
 
-// Return the healthcheck's status, which will be nil if it is healthy.
+// Error returns the healthcheck's status, which will be nil if it is healthy.
 func (h *StandardHealthcheck) Error() error {
 	return h.err
 }
 
-// Mark the healthcheck as healthy.
+// Healthy marks the healthcheck as healthy.
 func (h *StandardHealthcheck) Healthy() {
 	h.err = nil
 }
 
-// Mark the healthcheck as unhealthy.  The error should provide details.
+// Unhealthy marks the healthcheck as unhealthy.  The error is stored and
+// may be retrieved by the Error method.
 func (h *StandardHealthcheck) Unhealthy(err error) {
 	h.err = err
 }
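A minimal healthcheck wired to a stand-in probe (illustrative; the backendUp flag is hypothetical):

package metrics

import (
	"errors"
	"fmt"
)

// healthcheckExample builds a Healthcheck whose Check runs the supplied
// function, which flips the status via Healthy or Unhealthy.
func healthcheckExample() {
	backendUp := true // hypothetical stand-in for a real probe
	hc := NewHealthcheck(func(h Healthcheck) {
		if backendUp {
			h.Healthy()
		} else {
			h.Unhealthy(errors.New("backend unreachable"))
		}
	})
	hc.Check()
	fmt.Println(hc.Error()) // nil while healthy
}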

+ 122 - 157
histogram.go

@@ -1,16 +1,6 @@
 package metrics
 
-import (
-	"math"
-	"sort"
-	"sync"
-	"sync/atomic"
-)
-
-// Histograms calculate distribution statistics from an int64 value.
-//
-// This is an interface so as to encourage other structs to implement
-// the Histogram API as appropriate.
+// Histograms calculate distribution statistics from a series of int64 values.
 type Histogram interface {
 	Clear()
 	Count() int64
@@ -19,12 +9,15 @@ type Histogram interface {
 	Min() int64
 	Percentile(float64) float64
 	Percentiles([]float64) []float64
+	Sample() Sample
+	Snapshot() Histogram
 	StdDev() float64
 	Update(int64)
 	Variance() float64
 }
 
-// Get an existing or create and register a new Histogram.
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
 func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
 	if nil == r {
 		r = DefaultRegistry
@@ -32,22 +25,16 @@ func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
 	return r.GetOrRegister(name, NewHistogram(s)).(Histogram)
 }
 
-// Create a new Histogram with the given Sample.  The initial values compare
-// so that the first value will be both min and max and the variance is flagged
-// for special treatment on its first iteration.
+// NewHistogram constructs a new StandardHistogram from a Sample.
 func NewHistogram(s Sample) Histogram {
 	if UseNilMetrics {
 		return NilHistogram{}
 	}
-	return &StandardHistogram{
-		max:      math.MinInt64,
-		min:      math.MaxInt64,
-		s:        s,
-		variance: [2]float64{-1.0, 0.0},
-	}
+	return &StandardHistogram{sample: s}
 }
 
-// Create and register a new Histogram.
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
 func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
 	c := NewHistogram(s)
 	if nil == r {
@@ -57,171 +44,149 @@ func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
 	return c
 }
 
-// No-op Histogram.
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+	sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+	panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+	panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
 type NilHistogram struct{}
 
-// No-op.
-func (h NilHistogram) Clear() {}
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
 
-// No-op.
-func (h NilHistogram) Count() int64 { return 0 }
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
 
-// No-op.
-func (h NilHistogram) Max() int64 { return 0 }
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
 
-// No-op.
-func (h NilHistogram) Mean() float64 { return 0.0 }
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
 
-// No-op.
-func (h NilHistogram) Min() int64 { return 0 }
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
 
-// No-op.
-func (h NilHistogram) Percentile(p float64) float64 { return 0.0 }
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
 
-// No-op.
-func (h NilHistogram) Percentiles(ps []float64) []float64 {
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
 	return make([]float64, len(ps))
 }
 
-// No-op.
-func (h NilHistogram) StdDev() float64 { return 0.0 }
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
 
-// No-op.
-func (h NilHistogram) Update(v int64) {}
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
 
-// No-op.
-func (h NilHistogram) Variance() float64 { return 0.0 }
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
 
-// The standard implementation of a Histogram uses a Sample and a goroutine
-// to synchronize its calculations.
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
 type StandardHistogram struct {
-	count, sum, min, max int64
-	mutex                sync.Mutex
-	s                    Sample
-	variance             [2]float64
+	sample Sample
 }
 
-// Clear the histogram.
-func (h *StandardHistogram) Clear() {
-	h.mutex.Lock()
-	defer h.mutex.Unlock()
-	h.count = 0
-	h.max = math.MinInt64
-	h.min = math.MaxInt64
-	h.s.Clear()
-	h.sum = 0
-	h.variance = [...]float64{-1.0, 0.0}
-}
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
 
-// Return the count of inputs since the histogram was last cleared.
-func (h *StandardHistogram) Count() int64 {
-	return atomic.LoadInt64(&h.count)
-}
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
 
-// Return the maximal value seen since the histogram was last cleared.
-func (h *StandardHistogram) Max() int64 {
-	h.mutex.Lock()
-	defer h.mutex.Unlock()
-	if 0 == h.count {
-		return 0
-	}
-	return h.max
-}
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
 
-// Return the mean of all values seen since the histogram was last cleared.
-func (h *StandardHistogram) Mean() float64 {
-	h.mutex.Lock()
-	defer h.mutex.Unlock()
-	if 0 == h.count {
-		return 0
-	}
-	return float64(h.sum) / float64(h.count)
-}
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
 
-// Return the minimal value seen since the histogram was last cleared.
-func (h *StandardHistogram) Min() int64 {
-	h.mutex.Lock()
-	defer h.mutex.Unlock()
-	if 0 == h.count {
-		return 0
-	}
-	return h.min
-}
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
 
-// Return an arbitrary percentile of all values seen since the histogram was
-// last cleared.
+// Percentile returns an arbitrary percentile of the values in the sample.
 func (h *StandardHistogram) Percentile(p float64) float64 {
-	return h.Percentiles([]float64{p})[0]
+	return h.sample.Percentile(p)
 }
 
-// Return a slice of arbitrary percentiles of all values seen since the
-// histogram was last cleared.
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
 func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
-	scores := make([]float64, len(ps))
-	values := int64Slice(h.s.Values())
-	size := len(values)
-	if size > 0 {
-		sort.Sort(values)
-		for i, p := range ps {
-			pos := p * float64(size+1)
-			if pos < 1.0 {
-				scores[i] = float64(values[0])
-			} else if pos >= float64(size) {
-				scores[i] = float64(values[size-1])
-			} else {
-				lower := float64(values[int(pos)-1])
-				upper := float64(values[int(pos)])
-				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
-			}
-		}
-	}
-	return scores
+	return h.sample.Percentiles(ps)
 }
 
-// Return the standard deviation of all values seen since the histogram was
-// last cleared.
-func (h *StandardHistogram) StdDev() float64 {
-	return math.Sqrt(h.Variance())
-}
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
 
-// Update the histogram with a new value.
-func (h *StandardHistogram) Update(v int64) {
-	h.mutex.Lock()
-	defer h.mutex.Unlock()
-	h.s.Update(v)
-	h.count++
-	if v < h.min {
-		h.min = v
-	}
-	if v > h.max {
-		h.max = v
-	}
-	h.sum += v
-	fv := float64(v)
-	if -1.0 == h.variance[0] {
-		h.variance[0] = fv
-		h.variance[1] = 0.0
-	} else {
-		m := h.variance[0]
-		s := h.variance[1]
-		h.variance[0] = m + (fv-m)/float64(h.count)
-		h.variance[1] = s + (fv-m)*(fv-h.variance[0])
-	}
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
 }
 
-// Return the variance of all values seen since the histogram was last cleared.
-func (h *StandardHistogram) Variance() float64 {
-	h.mutex.Lock()
-	defer h.mutex.Unlock()
-	if 1 >= h.count {
-		return 0.0
-	}
-	return h.variance[1] / float64(h.count-1)
-}
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
 
-// Cribbed from the standard library's `sort` package.
-type int64Slice []int64
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
 
-func (p int64Slice) Len() int           { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
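A short sketch of the Sample-backed histogram (not part of the commit; the reservoir size is illustrative and UseNilMetrics is assumed false):

package metrics

import "fmt"

// histogramExample delegates all statistics to the underlying Sample and
// freezes them with Snapshot before reading.
func histogramExample() {
	h := NewHistogram(NewUniformSample(1028))
	for i := int64(1); i <= 100; i++ {
		h.Update(i)
	}
	snapshot := h.Snapshot()
	fmt.Println(snapshot.Count(), snapshot.Min(), snapshot.Max(), snapshot.Percentile(0.95))
}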

+ 39 - 25
histogram_test.go

@@ -24,31 +24,7 @@ func TestHistogram10000(t *testing.T) {
 	for i := 1; i <= 10000; i++ {
 		h.Update(int64(i))
 	}
-	if count := h.Count(); 10000 != count {
-		t.Errorf("h.Count(): 10000 != %v\n", count)
-	}
-	if min := h.Min(); 1 != min {
-		t.Errorf("h.Min(): 1 != %v\n", min)
-	}
-	if max := h.Max(); 10000 != max {
-		t.Errorf("h.Max(): 10000 != %v\n", max)
-	}
-	if mean := h.Mean(); 5000.5 != mean {
-		t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
-	}
-	if stdDev := h.StdDev(); 2886.8956799071675 != stdDev {
-		t.Errorf("h.StdDev(): 2886.8956799071675 != %v\n", stdDev)
-	}
-	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
-	if 5000.5 != ps[0] {
-		t.Errorf("median: 5000.5 != %v\n", ps[0])
-	}
-	if 7500.75 != ps[1] {
-		t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
-	}
-	if 9900.99 != ps[2] {
-		t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
-	}
+	testHistogram10000(t, h)
 }
 
 func TestHistogramEmpty(t *testing.T) {
@@ -79,3 +55,41 @@ func TestHistogramEmpty(t *testing.T) {
 		t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
 	}
 }
+
+func TestHistogramSnapshot(t *testing.T) {
+	h := NewHistogram(NewUniformSample(100000))
+	for i := 1; i <= 10000; i++ {
+		h.Update(int64(i))
+	}
+	snapshot := h.Snapshot()
+	h.Update(0)
+	testHistogram10000(t, snapshot)
+}
+
+func testHistogram10000(t *testing.T, h Histogram) {
+	if count := h.Count(); 10000 != count {
+		t.Errorf("h.Count(): 10000 != %v\n", count)
+	}
+	if min := h.Min(); 1 != min {
+		t.Errorf("h.Min(): 1 != %v\n", min)
+	}
+	if max := h.Max(); 10000 != max {
+		t.Errorf("h.Max(): 10000 != %v\n", max)
+	}
+	if mean := h.Mean(); 5000.5 != mean {
+		t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
+	}
+	if stdDev := h.StdDev(); 2886.8956799071675 != stdDev {
+		t.Errorf("h.StdDev(): 2886.8956799071675 != %v\n", stdDev)
+	}
+	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 5000.5 != ps[0] {
+		t.Errorf("median: 5000.5 != %v\n", ps[0])
+	}
+	if 7500.75 != ps[1] {
+		t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
+	}
+	if 9900.99 != ps[2] {
+		t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
+	}
+}

+ 33 - 32
json.go

@@ -1,8 +1,6 @@
 package metrics
 
-import (
-	"encoding/json"
-)
+import "encoding/json"
 
 // MarshalJSON returns a byte slice containing a JSON representation of all
 // the metrics in the Registry.
@@ -10,48 +8,51 @@ func (r StandardRegistry) MarshalJSON() ([]byte, error) {
 	data := make(map[string]map[string]interface{})
 	r.Each(func(name string, i interface{}) {
 		values := make(map[string]interface{})
-		switch m := i.(type) {
+		switch metric := i.(type) {
 		case Counter:
-			values["count"] = m.Count()
+			values["count"] = metric.Count()
 		case Gauge:
-			values["value"] = m.Value()
+			values["value"] = metric.Value()
 		case Healthcheck:
-			m.Check()
-			values["error"] = m.Error()
+			metric.Check()
+			values["error"] = metric.Error().Error()
 		case Histogram:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			values["count"] = m.Count()
-			values["min"] = m.Min()
-			values["max"] = m.Max()
-			values["mean"] = m.Mean()
-			values["stddev"] = m.StdDev()
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
 			values["median"] = ps[0]
-			values["75%%"] = ps[1]
-			values["95%%"] = ps[2]
-			values["99%%"] = ps[3]
-			values["99.9%%"] = ps[4]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
 		case Meter:
+			m := metric.Snapshot()
 			values["count"] = m.Count()
 			values["1m.rate"] = m.Rate1()
 			values["5m.rate"] = m.Rate5()
 			values["15m.rate"] = m.Rate15()
 			values["mean.rate"] = m.RateMean()
 		case Timer:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			values["count"] = m.Count()
-			values["min"] = m.Min()
-			values["max"] = m.Max()
-			values["mean"] = m.Mean()
-			values["stddev"] = m.StdDev()
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
 			values["median"] = ps[0]
-			values["75%%"] = ps[1]
-			values["95%%"] = ps[2]
-			values["99%%"] = ps[3]
-			values["99.9%%"] = ps[4]
-			values["1m.rate"] = m.Rate1()
-			values["5m.rate"] = m.Rate5()
-			values["15m.rate"] = m.Rate15()
-			values["mean.rate"] = m.RateMean()
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
 		}
 		data[name] = values
 	})
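A usage sketch of the JSON output (assumes NewRegistry returns a StandardRegistry underneath, so encoding/json picks up MarshalJSON; UseNilMetrics is assumed false):

package metrics

import (
	"encoding/json"
	"fmt"
)

// marshalExample serializes every registered metric, e.g.
// {"requests":{"count":1}} for the single counter below.
func marshalExample() {
	r := NewRegistry()
	GetOrRegisterCounter("requests", r).Inc(1)
	b, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}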

+ 16 - 17
librato/librato.go

@@ -54,21 +54,19 @@ func (self *Reporter) Run() {
 
 // calculate sum of squares from data provided by metrics.Histogram
 // see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
-func sumSquares(m metrics.Histogram) float64 {
-	count := float64(m.Count())
-	sum := m.Mean() * float64(m.Count())
-	sumSquared := math.Pow(float64(sum), 2)
-	sumSquares := math.Pow(count*m.StdDev(), 2) + sumSquared/float64(m.Count())
+func sumSquares(s metrics.Sample) float64 {
+	count := float64(s.Count())
+	sumSquared := math.Pow(count*s.Mean(), 2)
+	sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
 	if math.IsNaN(sumSquares) {
 		return 0.0
 	}
-	return sumSquared
+	return sumSquares
 }
-func sumSquaresTimer(m metrics.Timer) float64 {
-	count := float64(m.Count())
-	sum := m.Mean() * float64(m.Count())
-	sumSquared := math.Pow(float64(sum), 2)
-	sumSquares := math.Pow(count*m.StdDev(), 2) + sumSquared/float64(m.Count())
+func sumSquaresTimer(t metrics.Timer) float64 {
+	count := float64(t.Count())
+	sumSquared := math.Pow(count*t.Mean(), 2)
+	sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
 	if math.IsNaN(sumSquares) {
 		return 0.0
 	}
@@ -104,17 +102,18 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
 		case metrics.Histogram:
 			if m.Count() > 0 {
 				gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
+				s := m.Sample()
 				measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
-				measurement[Count] = uint64(m.Count())
-				measurement[Sum] = m.Mean() * float64(m.Count())
-				measurement[Max] = float64(m.Max())
-				measurement[Min] = float64(m.Min())
-				measurement[SumSquares] = sumSquares(m)
+				measurement[Count] = uint64(s.Count())
+				measurement[Sum] = s.Sum()
+				measurement[Max] = float64(s.Max())
+				measurement[Min] = float64(s.Min())
+				measurement[SumSquares] = sumSquares(s)
 				gauges[0] = measurement
 				for i, p := range self.Percentiles {
 					gauges[i+1] = Measurement{
 						Name:   fmt.Sprintf("%s.%.2f", measurement[Name], p),
-						Value:  m.Percentile(p),
+						Value:  s.Percentile(p),
 						Period: measurement[Period],
 					}
 				}
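For the sum-of-squares calculation above, a sketch of how the same quantity can be derived from the new Sample API via the population-variance identity sum(x^2) = n*Var(x) + (sum(x))^2/n (illustrative only, not the reporter's exact formula):

package metrics

// sumSquaresFromValues computes the sum of squared sample values from the
// helpers added in sample.go, using sum(x^2) = n*Var(x) + (sum(x))^2 / n,
// where Var is the population variance that SampleVariance returns.
func sumSquaresFromValues(s Sample) float64 {
	values := s.Values()
	n := float64(len(values))
	if n == 0 {
		return 0.0
	}
	sum := float64(SampleSum(values))
	return n*SampleVariance(values) + sum*sum/n
}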

+ 24 - 21
log.go

@@ -10,31 +10,33 @@ import (
 func Log(r Registry, d time.Duration, l *log.Logger) {
 	for {
 		r.Each(func(name string, i interface{}) {
-			switch m := i.(type) {
+			switch metric := i.(type) {
 			case Counter:
 				l.Printf("counter %s\n", name)
-				l.Printf("  count:       %9d\n", m.Count())
+				l.Printf("  count:       %9d\n", metric.Count())
 			case Gauge:
 				l.Printf("gauge %s\n", name)
-				l.Printf("  value:       %9d\n", m.Value())
+				l.Printf("  value:       %9d\n", metric.Value())
 			case Healthcheck:
-				m.Check()
+				metric.Check()
 				l.Printf("healthcheck %s\n", name)
-				l.Printf("  error:       %v\n", m.Error())
+				l.Printf("  error:       %v\n", metric.Error())
 			case Histogram:
-				ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
 				l.Printf("histogram %s\n", name)
-				l.Printf("  count:       %9d\n", m.Count())
-				l.Printf("  min:         %9d\n", m.Min())
-				l.Printf("  max:         %9d\n", m.Max())
-				l.Printf("  mean:        %12.2f\n", m.Mean())
-				l.Printf("  stddev:      %12.2f\n", m.StdDev())
+				l.Printf("  count:       %9d\n", h.Count())
+				l.Printf("  min:         %9d\n", h.Min())
+				l.Printf("  max:         %9d\n", h.Max())
+				l.Printf("  mean:        %12.2f\n", h.Mean())
+				l.Printf("  stddev:      %12.2f\n", h.StdDev())
 				l.Printf("  median:      %12.2f\n", ps[0])
 				l.Printf("  75%%:         %12.2f\n", ps[1])
 				l.Printf("  95%%:         %12.2f\n", ps[2])
 				l.Printf("  99%%:         %12.2f\n", ps[3])
 				l.Printf("  99.9%%:       %12.2f\n", ps[4])
 			case Meter:
+				m := metric.Snapshot()
 				l.Printf("meter %s\n", name)
 				l.Printf("  count:       %9d\n", m.Count())
 				l.Printf("  1-min rate:  %12.2f\n", m.Rate1())
@@ -42,22 +44,23 @@ func Log(r Registry, d time.Duration, l *log.Logger) {
 				l.Printf("  15-min rate: %12.2f\n", m.Rate15())
 				l.Printf("  mean rate:   %12.2f\n", m.RateMean())
 			case Timer:
-				ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
 				l.Printf("timer %s\n", name)
-				l.Printf("  count:       %9d\n", m.Count())
-				l.Printf("  min:         %9d\n", m.Min())
-				l.Printf("  max:         %9d\n", m.Max())
-				l.Printf("  mean:        %12.2f\n", m.Mean())
-				l.Printf("  stddev:      %12.2f\n", m.StdDev())
+				l.Printf("  count:       %9d\n", t.Count())
+				l.Printf("  min:         %9d\n", t.Min())
+				l.Printf("  max:         %9d\n", t.Max())
+				l.Printf("  mean:        %12.2f\n", t.Mean())
+				l.Printf("  stddev:      %12.2f\n", t.StdDev())
 				l.Printf("  median:      %12.2f\n", ps[0])
 				l.Printf("  75%%:         %12.2f\n", ps[1])
 				l.Printf("  95%%:         %12.2f\n", ps[2])
 				l.Printf("  99%%:         %12.2f\n", ps[3])
 				l.Printf("  99.9%%:       %12.2f\n", ps[4])
-				l.Printf("  1-min rate:  %12.2f\n", m.Rate1())
-				l.Printf("  5-min rate:  %12.2f\n", m.Rate5())
-				l.Printf("  15-min rate: %12.2f\n", m.Rate15())
-				l.Printf("  mean rate:   %12.2f\n", m.RateMean())
+				l.Printf("  1-min rate:  %12.2f\n", t.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", t.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", t.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", t.RateMean())
 			}
 		})
 		time.Sleep(d)
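Starting the logger is a one-liner; Log loops forever, so it is normally run in its own goroutine (a sketch, not part of the commit):

package metrics

import (
	"log"
	"os"
	"time"
)

// startLogging prints every metric in the registry to stderr once a minute.
func startLogging(r Registry) {
	go Log(r, 60*time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
}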

+ 85 - 52
meter.go

@@ -4,9 +4,6 @@ import "time"
 
 // Meters count events to produce exponentially-weighted moving average rates
 // at one-, five-, and fifteen-minutes and a mean rate.
-//
-// This is an interface so as to encourage other structs to implement
-// the Meter API as appropriate.
 type Meter interface {
 	Count() int64
 	Mark(int64)
@@ -14,9 +11,11 @@ type Meter interface {
 	Rate5() float64
 	Rate15() float64
 	RateMean() float64
+	Snapshot() Meter
 }
 
-// Get an existing or create and register a new Meter.
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
 func GetOrRegisterMeter(name string, r Registry) Meter {
 	if nil == r {
 		r = DefaultRegistry
@@ -24,22 +23,22 @@ func GetOrRegisterMeter(name string, r Registry) Meter {
 	return r.GetOrRegister(name, NewMeter()).(Meter)
 }
 
-// Create a new Meter.  Create the communication channels and start the
-// synchronizing goroutine.
+// NewMeter constructs a new StandardMeter and launches a goroutine.
 func NewMeter() Meter {
 	if UseNilMetrics {
 		return NilMeter{}
 	}
 	m := &StandardMeter{
 		make(chan int64),
-		make(chan meterV),
+		make(chan *MeterSnapshot),
 		time.NewTicker(5e9),
 	}
 	go m.arbiter()
 	return m
 }
 
-// Create and register a new Meter.
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
 func NewRegisteredMeter(name string, r Registry) Meter {
 	c := NewMeter()
 	if nil == r {
@@ -49,71 +48,112 @@ func NewRegisteredMeter(name string, r Registry) Meter {
 	return c
 }
 
-// No-op Meter.
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// NilMeter is a no-op Meter.
 type NilMeter struct{}
 
-// No-op.
-func (m NilMeter) Count() int64 { return 0 }
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
 
-// No-op.
-func (m NilMeter) Mark(n int64) {}
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
 
-// No-op.
-func (m NilMeter) Rate1() float64 { return 0.0 }
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
 
-// No-op.
-func (m NilMeter) Rate5() float64 { return 0.0 }
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
 
-// No-op.
-func (m NilMeter) Rate15() float64 { return 0.0 }
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
 
-// No-op.
-func (m NilMeter) RateMean() float64 { return 0.0 }
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
 
-// The standard implementation of a Meter uses a goroutine to synchronize
-// its calculations and another goroutine (via time.Ticker) to produce
-// clock ticks.
+// StandardMeter is the standard implementation of a Meter and uses a
+// goroutine to synchronize its calculations and a time.Ticker to pass time.
 type StandardMeter struct {
 	in     chan int64
-	out    chan meterV
+	out    chan *MeterSnapshot
 	ticker *time.Ticker
 }
 
-// Return the count of events seen.
+// Count returns the number of events recorded.
 func (m *StandardMeter) Count() int64 {
 	return (<-m.out).count
 }
 
-// Mark the occurance of n events.
+// Mark records the occurrence of n events.
 func (m *StandardMeter) Mark(n int64) {
 	m.in <- n
 }
 
-// Return the meter's one-minute moving average rate of events.
+// Rate1 returns the one-minute moving average rate of events per second.
 func (m *StandardMeter) Rate1() float64 {
 	return (<-m.out).rate1
 }
 
-// Return the meter's five-minute moving average rate of events.
+// Rate5 returns the five-minute moving average rate of events per second.
 func (m *StandardMeter) Rate5() float64 {
 	return (<-m.out).rate5
 }
 
-// Return the meter's fifteen-minute moving average rate of events.
+// Rate15 returns the fifteen-minute moving average rate of events per second.
 func (m *StandardMeter) Rate15() float64 {
 	return (<-m.out).rate15
 }
 
-// Return the meter's mean rate of events.
+// RateMean returns the meter's mean rate of events per second.
 func (m *StandardMeter) RateMean() float64 {
 	return (<-m.out).rateMean
 }
 
-// Receive inputs and send outputs.  Count each input and update the various
-// moving averages and the mean rate of events.  Send a copy of the meterV
-// as output.
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	snapshot := *<-m.out
+	return &snapshot
+}
+
+// arbiter receives inputs and sends outputs.  It counts each input and updates
+// the various moving averages and the mean rate of events.  It sends the
+// current MeterSnapshot as output.
 func (m *StandardMeter) arbiter() {
-	var mv meterV
+	snapshot := &MeterSnapshot{}
 	a1 := NewEWMA1()
 	a5 := NewEWMA5()
 	a15 := NewEWMA15()
@@ -121,30 +161,23 @@ func (m *StandardMeter) arbiter() {
 	for {
 		select {
 		case n := <-m.in:
-			mv.count += n
+			snapshot.count += n
 			a1.Update(n)
 			a5.Update(n)
 			a15.Update(n)
-			mv.rate1 = a1.Rate()
-			mv.rate5 = a5.Rate()
-			mv.rate15 = a15.Rate()
-			mv.rateMean = float64(1e9*mv.count) / float64(time.Since(t))
-		case m.out <- mv:
+			snapshot.rate1 = a1.Rate()
+			snapshot.rate5 = a5.Rate()
+			snapshot.rate15 = a15.Rate()
+			snapshot.rateMean = float64(1e9*snapshot.count) / float64(time.Since(t))
+		case m.out <- snapshot:
 		case <-m.ticker.C:
 			a1.Tick()
 			a5.Tick()
 			a15.Tick()
-			mv.rate1 = a1.Rate()
-			mv.rate5 = a5.Rate()
-			mv.rate15 = a15.Rate()
-			mv.rateMean = float64(1e9*mv.count) / float64(time.Since(t))
+			snapshot.rate1 = a1.Rate()
+			snapshot.rate5 = a5.Rate()
+			snapshot.rate15 = a15.Rate()
+			snapshot.rateMean = float64(1e9*snapshot.count) / float64(time.Since(t))
 		}
 	}
 }
-
-// A meterV contains all the values that would need to be passed back
-// from the synchronizing goroutine.
-type meterV struct {
-	count                          int64
-	rate1, rate5, rate15, rateMean float64
-}
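A minimal sketch of the meter's new snapshot (not part of the commit; assumes UseNilMetrics is false):

package metrics

import "fmt"

// meterSnapshotExample marks events on the live meter, then reads everything
// from one *MeterSnapshot so no further channel round-trips are needed.
func meterSnapshotExample() {
	m := NewMeter()
	m.Mark(100)
	snapshot := m.Snapshot()
	fmt.Println(snapshot.Count(), snapshot.RateMean()) // 100 and the mean rate so far
}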

+ 9 - 1
meter_test.go

@@ -24,7 +24,7 @@ func TestGetOrRegisterMeter(t *testing.T) {
 func TestMeterDecay(t *testing.T) {
 	m := &StandardMeter{
 		make(chan int64),
-		make(chan meterV),
+		make(chan *MeterSnapshot),
 		time.NewTicker(1),
 	}
 	go m.arbiter()
@@ -44,6 +44,14 @@ func TestMeterNonzero(t *testing.T) {
 	}
 }
 
+func TestMeterSnapshot(t *testing.T) {
+	m := NewMeter()
+	m.Mark(1)
+	if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
+		t.Fatal(snapshot)
+	}
+}
+
 func TestMeterZero(t *testing.T) {
 	m := NewMeter()
 	if count := m.Count(); 0 != count {

+ 32 - 32
metrics_test.go

@@ -28,25 +28,25 @@ func BenchmarkMetrics(b *testing.B) {
 	ch := make(chan bool)
 
 	wgD := &sync.WaitGroup{}
-/*
-	wgD.Add(1)
-	go func() {
-		defer wgD.Done()
-		//log.Println("go CaptureDebugGCStats")
-		for {
-			select {
-			case <-ch:
-				//log.Println("done CaptureDebugGCStats")
-				return
-			default:
-				CaptureDebugGCStatsOnce(r)
+	/*
+		wgD.Add(1)
+		go func() {
+			defer wgD.Done()
+			//log.Println("go CaptureDebugGCStats")
+			for {
+				select {
+				case <-ch:
+					//log.Println("done CaptureDebugGCStats")
+					return
+				default:
+					CaptureDebugGCStatsOnce(r)
+				}
 			}
-		}
-	}()
-//*/
+		}()
+	//*/
 
 	wgR := &sync.WaitGroup{}
-//*
+	//*
 	wgR.Add(1)
 	go func() {
 		defer wgR.Done()
@@ -61,25 +61,25 @@ func BenchmarkMetrics(b *testing.B) {
 			}
 		}
 	}()
-//*/
+	//*/
 
 	wgW := &sync.WaitGroup{}
-/*
-	wgW.Add(1)
-	go func() {
-		defer wgW.Done()
-		//log.Println("go Write")
-		for {
-			select {
-			case <-ch:
-				//log.Println("done Write")
-				return
-			default:
-				WriteOnce(r, ioutil.Discard)
+	/*
+		wgW.Add(1)
+		go func() {
+			defer wgW.Done()
+			//log.Println("go Write")
+			for {
+				select {
+				case <-ch:
+					//log.Println("done Write")
+					return
+				default:
+					WriteOnce(r, ioutil.Discard)
+				}
 			}
-		}
-	}()
-//*/
+		}()
+	//*/
 
 	wg := &sync.WaitGroup{}
 	wg.Add(FANOUT)

+ 393 - 39
sample.go

@@ -4,39 +4,49 @@ import (
 	"container/heap"
 	"math"
 	"math/rand"
+	"sort"
 	"sync"
+	"sync/atomic"
 	"time"
 )
 
-const rescaleThreshold = 1e9 * 60 * 60
+const rescaleThreshold = time.Hour
 
 // Samples maintain a statistically-significant selection of values from
 // a stream.
-//
-// This is an interface so as to encourage other structs to implement
-// the Sample API as appropriate.
 type Sample interface {
 	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
 	Size() int
+	Snapshot() Sample
+	StdDev() float64
+	Sum() int64
 	Update(int64)
 	Values() []int64
+	Variance() float64
 }
 
-// An exponentially-decaying sample using a forward-decaying priority
-// reservoir.  See Cormode et al's "Forward Decay: A Practical Time Decay
-// Model for Streaming Systems".
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
 //
 // <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
 type ExpDecaySample struct {
 	alpha         float64
+	count         int64
 	mutex         sync.Mutex
 	reservoirSize int
 	t0, t1        time.Time
 	values        expDecaySampleHeap
 }
 
-// Create a new exponentially-decaying sample with the given reservoir size
-// and alpha.
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
 func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
 	if UseNilMetrics {
 		return NilSample{}
@@ -51,30 +61,111 @@ func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
 	return s
 }
 
-// Clear all samples.
+// Clear clears all samples.
 func (s *ExpDecaySample) Clear() {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
-	s.values = make(expDecaySampleHeap, 0, s.reservoirSize)
+	s.count = 0
 	s.t0 = time.Now()
 	s.t1 = s.t0.Add(rescaleThreshold)
+	s.values = make(expDecaySampleHeap, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+	return atomic.LoadInt64(&s.count)
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+	return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+	return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+	return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+	return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.Values(), ps)
 }
 
-// Return the size of the sample, which is at most the reservoir size.
+// Size returns the size of the sample, which is at most the reservoir size.
 func (s *ExpDecaySample) Size() int {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 	return len(s.values)
 }
 
-// Update the sample with a new value.
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	for i, v := range s.values {
+		values[i] = v.v
+	}
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+	return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+	return SampleSum(s.Values())
+}
+
+// Update samples a new value.
 func (s *ExpDecaySample) Update(v int64) {
+	s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	for i, v := range s.values {
+		values[i] = v.v
+	}
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+	return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp.  This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
 	if len(s.values) == s.reservoirSize {
 		heap.Pop(&s.values)
 	}
-	t := time.Now()
 	heap.Push(&s.values, expDecaySample{
 		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
 		v: v,
@@ -92,42 +183,220 @@ func (s *ExpDecaySample) Update(v int64) {
 	}
 }
 
-// Return all the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	values := make([]int64, len(s.values))
-	for i, v := range s.values {
-		values[i] = v.v
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
 	}
-	return values
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
 }
 
-// No-op Sample.
-type NilSample struct{}
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
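
As a worked example of the interpolation above: for the sorted values 10, 20, 30, 40 and p = 0.5, pos = 0.5 * (4 + 1) = 2.5, so the result is values[1] + 0.5 * (values[2] - values[1]) = 25. The sketch below re-implements just that step outside the package (the function name is illustrative):

package main

import "fmt"

// percentile re-implements the interpolation used by SamplePercentiles,
// assuming values are already sorted.
func percentile(values []float64, p float64) float64 {
	size := len(values)
	if size == 0 {
		return 0
	}
	pos := p * float64(size+1)
	switch {
	case pos < 1.0:
		return values[0]
	case pos >= float64(size):
		return values[size-1]
	default:
		lower := values[int(pos)-1]
		upper := values[int(pos)]
		return lower + (pos-float64(int(pos)))*(upper-lower)
	}
}

func main() {
	fmt.Println(percentile([]float64{10, 20, 30, 40}, 0.5)) // 25
}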
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
 
-// No-op.
-func (s NilSample) Clear() {}
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
 
-// No-op.
-func (s NilSample) Size() int { return 0 }
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
 
-// No-op.
-func (s NilSample) Update(v int64) {}
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
 
-// No-op.
-func (s NilSample) Values() []int64 { return []int64{} }
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+	panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+	return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+	var sum int64
+	for _, v := range values {
+		sum += v
+	}
+	return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	m := SampleMean(values)
+	var sum float64
+	for _, v := range values {
+		d := float64(v) - m
+		sum += d * d
+	}
+	return sum / float64(len(values))
+}
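
Note that SampleVariance is the population variance, sum((v - mean)^2) / n, rather than the (n - 1) sample variance, and SampleStdDev is its square root. A small usage sketch, assuming the package's usual github.com/rcrowley/go-metrics import path:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	values := []int64{2, 4, 4, 4, 5, 5, 7, 9}
	fmt.Println(metrics.SampleMean(values))     // 5
	fmt.Println(metrics.SampleVariance(values)) // 4
	fmt.Println(metrics.SampleStdDev(values))   // 2
}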
 
 // A uniform sample using Vitter's Algorithm R.
 //
 // <http://www.cs.umd.edu/~samir/498/vitter.pdf>
 type UniformSample struct {
+	count         int64
 	mutex         sync.Mutex
 	reservoirSize int
 	values        []int64
 }
 
-// Create a new uniform sample with the given reservoir size.
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
 func NewUniformSample(reservoirSize int) Sample {
 	if UseNilMetrics {
 		return NilSample{}
@@ -135,24 +404,96 @@ func NewUniformSample(reservoirSize int) Sample {
 	return &UniformSample{reservoirSize: reservoirSize}
 }
 
-// Clear all samples.
+// Clear clears all samples.
 func (s *UniformSample) Clear() {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
+	s.count = 0
 	s.values = make([]int64, 0, s.reservoirSize)
 }
 
-// Return the size of the sample, which is at most the reservoir size.
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+	return atomic.LoadInt64(&s.count)
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
 func (s *UniformSample) Size() int {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 	return len(s.values)
 }
 
-// Update the sample with a new value.
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleSum(s.values)
+}
+
+// Update samples a new value.
 func (s *UniformSample) Update(v int64) {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
+	s.count++
 	if len(s.values) < s.reservoirSize {
 		s.values = append(s.values, v)
 	} else {
@@ -160,7 +501,7 @@ func (s *UniformSample) Update(v int64) {
 	}
 }
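
For context, classic Algorithm R from the Vitter paper cited above keeps the first reservoirSize values and then, for the i-th value overall, overwrites a uniformly chosen slot with probability reservoirSize / i; the replacement branch itself is elided by the hunk above. The sketch below shows the textbook selection step, not necessarily the exact policy in this file:

package main

import (
	"fmt"
	"math/rand"
)

// reservoirR keeps a uniform sample of size k from a stream,
// following Vitter's Algorithm R.
func reservoirR(stream []int64, k int) []int64 {
	res := make([]int64, 0, k)
	for i, v := range stream {
		if i < k {
			res = append(res, v)
		} else if j := rand.Intn(i + 1); j < k {
			res[j] = v // keep v with probability k/(i+1)
		}
	}
	return res
}

func main() {
	stream := make([]int64, 1000)
	for i := range stream {
		stream[i] = int64(i)
	}
	fmt.Println(reservoirR(stream, 10))
}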
 
-// Return all the values in the sample.
+// Values returns a copy of the values in the sample.
 func (s *UniformSample) Values() []int64 {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
@@ -169,13 +510,20 @@ func (s *UniformSample) Values() []int64 {
 	return values
 }
 
-// An individual sample.
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
 type expDecaySample struct {
 	k float64
 	v int64
 }
 
-// A min-heap of samples.
+// expDecaySampleHeap is a min-heap of expDecaySamples.
 type expDecaySampleHeap []expDecaySample
 
 func (q expDecaySampleHeap) Len() int {
@@ -206,3 +554,9 @@ func (q *expDecaySampleHeap) Push(x interface{}) {
 func (q expDecaySampleHeap) Swap(i, j int) {
 	q[i], q[j] = q[j], q[i]
 }
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
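
The Snapshot machinery added throughout sample.go exists so that reporters can read a consistent, immutable copy without holding the sample's lock while computing statistics. A minimal usage sketch, again assuming the usual import path for this package:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	s := metrics.NewUniformSample(1028)
	for i := 0; i < 10000; i++ {
		s.Update(int64(i))
	}
	snap := s.Snapshot() // read-only copy; calling Update on it would panic
	fmt.Println(snap.Count(), snap.Size(), snap.Mean(), snap.Percentile(0.99))
}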

+ 163 - 4
sample_test.go

@@ -1,11 +1,59 @@
 package metrics
 
 import (
+	"math/rand"
 	"runtime"
 	"testing"
 	"time"
 )
 
+// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
+// expensive computations like Variance, the cost of copying the Sample, as
+// approximated by a make and copy, is much greater than the cost of the
+// computation for small samples and only slightly less for large samples.
+func BenchmarkCompute1000(b *testing.B) {
+	s := make([]int64, 1000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		SampleVariance(s)
+	}
+}
+func BenchmarkCompute1000000(b *testing.B) {
+	s := make([]int64, 1000000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		SampleVariance(s)
+	}
+}
+func BenchmarkCopy1000(b *testing.B) {
+	s := make([]int64, 1000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sCopy := make([]int64, len(s))
+		copy(sCopy, s)
+	}
+}
+func BenchmarkCopy1000000(b *testing.B) {
+	s := make([]int64, 1000000)
+	for i := 0; i < len(s); i++ {
+		s[i] = int64(i)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sCopy := make([]int64, len(s))
+		copy(sCopy, s)
+	}
+}
+
 func BenchmarkExpDecaySample257(b *testing.B) {
 	benchmarkSample(b, NewExpDecaySample(257, 0.015))
 }
@@ -31,10 +79,14 @@ func BenchmarkUniformSample1028(b *testing.B) {
 }
 
 func TestExpDecaySample10(t *testing.T) {
+	rand.Seed(1)
 	s := NewExpDecaySample(100, 0.99)
 	for i := 0; i < 10; i++ {
 		s.Update(int64(i))
 	}
+	if size := s.Count(); 10 != size {
+		t.Errorf("s.Count(): 10 != %v\n", size)
+	}
 	if size := s.Size(); 10 != size {
 		t.Errorf("s.Size(): 10 != %v\n", size)
 	}
@@ -49,10 +101,14 @@ func TestExpDecaySample10(t *testing.T) {
 }
 
 func TestExpDecaySample100(t *testing.T) {
+	rand.Seed(1)
 	s := NewExpDecaySample(1000, 0.01)
 	for i := 0; i < 100; i++ {
 		s.Update(int64(i))
 	}
+	if size := s.Count(); 100 != size {
+		t.Errorf("s.Count(): 100 != %v\n", size)
+	}
 	if size := s.Size(); 100 != size {
 		t.Errorf("s.Size(): 100 != %v\n", size)
 	}
@@ -67,10 +123,14 @@ func TestExpDecaySample100(t *testing.T) {
 }
 
 func TestExpDecaySample1000(t *testing.T) {
+	rand.Seed(1)
 	s := NewExpDecaySample(100, 0.99)
 	for i := 0; i < 1000; i++ {
 		s.Update(int64(i))
 	}
+	if size := s.Count(); 1000 != size {
+		t.Errorf("s.Count(): 1000 != %v\n", size)
+	}
 	if size := s.Size(); 100 != size {
 		t.Errorf("s.Size(): 100 != %v\n", size)
 	}
@@ -89,6 +149,7 @@ func TestExpDecaySample1000(t *testing.T) {
 // The priority becomes +Inf quickly after starting if this is done,
 // effectively freezing the set of samples until a rescale step happens.
 func TestExpDecaySampleNanosecondRegression(t *testing.T) {
+	rand.Seed(1)
 	s := NewExpDecaySample(100, 0.99)
 	for i := 0; i < 100; i++ {
 		s.Update(10)
@@ -108,11 +169,37 @@ func TestExpDecaySampleNanosecondRegression(t *testing.T) {
 	}
 }
 
+func TestExpDecaySampleSnapshot(t *testing.T) {
+	now := time.Now()
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 1; i <= 10000; i++ {
+		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+	}
+	snapshot := s.Snapshot()
+	s.Update(1)
+	testExpDecaySampleStatistics(t, snapshot)
+}
+
+func TestExpDecaySampleStatistics(t *testing.T) {
+	now := time.Now()
+	rand.Seed(1)
+	s := NewExpDecaySample(100, 0.99)
+	for i := 1; i <= 10000; i++ {
+		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+	}
+	testExpDecaySampleStatistics(t, s)
+}
+
 func TestUniformSample(t *testing.T) {
+	rand.Seed(1)
 	s := NewUniformSample(100)
 	for i := 0; i < 1000; i++ {
 		s.Update(int64(i))
 	}
+	if size := s.Count(); 1000 != size {
+		t.Errorf("s.Count(): 1000 != %v\n", size)
+	}
 	if size := s.Size(); 100 != size {
 		t.Errorf("s.Size(): 100 != %v\n", size)
 	}
@@ -127,26 +214,42 @@ func TestUniformSample(t *testing.T) {
 }
 
 func TestUniformSampleIncludesTail(t *testing.T) {
+	rand.Seed(1)
 	s := NewUniformSample(100)
 	max := 100
-
 	for i := 0; i < max; i++ {
 		s.Update(int64(i))
 	}
-
 	v := s.Values()
 	sum := 0
 	exp := (max - 1) * max / 2
-
 	for i := 0; i < len(v); i++ {
 		sum += int(v[i])
 	}
-
 	if exp != sum {
 		t.Errorf("sum: %v != %v\n", exp, sum)
 	}
 }
 
+func TestUniformSampleSnapshot(t *testing.T) {
+	s := NewUniformSample(100)
+	for i := 1; i <= 10000; i++ {
+		s.Update(int64(i))
+	}
+	snapshot := s.Snapshot()
+	s.Update(1)
+	testUniformSampleStatistics(t, snapshot)
+}
+
+func TestUniformSampleStatistics(t *testing.T) {
+	rand.Seed(1)
+	s := NewUniformSample(100)
+	for i := 1; i <= 10000; i++ {
+		s.Update(int64(i))
+	}
+	testUniformSampleStatistics(t, s)
+}
+
 func benchmarkSample(b *testing.B, s Sample) {
 	var memStats runtime.MemStats
 	runtime.ReadMemStats(&memStats)
@@ -160,3 +263,59 @@ func benchmarkSample(b *testing.B, s Sample) {
 	runtime.ReadMemStats(&memStats)
 	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
 }
+
+func testExpDecaySampleStatistics(t *testing.T, s Sample) {
+	if count := s.Count(); 10000 != count {
+		t.Errorf("s.Count(): 10000 != %v\n", count)
+	}
+	if min := s.Min(); 107 != min {
+		t.Errorf("s.Min(): 107 != %v\n", min)
+	}
+	if max := s.Max(); 10000 != max {
+		t.Errorf("s.Max(): 10000 != %v\n", max)
+	}
+	if mean := s.Mean(); 4965.98 != mean {
+		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
+	}
+	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
+		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
+	}
+	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 4615 != ps[0] {
+		t.Errorf("median: 4615 != %v\n", ps[0])
+	}
+	if 7672 != ps[1] {
+		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
+	}
+	if 9998.99 != ps[2] {
+		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
+	}
+}
+
+func testUniformSampleStatistics(t *testing.T, s Sample) {
+	if count := s.Count(); 10000 != count {
+		t.Errorf("s.Count(): 10000 != %v\n", count)
+	}
+	if min := s.Min(); 9412 != min {
+		t.Errorf("s.Min(): 9412 != %v\n", min)
+	}
+	if max := s.Max(); 10000 != max {
+		t.Errorf("s.Max(): 10000 != %v\n", max)
+	}
+	if mean := s.Mean(); 9902.26 != mean {
+		t.Errorf("s.Mean(): 9902.26 != %v\n", mean)
+	}
+	if stdDev := s.StdDev(); 101.8667384380201 != stdDev {
+		t.Errorf("s.StdDev(): 101.8667384380201 != %v\n", stdDev)
+	}
+	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+	if 9930.5 != ps[0] {
+		t.Errorf("median: 9930.5 != %v\n", ps[0])
+	}
+	if 9973.75 != ps[1] {
+		t.Errorf("75th percentile: 9973.75 != %v\n", ps[1])
+	}
+	if 9999.99 != ps[2] {
+		t.Errorf("99th percentile: 9999.99 != %v\n", ps[2])
+	}
+}
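
The copy-versus-compute benchmarks at the top of this file are what justify the snapshot design: copying a small reservoir costs less than the statistics later computed on it, so reporters can snapshot quickly and do the math outside the lock. One way to reproduce the comparison outside the test harness, sketched with an assumed import path:

package main

import (
	"fmt"
	"testing"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	s := make([]int64, 1000)
	copyResult := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			c := make([]int64, len(s))
			copy(c, s)
		}
	})
	varianceResult := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			metrics.SampleVariance(s)
		}
	})
	fmt.Println("copy:    ", copyResult)
	fmt.Println("variance:", varianceResult)
}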

+ 22 - 19
stathat/stathat.go

@@ -19,45 +19,48 @@ func Stathat(r metrics.Registry, d time.Duration, userkey string) {
 
 func sh(r metrics.Registry, userkey string) error {
 	r.Each(func(name string, i interface{}) {
-		switch m := i.(type) {
+		switch metric := i.(type) {
 		case metrics.Counter:
-			stathat.PostEZCount(name, userkey, int(m.Count()))
+			stathat.PostEZCount(name, userkey, int(metric.Count()))
 		case metrics.Gauge:
-			stathat.PostEZValue(name, userkey, float64(m.Value()))
+			stathat.PostEZValue(name, userkey, float64(metric.Value()))
 		case metrics.Histogram:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			stathat.PostEZCount(name+".count", userkey, int(m.Count()))
-			stathat.PostEZValue(name+".min", userkey, float64(m.Min()))
-			stathat.PostEZValue(name+".max", userkey, float64(m.Max()))
-			stathat.PostEZValue(name+".mean", userkey, float64(m.Mean()))
-			stathat.PostEZValue(name+".std-dev", userkey, float64(m.StdDev()))
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			stathat.PostEZCount(name+".count", userkey, int(h.Count()))
+			stathat.PostEZValue(name+".min", userkey, float64(h.Min()))
+			stathat.PostEZValue(name+".max", userkey, float64(h.Max()))
+			stathat.PostEZValue(name+".mean", userkey, float64(h.Mean()))
+			stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev()))
 			stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
 			stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
 			stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
 			stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
 			stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
 		case metrics.Meter:
+			m := metric.Snapshot()
 			stathat.PostEZCount(name+".count", userkey, int(m.Count()))
 			stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
 			stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
 			stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
 			stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean()))
 		case metrics.Timer:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			stathat.PostEZCount(name+".count", userkey, int(m.Count()))
-			stathat.PostEZValue(name+".min", userkey, float64(m.Min()))
-			stathat.PostEZValue(name+".max", userkey, float64(m.Max()))
-			stathat.PostEZValue(name+".mean", userkey, float64(m.Mean()))
-			stathat.PostEZValue(name+".std-dev", userkey, float64(m.StdDev()))
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			stathat.PostEZCount(name+".count", userkey, int(t.Count()))
+			stathat.PostEZValue(name+".min", userkey, float64(t.Min()))
+			stathat.PostEZValue(name+".max", userkey, float64(t.Max()))
+			stathat.PostEZValue(name+".mean", userkey, float64(t.Mean()))
+			stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev()))
 			stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
 			stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
 			stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
 			stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
 			stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
-			stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
-			stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
-			stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
-			stathat.PostEZValue(name+".mean-rate", userkey, float64(m.RateMean()))
+			stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1()))
+			stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5()))
+			stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15()))
+			stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean()))
 		}
 	})
 	return nil

+ 24 - 21
syslog.go

@@ -13,24 +13,25 @@ import (
 func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
 	for {
 		r.Each(func(name string, i interface{}) {
-			switch m := i.(type) {
+			switch metric := i.(type) {
 			case Counter:
-				w.Info(fmt.Sprintf("counter %s: count: %d", name, m.Count()))
+				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
 			case Gauge:
-				w.Info(fmt.Sprintf("gauge %s: value: %d", name, m.Value()))
+				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
 			case Healthcheck:
-				m.Check()
-				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, m.Error()))
+				metric.Check()
+				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
 			case Histogram:
-				ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
 				w.Info(fmt.Sprintf(
 					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
 					name,
-					m.Count(),
-					m.Min(),
-					m.Max(),
-					m.Mean(),
-					m.StdDev(),
+					h.Count(),
+					h.Min(),
+					h.Max(),
+					h.Mean(),
+					h.StdDev(),
 					ps[0],
 					ps[1],
 					ps[2],
@@ -38,6 +39,7 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
 					ps[4],
 				))
 			case Meter:
+				m := metric.Snapshot()
 				w.Info(fmt.Sprintf(
 					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
 					name,
@@ -48,24 +50,25 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
 					m.RateMean(),
 				))
 			case Timer:
-				ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
 				w.Info(fmt.Sprintf(
 					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
 					name,
-					m.Count(),
-					m.Min(),
-					m.Max(),
-					m.Mean(),
-					m.StdDev(),
+					t.Count(),
+					t.Min(),
+					t.Max(),
+					t.Mean(),
+					t.StdDev(),
 					ps[0],
 					ps[1],
 					ps[2],
 					ps[3],
 					ps[4],
-					m.Rate1(),
-					m.Rate5(),
-					m.Rate15(),
-					m.RateMean(),
+					t.Rate1(),
+					t.Rate5(),
+					t.Rate15(),
+					t.RateMean(),
 				))
 			}
 		})

+ 177 - 71
timer.go

@@ -1,11 +1,11 @@
 package metrics
 
-import "time"
+import (
+	"sync"
+	"time"
+)
 
 // Timers capture the duration and rate of events.
-//
-// This is an interface so as to encourage other structs to implement
-// the Timer API as appropriate.
 type Timer interface {
 	Count() int64
 	Max() int64
@@ -17,13 +17,16 @@ type Timer interface {
 	Rate5() float64
 	Rate15() float64
 	RateMean() float64
+	Snapshot() Timer
 	StdDev() float64
 	Time(func())
 	Update(time.Duration)
 	UpdateSince(time.Time)
+	Variance() float64
 }
 
-// Get an existing or create and register a new Timer.
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
 func GetOrRegisterTimer(name string, r Registry) Timer {
 	if nil == r {
 		r = DefaultRegistry
@@ -31,15 +34,18 @@ func GetOrRegisterTimer(name string, r Registry) Timer {
 	return r.GetOrRegister(name, NewTimer()).(Timer)
 }
 
-// Create a new timer with the given Histogram and Meter.
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
 func NewCustomTimer(h Histogram, m Meter) Timer {
 	if UseNilMetrics {
 		return NilTimer{}
 	}
-	return &StandardTimer{h, m}
+	return &StandardTimer{
+		histogram: h,
+		meter:     m,
+	}
 }
 
-// Create and register a new Timer.
+// NewRegisteredTimer constructs and registers a new StandardTimer.
 func NewRegisteredTimer(name string, r Registry) Timer {
 	c := NewTimer()
 	if nil == r {
@@ -49,128 +55,146 @@ func NewRegisteredTimer(name string, r Registry) Timer {
 	return c
 }
 
-// Create a new timer with a standard histogram and meter.  The histogram
-// will use an exponentially-decaying sample with the same reservoir size
-// and alpha as UNIX load averages.
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
 func NewTimer() Timer {
 	if UseNilMetrics {
 		return NilTimer{}
 	}
 	return &StandardTimer{
-		NewHistogram(NewExpDecaySample(1028, 0.015)),
-		NewMeter(),
+		histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+		meter:     NewMeter(),
 	}
 }
 
-// No-op Timer.
+// NilTimer is a no-op Timer.
 type NilTimer struct {
 	h Histogram
 	m Meter
 }
 
-// No-op.
-func (t NilTimer) Count() int64 { return 0 }
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
 
-// No-op.
-func (t NilTimer) Max() int64 { return 0 }
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
 
-// No-op.
-func (t NilTimer) Mean() float64 { return 0.0 }
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
 
-// No-op.
-func (t NilTimer) Min() int64 { return 0 }
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
 
-// No-op.
-func (t NilTimer) Percentile(p float64) float64 { return 0.0 }
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
 
-// No-op.
-func (t NilTimer) Percentiles(ps []float64) []float64 {
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
 	return make([]float64, len(ps))
 }
 
-// No-op.
-func (t NilTimer) Rate1() float64 { return 0.0 }
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
 
-// No-op.
-func (t NilTimer) Rate5() float64 { return 0.0 }
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
 
-// No-op.
-func (t NilTimer) Rate15() float64 { return 0.0 }
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
 
-// No-op.
-func (t NilTimer) RateMean() float64 { return 0.0 }
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
 
-// No-op.
-func (t NilTimer) StdDev() float64 { return 0.0 }
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
 
-// No-op.
-func (t NilTimer) Time(f func()) {}
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
 
-// No-op.
-func (t NilTimer) Update(d time.Duration) {}
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
 
-// No-op.
-func (t NilTimer) UpdateSince(ts time.Time) {}
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
 
-// The standard implementation of a Timer uses a Histogram and Meter directly.
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
 type StandardTimer struct {
-	h Histogram
-	m Meter
+	histogram Histogram
+	meter     Meter
+	mutex     sync.Mutex
 }
 
-// Return the count of inputs.
+// Count returns the number of events recorded.
 func (t *StandardTimer) Count() int64 {
-	return t.h.Count()
+	return t.histogram.Count()
 }
 
-// Return the maximal value seen.
+// Max returns the maximum value in the sample.
 func (t *StandardTimer) Max() int64 {
-	return t.h.Max()
+	return t.histogram.Max()
 }
 
-// Return the mean of all values seen.
+// Mean returns the mean of the values in the sample.
 func (t *StandardTimer) Mean() float64 {
-	return t.h.Mean()
+	return t.histogram.Mean()
 }
 
-// Return the minimal value seen.
+// Min returns the minimum value in the sample.
 func (t *StandardTimer) Min() int64 {
-	return t.h.Min()
+	return t.histogram.Min()
 }
 
-// Return an arbitrary percentile of all values seen.
+// Percentile returns an arbitrary percentile of the values in the sample.
 func (t *StandardTimer) Percentile(p float64) float64 {
-	return t.h.Percentile(p)
+	return t.histogram.Percentile(p)
 }
 
-// Return a slice of arbitrary percentiles of all values seen.
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
 func (t *StandardTimer) Percentiles(ps []float64) []float64 {
-	return t.h.Percentiles(ps)
+	return t.histogram.Percentiles(ps)
 }
 
-// Return the meter's one-minute moving average rate of events.
+// Rate1 returns the one-minute moving average rate of events per second.
 func (t *StandardTimer) Rate1() float64 {
-	return t.m.Rate1()
+	return t.meter.Rate1()
 }
 
-// Return the meter's five-minute moving average rate of events.
+// Rate5 returns the five-minute moving average rate of events per second.
 func (t *StandardTimer) Rate5() float64 {
-	return t.m.Rate5()
+	return t.meter.Rate5()
 }
 
-// Return the meter's fifteen-minute moving average rate of events.
+// Rate15 returns the fifteen-minute moving average rate of events per second.
 func (t *StandardTimer) Rate15() float64 {
-	return t.m.Rate15()
+	return t.meter.Rate15()
 }
 
-// Return the meter's mean rate of events.
+// RateMean returns the meter's mean rate of events per second.
 func (t *StandardTimer) RateMean() float64 {
-	return t.m.RateMean()
+	return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return &TimerSnapshot{
+		histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+		meter:     t.meter.Snapshot().(*MeterSnapshot),
+	}
 }
 
-// Return the standard deviation of all values seen.
+// StdDev returns the standard deviation of the values in the sample.
 func (t *StandardTimer) StdDev() float64 {
-	return t.h.StdDev()
+	return t.histogram.StdDev()
 }
 
 // Record the duration of the execution of the given function.
@@ -182,12 +206,94 @@ func (t *StandardTimer) Time(f func()) {
 
 // Record the duration of an event.
 func (t *StandardTimer) Update(d time.Duration) {
-	t.h.Update(int64(d))
-	t.m.Mark(1)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(d))
+	t.meter.Mark(1)
 }
 
 // Record the duration of an event that started at a time and ends now.
 func (t *StandardTimer) UpdateSince(ts time.Time) {
-	t.h.Update(int64(time.Since(ts)))
-	t.m.Mark(1)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(time.Since(ts)))
+	t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+	return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+	histogram *HistogramSnapshot
+	meter     *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
 }
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
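
A brief usage sketch for the revised Timer API: Time wraps a function call, UpdateSince records a duration from an explicit start time, and Snapshot yields a read-only view suitable for reporting (import path assumed):

package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	t := metrics.NewTimer()

	// Record the duration of a function call.
	t.Time(func() { time.Sleep(10 * time.Millisecond) })

	// Record an explicitly measured duration.
	start := time.Now()
	time.Sleep(5 * time.Millisecond)
	t.UpdateSince(start)

	// Report from a read-only snapshot; calling Update on it would panic.
	snap := t.Snapshot()
	fmt.Println(snap.Count(), time.Duration(snap.Max()), snap.RateMean())
}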

+ 24 - 21
writer.go

@@ -17,31 +17,33 @@ func Write(r Registry, d time.Duration, w io.Writer) {
 
 func WriteOnce(r Registry, w io.Writer) {
 	r.Each(func(name string, i interface{}) {
-		switch m := i.(type) {
+		switch metric := i.(type) {
 		case Counter:
 			fmt.Fprintf(w, "counter %s\n", name)
-			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
+			fmt.Fprintf(w, "  count:       %9d\n", metric.Count())
 		case Gauge:
 			fmt.Fprintf(w, "gauge %s\n", name)
-			fmt.Fprintf(w, "  value:       %9d\n", m.Value())
+			fmt.Fprintf(w, "  value:       %9d\n", metric.Value())
 		case Healthcheck:
-			m.Check()
+			metric.Check()
 			fmt.Fprintf(w, "healthcheck %s\n", name)
-			fmt.Fprintf(w, "  error:       %v\n", m.Error())
+			fmt.Fprintf(w, "  error:       %v\n", metric.Error())
 		case Histogram:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
 			fmt.Fprintf(w, "histogram %s\n", name)
-			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
-			fmt.Fprintf(w, "  min:         %9d\n", m.Min())
-			fmt.Fprintf(w, "  max:         %9d\n", m.Max())
-			fmt.Fprintf(w, "  mean:        %12.2f\n", m.Mean())
-			fmt.Fprintf(w, "  stddev:      %12.2f\n", m.StdDev())
+			fmt.Fprintf(w, "  count:       %9d\n", h.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", h.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", h.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", h.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", h.StdDev())
 			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
 			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
 			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
 			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
 			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
 		case Meter:
+			m := metric.Snapshot()
 			fmt.Fprintf(w, "meter %s\n", name)
 			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
 			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
@@ -49,22 +51,23 @@ func WriteOnce(r Registry, w io.Writer) {
 			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
 			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
 		case Timer:
-			ps := m.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
 			fmt.Fprintf(w, "timer %s\n", name)
-			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
-			fmt.Fprintf(w, "  min:         %9d\n", m.Min())
-			fmt.Fprintf(w, "  max:         %9d\n", m.Max())
-			fmt.Fprintf(w, "  mean:        %12.2f\n", m.Mean())
-			fmt.Fprintf(w, "  stddev:      %12.2f\n", m.StdDev())
+			fmt.Fprintf(w, "  count:       %9d\n", t.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", t.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", t.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", t.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", t.StdDev())
 			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
 			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
 			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
 			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
 			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
-			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
-			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", m.Rate5())
-			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
-			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", t.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", t.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", t.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", t.RateMean())
 		}
 	})
 }
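
The same pattern recurs in every reporter shown above (stathat, syslog, and this writer): iterate the registry, take a Snapshot of each histogram, meter, and timer, and emit statistics from the snapshot rather than the live metric. A custom reporter can follow the same shape; the sketch below assumes the usual import path and registry constructor:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

// report prints one line per metric, reading only from snapshots so the
// live metrics are never held locked while formatting output.
func report(r metrics.Registry) {
	r.Each(func(name string, i interface{}) {
		switch metric := i.(type) {
		case metrics.Counter:
			fmt.Printf("counter %s count=%d\n", name, metric.Count())
		case metrics.Histogram:
			h := metric.Snapshot()
			fmt.Printf("histogram %s count=%d mean=%.2f p99=%.2f\n",
				name, h.Count(), h.Mean(), h.Percentile(0.99))
		case metrics.Timer:
			t := metric.Snapshot()
			fmt.Printf("timer %s count=%d mean=%.2f 1m=%.2f\n",
				name, t.Count(), t.Mean(), t.Rate1())
		}
	})
}

func main() {
	r := metrics.NewRegistry()
	metrics.GetOrRegisterTimer("requests", r).Update(42)
	report(r)
}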