
Rework NumGC and PauseNs runtime metrics.

Fixes #41.

@majek is right that numGC not being updated breaks the PauseNs
histogram pretty badly. What's more, the simplistic bridge between
runtime.MemStats and go-metrics was unable to cope with a couple of
obvious possibilities, such as PauseNs wrapping around its circular
buffer or more than 256 GCs running between metric updates.
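
For reference, runtime.MemStats.PauseNs is a fixed-size circular buffer of recent GC pause durations, with the most recent pause at index (NumGC+255)%256 (see the mgc0.c link carried through in the diff below). A minimal sketch of reading the latest pause, independent of this commit:

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	func main() {
		var m runtime.MemStats
		runtime.ReadMemStats(&m)
		if m.NumGC > 0 {
			// The pause for completed GC number n lives at (n-1) % 256,
			// so the most recent one is at (NumGC+255) % 256.
			last := m.PauseNs[(m.NumGC+255)%uint32(len(m.PauseNs))]
			fmt.Println("last GC pause:", time.Duration(last))
		}
	}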
Richard Crowley committed 11 years ago (commit 10272f002f)
2 changed files with 52 additions and 2 deletions:
  1. runtime.go (+20 −2)
  2. runtime_test.go (+32 −0)

runtime.go (+20 −2)

@@ -93,9 +93,27 @@ func CaptureRuntimeMemStatsOnce(r Registry) {
 	runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
 	runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
 	runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC))
-	for i := uint32(1); i <= memStats.NumGC-numGC; i++ {
-		runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[(memStats.NumGC%256-i)%256])) // <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+
+	// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+	i := numGC % uint32(len(memStats.PauseNs))
+	ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+	if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+		for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	} else {
+		if i > ii {
+			for ; i < uint32(len(memStats.PauseNs)); i++ {
+				runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+			}
+			i = 0
+		}
+		for ; i < ii; i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
 	}
+	numGC = memStats.NumGC
+
 	runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
 	runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
 	runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
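
The replacement logic distinguishes three cases: 256 or more GCs since the last capture (every slot was overwritten at least once, so report the whole buffer), a range that wraps around the end of the circular buffer, and a plain in-order range. Below is a functionally equivalent sketch that collapses the wrap branch into modular indexing; the helper name and the update callback are mine, not part of the commit:

	package sketch

	import "runtime"

	// reportPauses feeds every GC pause recorded between prevNumGC and
	// m.NumGC to update, at most once each.
	func reportPauses(prevNumGC uint32, m *runtime.MemStats, update func(int64)) {
		n := uint32(len(m.PauseNs)) // 256 in the runtimes this commit targets
		if m.NumGC-prevNumGC >= n {
			// Too many GCs since the last capture: some pauses are
			// already lost, so report each surviving slot exactly once.
			for i := uint32(0); i < n; i++ {
				update(int64(m.PauseNs[i]))
			}
			return
		}
		// The pause for completed GC g+1 lives at index g % n, so modular
		// indexing walks the wrap-around case without an explicit branch.
		for g := prevNumGC; g != m.NumGC; g++ {
			update(int64(m.PauseNs[g%n]))
		}
	}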

runtime_test.go (+32 −0)

@@ -15,6 +15,38 @@ func BenchmarkRuntimeMemStats(b *testing.B) {
 	}
 }
 
+func TestRuntimeMemStats(t *testing.T) {
+	r := NewRegistry()
+	RegisterRuntimeMemStats(r)
+	CaptureRuntimeMemStatsOnce(r)
+	zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
+	runtime.GC()
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
+		t.Fatal(count - zero)
+	}
+	runtime.GC()
+	runtime.GC()
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
+		t.Fatal(count - zero)
+	}
+	for i := 0; i < 256; i++ {
+		runtime.GC()
+	}
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
+		t.Fatal(count - zero)
+	}
+	for i := 0; i < 257; i++ {
+		runtime.GC()
+	}
+	CaptureRuntimeMemStatsOnce(r)
+	if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
+		t.Fatal(count - zero)
+	}
+}
+
 func TestRuntimeMemStatsBlocking(t *testing.T) {
 	if g := runtime.GOMAXPROCS(0); g < 2 {
 		t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
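
For context on what the new test exercises, the bridge is typically wired up like this (a minimal sketch: the import path and the 5-second interval are my assumptions, and CaptureRuntimeMemStats is the looping counterpart of CaptureRuntimeMemStatsOnce):

	package main

	import (
		"time"

		metrics "github.com/rcrowley/go-metrics"
	)

	func main() {
		r := metrics.NewRegistry()
		metrics.RegisterRuntimeMemStats(r)
		// Each capture drains only the pauses recorded since the
		// previous one, so the PauseNs histogram sees every pause at
		// most once even across buffer wraps.
		go metrics.CaptureRuntimeMemStats(r, 5*time.Second)
		select {} // block so the sampling goroutine keeps running
	}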