
etcdserver: configurable backend size quota

Configurable with the flag --experimental-quota-backend-bytes and
through ServerConfig.QuotaBackendBytes.

Fixes #4894
Anthony Romano · 9 years ago
commit 9b2c963179
6 changed files with 42 additions and 16 deletions
  1. etcdmain/config.go            (+2, -0)
  2. etcdmain/etcd.go              (+1, -0)
  3. etcdserver/config.go          (+1, -0)
  4. etcdserver/quota.go           (+20, -1)
  5. integration/v3_grpc_test.go   (+9, -15)
  6. storage/backend/backend.go    (+9, -0)

+ 2 - 0
etcdmain/config.go

@@ -123,6 +123,7 @@ type config struct {
 	printVersion bool
 
 	autoCompactionRetention int
+	quotaBackendBytes       int64
 
 	enablePprof bool
 
@@ -224,6 +225,7 @@ func NewConfig() *config {
 
 	// demo flag
 	fs.IntVar(&cfg.autoCompactionRetention, "experimental-auto-compaction-retention", 0, "Auto compaction retention in hour. 0 means disable auto compaction.")
+	fs.Int64Var(&cfg.quotaBackendBytes, "experimental-quota-backend-bytes", 0, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.")
 
 	// backwards-compatibility with v0.4.6
 	fs.Var(&flags.IPAddressPort{}, "addr", "DEPRECATED: Use --advertise-client-urls instead.")

+ 1 - 0
etcdmain/etcd.go

@@ -347,6 +347,7 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
 		TickMs:                  cfg.TickMs,
 		ElectionTicks:           cfg.electionTicks(),
 		AutoCompactionRetention: cfg.autoCompactionRetention,
+		QuotaBackendBytes:       cfg.quotaBackendBytes,
 		StrictReconfigCheck:     cfg.strictReconfigCheck,
 		EnablePprof:             cfg.enablePprof,
 	}

+ 1 - 0
etcdserver/config.go

@@ -51,6 +51,7 @@ type ServerConfig struct {
 	BootstrapTimeout time.Duration
 
 	AutoCompactionRetention int
+	QuotaBackendBytes       int64
 
 	StrictReconfigCheck bool
 

+ 20 - 1
etcdserver/quota.go

@@ -31,6 +31,12 @@ type Quota interface {
 	Remaining() int64
 }
 
+type passthroughQuota struct{}
+
+func (*passthroughQuota) Available(interface{}) bool { return true }
+func (*passthroughQuota) Cost(interface{}) int       { return 0 }
+func (*passthroughQuota) Remaining() int64           { return 1 }
+
 type backendQuota struct {
 	s               *EtcdServer
 	maxBackendBytes int64
@@ -44,7 +50,20 @@ const (
 )
 
 func NewBackendQuota(s *EtcdServer) Quota {
-	return &backendQuota{s, backend.InitialMmapSize}
+	if s.cfg.QuotaBackendBytes < 0 {
+		// disable quotas if negative
+		plog.Warningf("disabling backend quota")
+		return &passthroughQuota{}
+	}
+	if s.cfg.QuotaBackendBytes == 0 {
+		// use default size if no quota size given
+		return &backendQuota{s, backend.DefaultQuotaBytes}
+	}
+	if s.cfg.QuotaBackendBytes > backend.MaxQuotaBytes {
+		plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.cfg.QuotaBackendBytes, backend.MaxQuotaBytes)
+		return &backendQuota{s, backend.MaxQuotaBytes}
+	}
+	return &backendQuota{s, s.cfg.QuotaBackendBytes}
 }
 
 func (b *backendQuota) Available(v interface{}) bool {
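
For readers, a hedged restatement of the selection rules above as a standalone helper (the function and package names are hypothetical, not part of the commit):

// Illustrative only: effectiveQuota mirrors NewBackendQuota's branching,
// mapping a configured QuotaBackendBytes value to the quota that would
// actually be enforced. enforced is false when the passthrough (no-op)
// quota is chosen.
package quotasketch

import "github.com/coreos/etcd/storage/backend"

func effectiveQuota(configured int64) (bytes int64, enforced bool) {
	switch {
	case configured < 0:
		return 0, false // negative value disables quota enforcement
	case configured == 0:
		return backend.DefaultQuotaBytes, true // unset: fall back to the 2GB default
	case configured > backend.MaxQuotaBytes:
		return backend.MaxQuotaBytes, true // cap at the 8GB suggested maximum
	default:
		return configured, true // enforce the configured size as-is
	}
}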

+ 9 - 15
integration/v3_grpc_test.go

@@ -24,7 +24,6 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/testutil"
-	"github.com/coreos/etcd/storage/backend"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
@@ -461,16 +460,16 @@ func TestV3Hash(t *testing.T) {
 
 // TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer
 func TestV3StorageQuotaAPI(t *testing.T) {
-	oldSize := backend.InitialMmapSize
-	defer func() {
-		backend.InitialMmapSize = oldSize
-		testutil.AfterTest(t)
-	}()
+	defer testutil.AfterTest(t)
 
-	backend.InitialMmapSize = 64 * 1024
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+
+	clus.Members[0].QuotaBackendBytes = 64 * 1024
+	clus.Members[0].Stop(t)
+	clus.Members[0].Restart(t)
+
 	defer clus.Terminate(t)
-	kvc := toGRPC(clus.RandClient()).KV
+	kvc := toGRPC(clus.Client(0)).KV
 
 	key := []byte("abc")
 
@@ -506,11 +505,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
 
 // TestV3StorageQuotaApply tests the V3 server respects quotas during apply
 func TestV3StorageQuotaApply(t *testing.T) {
-	oldSize := backend.InitialMmapSize
-	defer func() {
-		backend.InitialMmapSize = oldSize
-		testutil.AfterTest(t)
-	}()
+	defer testutil.AfterTest(t)
 
 	clus := NewClusterV3(t, &ClusterConfig{Size: 2})
 	defer clus.Terminate(t)
@@ -518,7 +513,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
 	kvc1 := toGRPC(clus.Client(1)).KV
 
 	// force a node to have a different quota
-	backend.InitialMmapSize = 64 * 1024
+	clus.Members[0].QuotaBackendBytes = 64 * 1024
 	clus.Members[0].Stop(t)
 	clus.Members[0].Restart(t)
 	clus.waitLeader(t, clus.Members)
@@ -552,7 +547,6 @@ func TestV3StorageQuotaApply(t *testing.T) {
 	}
 
 	// reset large quota node to ensure alarm persisted
-	backend.InitialMmapSize = oldSize
 	clus.Members[1].Stop(t)
 	clus.Members[1].Restart(t)
 	clus.waitLeader(t, clus.Members)

+ 9 - 0
storage/backend/backend.go

@@ -41,6 +41,15 @@ var (
 	InitialMmapSize = int64(10 * 1024 * 1024 * 1024)
 )
 
+const (
+	// DefaultQuotaBytes is the number of bytes the backend Size may
+	// consume before exceeding the space quota.
+	DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
+	// MaxQuotaBytes is the maximum number of bytes suggested for a backend
+	// quota. A larger quota may lead to degraded performance.
+	MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
+)
+
 type Backend interface {
 	BatchTx() BatchTx
 	Snapshot() Snapshot