
etcdserver: Calculate IDs for nodes solely on PeerURLs

Removes the notion of the member name being anything more than an
advisory, command-line grouping label, and adds verification checks
when bootstrapping from the command line. IDs are consistent as long
as the peer URLs are consistent.
Barak Michener, 11 years ago (commit 456d1ebcae)
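The heart of the change, pulled out of the hunks below: a member's ID is now derived only from its sorted peer URLs, so neither the member name nor the order in which the URLs are listed affects it. A minimal sketch of that derivation in Go (the hash step itself sits outside the hunks, so sha1 truncated to the first 8 bytes is assumed here purely for illustration):

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"sort"
)

// memberID sketches the new derivation: concatenate the sorted peer
// URLs and hash the result. The concrete hash etcd uses is not shown
// in the hunks below; sha1 with 8-byte truncation is an assumption.
func memberID(peerURLs []string) uint64 {
	urls := append([]string(nil), peerURLs...)
	sort.Strings(urls)

	var b []byte
	for _, u := range urls {
		b = append(b, []byte(u)...)
	}
	sum := sha1.Sum(b)
	return binary.BigEndian.Uint64(sum[:8])
}

func main() {
	a := memberID([]string{"http://10.0.0.1:2379", "http://10.0.0.2:2379"})
	b := memberID([]string{"http://10.0.0.2:2379", "http://10.0.0.1:2379"})
	fmt.Println(a == b) // true: URL order (and the member name) no longer matter
}

The member_test.go cases below rely on exactly these two properties: the same URLs with a different name, and the same URLs in a different order, must map to the same ID.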

etcdserver/cluster.go (+0 -10)

@@ -43,16 +43,6 @@ func (c Cluster) FindID(id uint64) *Member {
 	return c.members[id]
 }
 
-func (c Cluster) FindName(name string) *Member {
-	for _, m := range c.members {
-		if m.Name == name {
-			return m
-		}
-	}
-
-	return nil
-}
-
 func (c Cluster) Add(m Member) error {
 	if c.FindID(m.ID) != nil {
 		return fmt.Errorf("Member exists with identical ID %v", m)

etcdserver/cluster_test.go (+1 -1)

@@ -137,7 +137,7 @@ func TestClusterFind(t *testing.T) {
 		c := NewCluster()
 		c.AddSlice(tt.mems)
 
-		m := c.FindName(tt.name)
+		m := c.FindID(tt.id)
 		if m == nil && !tt.match {
 			continue
 		}

etcdserver/config.go (+12 -7)

@@ -27,7 +27,7 @@ import (
 
 // ServerConfig holds the configuration of etcd as taken from the command line or discovery.
 type ServerConfig struct {
-	Name         string
+	LocalMember  Member
 	DiscoveryURL string
 	ClientURLs   types.URLs
 	DataDir      string
@@ -45,11 +45,16 @@ func (c *ServerConfig) VerifyBootstrapConfig() error {
 	}
 
 	// Make sure the cluster at least contains the local server.
-	m := c.Cluster.FindName(c.Name)
-	if m == nil {
-		return fmt.Errorf("could not find name %v in cluster", c.Name)
+	isOk := false
+	for _, m := range c.Cluster.members {
+		if m.ID == c.LocalMember.ID {
+			isOk = true
+		}
+	}
+	if !isOk {
+		return fmt.Errorf("couldn't find local ID in cluster config")
 	}
-	if m.ID == raft.None {
+	if c.LocalMember.ID == raft.None {
 		return fmt.Errorf("could not use %x as member id", raft.None)
 	}
 
@@ -58,7 +63,7 @@ func (c *ServerConfig) VerifyBootstrapConfig() error {
 	for _, m := range c.Cluster.Members() {
 		for _, url := range m.PeerURLs {
 			if urlMap[url] {
-				return fmt.Errorf("duplicate url %v in server config", url)
+				return fmt.Errorf("duplicate url %v in cluster config", url)
 			}
 			urlMap[url] = true
 		}
@@ -70,7 +75,7 @@ func (c *ServerConfig) WALDir() string { return path.Join(c.DataDir, "wal") }
 
 func (c *ServerConfig) SnapDir() string { return path.Join(c.DataDir, "snap") }
 
-func (c *ServerConfig) ID() uint64 { return c.Cluster.FindName(c.Name).ID }
+func (c *ServerConfig) ID() uint64 { return c.LocalMember.ID }
 
 func (c *ServerConfig) ShouldDiscover() bool {
 	return c.DiscoveryURL != ""

etcdserver/config_test.go (+10 -3)

@@ -45,15 +45,22 @@ func TestBootstrapConfigVerify(t *testing.T) {
 
 	for i, tt := range tests {
 		cluster := &Cluster{}
-		cluster.Set(tt.clusterSetting)
+		err := cluster.Set(tt.clusterSetting)
+		if err != nil && tt.shouldError {
+			continue
+		}
+
 		cfg := ServerConfig{
-			Name:         "node1",
+			LocalMember: Member{
+				ID: 0x7350a9cd4dc16f76,
+			},
 			DiscoveryURL: tt.disc,
 			Cluster:      cluster,
 			ClusterState: tt.clst,
 		}
-		err := cfg.VerifyBootstrapConfig()
+		err = cfg.VerifyBootstrapConfig()
 		if (err == nil) && tt.shouldError {
+			t.Errorf("%#v", *cluster)
 			t.Errorf("#%d: Got no error where one was expected", i)
 		}
 		if (err != nil) && !tt.shouldError {

etcdserver/member.go (+7 -1)

@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"log"
 	"path"
+	"sort"
 	"strconv"
 	"time"
 
@@ -54,7 +55,8 @@ func newMember(name string, peerURLs types.URLs, now *time.Time) *Member {
 		Attributes:     Attributes{Name: name},
 	}
 
-	b := []byte(m.Name)
+	var b []byte
+	sort.Strings(m.PeerURLs)
 	for _, p := range m.PeerURLs {
 		b = append(b, []byte(p)...)
 	}
@@ -68,6 +70,10 @@ func newMember(name string, peerURLs types.URLs, now *time.Time) *Member {
 	return m
 }
 
+func NewMemberFromURLs(name string, urls types.URLs) *Member {
+	return newMember(name, urls, nil)
+}
+
 func (m Member) storeKey() string {
 	return path.Join(storeMembersPrefix, idAsHex(m.ID))
 }
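NewMemberFromURLs, newly exported above, is the constructor callers outside the package now use (the integration test and main.go below switch to it). A quick usage sketch; the import paths are assumed for the coreos/etcd tree of this era, and the printed result follows from the sorted-URL hashing sketched earlier:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/pkg/types"
)

func main() {
	urls1, err := types.NewURLs([]string{"http://10.0.0.1:2379", "http://10.0.0.2:2379"})
	if err != nil {
		log.Fatal(err)
	}
	urls2, err := types.NewURLs([]string{"http://10.0.0.2:2379", "http://10.0.0.1:2379"})
	if err != nil {
		log.Fatal(err)
	}

	// Different names, different URL order: the IDs still match.
	a := etcdserver.NewMemberFromURLs("mem1", urls1)
	b := etcdserver.NewMemberFromURLs("memfoo", urls2)
	fmt.Println(a.ID == b.ID) // true
}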

etcdserver/member_test.go (+9 -2)

@@ -35,8 +35,15 @@ func TestMemberTime(t *testing.T) {
 		mem *Member
 		id  uint64
 	}{
-		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, nil), 11240395089494390470},
-		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}}, timeParse("1984-12-23T15:04:05Z")), 5483967913615174889},
+		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, nil), 14544069596553697298},
+		// Same ID, different name (names shouldn't matter)
+		{newMember("memfoo", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, nil), 14544069596553697298},
+		// Same ID, different Time
+		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, timeParse("1984-12-23T15:04:05Z")), 2448790162483548276},
+		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}}, timeParse("1984-12-23T15:04:05Z")), 1466075294948436910},
+		// Order shouldn't matter
+		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "10.0.0.2:2379"}}, nil), 16552244735972308939},
+		{newMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.2:2379"}, {Scheme: "http", Host: "10.0.0.1:2379"}}, nil), 16552244735972308939},
 	}
 	for i, tt := range tests {
 		if tt.mem.ID != tt.id {

etcdserver/server.go (+2 -2)

@@ -213,7 +213,7 @@ func NewServer(cfg *ServerConfig) *EtcdServer {
 	cls := &clusterStore{Store: st, id: cid}
 
 	sstats := &stats.ServerStats{
-		Name: cfg.Name,
+		Name: cfg.LocalMember.Attributes.Name,
 		ID:   idAsHex(cfg.ID()),
 	}
 	lstats := stats.NewLeaderStats(idAsHex(cfg.ID()))
@@ -223,7 +223,7 @@ func NewServer(cfg *ServerConfig) *EtcdServer {
 		node:       n,
 		id:         id,
 		clusterID:  cid,
-		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
+		attributes: Attributes{Name: cfg.LocalMember.Attributes.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
 		storage: struct {
 			*wal.WAL
 			*snap.Snapshotter

integration/cluster_test.go (+6 -3)

@@ -89,18 +89,21 @@ func (c *cluster) Launch(t *testing.T) {
 		lns[i] = l
 		bootstrapCfgs[i] = fmt.Sprintf("%s=%s", c.name(i), "http://"+l.Addr().String())
 	}
-	clusterCfg := &etcdserver.Cluster{}
+	clusterCfg := etcdserver.NewCluster()
 	if err := clusterCfg.Set(strings.Join(bootstrapCfgs, ",")); err != nil {
 		t.Fatal(err)
 	}
 
-	var err error
 	for i := 0; i < c.Size; i++ {
 		m := member{}
 		m.PeerListeners = []net.Listener{lns[i]}
 		cln := newLocalListener(t)
 		m.ClientListeners = []net.Listener{cln}
-		m.Name = c.name(i)
+		listenUrls, err := types.NewURLs([]string{"http://" + lns[i].Addr().String()})
+		if err != nil {
+			t.Fatal(err)
+		}
+		m.LocalMember = *etcdserver.NewMemberFromURLs(c.name(i), listenUrls)
 		m.ClientURLs, err = types.NewURLs([]string{"http://" + cln.Addr().String()})
 		if err != nil {
 			t.Fatal(err)

main.go (+43 -16)

@@ -131,12 +131,14 @@ func main() {
 	}
 
 	pkg.SetFlagsFromEnv(fs)
-	if err := setClusterForDiscovery(); err != nil {
-		log.Fatalf("etcd: %v", err)
+
+	localMember, err := setupCluster()
+	if err != nil {
+		log.Fatalf("etcd: setupCluster returned error %v", err)
 	}
 
 	if string(*proxyFlag) == flagtypes.ProxyValueOff {
-		startEtcd()
+		startEtcd(localMember)
 	} else {
 		startProxy()
 	}
@@ -146,7 +148,7 @@ func main() {
 }
 
 // startEtcd launches the etcd server and HTTP handlers for client/server communication.
-func startEtcd() {
+func startEtcd(self *etcdserver.Member) {
 	if *dir == "" {
 		*dir = fmt.Sprintf("%v_etcd_data", *name)
 		log.Printf("etcd: no data-dir provided, using default data-dir ./%s", *dir)
@@ -165,7 +167,7 @@ func startEtcd() {
 		log.Fatal(err.Error())
 	}
 	cfg := &etcdserver.ServerConfig{
-		Name:         *name,
+		LocalMember:  *self,
 		ClientURLs:   acurls,
 		DataDir:      *dir,
 		SnapCount:    *snapCount,
@@ -262,28 +264,53 @@ func startProxy() {
 	}
 }
 
-// setClusterForDiscovery sets cluster to a temporary value if you are using
-// the discovery.
-func setClusterForDiscovery() error {
+// setupCluster sets cluster to a temporary value if you are using
+// discovery, or to the static configuration for bootstrapped clusters.
+// Returns the local member on success.
+func setupCluster() (*etcdserver.Member, error) {
 	set := make(map[string]bool)
+	var m *etcdserver.Member
 	flag.Visit(func(f *flag.Flag) {
 		set[f.Name] = true
 	})
 	if set["discovery"] && set["initial-cluster"] {
-		return fmt.Errorf("both discovery and initial-cluster are set")
+		return nil, fmt.Errorf("both discovery and bootstrap-config are set")
 	}
 	if set["discovery"] {
 		apurls, err := pkg.URLsFromFlags(fs, "advertise-peer-urls", "peer-addr", peerTLSInfo)
 		if err != nil {
-			return err
+			return nil, err
 		}
-		addrs := make([]string, len(apurls))
-		for i := range apurls {
-			addrs[i] = fmt.Sprintf("%s=%s", *name, apurls[i].String())
+		m = etcdserver.NewMemberFromURLs(*name, apurls)
+		cluster = etcdserver.NewCluster()
+		cluster.Add(*m)
+		return m, nil
+	} else if set["initial-cluster"] {
+		// We're statically configured, and cluster has appropriately been set.
+		// Try to configure by indexing the static cluster by name.
+		if set["name"] {
+			for _, c := range cluster.Members() {
+				if c.Name == *name {
+					return c, nil
+				}
+			}
+			log.Println("etcd: cannot find the passed name", *name, "in initial-cluster. Trying advertise-peer-urls")
 		}
-		if err := cluster.Set(strings.Join(addrs, ",")); err != nil {
-			return err
+		// Try to configure by looking for a matching machine based on the peer urls.
+		if set["advertise-peer-urls"] {
+			apurls, err := pkg.URLsFromFlags(flag.CommandLine, "advertise-peer-urls", "addr", peerTLSInfo)
+			if err != nil {
+				return nil, err
+			}
+			m = etcdserver.NewMemberFromURLs(*name, apurls)
+			for _, c := range cluster.Members() {
+				if c.ID == m.ID {
+					return c, nil
+				}
+			}
+			log.Println("etcd: Could not find a matching peer for the local instance in initial-cluster.")
 		}
+		return nil, fmt.Errorf("etcd: Bootstrapping a static cluster, but local name or local peer urls are not defined")
 	}
-	return nil
+	return nil, fmt.Errorf("etcd: Flag configuration not set for discovery or initial-cluster")
 }
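Taken together, the static-bootstrap branch of setupCluster now resolves the local member either by its advisory name or, failing that, by the ID derived from -advertise-peer-urls. A sketch of that URL-based path as a standalone helper (findLocalMember is a hypothetical name; the etcdserver and types identifiers and the name=url initial-cluster format come from the hunks above, and the imports match the previous sketch):

// findLocalMember mirrors the -initial-cluster / -advertise-peer-urls
// branch above: parse the cluster string, derive the local member from
// the advertised peer URLs, and match it into the cluster by ID.
func findLocalMember(initialCluster, name string, peerURLs []string) (*etcdserver.Member, error) {
	cluster := etcdserver.NewCluster()
	if err := cluster.Set(initialCluster); err != nil {
		return nil, err
	}
	apurls, err := types.NewURLs(peerURLs)
	if err != nil {
		return nil, err
	}
	local := etcdserver.NewMemberFromURLs(name, apurls)
	for _, m := range cluster.Members() {
		// The match is by URL-derived ID; the name is only a label.
		if m.ID == local.ID {
			return m, nil
		}
	}
	return nil, fmt.Errorf("no member in %q advertises %v", initialCluster, peerURLs)
}

Calling it as findLocalMember("node1=http://10.0.0.1:2379,node2=http://10.0.0.2:2379", "node1", []string{"http://10.0.0.1:2379"}) would return the node1 entry even if the name argument were something else entirely, which is exactly the point of the commit.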