
HostInfo is now a pointer everywhere

Chris Bannister 10 years ago
parent
commit
c1597c8274
9 changed files with 232 additions and 217 deletions
  1. connectionpool.go (+16 -22)
  2. control.go (+4 -4)
  3. events.go (+1 -1)
  4. host_source.go (+42 -30)
  5. policies.go (+37 -34)
  6. policies_test.go (+46 -46)
  7. session.go (+7 -0)
  8. token.go (+4 -5)
  9. token_test.go (+75 -75)
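
The motivation for the change: HostInfo now carries a sync.RWMutex and exposes its fields through guarded accessors, which only works safely if every component shares a single record by pointer. Copying a struct that embeds a mutex also copies the lock (which `go vet`'s copylocks check rejects), and updates to the copy never reach the other holders; hence `[]HostInfo` becomes `[]*HostInfo` throughout. A minimal sketch of the pattern, with simplified names rather than the library's actual type:

```go
package main

import (
	"fmt"
	"sync"
)

// hostInfo mimics the new HostInfo shape: fields are private and
// every access goes through the RWMutex-guarded methods below.
type hostInfo struct {
	mu   sync.RWMutex
	peer string
}

func (h *hostInfo) Peer() string {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.peer
}

func (h *hostInfo) setPeer(peer string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.peer = peer
}

func main() {
	h := &hostInfo{peer: "10.0.0.1"}
	ring := []*hostInfo{h} // other components hold the same pointer
	h.setPeer("10.0.0.2")
	fmt.Println(ring[0].Peer()) // 10.0.0.2: one record, every holder sees the update
}
```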

+ 16 - 22
connectionpool.go

@@ -19,7 +19,7 @@ import (
 
 // interface to implement to receive the host information
 type SetHosts interface {
-	SetHosts(hosts []HostInfo)
+	SetHosts(hosts []*HostInfo)
 }
 
 // interface to implement to receive the partitioner value
@@ -69,6 +69,8 @@ type policyConnPool struct {
 	hostPolicy    HostSelectionPolicy
 	connPolicy    func() ConnSelectionPolicy
 	hostConnPools map[string]*hostConnPool
+
+	endpoints []string
 }
 
 func connConfig(session *Session) (*ConnConfig, error) {
@@ -91,7 +93,6 @@ func connConfig(session *Session) (*ConnConfig, error) {
 		ProtoVersion:  cfg.ProtoVersion,
 		CQLVersion:    cfg.CQLVersion,
 		Timeout:       cfg.Timeout,
-		NumStreams:    cfg.NumStreams,
 		Compressor:    cfg.Compressor,
 		Authenticator: cfg.Authenticator,
 		Keepalive:     cfg.SocketKeepalive,
@@ -119,18 +120,13 @@ func newPolicyConnPool(session *Session, hostPolicy HostSelectionPolicy,
 		hostConnPools: map[string]*hostConnPool{},
 	}
 
-	// TODO(zariel): fetch this from session metadata.
-	hosts := make([]HostInfo, len(session.cfg.Hosts))
-	for i, hostAddr := range session.cfg.Hosts {
-		hosts[i].Peer = hostAddr
-	}
-
-	pool.SetHosts(hosts)
+	pool.endpoints = make([]string, len(session.cfg.Hosts))
+	copy(pool.endpoints, session.cfg.Hosts)
 
 	return pool, nil
 }
 
-func (p *policyConnPool) SetHosts(hosts []HostInfo) {
+func (p *policyConnPool) SetHosts(hosts []*HostInfo) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 
@@ -141,24 +137,23 @@ func (p *policyConnPool) SetHosts(hosts []HostInfo) {
 
 	// TODO connect to hosts in parallel, but wait for pools to be
 	// created before returning
-
-	for i := range hosts {
-		pool, exists := p.hostConnPools[hosts[i].Peer]
+	for _, host := range hosts {
+		pool, exists := p.hostConnPools[host.Peer()]
 		if !exists {
 			// create a connection pool for the host
 			pool = newHostConnPool(
 				p.session,
-				hosts[i].Peer,
+				host.Peer(),
 				p.port,
 				p.numConns,
 				p.connCfg,
 				p.keyspace,
 				p.connPolicy(),
 			)
-			p.hostConnPools[hosts[i].Peer] = pool
+			p.hostConnPools[host.Peer()] = pool
 		} else {
 			// still have this host, so don't remove it
-			delete(toRemove, hosts[i].Peer)
+			delete(toRemove, host.Peer())
 		}
 	}
 
@@ -170,7 +165,6 @@ func (p *policyConnPool) SetHosts(hosts []HostInfo) {
 
 	// update the policy
 	p.hostPolicy.SetHosts(hosts)
-
 }
 
 func (p *policyConnPool) SetPartitioner(partitioner string) {
@@ -206,7 +200,7 @@ func (p *policyConnPool) Pick(qry *Query) (SelectedHost, *Conn) {
 			panic(fmt.Sprintf("policy %T returned no host info: %+v", p.hostPolicy, host))
 		}
 
-		pool, ok := p.hostConnPools[host.Info().Peer]
+		pool, ok := p.hostConnPools[host.Info().Peer()]
 		if !ok {
 			continue
 		}
@@ -221,7 +215,7 @@ func (p *policyConnPool) Close() {
 	defer p.mu.Unlock()
 
 	// remove the hosts from the policy
-	p.hostPolicy.SetHosts([]HostInfo{})
+	p.hostPolicy.SetHosts(nil)
 
 	// close the pools
 	for addr, pool := range p.hostConnPools {
@@ -234,14 +228,14 @@ func (p *policyConnPool) addHost(host *HostInfo) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 
-	pool, ok := p.hostConnPools[host.Peer]
+	pool, ok := p.hostConnPools[host.Peer()]
 	if ok {
 		return
 	}
 
 	pool = newHostConnPool(
 		p.session,
-		host.Peer,
+		host.Peer(),
 		p.port,
 		p.numConns,
 		p.connCfg,
@@ -249,7 +243,7 @@ func (p *policyConnPool) addHost(host *HostInfo) {
 		p.connPolicy(),
 	)
 
-	p.hostConnPools[host.Peer] = pool
+	p.hostConnPools[host.Peer()] = pool
 }
 
 func (p *policyConnPool) removeHost(addr string) {
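
The SetHosts body above performs an add/keep/remove reconciliation: mark every existing pool for removal, unmark the ones still present in the incoming host list, and create pools for newcomers. A self-contained sketch of that logic, using plain peer strings and a bool map in place of *HostInfo and *hostConnPool:

```go
package main

import "fmt"

// reconcile mirrors SetHosts: assume everything goes away, then keep
// what is still in the ring and add what is new.
func reconcile(pools map[string]bool, peers []string) {
	toRemove := make(map[string]bool, len(pools))
	for addr := range pools {
		toRemove[addr] = true
	}
	for _, addr := range peers {
		if !pools[addr] {
			pools[addr] = true // new host: create its pool
		} else {
			delete(toRemove, addr) // still here: keep its pool
		}
	}
	for addr := range toRemove {
		delete(pools, addr) // host left the ring: close and drop its pool
	}
}

func main() {
	pools := map[string]bool{"10.0.0.1": true, "10.0.0.2": true}
	reconcile(pools, []string{"10.0.0.2", "10.0.0.3"})
	fmt.Println(pools) // map[10.0.0.2:true 10.0.0.3:true]
}
```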

+ 4 - 4
control.go

@@ -39,7 +39,6 @@ func createControlConn(session *Session) *controlConn {
 }
 
 func (c *controlConn) heartBeat() {
-	c.closeWg.Add(1)
 	defer c.closeWg.Done()
 
 	for {
@@ -115,6 +114,7 @@ func (c *controlConn) connect(endpoints []string) error {
 
 	c.conn.Store(conn)
 	atomic.StoreInt64(&c.connecting, 0)
+	c.closeWg.Add(1)
 	go c.heartBeat()
 
 	return nil
@@ -258,14 +258,14 @@ func (c *controlConn) fetchHostInfo(addr net.IP, port int) (*HostInfo, error) {
 		fn = func(host *HostInfo) error {
 			// TODO(zariel): should we fetch rpc_address from here?
 			iter := c.query("SELECT data_center, rack, host_id, tokens FROM system.local WHERE key='local'")
-			iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens)
+			iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens)
 			return iter.Close()
 		}
 	} else {
 		fn = func(host *HostInfo) error {
 			// TODO(zariel): should we fetch rpc_address from here?
 			iter := c.query("SELECT data_center, rack, host_id, tokens FROM system.peers WHERE peer=?", addr)
-			iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens)
+			iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens)
 			return iter.Close()
 		}
 	}
@@ -274,7 +274,7 @@ func (c *controlConn) fetchHostInfo(addr net.IP, port int) (*HostInfo, error) {
 	if err := fn(host); err != nil {
 		return nil, err
 	}
-	host.Peer = addr.String()
+	host.peer = addr.String()
 
 	return host, nil
 }
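
Moving closeWg.Add(1) out of heartBeat and into connect, ahead of the `go` statement, fixes a classic WaitGroup race: a concurrent Wait() can return before a freshly spawned goroutine gets a chance to register itself. A minimal illustration of the correct ordering:

```go
package main

import (
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup

	// Correct: Add(1) happens-before the goroutine starts, so Wait()
	// can never observe a zero counter while the worker is still
	// about to register itself.
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(10 * time.Millisecond) // stand-in for the heartbeat loop
	}()

	// Racy variant (what the old code did): calling wg.Add(1) as the
	// first statement inside the goroutine lets Wait() return before
	// the goroutine has run at all.

	wg.Wait()
}
```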

+ 1 - 1
events.go

@@ -56,7 +56,7 @@ func (s *Session) handleNewNode(host net.IP, port int) {
 		return
 	}
 
-	if s.hostFilter.Accept(*hostInfo) {
+	if s.hostFilter.Accept(hostInfo) {
 		s.pool.addHost(hostInfo)
 	}
 }
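
With hostInfo already a pointer, the filter receives it directly rather than a dereferenced copy, so Accept implementations see live host state and never copy the embedded mutex. A hypothetical filter in the new style, with stand-in types rather than the library's API:

```go
package main

import "fmt"

// host stands in for HostInfo; the real Accept takes a *HostInfo.
type host struct{ dc string }

type filter interface {
	Accept(h *host) bool
}

// dcFilter admits only hosts from one data center.
type dcFilter struct{ dc string }

func (f dcFilter) Accept(h *host) bool { return h.dc == f.dc }

func main() {
	var f filter = dcFilter{dc: "dc1"}
	fmt.Println(f.Accept(&host{dc: "dc1"}), f.Accept(&host{dc: "dc2"})) // true false
}
```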

+ 42 - 30
host_source.go

@@ -10,12 +10,13 @@ import (
 
 type nodeState int32
 
-func (n nodeState) String() {
+func (n nodeState) String() string {
 	if n == NodeUp {
 		return "UP"
 	} else if n == NodeDown {
 		return "DOWN"
 	}
+	return fmt.Sprintf("UNKNOWN_%d", n)
 }
 
 const (
@@ -44,13 +45,22 @@ type HostInfo struct {
 	tokens     []string
 }
 
+func (h *HostInfo) Equal(host *HostInfo) bool {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	host.mu.RLock()
+	defer host.mu.RUnlock()
+
+	return h.peer == host.peer && h.hostId == host.hostId
+}
+
 func (h *HostInfo) Peer() string {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
 	return h.peer
 }
 
-func (h *HostInfo) SetPeer(peer string) *HostInfo {
+func (h *HostInfo) setPeer(peer string) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.peer = peer
@@ -63,20 +73,20 @@ func (h *HostInfo) DataCenter() string {
 	return h.dataCenter
 }
 
-func (h *HostInfo) SetDataCenter(dataCenter string) *HostInfo {
+func (h *HostInfo) setDataCenter(dataCenter string) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.dataCenter = dataCenter
 	return h
 }
 
-func (h *HostFilter) Rack() string {
+func (h *HostInfo) Rack() string {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
 	return h.rack
 }
 
-func (h *HostInfo) SetRack(rack string) *HostInfo {
+func (h *HostInfo) setRack(rack string) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.rack = rack
@@ -89,7 +99,7 @@ func (h *HostInfo) HostID() string {
 	return h.hostId
 }
 
-func (h *HostInfo) SetHostID(hostID string) *HostInfo {
+func (h *HostInfo) setHostID(hostID string) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.hostId = hostID
@@ -102,7 +112,7 @@ func (h *HostInfo) Version() cassVersion {
 	return h.version
 }
 
-func (h *HostInfo) SetVersion(major, minor, patch int) *HostInfo {
+func (h *HostInfo) setVersion(major, minor, patch int) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.version.Major = major
@@ -117,7 +127,7 @@ func (h *HostInfo) State() nodeState {
 	return h.state
 }
 
-func (h *HostInfo) SetState(state nodeState) *HostInfo {
+func (h *HostInfo) setState(state nodeState) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.state = state
@@ -130,7 +140,7 @@ func (h *HostInfo) Tokens() []string {
 	return h.tokens
 }
 
-func (h *HostInfo) SetTokens(tokens []string) *HostInfo {
+func (h *HostInfo) setTokens(tokens []string) *HostInfo {
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	h.tokens = tokens
@@ -138,21 +148,23 @@ func (h *HostInfo) SetTokens(tokens []string) *HostInfo {
 }
 
 func (h HostInfo) String() string {
-	return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.Peer, h.DataCenter, h.Rack, h.HostId, h.Version, h.State, len(h.Tokens))
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.peer, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens))
 }
 
 // Polls system.peers at a specific interval to find new hosts
 type ringDescriber struct {
-	dcFilter        string
-	rackFilter      string
-	prevHosts       []HostInfo
-	prevPartitioner string
-	session         *Session
-	closeChan       chan bool
+	dcFilter   string
+	rackFilter string
+	session    *Session
+	closeChan  chan bool
 	// indicates that we can use system.local to get the connections remote address
 	localHasRpcAddr bool
 
-	mu sync.Mutex
+	mu              sync.Mutex
+	prevHosts       []*HostInfo
+	prevPartitioner string
 }
 
 func checkSystemLocal(control *controlConn) (bool, error) {
@@ -170,7 +182,7 @@ func checkSystemLocal(control *controlConn) (bool, error) {
 	return true, nil
 }
 
-func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err error) {
+func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	// we need conn to be the same because we need to query system.peers and system.local
@@ -182,15 +194,15 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
 		localQuery = "SELECT broadcast_address, data_center, rack, host_id, tokens, partitioner FROM system.local"
 	)
 
-	var localHost HostInfo
+	localHost := &HostInfo{}
 	if r.localHasRpcAddr {
 		iter := r.session.control.query(localQuery)
 		if iter == nil {
 			return r.prevHosts, r.prevPartitioner, nil
 		}
 
-		iter.Scan(&localHost.Peer, &localHost.DataCenter, &localHost.Rack,
-			&localHost.HostId, &localHost.Tokens, &partitioner)
+		iter.Scan(&localHost.peer, &localHost.dataCenter, &localHost.rack,
+			&localHost.hostId, &localHost.tokens, &partitioner)
 
 		if err = iter.Close(); err != nil {
 			return nil, "", err
@@ -201,7 +213,7 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
 			return r.prevHosts, r.prevPartitioner, nil
 		}
 
-		iter.Scan(&localHost.DataCenter, &localHost.Rack, &localHost.HostId, &localHost.Tokens, &partitioner)
+		iter.Scan(&localHost.dataCenter, &localHost.rack, &localHost.hostId, &localHost.tokens, &partitioner)
 
 		if err = iter.Close(); err != nil {
 			return nil, "", err
@@ -214,22 +226,22 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
 			panic(err)
 		}
 
-		localHost.Peer = addr
+		localHost.peer = addr
 	}
 
-	hosts = []HostInfo{localHost}
+	hosts = []*HostInfo{localHost}
 
 	iter := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens FROM system.peers")
 	if iter == nil {
 		return r.prevHosts, r.prevPartitioner, nil
 	}
 
-	host := HostInfo{}
-	for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
-		if r.matchFilter(&host) {
+	host := &HostInfo{}
+	for iter.Scan(&host.peer, &host.dataCenter, &host.rack, &host.hostId, &host.tokens) {
+		if r.matchFilter(host) {
 			hosts = append(hosts, host)
 		}
-		host = HostInfo{}
+		host = &HostInfo{}
 	}
 
 	if err = iter.Close(); err != nil {
@@ -244,11 +256,11 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
 
 func (r *ringDescriber) matchFilter(host *HostInfo) bool {
 
-	if r.dcFilter != "" && r.dcFilter != host.DataCenter {
+	if r.dcFilter != "" && r.dcFilter != host.DataCenter() {
 		return false
 	}
 
-	if r.rackFilter != "" && r.rackFilter != host.Rack {
+	if r.rackFilter != "" && r.rackFilter != host.Rack() {
 		return false
 	}
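
The nodeState.String() hunk at the top of this file is a compile fix: the old method declared no return type and fell off the end for states that were neither up nor down, so the new version returns string and adds an UNKNOWN fallback. The same method recast as an exhaustive switch, for illustration only:

```go
package main

import "fmt"

type nodeState int32

const (
	NodeUp nodeState = iota
	NodeDown
)

// String satisfies fmt.Stringer; it must return on every path, hence
// the default case for values outside the known set. (%d does not
// invoke Stringer, so the Sprintf below cannot recurse.)
func (n nodeState) String() string {
	switch n {
	case NodeUp:
		return "UP"
	case NodeDown:
		return "DOWN"
	default:
		return fmt.Sprintf("UNKNOWN_%d", n)
	}
}

func main() {
	fmt.Println(NodeUp, nodeState(7)) // UP UNKNOWN_7
}
```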
 

+ 37 - 34
policies.go

@@ -12,40 +12,40 @@ import (
 	"github.com/hailocab/go-hostpool"
 )
 
-// cowHostList implements a copy on write host list, its equivilent type is []HostInfo
+// cowHostList implements a copy-on-write host list; its equivalent type is []*HostInfo
 type cowHostList struct {
 	list atomic.Value
 	mu   sync.Mutex
 }
 
-func (c *cowHostList) get() []HostInfo {
+func (c *cowHostList) get() []*HostInfo {
 	// TODO(zariel): should we replace this with []*HostInfo?
-	l, ok := c.list.Load().(*[]HostInfo)
+	l, ok := c.list.Load().(*[]*HostInfo)
 	if !ok {
 		return nil
 	}
 	return *l
 }
 
-func (c *cowHostList) set(list []HostInfo) {
+func (c *cowHostList) set(list []*HostInfo) {
 	c.mu.Lock()
 	c.list.Store(&list)
 	c.mu.Unlock()
 }
 
 // add will add a host if it not already in the list
-func (c *cowHostList) add(host HostInfo) {
+func (c *cowHostList) add(host *HostInfo) bool {
 	c.mu.Lock()
 	l := c.get()
 
 	if n := len(l); n == 0 {
-		l = []HostInfo{host}
+		l = []*HostInfo{host}
 	} else {
-		newL := make([]HostInfo, n+1)
+		newL := make([]*HostInfo, n+1)
 		for i := 0; i < n; i++ {
-			if host.Peer == l[i].Peer && host.HostId == l[i].HostId {
+			if host.Equal(l[i]) {
 				c.mu.Unlock()
-				return
+				return false
 			}
 			newL[i] = l[i]
 		}
@@ -55,9 +55,10 @@ func (c *cowHostList) add(host HostInfo) {
 
 	c.list.Store(&l)
 	c.mu.Unlock()
+	return true
 }
 
-func (c *cowHostList) update(host HostInfo) {
+func (c *cowHostList) update(host *HostInfo) {
 	c.mu.Lock()
 	l := c.get()
 
@@ -67,11 +68,11 @@ func (c *cowHostList) update(host HostInfo) {
 	}
 
 	found := false
-	newL := make([]HostInfo, len(l))
+	newL := make([]*HostInfo, len(l))
 	for i := range l {
-		if host.Peer == l[i].Peer && host.HostId == l[i].HostId {
+		if host.Equal(l[i]) {
 			newL[i] = host
-			found := true
+			found = true
 		} else {
 			newL[i] = l[i]
 		}
@@ -84,19 +85,19 @@ func (c *cowHostList) update(host HostInfo) {
 	c.mu.Unlock()
 }
 
-func (c *cowHostList) remove(addr string) {
+func (c *cowHostList) remove(addr string) bool {
 	c.mu.Lock()
 	l := c.get()
 	size := len(l)
 	if size == 0 {
 		c.mu.Unlock()
-		return
+		return false
 	}
 
 	found := false
-	newL := make([]HostInfo, 0, size)
+	newL := make([]*HostInfo, 0, size)
 	for i := 0; i < len(l); i++ {
-		if l[i].Peer != addr {
+		if l[i].Peer() != addr {
 			newL = append(newL, l[i])
 		} else {
 			found = true
@@ -105,12 +106,14 @@ func (c *cowHostList) remove(addr string) {
 
 	if !found {
 		c.mu.Unlock()
-		return
+		return false
 	}
 
 	newL = newL[:size-1 : size-1]
 	c.list.Store(&newL)
 	c.mu.Unlock()
+
+	return true
 }
 
 // RetryableQuery is an interface that represents a query or batch statement that
@@ -189,7 +192,7 @@ type roundRobinHostPolicy struct {
 	mu    sync.RWMutex
 }
 
-func (r *roundRobinHostPolicy) SetHosts(hosts []HostInfo) {
+func (r *roundRobinHostPolicy) SetHosts(hosts []*HostInfo) {
 	r.hosts.set(hosts)
 }
 
@@ -209,18 +212,18 @@ func (r *roundRobinHostPolicy) Pick(qry *Query) NextHost {
 
 		// always increment pos to evenly distribute traffic in case of
 		// failures
-		pos := atomic.AddUint32(&r.pos, 1)
+		pos := atomic.AddUint32(&r.pos, 1) - 1
 		if i >= len(hosts) {
 			return nil
 		}
-		host := &r.hosts[(pos)%uint32(len(r.hosts))]
+		host := hosts[(pos)%uint32(len(hosts))]
 		i++
 		return selectedRoundRobinHost{host}
 	}
 }
 
 func (r *roundRobinHostPolicy) AddHost(host *HostInfo) {
-	r.hosts.add(*host)
+	r.hosts.add(host)
 }
 
 func (r *roundRobinHostPolicy) RemoveHost(addr string) {
@@ -256,7 +259,7 @@ type tokenAwareHostPolicy struct {
 	fallback    HostSelectionPolicy
 }
 
-func (t *tokenAwareHostPolicy) SetHosts(hosts []HostInfo) {
+func (t *tokenAwareHostPolicy) SetHosts(hosts []*HostInfo) {
 	t.hosts.set(hosts)
 
 	t.mu.Lock()
@@ -281,7 +284,7 @@ func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) {
 }
 
 func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) {
-	t.hosts.add(*host)
+	t.hosts.add(host)
 
 	t.mu.Lock()
 	t.resetTokenRing()
@@ -398,22 +401,22 @@ func (host selectedTokenAwareHost) Mark(err error) {
 //     )
 //
 func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy {
-	return &hostPoolHostPolicy{hostMap: map[string]HostInfo{}, hp: hp}
+	return &hostPoolHostPolicy{hostMap: map[string]*HostInfo{}, hp: hp}
 }
 
 type hostPoolHostPolicy struct {
 	hp      hostpool.HostPool
 	mu      sync.RWMutex
-	hostMap map[string]HostInfo
+	hostMap map[string]*HostInfo
 }
 
-func (r *hostPoolHostPolicy) SetHosts(hosts []HostInfo) {
+func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) {
 	peers := make([]string, len(hosts))
-	hostMap := make(map[string]HostInfo, len(hosts))
+	hostMap := make(map[string]*HostInfo, len(hosts))
 
 	for i, host := range hosts {
-		peers[i] = host.Peer
-		hostMap[host.Peer] = host
+		peers[i] = host.Peer()
+		hostMap[host.Peer()] = host
 	}
 
 	r.mu.Lock()
@@ -426,7 +429,7 @@ func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	if _, ok := r.hostMap[host.Peer]; ok {
+	if _, ok := r.hostMap[host.Peer()]; ok {
 		return
 	}
 
@@ -434,10 +437,10 @@ func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
 	for addr := range r.hostMap {
 		hosts = append(hosts, addr)
 	}
-	hosts = append(hosts, host.Peer)
+	hosts = append(hosts, host.Peer())
 
 	r.hp.SetHosts(hosts)
-	r.hostMap[host.Peer] = *host
+	r.hostMap[host.Peer()] = host
 }
 
 func (r *hostPoolHostPolicy) RemoveHost(addr string) {
@@ -476,7 +479,7 @@ func (r *hostPoolHostPolicy) Pick(qry *Query) NextHost {
 			return nil
 		}
 
-		return selectedHostPoolHost{&host, hostR}
+		return selectedHostPoolHost{host, hostR}
 	}
 }
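
cowHostList pairs an atomic.Value with a writer mutex: readers load an immutable snapshot without taking any lock, while writers serialize on the mutex, build a fresh slice, and publish it atomically. A minimal copy-on-write list in the same style, using strings for brevity:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// cowList: lock-free readers, mutex-serialized writers.
type cowList struct {
	list atomic.Value // always holds a *[]string
	mu   sync.Mutex
}

func (c *cowList) get() []string {
	l, ok := c.list.Load().(*[]string)
	if !ok {
		return nil // nothing published yet
	}
	return *l
}

func (c *cowList) add(s string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	old := c.get()
	newL := make([]string, len(old), len(old)+1)
	copy(newL, old) // never mutate a published snapshot
	newL = append(newL, s)
	c.list.Store(&newL) // readers now see the new snapshot
}

func main() {
	var c cowList
	c.add("10.0.0.1")
	snapshot := c.get() // safe to iterate with no locks held
	c.add("10.0.0.2")
	fmt.Println(snapshot, c.get()) // [10.0.0.1] [10.0.0.1 10.0.0.2]
}
```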
 

+ 46 - 46
policies_test.go

@@ -16,35 +16,35 @@ import (
 func TestRoundRobinHostPolicy(t *testing.T) {
 	policy := RoundRobinHostPolicy()
 
-	hosts := []HostInfo{
-		HostInfo{HostId: "0"},
-		HostInfo{HostId: "1"},
+	hosts := []*HostInfo{
+		{hostId: "0"},
+		{hostId: "1"},
 	}
 
 	policy.SetHosts(hosts)
 
 	// interleaved iteration should always increment the host
 	iterA := policy.Pick(nil)
-	if actual := iterA(); actual.Info() != &hosts[0] {
-		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostId)
+	if actual := iterA(); actual.Info() != hosts[0] {
+		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID())
 	}
 	iterB := policy.Pick(nil)
-	if actual := iterB(); actual.Info() != &hosts[1] {
-		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostId)
+	if actual := iterB(); actual.Info() != hosts[1] {
+		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID())
 	}
-	if actual := iterB(); actual.Info() != &hosts[0] {
-		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostId)
+	if actual := iterB(); actual.Info() != hosts[0] {
+		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID())
 	}
-	if actual := iterA(); actual.Info() != &hosts[1] {
-		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostId)
+	if actual := iterA(); actual.Info() != hosts[1] {
+		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID())
 	}
 
 	iterC := policy.Pick(nil)
-	if actual := iterC(); actual.Info() != &hosts[0] {
-		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostId)
+	if actual := iterC(); actual.Info() != hosts[0] {
+		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID())
 	}
-	if actual := iterC(); actual.Info() != &hosts[1] {
-		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostId)
+	if actual := iterC(); actual.Info() != hosts[1] {
+		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID())
 	}
 }
 
@@ -65,23 +65,23 @@ func TestTokenAwareHostPolicy(t *testing.T) {
 	}
 
 	// set the hosts
-	hosts := []HostInfo{
-		HostInfo{Peer: "0", Tokens: []string{"00"}},
-		HostInfo{Peer: "1", Tokens: []string{"25"}},
-		HostInfo{Peer: "2", Tokens: []string{"50"}},
-		HostInfo{Peer: "3", Tokens: []string{"75"}},
+	hosts := []*HostInfo{
+		{peer: "0", tokens: []string{"00"}},
+		{peer: "1", tokens: []string{"25"}},
+		{peer: "2", tokens: []string{"50"}},
+		{peer: "3", tokens: []string{"75"}},
 	}
 	policy.SetHosts(hosts)
 
 	// the token ring is not setup without the partitioner, but the fallback
 	// should work
-	if actual := policy.Pick(nil)(); actual.Info().Peer != "0" {
-		t.Errorf("Expected peer 0 but was %s", actual.Info().Peer)
+	if actual := policy.Pick(nil)(); actual.Info().Peer() != "0" {
+		t.Errorf("Expected peer 0 but was %s", actual.Info().Peer())
 	}
 
 	query.RoutingKey([]byte("30"))
-	if actual := policy.Pick(query)(); actual.Info().Peer != "1" {
-		t.Errorf("Expected peer 1 but was %s", actual.Info().Peer)
+	if actual := policy.Pick(query)(); actual.Info().Peer() != "1" {
+		t.Errorf("Expected peer 1 but was %s", actual.Info().Peer())
 	}
 
 	policy.SetPartitioner("OrderedPartitioner")
@@ -89,18 +89,18 @@ func TestTokenAwareHostPolicy(t *testing.T) {
 	// now the token ring is configured
 	query.RoutingKey([]byte("20"))
 	iter = policy.Pick(query)
-	if actual := iter(); actual.Info().Peer != "1" {
-		t.Errorf("Expected peer 1 but was %s", actual.Info().Peer)
+	if actual := iter(); actual.Info().Peer() != "1" {
+		t.Errorf("Expected peer 1 but was %s", actual.Info().Peer())
 	}
 	// rest are round robin
-	if actual := iter(); actual.Info().Peer != "2" {
-		t.Errorf("Expected peer 2 but was %s", actual.Info().Peer)
+	if actual := iter(); actual.Info().Peer() != "2" {
+		t.Errorf("Expected peer 2 but was %s", actual.Info().Peer())
 	}
-	if actual := iter(); actual.Info().Peer != "3" {
-		t.Errorf("Expected peer 3 but was %s", actual.Info().Peer)
+	if actual := iter(); actual.Info().Peer() != "3" {
+		t.Errorf("Expected peer 3 but was %s", actual.Info().Peer())
 	}
-	if actual := iter(); actual.Info().Peer != "0" {
-		t.Errorf("Expected peer 0 but was %s", actual.Info().Peer)
+	if actual := iter(); actual.Info().Peer() != "0" {
+		t.Errorf("Expected peer 0 but was %s", actual.Info().Peer())
 	}
 }
 
@@ -108,9 +108,9 @@ func TestTokenAwareHostPolicy(t *testing.T) {
 func TestHostPoolHostPolicy(t *testing.T) {
 	policy := HostPoolHostPolicy(hostpool.New(nil))
 
-	hosts := []HostInfo{
-		HostInfo{HostId: "0", Peer: "0"},
-		HostInfo{HostId: "1", Peer: "1"},
+	hosts := []*HostInfo{
+		{hostId: "0", peer: "0"},
+		{hostId: "1", peer: "1"},
 	}
 
 	policy.SetHosts(hosts)
@@ -119,26 +119,26 @@ func TestHostPoolHostPolicy(t *testing.T) {
 	// interleaved iteration should always increment the host
 	iter := policy.Pick(nil)
 	actualA := iter()
-	if actualA.Info().HostId != "0" {
-		t.Errorf("Expected hosts[0] but was hosts[%s]", actualA.Info().HostId)
+	if actualA.Info().HostID() != "0" {
+		t.Errorf("Expected hosts[0] but was hosts[%s]", actualA.Info().HostID())
 	}
 	actualA.Mark(nil)
 
 	actualB := iter()
-	if actualB.Info().HostId != "1" {
-		t.Errorf("Expected hosts[1] but was hosts[%s]", actualB.Info().HostId)
+	if actualB.Info().HostID() != "1" {
+		t.Errorf("Expected hosts[1] but was hosts[%s]", actualB.Info().HostID())
 	}
 	actualB.Mark(fmt.Errorf("error"))
 
 	actualC := iter()
-	if actualC.Info().HostId != "0" {
-		t.Errorf("Expected hosts[0] but was hosts[%s]", actualC.Info().HostId)
+	if actualC.Info().HostID() != "0" {
+		t.Errorf("Expected hosts[0] but was hosts[%s]", actualC.Info().HostID())
 	}
 	actualC.Mark(nil)
 
 	actualD := iter()
-	if actualD.Info().HostId != "0" {
-		t.Errorf("Expected hosts[0] but was hosts[%s]", actualD.Info().HostId)
+	if actualD.Info().HostID() != "0" {
+		t.Errorf("Expected hosts[0] but was hosts[%s]", actualD.Info().HostID())
 	}
 	actualD.Mark(nil)
 }
@@ -170,8 +170,8 @@ func TestRoundRobinConnPolicy(t *testing.T) {
 func TestRoundRobinNilHostInfo(t *testing.T) {
 	policy := RoundRobinHostPolicy()
 
-	host := HostInfo{HostId: "host-1"}
-	policy.SetHosts([]HostInfo{host})
+	host := &HostInfo{hostId: "host-1"}
+	policy.SetHosts([]*HostInfo{host})
 
 	iter := policy.Pick(nil)
 	next := iter()
@@ -179,7 +179,7 @@ func TestRoundRobinNilHostInfo(t *testing.T) {
 		t.Fatal("got nil host")
 	} else if v := next.Info(); v == nil {
 		t.Fatal("got nil HostInfo")
-	} else if v.HostId != host.HostId {
+	} else if v.HostID() != host.HostID() {
 		t.Fatalf("expected host %v got %v", host, *v)
 	}
 

+ 7 - 0
session.go

@@ -99,6 +99,7 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
 			return nil, err
 		}
 		s.pool = pool
+
 		// TODO(zariel): this should be used to create initial metadata
 		s.pool.SetHosts(hosts)
 	} else {
@@ -108,6 +109,12 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
 			return nil, err
 		}
 		s.pool = pool
+		// we don't get host info
+		hosts := make([]*HostInfo, len(cfg.Hosts))
+		for i, addr := range cfg.Hosts {
+			hosts[i] = &HostInfo{peer: addr}
+		}
+		s.pool.SetHosts(hosts)
 	}
 
 	// TODO(zariel): we probably dont need this any more as we verify that we

+ 4 - 5
token.go

@@ -121,7 +121,7 @@ type tokenRing struct {
 	hosts       []*HostInfo
 }
 
-func newTokenRing(partitioner string, hosts []HostInfo) (*tokenRing, error) {
+func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) {
 	tokenRing := &tokenRing{
 		tokens: []token{},
 		hosts:  []*HostInfo{},
@@ -137,9 +137,8 @@ func newTokenRing(partitioner string, hosts []HostInfo) (*tokenRing, error) {
 		return nil, fmt.Errorf("Unsupported partitioner '%s'", partitioner)
 	}
 
-	for i := range hosts {
-		host := &hosts[i]
-		for _, strToken := range host.Tokens {
+	for _, host := range hosts {
+		for _, strToken := range host.Tokens() {
 			token := tokenRing.partitioner.ParseString(strToken)
 			tokenRing.tokens = append(tokenRing.tokens, token)
 			tokenRing.hosts = append(tokenRing.hosts, host)
@@ -181,7 +180,7 @@ func (t *tokenRing) String() string {
 		buf.WriteString("]")
 		buf.WriteString(t.tokens[i].String())
 		buf.WriteString(":")
-		buf.WriteString(t.hosts[i].Peer)
+		buf.WriteString(t.hosts[i].Peer())
 	}
 	buf.WriteString("\n}")
 	return string(buf.Bytes())
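
newTokenRing flattens each host's token list into parallel token/host slices and sorts them so lookups can binary-search for the owning host. A toy version of that construction and lookup, with hypothetical names and tokens simplified to int64:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

type entry struct {
	token int64
	peer  string
}

func main() {
	hosts := map[string][]string{
		"10.0.0.1": {"0", "50"},
		"10.0.0.2": {"25", "75"},
	}

	// Collect one (token, host) entry per token, as newTokenRing does.
	var ring []entry
	for peer, tokens := range hosts {
		for _, s := range tokens {
			t, err := strconv.ParseInt(s, 10, 64)
			if err != nil {
				panic(err)
			}
			ring = append(ring, entry{token: t, peer: peer})
		}
	}
	sort.Slice(ring, func(i, j int) bool { return ring[i].token < ring[j].token })

	// The owner of a token is the first ring entry with token >= it,
	// wrapping around to the start of the sorted ring.
	want := int64(30)
	i := sort.Search(len(ring), func(i int) bool { return ring[i].token >= want })
	if i == len(ring) {
		i = 0
	}
	fmt.Println(ring[i].peer) // 10.0.0.1 (owns token 50)
}
```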

+ 75 - 75
token_test.go

@@ -215,22 +215,22 @@ func TestUnknownTokenRing(t *testing.T) {
 // Test of the tokenRing with the Murmur3Partitioner
 func TestMurmur3TokenRing(t *testing.T) {
 	// Note, strings are parsed directly to int64, they are not murmur3 hashed
-	var hosts []HostInfo = []HostInfo{
-		HostInfo{
-			Peer:   "0",
-			Tokens: []string{"0"},
+	hosts := []*HostInfo{
+		{
+			peer:   "0",
+			tokens: []string{"0"},
 		},
-		HostInfo{
-			Peer:   "1",
-			Tokens: []string{"25"},
+		{
+			peer:   "1",
+			tokens: []string{"25"},
 		},
-		HostInfo{
-			Peer:   "2",
-			Tokens: []string{"50"},
+		{
+			peer:   "2",
+			tokens: []string{"50"},
 		},
-		HostInfo{
-			Peer:   "3",
-			Tokens: []string{"75"},
+		{
+			peer:   "3",
+			tokens: []string{"75"},
 		},
 	}
 	ring, err := newTokenRing("Murmur3Partitioner", hosts)
@@ -242,33 +242,33 @@ func TestMurmur3TokenRing(t *testing.T) {
 
 	var actual *HostInfo
 	actual = ring.GetHostForToken(p.ParseString("0"))
-	if actual.Peer != "0" {
-		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
+	if actual.Peer() != "0" {
+		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("25"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("50"))
-	if actual.Peer != "2" {
-		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
+	if actual.Peer() != "2" {
+		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("75"))
-	if actual.Peer != "3" {
-		t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer)
+	if actual.Peer() != "3" {
+		t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("12"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("24324545443332"))
-	if actual.Peer != "0" {
-		t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer)
+	if actual.Peer() != "0" {
+		t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer())
 	}
 }
 
@@ -276,28 +276,28 @@ func TestMurmur3TokenRing(t *testing.T) {
 func TestOrderedTokenRing(t *testing.T) {
 	// Tokens here more or less are similar layout to the int tokens above due
 	// to each numeric character translating to a consistently offset byte.
-	var hosts []HostInfo = []HostInfo{
-		HostInfo{
-			Peer: "0",
-			Tokens: []string{
+	hosts := []*HostInfo{
+		{
+			peer: "0",
+			tokens: []string{
 				"00",
 			},
 		},
-		HostInfo{
-			Peer: "1",
-			Tokens: []string{
+		{
+			peer: "1",
+			tokens: []string{
 				"25",
 			},
 		},
-		HostInfo{
-			Peer: "2",
-			Tokens: []string{
+		{
+			peer: "2",
+			tokens: []string{
 				"50",
 			},
 		},
-		HostInfo{
-			Peer: "3",
-			Tokens: []string{
+		{
+			peer: "3",
+			tokens: []string{
 				"75",
 			},
 		},
@@ -311,61 +311,61 @@ func TestOrderedTokenRing(t *testing.T) {
 
 	var actual *HostInfo
 	actual = ring.GetHostForToken(p.ParseString("0"))
-	if actual.Peer != "0" {
-		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
+	if actual.Peer() != "0" {
+		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("25"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("50"))
-	if actual.Peer != "2" {
-		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
+	if actual.Peer() != "2" {
+		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("75"))
-	if actual.Peer != "3" {
-		t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer)
+	if actual.Peer() != "3" {
+		t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("12"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("24324545443332"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"24324545443332\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"24324545443332\", but was %s", actual.Peer())
 	}
 }
 
 // Test of the tokenRing with the RandomPartitioner
 func TestRandomTokenRing(t *testing.T) {
 	// String tokens are parsed into big.Int in base 10
-	var hosts []HostInfo = []HostInfo{
-		HostInfo{
-			Peer: "0",
-			Tokens: []string{
+	hosts := []*HostInfo{
+		{
+			peer: "0",
+			tokens: []string{
 				"00",
 			},
 		},
-		HostInfo{
-			Peer: "1",
-			Tokens: []string{
+		{
+			peer: "1",
+			tokens: []string{
 				"25",
 			},
 		},
-		HostInfo{
-			Peer: "2",
-			Tokens: []string{
+		{
+			peer: "2",
+			tokens: []string{
 				"50",
 			},
 		},
-		HostInfo{
-			Peer: "3",
-			Tokens: []string{
+		{
+			peer: "3",
+			tokens: []string{
 				"75",
 			},
 		},
@@ -379,32 +379,32 @@ func TestRandomTokenRing(t *testing.T) {
 
 	var actual *HostInfo
 	actual = ring.GetHostForToken(p.ParseString("0"))
-	if actual.Peer != "0" {
-		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer)
+	if actual.Peer() != "0" {
+		t.Errorf("Expected peer 0 for token \"0\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("25"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"25\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("50"))
-	if actual.Peer != "2" {
-		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer)
+	if actual.Peer() != "2" {
+		t.Errorf("Expected peer 2 for token \"50\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("75"))
-	if actual.Peer != "3" {
-		t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer)
+	if actual.Peer() != "3" {
+		t.Errorf("Expected peer 3 for token \"01\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("12"))
-	if actual.Peer != "1" {
-		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer)
+	if actual.Peer() != "1" {
+		t.Errorf("Expected peer 1 for token \"12\", but was %s", actual.Peer())
 	}
 
 	actual = ring.GetHostForToken(p.ParseString("24324545443332"))
-	if actual.Peer != "0" {
-		t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer)
+	if actual.Peer() != "0" {
+		t.Errorf("Expected peer 0 for token \"24324545443332\", but was %s", actual.Peer())
 	}
 }