@@ -1,6 +1,7 @@
 package gocql
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"strconv"
@@ -98,15 +99,25 @@ func (c cassVersion) nodeUpDelay() time.Duration {
 type HostInfo struct {
 	// TODO(zariel): reduce locking maybe, not all values will change, but to ensure
 	// that we are thread safe use a mutex to access all fields.
-	mu         sync.RWMutex
-	peer       net.IP
-	port       int
-	dataCenter string
-	rack       string
-	hostId     string
-	version    cassVersion
-	state      nodeState
-	tokens     []string
+	mu               sync.RWMutex
+	peer             net.IP
+	broadcastAddress net.IP
+	listenAddress    net.IP
+	rpcAddress       net.IP
+	preferredIP      net.IP
+	connectAddress   net.IP
+	port             int
+	dataCenter       string
+	rack             string
+	hostId           string
+	workload         string
+	graph            bool
+	dseVersion       string
+	partitioner      string
+	clusterName      string
+	version          cassVersion
+	state            nodeState
+	tokens           []string
 }
 
 func (h *HostInfo) Equal(host *HostInfo) bool {
@@ -115,7 +126,7 @@ func (h *HostInfo) Equal(host *HostInfo) bool {
 	host.mu.RLock()
 	defer host.mu.RUnlock()
 
-	return h.peer.Equal(host.peer)
+	return h.ConnectAddress().Equal(host.ConnectAddress())
 }
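+
+// For example (hypothetical addresses): two entries that dial the same
+// rpc_address now compare equal even if their peer columns differ:
+//
+//	a := &HostInfo{peer: net.ParseIP("10.0.0.1"), rpcAddress: net.ParseIP("192.168.1.5")}
+//	b := &HostInfo{peer: net.ParseIP("10.0.0.2"), rpcAddress: net.ParseIP("192.168.1.5")}
+//	a.Equal(b) // true: both connect addresses resolve to 192.168.1.5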
 
 func (h *HostInfo) Peer() net.IP {
@@ -131,6 +142,57 @@ func (h *HostInfo) setPeer(peer net.IP) *HostInfo {
 	return h
 }
 
+// Returns the address that should be used to connect to the host.
+// If you wish to override this, use an AddressTranslator or
+// use a HostFilter to SetConnectAddress().
+func (h *HostInfo) ConnectAddress() net.IP {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+
+	if h.connectAddress == nil {
+		// Use 'rpc_address' if provided and it's not 0.0.0.0
+		if h.rpcAddress != nil && !h.rpcAddress.IsUnspecified() {
+			return h.rpcAddress
+		}
+		// Peer should always be set if this is from 'system.peers'
+		if h.peer != nil {
+			return h.peer
+		}
+	}
+	return h.connectAddress
+}
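+
+// A minimal sketch of the precedence above (hypothetical addresses): an
+// explicitly set connect address wins, then a non-0.0.0.0 rpc_address,
+// then the peer address:
+//
+//	h := &HostInfo{peer: net.ParseIP("10.0.0.2"), rpcAddress: net.IPv4zero}
+//	h.ConnectAddress() // 10.0.0.2: rpc_address is 0.0.0.0, fall back to peer
+//	h.SetConnectAddress(net.ParseIP("203.0.113.7"))
+//	h.ConnectAddress() // 203.0.113.7: the explicit address takes precedence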
+
+func (h *HostInfo) SetConnectAddress(address net.IP) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.connectAddress = address
+	return h
+}
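+
+// One use for this, per the ConnectAddress comment above, is rewriting
+// addresses from a HostFilter on the ClusterConfig (a sketch; assumes the
+// HostFilterFunc adapter and a reachable replacement address):
+//
+//	cluster.HostFilter = HostFilterFunc(func(host *HostInfo) bool {
+//		if host.ConnectAddress().IsLoopback() {
+//			// e.g. a containerised node reporting 127.0.0.1
+//			host.SetConnectAddress(net.ParseIP("203.0.113.7"))
+//		}
+//		return true
+//	})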
+
+func (h *HostInfo) BroadcastAddress() net.IP {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.broadcastAddress
+}
+
+func (h *HostInfo) ListenAddress() net.IP {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.listenAddress
+}
+
+func (h *HostInfo) RPCAddress() net.IP {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.rpcAddress
+}
+
+func (h *HostInfo) PreferredIP() net.IP {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.preferredIP
+}
+
 func (h *HostInfo) DataCenter() string {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
@@ -170,6 +232,36 @@ func (h *HostInfo) setHostID(hostID string) *HostInfo {
 	return h
 }
 
+func (h *HostInfo) WorkLoad() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.workload
+}
+
+func (h *HostInfo) Graph() bool {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.graph
+}
+
+func (h *HostInfo) DSEVersion() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.dseVersion
+}
+
+func (h *HostInfo) Partitioner() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.partitioner
+}
+
+func (h *HostInfo) ClusterName() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.clusterName
+}
+
 func (h *HostInfo) Version() cassVersion {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
@@ -239,25 +331,24 @@ func (h *HostInfo) IsUp() bool {
 func (h *HostInfo) String() string {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
-	return fmt.Sprintf("[hostinfo peer=%q port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.peer, h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens))
+	return fmt.Sprintf("[HostInfo connectAddress=%q peer=%q rpc_address=%q broadcast_address=%q "+
+		"port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]",
+		h.connectAddress, h.peer, h.rpcAddress, h.broadcastAddress,
+		h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens))
 }
 
 // Polls system.peers at a specific interval to find new hosts
 type ringDescriber struct {
-	dcFilter   string
-	rackFilter string
-	session    *Session
-	closeChan  chan bool
-	// indicates that we can use system.local to get the connections remote address
-	localHasRpcAddr bool
-
+	session         *Session
 	mu              sync.Mutex
 	prevHosts       []*HostInfo
+	localHost       *HostInfo
 	prevPartitioner string
 }
 
-func checkSystemLocal(control *controlConn) (bool, error) {
-	iter := control.query("SELECT broadcast_address FROM system.local")
+// Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces
+func checkSystemSchema(control *controlConn) (bool, error) {
+	iter := control.query("SELECT * FROM system_schema.keyspaces")
 	if err := iter.err; err != nil {
 		if errf, ok := err.(*errorFrame); ok {
 			if errf.code == errSyntax {
@@ -271,106 +362,267 @@ func checkSystemLocal(control *controlConn) (bool, error) {
 	return true, nil
 }
 
-// Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces
-func checkSystemSchema(control *controlConn) (bool, error) {
-	iter := control.query("SELECT * FROM system_schema.keyspaces")
-	if err := iter.err; err != nil {
-		if errf, ok := err.(*errorFrame); ok {
-			if errf.code == errReadFailure {
-				return false, nil
+// Given a map that represents a row from either system.local or system.peers,
+// return as much information as we can in *HostInfo
+func (r *ringDescriber) hostInfoFromMap(row map[string]interface{}) (*HostInfo, error) {
+	const assertErrorMsg = "Assertion failed for %s"
+	var ok bool
+
+	// Default to our connected port if the cluster doesn't have port information
+	host := HostInfo{
+		port: r.session.cfg.Port,
+	}
+
+	for key, value := range row {
+		switch key {
+		case "data_center":
+			host.dataCenter, ok = value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "data_center")
+			}
+		case "rack":
+			host.rack, ok = value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "rack")
+			}
+		case "host_id":
+			hostId, ok := value.(UUID)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "host_id")
+			}
+			host.hostId = hostId.String()
+		case "release_version":
+			version, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "release_version")
+			}
+			host.version.Set(version)
+		case "peer":
+			ip, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "peer")
+			}
+			host.peer = net.ParseIP(ip)
+		case "cluster_name":
+			host.clusterName, ok = value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "cluster_name")
+			}
+		case "partitioner":
+			host.partitioner, ok = value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "partitioner")
+			}
+		case "broadcast_address":
+			ip, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "broadcast_address")
+			}
+			host.broadcastAddress = net.ParseIP(ip)
+		case "preferred_ip":
+			ip, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "preferred_ip")
+			}
+			host.preferredIP = net.ParseIP(ip)
+		case "rpc_address":
+			ip, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "rpc_address")
+			}
+			host.rpcAddress = net.ParseIP(ip)
+		case "listen_address":
+			ip, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "listen_address")
+			}
+			host.listenAddress = net.ParseIP(ip)
+		case "workload":
+			host.workload, ok = value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "workload")
+			}
+		case "graph":
+			host.graph, ok = value.(bool)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "graph")
+			}
+		case "tokens":
+			host.tokens, ok = value.([]string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "tokens")
+			}
+		case "dse_version":
+			host.dseVersion, ok = value.(string)
+			if !ok {
+				return nil, fmt.Errorf(assertErrorMsg, "dse_version")
 			}
 		}
+		// TODO(thrawn01): Add 'port'? once CASSANDRA-7544 is complete
+		// Not sure what the port field will be called until the JIRA issue is complete
+	}
 
-		return false, err
+	return &host, nil
+}
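+
+// Sketch of the expected input: a MapScan'd row whose values carry the Go
+// types asserted above (string addresses, []string tokens, and so on); the
+// concrete values here are hypothetical:
+//
+//	row := map[string]interface{}{
+//		"peer":            "10.0.0.2",
+//		"data_center":     "dc1",
+//		"rack":            "rack1",
+//		"rpc_address":     "10.0.0.2",
+//		"release_version": "3.0.9",
+//		"tokens":          []string{"-9223372036854775808"},
+//	}
+//	host, err := r.hostInfoFromMap(row)
+//	// err == nil, host.ConnectAddress() == 10.0.0.2, host.port == r.session.cfg.Port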
+
+// Ask the control node for its local host information
+func (r *ringDescriber) GetLocalHostInfo() (*HostInfo, error) {
+	it := r.session.control.query("SELECT * FROM system.local WHERE key='local'")
+	if it == nil {
+		return nil, errors.New("Attempted to query 'system.local' on a closed control connection")
 	}
+	return r.extractHostInfo(it)
+}
 
-	return true, nil
+// Given an ip address and port, return a peer that matches the ip address
+func (r *ringDescriber) GetPeerHostInfo(ip net.IP, port int) (*HostInfo, error) {
+	it := r.session.control.query("SELECT * FROM system.peers WHERE peer=?", ip)
+	if it == nil {
+		return nil, errors.New("Attempted to query 'system.peers' on a closed control connection")
+	}
+	return r.extractHostInfo(it)
 }
 
-func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	// we need conn to be the same because we need to query system.peers and system.local
-	// on the same node to get the whole cluster
+func (r *ringDescriber) extractHostInfo(it *Iter) (*HostInfo, error) {
+	row := make(map[string]interface{})
 
-	const (
-		legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner, release_version FROM system.local"
-		// only supported in 2.2.0, 2.1.6, 2.0.16
-		localQuery = "SELECT broadcast_address, data_center, rack, host_id, tokens, partitioner, release_version FROM system.local"
-	)
+	// expect only 1 row
+	it.MapScan(row)
+	if err := it.Close(); err != nil {
+		return nil, err
+	}
 
-	localHost := &HostInfo{}
-	if r.localHasRpcAddr {
-		iter := r.session.control.query(localQuery)
-		if iter == nil {
-			return r.prevHosts, r.prevPartitioner, nil
-		}
+	// extract all available info about the host
+	return r.hostInfoFromMap(row)
+}
 
-		iter.Scan(&localHost.peer, &localHost.dataCenter, &localHost.rack,
-			&localHost.hostId, &localHost.tokens, &partitioner, &localHost.version)
+// Ask the control node for host info on all its known peers
+func (r *ringDescriber) GetClusterPeerInfo() ([]*HostInfo, error) {
+	var hosts []*HostInfo
 
-		if err = iter.Close(); err != nil {
-			return nil, "", err
-		}
-	} else {
-		iter := r.session.control.withConn(func(c *Conn) *Iter {
-			localHost = c.host
-			return c.query(legacyLocalQuery)
-		})
+	// Ask the node for a list of its peers
+	it := r.session.control.query("SELECT * FROM system.peers")
+	if it == nil {
+		return nil, errors.New("Attempted to query 'system.peers' on a closed connection")
+	}
 
-		if iter == nil {
-			return r.prevHosts, r.prevPartitioner, nil
+	for {
+		row := make(map[string]interface{})
+		if !it.MapScan(row) {
+			break
+		}
+		// extract all available info about the peer
+		host, err := r.hostInfoFromMap(row)
+		if err != nil {
+			return nil, err
 		}
 
-		iter.Scan(&localHost.dataCenter, &localHost.rack, &localHost.hostId, &localHost.tokens, &partitioner, &localHost.version)
-
-		if err = iter.Close(); err != nil {
-			return nil, "", err
+		// If it's not a valid peer, skip it
+		if !r.IsValidPeer(host) {
+			Logger.Printf("Found invalid peer '%+v', "+
+				"likely due to a gossip or snitch issue; this host will be ignored", host)
+			continue
 		}
+		hosts = append(hosts, host)
 	}
 
+	if it.err != nil {
+		return nil, fmt.Errorf("while scanning 'system.peers' table: %s", it.err)
+	}
+	return hosts, nil
+}
 
-	localHost.port = r.session.cfg.Port
+// Return true if the host is a valid peer
+func (r *ringDescriber) IsValidPeer(host *HostInfo) bool {
+	return !(len(host.RPCAddress()) == 0 ||
+		host.hostId == "" ||
+		host.dataCenter == "" ||
+		host.rack == "" ||
+		len(host.tokens) == 0)
+}
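+
+// For example, a stale row left behind by a removed node typically has no
+// data_center, rack or tokens and is skipped (hypothetical values):
+//
+//	ghost := &HostInfo{rpcAddress: net.ParseIP("10.0.0.9")}
+//	r.IsValidPeer(ghost) // false: hostId, dataCenter, rack and tokens are all empty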
 
-	hosts = []*HostInfo{localHost}
+// Return a list of hosts the cluster knows about
+func (r *ringDescriber) GetHosts() ([]*HostInfo, string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
 
-	rows := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens, release_version FROM system.peers").Scanner()
-	if rows == nil {
-		return r.prevHosts, r.prevPartitioner, nil
+	// Update the localHost info with data from the connected host
+	localHost, err := r.GetLocalHostInfo()
+	if err != nil {
+		return r.prevHosts, r.prevPartitioner, err
 	}
 
-	for rows.Next() {
-		host := &HostInfo{port: r.session.cfg.Port}
-		err := rows.Scan(&host.peer, &host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version)
-		if err != nil {
-			Logger.Println(err)
-			continue
-		}
+	// Update our list of hosts by querying the cluster
+	hosts, err := r.GetClusterPeerInfo()
+	if err != nil {
+		return r.prevHosts, r.prevPartitioner, err
+	}
 
-		if r.matchFilter(host) {
-			hosts = append(hosts, host)
+	hosts = append(hosts, localHost)
+
+	// Filter the hosts if a filter is provided
+	filteredHosts := hosts
+	if r.session.cfg.HostFilter != nil {
+		filteredHosts = filteredHosts[:0]
+		for _, host := range hosts {
+			if r.session.cfg.HostFilter.Accept(host) {
+				filteredHosts = append(filteredHosts, host)
+			}
 		}
 	}
 
-	if err = rows.Err(); err != nil {
-		return nil, "", err
+	r.prevHosts = filteredHosts
+	r.prevPartitioner = localHost.partitioner
+	r.localHost = localHost
+
+	return filteredHosts, localHost.partitioner, nil
+}
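+
+// With dcFilter/rackFilter removed, the equivalent filtering now hangs off
+// the cluster config (sketch; assumes the DataCentreHostFilter helper and a
+// hypothetical DC name):
+//
+//	cluster := NewCluster("10.0.0.1")
+//	cluster.HostFilter = DataCentreHostFilter("dc1")
+//	// GetHosts will now drop peers outside "dc1" before they reach the pool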
+
+// Given an ip/port, return the HostInfo for the specified ip/port
+func (r *ringDescriber) GetHostInfo(ip net.IP, port int) (*HostInfo, error) {
+	// TODO(thrawn01): Is IgnorePeerAddr still useful now that we have DisableInitialHostLookup?
+	// TODO(thrawn01): should we also check for DisableInitialHostLookup and return if true?
+
+	// Ignore the port and connect address and use the address/port we already have
+	if r.session.control == nil || r.session.cfg.IgnorePeerAddr {
+		return &HostInfo{connectAddress: ip, port: port}, nil
 	}
 
-	r.prevHosts = hosts
-	r.prevPartitioner = partitioner
+	// Attempt to get the host info for our control connection
+	controlHost := r.session.control.GetHostInfo()
+	if controlHost == nil {
+		return nil, errors.New("invalid control connection")
+	}
 
-	return hosts, partitioner, nil
-}
+	var (
+		host *HostInfo
+		err  error
+	)
+
+	// If we are asking about the same node our control connection has a connection to
+	if controlHost.ConnectAddress().Equal(ip) {
+		host, err = r.GetLocalHostInfo()
+		if err != nil {
+			return nil, err
+		}
 
-func (r *ringDescriber) matchFilter(host *HostInfo) bool {
-	if r.dcFilter != "" && r.dcFilter != host.DataCenter() {
-		return false
+		// Always respect the provided control node address and disregard the ip address
+		// the cassandra node provides. We do this as we are already connected and have a
+		// known valid ip address. This insulates gocql from client connection issues stemming
+		// from node misconfiguration. For instance, when a node is run from a container, by
+		// default the node will report its ip address as 127.0.0.1, which is typically invalid.
+		host.SetConnectAddress(ip)
+	} else {
+		host, err = r.GetPeerHostInfo(ip, port)
 	}
 
-	if r.rackFilter != "" && r.rackFilter != host.Rack() {
-		return false
+	// No host was found matching this ip/port
+	if err != nil {
+		return nil, err
 	}
 
-	return true
+	// Apply the host filter to the result
+	if r.session.cfg.HostFilter != nil && !r.session.cfg.HostFilter.Accept(host) {
+		return nil, nil
+	}
+
+	return host, err
 }
 
 func (r *ringDescriber) refreshRing() error {
@@ -386,13 +638,11 @@ func (r *ringDescriber) refreshRing() error {
 	// TODO: move this to session
 	// TODO: handle removing hosts here
 	for _, h := range hosts {
-		if r.session.cfg.HostFilter == nil || r.session.cfg.HostFilter.Accept(h) {
-			if host, ok := r.session.ring.addHostIfMissing(h); !ok {
-				r.session.pool.addHost(h)
-				r.session.policy.AddHost(h)
-			} else {
-				host.update(h)
-			}
+		if host, ok := r.session.ring.addHostIfMissing(h); !ok {
+			r.session.pool.addHost(h)
+			r.session.policy.AddHost(h)
+		} else {
+			host.update(h)
 		}
 	}