|
|
@@ -1,6 +1,7 @@
|
|
|
package gocql
|
|
|
|
|
|
import (
|
|
|
+ "fmt"
|
|
|
"log"
|
|
|
"net"
|
|
|
"time"
|
|
|
@@ -14,6 +15,10 @@ type HostInfo struct {
|
|
|
Tokens []string
|
|
|
}
|
|
|
|
|
|
+func (h HostInfo) String() string {
|
|
|
+ return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q num_tokens=%d]", h.Peer, h.DataCenter, h.Rack, h.HostId, len(h.Tokens))
|
|
|
+}
|
|
|
+
|
|
|
// Polls system.peers at a specific interval to find new hosts
|
|
|
type ringDescriber struct {
|
|
|
dcFilter string
|
|
|
@@ -22,46 +27,78 @@ type ringDescriber struct {
|
|
|
prevPartitioner string
|
|
|
session *Session
|
|
|
closeChan chan bool
|
|
|
+ // indicates that we can use system.local to get the connection's remote address
|
|
|
+ localHasRpcAddr bool
|
|
|
+}
|
|
|
+
|
|
|
+func checkSystemLocal(control *controlConn) (bool, error) {
|
|
|
+ iter := control.query("SELECT rpc_address FROM system.local")
|
|
|
+ if err := iter.err; err != nil {
|
|
|
+ if errf, ok := err.(*errorFrame); ok {
|
|
|
+ if errf.code == errSyntax {
|
|
|
+ return false, nil
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return false, err
|
|
|
+ }
|
|
|
+
|
|
|
+ return true, nil
|
|
|
}
|
|
|
|
|
|
func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err error) {
|
|
|
// we need conn to be the same because we need to query system.peers and system.local
|
|
|
// on the same node to get the whole cluster
|
|
|
|
|
|
- iter := r.session.control.query("SELECT data_center, rack, host_id, tokens, partitioner FROM system.local")
|
|
|
- if iter == nil {
|
|
|
- return r.prevHosts, r.prevPartitioner, nil
|
|
|
- }
|
|
|
+ const (
|
|
|
+ legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner FROM system.local"
|
|
|
+ // only supported in 2.2.0, 2.1.6, 2.0.16
|
|
|
+ localQuery = "SELECT rpc_address, data_center, rack, host_id, tokens, partitioner FROM system.local"
|
|
|
+ )
|
|
|
+
|
|
|
+ var localHost HostInfo
|
|
|
+ if r.localHasRpcAddr {
|
|
|
+ iter := r.session.control.query(localQuery)
|
|
|
+ if iter == nil {
|
|
|
+ return r.prevHosts, r.prevPartitioner, nil
|
|
|
+ }
|
|
|
|
|
|
- conn := r.session.pool.Pick(nil)
|
|
|
- if conn == nil {
|
|
|
- return r.prevHosts, r.prevPartitioner, nil
|
|
|
- }
|
|
|
+ iter.Scan(&localHost.Peer, &localHost.DataCenter, &localHost.Rack,
|
|
|
+ &localHost.HostId, &localHost.Tokens, &partitioner)
|
|
|
|
|
|
- host := HostInfo{}
|
|
|
- iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens, &partitioner)
|
|
|
+ if err = iter.Close(); err != nil {
|
|
|
+ return nil, "", err
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ iter := r.session.control.query(legacyLocalQuery)
|
|
|
+ if iter == nil {
|
|
|
+ return r.prevHosts, r.prevPartitioner, nil
|
|
|
+ }
|
|
|
|
|
|
- if err = iter.Close(); err != nil {
|
|
|
- return nil, "", err
|
|
|
- }
|
|
|
+ iter.Scan(&localHost.DataCenter, &localHost.Rack, &localHost.HostId, &localHost.Tokens, &partitioner)
|
|
|
|
|
|
- addr, _, err := net.SplitHostPort(conn.Address())
|
|
|
- if err != nil {
|
|
|
- // this should not happen, ever, as this is the address that was dialed by conn, here
|
|
|
- // a panic makes sense, please report a bug if it occurs.
|
|
|
- panic(err)
|
|
|
- }
|
|
|
+ if err = iter.Close(); err != nil {
|
|
|
+ return nil, "", err
|
|
|
+ }
|
|
|
|
|
|
- host.Peer = addr
|
|
|
+ addr, _, err := net.SplitHostPort(r.session.control.addr())
|
|
|
+ if err != nil {
|
|
|
+ // this should not happen, ever, as this is the address that was dialed by the control conn, here
|
|
|
+ // a panic makes sense, please report a bug if it occurs.
|
|
|
+ panic(err)
|
|
|
+ }
|
|
|
+
|
|
|
+ localHost.Peer = addr
|
|
|
+ }
|
|
|
|
|
|
- hosts = []HostInfo{host}
|
|
|
+ hosts = []HostInfo{localHost}
|
|
|
|
|
|
- iter = r.session.control.query("SELECT peer, data_center, rack, host_id, tokens FROM system.peers")
|
|
|
+ iter := r.session.control.query("SELECT peer, data_center, rack, host_id, tokens FROM system.peers")
|
|
|
if iter == nil {
|
|
|
return r.prevHosts, r.prevPartitioner, nil
|
|
|
}
|
|
|
|
|
|
- host = HostInfo{}
|
|
|
+ host := HostInfo{}
|
|
|
for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
|
|
|
if r.matchFilter(&host) {
|
|
|
hosts = append(hosts, host)
|
|
|
@@ -92,28 +129,32 @@ func (r *ringDescriber) matchFilter(host *HostInfo) bool {
|
|
|
return true
|
|
|
}
|
|
|
|
|
|
-func (h *ringDescriber) run(sleep time.Duration) {
|
|
|
+func (r *ringDescriber) refreshRing() {
|
|
|
+ // if we have 0 hosts this will return the previous list of hosts to
|
|
|
+ // attempt to reconnect to the cluster otherwise we would never find
|
|
|
+ // downed hosts again, could possibly have an optimisation to only
|
|
|
+ // try to add new hosts if GetHosts didn't error and the hosts didn't change.
|
|
|
+ hosts, partitioner, err := r.GetHosts()
|
|
|
+ if err != nil {
|
|
|
+ log.Println("RingDescriber: unable to get ring topology:", err)
|
|
|
+ return
|
|
|
+ }
|
|
|
+
|
|
|
+ r.session.pool.SetHosts(hosts)
|
|
|
+ r.session.pool.SetPartitioner(partitioner)
|
|
|
+}
|
|
|
+
|
|
|
+func (r *ringDescriber) run(sleep time.Duration) {
|
|
|
if sleep == 0 {
|
|
|
sleep = 30 * time.Second
|
|
|
}
|
|
|
|
|
|
for {
|
|
|
- // if we have 0 hosts this will return the previous list of hosts to
|
|
|
- // attempt to reconnect to the cluster otherwise we would never find
|
|
|
- // downed hosts again, could possibly have an optimisation to only
|
|
|
- // try to add new hosts if GetHosts didnt error and the hosts didnt change.
|
|
|
- hosts, partitioner, err := h.GetHosts()
|
|
|
- if err != nil {
|
|
|
- log.Println("RingDescriber: unable to get ring topology:", err)
|
|
|
- continue
|
|
|
- }
|
|
|
-
|
|
|
- h.session.pool.SetHosts(hosts)
|
|
|
- h.session.pool.SetPartitioner(partitioner)
|
|
|
+ r.refreshRing()
|
|
|
|
|
|
select {
|
|
|
case <-time.After(sleep):
|
|
|
- case <-h.closeChan:
|
|
|
+ case <-r.closeChan:
|
|
|
return
|
|
|
}
|
|
|
}
|