@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"log"
+	"net"
 	"sync"
 	"time"
 )
@@ -90,9 +91,13 @@ To see a more complete example of a ConnectionPool implementation please see the
 type ConnectionPool interface {
 	Pick(*Query) *Conn
 	Size() int
-	HandleError(*Conn, error, bool)
 	Close()
-	SetHosts(host []HostInfo)
+	SetHosts(hosts []HostInfo)
+}
+
+// SetPartitioner is the interface to implement to receive the partitioner value.
+type SetPartitioner interface {
+	SetPartitioner(partitioner string)
 }
 
 //NewPoolFunc is the type used by ClusterConfig to create a pool of a specific type.
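
For reference, a custom pool now only needs the four remaining ConnectionPool methods, and may additionally implement SetPartitioner to be handed the cluster's partitioner name (presumably via a type assertion elsewhere in the driver). The skeleton below is purely illustrative and not part of this change; singleConnPool is a made-up type wrapping a single Conn.

	// singleConnPool is a hypothetical, minimal ConnectionPool used only to
	// illustrate the interface surface after this change; it holds a single
	// connection and ignores host updates.
	type singleConnPool struct {
		conn        *Conn
		partitioner string
	}

	func (s *singleConnPool) Pick(qry *Query) *Conn { return s.conn }

	func (s *singleConnPool) Size() int {
		if s.conn == nil {
			return 0
		}
		return 1
	}

	func (s *singleConnPool) Close() {
		if s.conn != nil {
			s.conn.Close()
		}
	}

	func (s *singleConnPool) SetHosts(hosts []HostInfo) {}

	// SetPartitioner is optional; implementing it means the pool will be
	// told the partitioner name, per the SetPartitioner interface above.
	func (s *singleConnPool) SetPartitioner(partitioner string) { s.partitioner = partitioner }
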
@@ -395,7 +400,6 @@ func (c *SimplePool) SetHosts(hosts []HostInfo) {
 
 	for _, host := range hosts {
 		host := host
-
 		delete(toRemove, host.Peer)
 		// we already have it
 		if _, ok := c.hosts[host.Peer]; ok {
@@ -435,3 +439,404 @@ func (c *SimplePool) removeHostLocked(addr string) {
 		}
 	}
 }
+
+//NewRoundRobinConnPool creates a connection pool which selects hosts by
+//round-robin, and then selects a connection for that host by round-robin.
+func NewRoundRobinConnPool(cfg *ClusterConfig) (ConnectionPool, error) {
+	return NewPolicyConnPool(
+		cfg,
+		NewRoundRobinHostPolicy(),
+		NewRoundRobinConnPolicy,
+	)
+}
+
+//NewTokenAwareConnPool creates a connection pool which selects hosts by
+//a token aware policy, and then selects a connection for that host by
+//round-robin.
+func NewTokenAwareConnPool(cfg *ClusterConfig) (ConnectionPool, error) {
+	return NewPolicyConnPool(
+		cfg,
+		NewTokenAwareHostPolicy(NewRoundRobinHostPolicy()),
+		NewRoundRobinConnPolicy,
+	)
+}
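
As a usage sketch, an application picks one of these constructors when building its cluster configuration. The snippet below is hypothetical wiring: it assumes ClusterConfig carries a NewPoolFunc-typed field (named ConnPoolType here, per the NewPoolFunc doc comment above), and imports are elided; from outside the package these identifiers are qualified with the gocql package name.

	// Hypothetical wiring; the ConnPoolType field name is assumed and may
	// differ in your version of the driver.
	cluster := NewCluster("192.168.1.1", "192.168.1.2")
	cluster.Keyspace = "example"
	cluster.ConnPoolType = NewTokenAwareConnPool // or NewRoundRobinConnPool
	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
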
+
+type policyConnPool struct {
+	port     int
+	numConns int
+	connCfg  ConnConfig
+	keyspace string
+
+	mu            sync.RWMutex
+	hostPolicy    HostSelectionPolicy
+	connPolicy    func() ConnSelectionPolicy
+	hostConnPools map[string]*hostConnPool
+}
+
+func NewPolicyConnPool(
+	cfg *ClusterConfig,
+	hostPolicy HostSelectionPolicy,
+	connPolicy func() ConnSelectionPolicy,
+) (ConnectionPool, error) {
+	var err error
+	var tlsConfig *tls.Config
+
+	if cfg.SslOpts != nil {
+		tlsConfig, err = setupTLSConfig(cfg.SslOpts)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// create the pool
+	pool := &policyConnPool{
+		port:     cfg.Port,
+		numConns: cfg.NumConns,
+		connCfg: ConnConfig{
+			ProtoVersion:  cfg.ProtoVersion,
+			CQLVersion:    cfg.CQLVersion,
+			Timeout:       cfg.Timeout,
+			NumStreams:    cfg.NumStreams,
+			Compressor:    cfg.Compressor,
+			Authenticator: cfg.Authenticator,
+			Keepalive:     cfg.SocketKeepalive,
+			tlsConfig:     tlsConfig,
+		},
+		keyspace:      cfg.Keyspace,
+		hostPolicy:    hostPolicy,
+		connPolicy:    connPolicy,
+		hostConnPools: map[string]*hostConnPool{},
+	}
+
+	hosts := make([]HostInfo, len(cfg.Hosts))
+	for i, hostAddr := range cfg.Hosts {
+		hosts[i].Peer = hostAddr
+	}
+
+	pool.SetHosts(hosts)
+
+	return pool, nil
+}
+
+func (p *policyConnPool) SetHosts(hosts []HostInfo) {
+	p.mu.Lock()
+
+	toRemove := make(map[string]struct{})
+	for addr := range p.hostConnPools {
+		toRemove[addr] = struct{}{}
+	}
+
+	// TODO connect to hosts in parallel, but wait for pools to be
+	// created before returning
+
+	for i := range hosts {
+		pool, exists := p.hostConnPools[hosts[i].Peer]
+		if !exists {
+			// create a connection pool for the host
+			pool = newHostConnPool(
+				hosts[i].Peer,
+				p.port,
+				p.numConns,
+				p.connCfg,
+				p.keyspace,
+				p.connPolicy(),
+			)
+			p.hostConnPools[hosts[i].Peer] = pool
+		} else {
+			// still have this host, so don't remove it
+			delete(toRemove, hosts[i].Peer)
+		}
+	}
+
+	for addr := range toRemove {
+		pool := p.hostConnPools[addr]
+		delete(p.hostConnPools, addr)
+		pool.Close()
+	}
+
+	// update the policy
+	p.hostPolicy.SetHosts(hosts)
+
+	p.mu.Unlock()
+}
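
The TODO above (connect to hosts in parallel, but block until every pool exists) could be addressed with a helper roughly like the one sketched below. This is only an illustration under the same locking assumptions as the loop it would replace (the caller holds p.mu); createMissingPools is a hypothetical name and not part of this change.

	// createMissingPools is a hypothetical helper illustrating the TODO above:
	// it builds host pools concurrently but does not return until every pool
	// has been created. The map is written only from this goroutine, so the
	// caller's lock on p.mu is still sufficient.
	func (p *policyConnPool) createMissingPools(hosts []HostInfo, toRemove map[string]struct{}) {
		type result struct {
			peer string
			pool *hostConnPool
		}
		results := make(chan result)
		pending := 0
		for i := range hosts {
			if _, exists := p.hostConnPools[hosts[i].Peer]; exists {
				// still have this host, so don't remove it
				delete(toRemove, hosts[i].Peer)
				continue
			}
			pending++
			go func(h HostInfo) {
				// newHostConnPool blocks on the first connection attempt
				results <- result{h.Peer, newHostConnPool(h.Peer, p.port, p.numConns, p.connCfg, p.keyspace, p.connPolicy())}
			}(hosts[i])
		}
		for ; pending > 0; pending-- {
			r := <-results
			p.hostConnPools[r.peer] = r.pool
		}
	}
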
+
+func (p *policyConnPool) SetPartitioner(partitioner string) {
+	p.hostPolicy.SetPartitioner(partitioner)
+}
+
+func (p *policyConnPool) Size() int {
+	p.mu.RLock()
+	count := 0
+	for _, pool := range p.hostConnPools {
+		count += pool.Size()
+	}
+	p.mu.RUnlock()
+
+	return count
+}
+
+func (p *policyConnPool) Pick(qry *Query) *Conn {
+	nextHost := p.hostPolicy.Pick(qry)
+
+	p.mu.RLock()
+	var host *HostInfo
+	var conn *Conn
+	for conn == nil {
+		host = nextHost()
+		if host == nil {
+			break
+		}
+		conn = p.hostConnPools[host.Peer].Pick(qry)
+	}
+	p.mu.RUnlock()
+	return conn
+}
+
+func (p *policyConnPool) Close() {
+	p.mu.Lock()
+
+	// remove the hosts from the policy
+	p.hostPolicy.SetHosts([]HostInfo{})
+
+	// close the pools
+	for addr, pool := range p.hostConnPools {
+		delete(p.hostConnPools, addr)
+		pool.Close()
+	}
+	p.mu.Unlock()
+}
+
+// hostConnPool is a connection pool for a single host.
+// Connection selection is based on a provided ConnSelectionPolicy
+type hostConnPool struct {
+	host     string
+	port     int
+	addr     string
+	size     int
+	connCfg  ConnConfig
+	keyspace string
+	policy   ConnSelectionPolicy
+	// protection for conns, closed, filling
+	mu      sync.Mutex
+	conns   []*Conn
+	closed  bool
+	filling bool
+}
+
+func newHostConnPool(
+	host string,
+	port int,
+	size int,
+	connCfg ConnConfig,
+	keyspace string,
+	policy ConnSelectionPolicy,
+) *hostConnPool {
+
+	pool := &hostConnPool{
+		host:     host,
+		port:     port,
+		addr:     JoinHostPort(host, port),
+		size:     size,
+		connCfg:  connCfg,
+		keyspace: keyspace,
+		policy:   policy,
+		conns:    make([]*Conn, 0, size),
+		filling:  false,
+		closed:   false,
+	}
+
+	// fill the pool with the initial connections before returning
+	pool.fill()
+
+	return pool
+}
+
+// Pick a connection from this connection pool for the given query.
+func (pool *hostConnPool) Pick(qry *Query) *Conn {
+	pool.mu.Lock()
+	if pool.closed {
+		pool.mu.Unlock()
+		return nil
+	}
+
+	empty := len(pool.conns) == 0
+	pool.mu.Unlock()
+
+	if empty {
+		// try to fill the empty pool
+		pool.fill()
+	}
+
+	return pool.policy.Pick(qry)
+}
+
+//Size returns the number of connections currently active in the pool
+func (pool *hostConnPool) Size() int {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	return len(pool.conns)
+}
+
+//Close the connection pool
+func (pool *hostConnPool) Close() {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	if pool.closed {
+		return
+	}
+	pool.closed = true
+
+	// drain, but don't wait
+	go pool.drain()
+}
+
+// Fill the connection pool
+func (pool *hostConnPool) fill() {
+	pool.mu.Lock()
+	// avoid filling a closed pool, or concurrent filling
+	if pool.closed || pool.filling {
+		pool.mu.Unlock()
+		return
+	}
+
+	// determine the filling work to be done
+	startCount := len(pool.conns)
+	fillCount := pool.size - startCount
+
+	// avoid filling a full (or overfull) pool
+	if fillCount <= 0 {
+		pool.mu.Unlock()
+		return
+	}
+
+	// ok fill the pool
+	pool.filling = true
+
+	// allow others to access the pool while filling
+	pool.mu.Unlock()
+
+	// fill only the first connection synchronously
+	if startCount == 0 {
+		err := pool.connect()
+		if opErr, ok := err.(*net.OpError); ok && opErr.Op == "read" {
+			// connection refused
+			// these are typical during a node outage so avoid log spam.
+		} else if err != nil {
+			log.Printf("error: failed to connect to %s - %v", pool.addr, err)
+		}
+
+		if err != nil {
+			// probably unreachable host
+			go pool.fillingStopped()
+			return
+		}
+
+		// filled one
+		fillCount--
+	}
+
+	// fill the rest of the pool asynchronously
+	go func() {
+		for fillCount > 0 {
+			err := pool.connect()
+			if opErr, ok := err.(*net.OpError); ok && opErr.Op == "read" {
+				// connection refused
+				// these are typical during a node outage so avoid log spam.
+			} else if err != nil {
+				log.Printf("error: failed to connect to %s - %v", pool.addr, err)
+			}
+
+			// decrement, even on error
+			fillCount--
+		}
+
+		// mark the end of filling
+		pool.fillingStopped()
+	}()
+}
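
The net.OpError check above appears in both the synchronous and the asynchronous fill path; a small shared helper, sketched below with a hypothetical name, would keep the two in step. It only restates the logic already present in fill: read errors are treated as the expected symptom of a node outage and silenced to avoid log spam, while anything else is logged.

	// logConnectErr is a hypothetical helper (not part of this change) that
	// mirrors the error handling in fill above.
	func logConnectErr(addr string, err error) {
		if opErr, ok := err.(*net.OpError); ok && opErr.Op == "read" {
			// connection refused; typical during a node outage, so stay quiet
			return
		}
		if err != nil {
			log.Printf("error: failed to connect to %s - %v", addr, err)
		}
	}
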
+
+// transition back to a not-filling state.
+func (pool *hostConnPool) fillingStopped() {
+	// wait for some time to avoid back-to-back filling
+	time.Sleep(100 * time.Millisecond)
+
+	pool.mu.Lock()
+	pool.filling = false
+	pool.mu.Unlock()
+}
+
+// create a new connection to the host and add it to the pool
+func (pool *hostConnPool) connect() error {
+	// try to connect
+	conn, err := Connect(pool.addr, pool.connCfg, pool)
+	if err != nil {
+		return err
+	}
+
+	if pool.keyspace != "" {
+		// set the keyspace
+		if err := conn.UseKeyspace(pool.keyspace); err != nil {
+			conn.Close()
+			return err
+		}
+	}
+
+	// add the Conn to the pool
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	if pool.closed {
+		conn.Close()
+		return nil
+	}
+
+	pool.conns = append(pool.conns, conn)
+	pool.policy.SetConns(pool.conns)
+	return nil
+}
+
+// handle any error from a Conn
+func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
+	if !closed {
+		// still an open connection, so continue using it
+		return
+	}
+
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	// find the connection index
+	for i, candidate := range pool.conns {
+		if candidate == conn {
+			// remove the connection, not preserving order
+			pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1]
+
+			// update the policy
+			pool.policy.SetConns(pool.conns)
+
+			// lost a connection, so fill the pool
+			go pool.fill()
+			break
+		}
+	}
+}
+
+// removes and closes all connections from the pool
+func (pool *hostConnPool) drain() {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	// empty the pool
+	conns := pool.conns
+	pool.conns = pool.conns[:0]
+
+	// update the policy
+	pool.policy.SetConns(pool.conns)
+
+	// close the connections
+	for _, conn := range conns {
+		conn.Close()
+	}
+}
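
hostConnPool only ever calls two methods on its ConnSelectionPolicy: SetConns whenever the connection set changes, and Pick to choose a connection for a query. The package's NewRoundRobinConnPolicy (referenced above but defined elsewhere) presumably behaves roughly like the hypothetical sketch below, which exists only to show the shape those calls imply and is not the package's implementation.

	// roundRobinConns is a hypothetical connection picker built only from the
	// two calls hostConnPool makes: SetConns and Pick.
	type roundRobinConns struct {
		mu    sync.Mutex
		conns []*Conn
		next  int
	}

	func (r *roundRobinConns) SetConns(conns []*Conn) {
		r.mu.Lock()
		r.conns = conns
		r.mu.Unlock()
	}

	func (r *roundRobinConns) Pick(qry *Query) *Conn {
		r.mu.Lock()
		defer r.mu.Unlock()
		if len(r.conns) == 0 {
			return nil
		}
		conn := r.conns[r.next%len(r.conns)]
		r.next++
		return conn
	}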