Chris Bannister 10 years ago
parent
commit
be960bdc91
3 changed files with 135 additions and 11 deletions
  1. +1 -1
      connectionpool.go
  2. +106 -10
      host_source.go
  3. +28 -0
      policies.go

+ 1 - 1
connectionpool.go

@@ -537,7 +537,7 @@ func (pool *hostConnPool) drain() {
 
 	// empty the pool
 	conns := pool.conns
-	pool.conns = pool.conns[:0]
+	pool.conns = pool.conns[:0:0]
 
 	// update the policy
 	pool.policy.SetConns(pool.conns)
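The one-character change above switches pool.conns[:0] to a full slice expression. A minimal standalone sketch (not from this commit) of why the extra index matters: it also zeroes the slice's capacity, so a later append to pool.conns must allocate a new backing array instead of writing over the drained conns slice that is still being closed.

	package main

	import "fmt"

	func main() {
		orig := []int{1, 2, 3}

		// Two-index reslice: length 0 but capacity 3, so an append
		// writes into (and clobbers) the original backing array.
		shared := orig[:0]
		shared = append(shared, 99)
		fmt.Println(orig[0]) // 99 - the original element was overwritten

		orig = []int{1, 2, 3}

		// Three-index reslice: length 0 and capacity 0, so the first
		// append allocates fresh storage and leaves orig untouched.
		isolated := orig[:0:0]
		isolated = append(isolated, 99)
		fmt.Println(orig[0]) // 1 - the original element is preserved
	}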

+ 106 - 10
host_source.go

@@ -23,16 +23,6 @@ const (
 	NodeDown
 )
 
-type HostInfo struct {
-	Peer       string
-	DataCenter string
-	Rack       string
-	HostId     string
-	Version    cassVersion
-	State      nodeState
-	Tokens     []string
-}
-
 type cassVersion struct {
 	Major, Minor, Patch int
 }
@@ -41,6 +31,112 @@ func (c cassVersion) String() string {
 	return fmt.Sprintf("v%d.%d.%d", c.Major, c.Minor, c.Patch)
 }
 
+type HostInfo struct {
+	// TODO(zariel): maybe reduce locking; not all values will change, but to
+	// ensure thread safety use a mutex to access all fields.
+	mu         sync.RWMutex
+	peer       string
+	dataCenter string
+	rack       string
+	hostId     string
+	version    cassVersion
+	state      nodeState
+	tokens     []string
+}
+
+func (h *HostInfo) Peer() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.peer
+}
+
+func (h *HostInfo) SetPeer(peer string) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.peer = peer
+	return h
+}
+
+func (h *HostInfo) DataCenter() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.dataCenter
+}
+
+func (h *HostInfo) SetDataCenter(dataCenter string) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.dataCenter = dataCenter
+	return h
+}
+
+func (h *HostInfo) Rack() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.rack
+}
+
+func (h *HostInfo) SetRack(rack string) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.rack = rack
+	return h
+}
+
+func (h *HostInfo) HostID() string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.hostId
+}
+
+func (h *HostInfo) SetHostID(hostID string) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.hostId = hostID
+	return h
+}
+
+func (h *HostInfo) Version() cassVersion {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.version
+}
+
+func (h *HostInfo) SetVersion(major, minor, patch int) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.version.Major = major
+	h.version.Minor = minor
+	h.version.Patch = patch
+	return h
+}
+
+func (h *HostInfo) State() nodeState {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.state
+}
+
+func (h *HostInfo) SetState(state nodeState) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.state = state
+	return h
+}
+
+func (h *HostInfo) Tokens() []string {
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.tokens
+}
+
+func (h *HostInfo) SetTokens(tokens []string) *HostInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.tokens = tokens
+	return h
+}
+
-func (h HostInfo) String() string {
-	return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.Peer, h.DataCenter, h.Rack, h.HostId, h.Version, h.State, len(h.Tokens))
+func (h *HostInfo) String() string {
+	return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.Peer(), h.DataCenter(), h.Rack(), h.HostID(), h.Version(), h.State(), len(h.Tokens()))
 }
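Every setter above returns its receiver, so host metadata can be populated in one chained call, and every accessor takes the host's RWMutex, so concurrent readers and writers see consistent values. A hypothetical helper sketching that usage (not from this commit; it would live inside the gocql package and assumes the NodeUp constant from the nodeState const block):

	// newHostInfoSketch is an illustrative helper only. Each setter returns
	// *HostInfo, so the calls chain, and each call takes the host's lock
	// on its own, so the partially built host is safe to share.
	func newHostInfoSketch(peer, dc, rack string) *HostInfo {
		return (&HostInfo{}).
			SetPeer(peer).
			SetDataCenter(dc).
			SetRack(rack).
			SetVersion(2, 1, 3).
			SetState(NodeUp).
			SetTokens(nil)
	}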

+ 28 - 0
policies.go

@@ -33,6 +33,7 @@ func (c *cowHostList) set(list []HostInfo) {
 	c.mu.Unlock()
 }
 
+// add will add a host if it is not already in the list
 func (c *cowHostList) add(host HostInfo) {
 	c.mu.Lock()
 	l := c.get()
@@ -56,6 +57,33 @@ func (c *cowHostList) add(host HostInfo) {
 	c.mu.Unlock()
 }
 
+func (c *cowHostList) update(host HostInfo) {
+	c.mu.Lock()
+	l := c.get()
+
+	if len(l) == 0 {
+		c.mu.Unlock()
+		return
+	}
+
+	found := false
+	newL := make([]HostInfo, len(l))
+	for i := range l {
+		if host.Peer() == l[i].Peer() && host.HostID() == l[i].HostID() {
+			newL[i] = host
+			found = true
+		} else {
+			newL[i] = l[i]
+		}
+	}
+
+	if found {
+		c.list.Store(&newL)
+	}
+
+	c.mu.Unlock()
+}
+
 func (c *cowHostList) remove(addr string) {
 	c.mu.Lock()
 	l := c.get()
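For context, cowHostList is a copy-on-write list: set, add, update and remove all serialize on c.mu, copy the current slice, and publish the copy (the Store call suggests an atomic.Value underneath), while readers call get without blocking. A standalone sketch of that pattern under those assumptions, not the gocql type itself:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	type cowList struct {
		mu   sync.Mutex   // serializes writers only
		list atomic.Value // holds the current []string snapshot
	}

	// get returns the latest published snapshot without taking the mutex.
	func (c *cowList) get() []string {
		l, _ := c.list.Load().([]string)
		return l
	}

	// add copies the current snapshot, appends, and publishes the new slice;
	// readers that loaded the old snapshot keep a consistent view of it.
	func (c *cowList) add(s string) {
		c.mu.Lock()
		defer c.mu.Unlock()
		old := c.get()
		newL := make([]string, len(old), len(old)+1)
		copy(newL, old)
		c.list.Store(append(newL, s))
	}

	func main() {
		var c cowList
		c.add("10.0.0.1")
		c.add("10.0.0.2")
		fmt.Println(c.get()) // [10.0.0.1 10.0.0.2]
	}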