// host_source.go
  1. package gocql
  2. import (
  3. "log"
  4. "net"
  5. "time"
  6. )
  7. type HostInfo struct {
  8. Peer string
  9. DataCenter string
  10. Rack string
  11. HostId string
  12. Tokens []string
  13. }
  14. // Polls system.peers at a specific interval to find new hosts
  15. type ringDescriber struct {
  16. dcFilter string
  17. rackFilter string
  18. prevHosts []HostInfo
  19. prevPartitioner string
  20. session *Session
  21. closeChan chan bool
  22. }
  23. func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err error) {
  24. // we need conn to be the same because we need to query system.peers and system.local
  25. // on the same node to get the whole cluster
  26. iter := r.session.control.query("SELECT data_center, rack, host_id, tokens, partitioner FROM system.local")
  27. if iter == nil {
  28. return r.prevHosts, r.prevPartitioner, nil
  29. }
  30. conn := r.session.pool.Pick(nil)
  31. if conn == nil {
  32. return r.prevHosts, r.prevPartitioner, nil
  33. }
  34. host := HostInfo{}
  35. iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens, &partitioner)
  36. if err = iter.Close(); err != nil {
  37. return nil, "", err
  38. }
  39. addr, _, err := net.SplitHostPort(conn.Address())
  40. if err != nil {
  41. // this should not happen, ever, as this is the address that was dialed by conn, here
  42. // a panic makes sense, please report a bug if it occurs.
  43. panic(err)
  44. }
  45. host.Peer = addr
  46. hosts = []HostInfo{host}
  47. iter = r.session.control.query("SELECT peer, data_center, rack, host_id, tokens FROM system.peers")
  48. if iter == nil {
  49. return r.prevHosts, r.prevPartitioner, nil
  50. }
  51. host = HostInfo{}
  52. for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
  53. if r.matchFilter(&host) {
  54. hosts = append(hosts, host)
  55. }
  56. host = HostInfo{}
  57. }
  58. if err = iter.Close(); err != nil {
  59. return nil, "", err
  60. }
  61. r.prevHosts = hosts
  62. r.prevPartitioner = partitioner
  63. return hosts, partitioner, nil
  64. }
  65. func (r *ringDescriber) matchFilter(host *HostInfo) bool {
  66. if r.dcFilter != "" && r.dcFilter != host.DataCenter {
  67. return false
  68. }
  69. if r.rackFilter != "" && r.rackFilter != host.Rack {
  70. return false
  71. }
  72. return true
  73. }
  74. func (h *ringDescriber) run(sleep time.Duration) {
  75. if sleep == 0 {
  76. sleep = 30 * time.Second
  77. }
  78. for {
  79. // if we have 0 hosts this will return the previous list of hosts to
  80. // attempt to reconnect to the cluster otherwise we would never find
  81. // downed hosts again, could possibly have an optimisation to only
  82. // try to add new hosts if GetHosts didnt error and the hosts didnt change.
  83. hosts, partitioner, err := h.GetHosts()
  84. if err != nil {
  85. log.Println("RingDescriber: unable to get ring topology:", err)
  86. continue
  87. }
  88. h.session.pool.SetHosts(hosts)
  89. h.session.pool.SetPartitioner(partitioner)
  90. select {
  91. case <-time.After(sleep):
  92. case <-h.closeChan:
  93. return
  94. }
  95. }
  96. }