host_source.go

package gocql

import (
	"log"
	"net"
	"time"
)
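
// HostInfo holds the identifying details of a single node in the ring, as
// reported by the system.local and system.peers tables.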
type HostInfo struct {
	Peer       string
	DataCenter string
	Rack       string
	HostId     string
	Tokens     []string
}

// ringDescriber polls system.peers at a regular interval to find new hosts.
type ringDescriber struct {
	dcFilter        string
	rackFilter      string
	prevHosts       []HostInfo
	prevPartitioner string
	session         *Session
	closeChan       chan bool
}
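
// GetHosts queries system.local and system.peers on a single connection to
// build the full host list and the cluster's partitioner name, falling back
// to the previously seen values when no connection is available.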
func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err error) {
	// we need conn to be the same because we need to query system.peers
	// and system.local on the same node to get the whole cluster
	conn := r.session.Pool.Pick(nil)
	if conn == nil {
		return r.prevHosts, r.prevPartitioner, nil
	}

	query := r.session.Query("SELECT data_center, rack, host_id, tokens, partitioner FROM system.local")
	iter := conn.executeQuery(query)

	host := HostInfo{}
	iter.Scan(&host.DataCenter, &host.Rack, &host.HostId, &host.Tokens, &partitioner)

	if err = iter.Close(); err != nil {
		return nil, "", err
	}

	addr, _, err := net.SplitHostPort(conn.Address())
	if err != nil {
		// this should never happen, as this is the address that conn
		// dialed; a panic makes sense here, please report a bug if it
		// occurs.
		panic(err)
	}
	host.Peer = addr

	hosts = []HostInfo{host}

	query = r.session.Query("SELECT peer, data_center, rack, host_id, tokens FROM system.peers")
	iter = conn.executeQuery(query)

	host = HostInfo{}
	for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
		if r.matchFilter(&host) {
			hosts = append(hosts, host)
		}
		// reset so one row's values never leak into the next
		host = HostInfo{}
	}

	if err = iter.Close(); err != nil {
		return nil, "", err
	}

	r.prevHosts = hosts
	r.prevPartitioner = partitioner

	return hosts, partitioner, nil
}
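
// matchFilter reports whether host passes the configured data-center and
// rack filters; an empty filter matches every host.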
func (r *ringDescriber) matchFilter(host *HostInfo) bool {
	if r.dcFilter != "" && r.dcFilter != host.DataCenter {
		return false
	}
	if r.rackFilter != "" && r.rackFilter != host.Rack {
		return false
	}
	return true
}
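
// run polls the ring topology every sleep interval (defaulting to 30s) and
// pushes the result into the session's connection pool until closeChan is
// closed.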
func (r *ringDescriber) run(sleep time.Duration) {
	if sleep == 0 {
		sleep = 30 * time.Second
	}

	for {
		// if we have 0 hosts this will return the previous list of hosts to
		// attempt to reconnect to the cluster, otherwise we would never find
		// downed hosts again; a possible optimisation is to only try to add
		// new hosts if GetHosts didn't error and the hosts didn't change.
		hosts, partitioner, err := r.GetHosts()
		if err != nil {
			log.Println("RingDescriber: unable to get ring topology:", err)
		} else {
			r.session.Pool.SetHosts(hosts)
			if v, ok := r.session.Pool.(SetPartitioner); ok {
				v.SetPartitioner(partitioner)
			}
		}

		// sleep between polls, but wake up and stop immediately once
		// closeChan is closed
		select {
		case <-time.After(sleep):
		case <-r.closeChan:
			return
		}
	}
}
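
// A minimal usage sketch (hypothetical wiring; session construction and the
// pool's optional SetPartitioner interface are assumed from the rest of the
// package):
//
//	r := &ringDescriber{
//		session:   session,
//		dcFilter:  "dc1", // only report hosts in this data center
//		closeChan: make(chan bool),
//	}
//	go r.run(30 * time.Second)
//	// ...
//	close(r.closeChan) // stops the polling goroutine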