events.go

package gocql

import (
	"log"
	"net"
	"sync"
	"time"
)

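// eventDebouncer batches incoming event frames so that a burst of events
// results in a single callback invocation. Every debounced frame restarts
// the timer; once eventDebounceTime passes without a new frame, the
// buffered batch is handed to callback.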
type eventDebouncer struct {
	name   string
	timer  *time.Timer
	mu     sync.Mutex
	events []frame

	callback func([]frame)

	quit chan struct{}
}

func newEventDebouncer(name string, eventHandler func([]frame)) *eventDebouncer {
	e := &eventDebouncer{
		name:     name,
		quit:     make(chan struct{}),
		timer:    time.NewTimer(eventDebounceTime),
		callback: eventHandler,
	}
	e.timer.Stop()

	go e.flusher()

	return e
}

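// A minimal usage sketch (illustrative; the Session is assumed to wire the
// debouncer up at construction time, outside this file):
//
//	s.nodeEvents = newEventDebouncer("NodeEvents", s.handleNodeEvent)
//	s.nodeEvents.debounce(frame) // for each incoming node event frame
//	s.nodeEvents.stop()          // on Session shutdown

// stop terminates the flusher goroutine; the unbuffered send synchronizes
// with flusher before quit is closed.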
func (e *eventDebouncer) stop() {
	e.quit <- struct{}{} // sync with flusher
	close(e.quit)
}

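// flusher runs until stop is called, flushing the buffered events each time
// the debounce timer fires.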
func (e *eventDebouncer) flusher() {
	for {
		select {
		case <-e.timer.C:
			e.mu.Lock()
			e.flush()
			e.mu.Unlock()
		case <-e.quit:
			return
		}
	}
}

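// eventBufferSize caps how many frames the debouncer buffers before it
// starts dropping them; eventDebounceTime is the quiet period that must
// elapse before a buffered batch is flushed.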
const (
	eventBufferSize   = 1000
	eventDebounceTime = 1 * time.Second
)

// flush must be called with mu locked
func (e *eventDebouncer) flush() {
	if len(e.events) == 0 {
		return
	}

	// if the flush interval is shorter than the callback's run time, we will
	// end up running multiple callbacks concurrently, which is probably a bad
	// idea. In that case we could drop frames?
	go e.callback(e.events)
	e.events = make([]frame, 0, eventBufferSize)
}

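// debounce buffers frame and restarts the flush timer. Frames arriving once
// the buffer already holds eventBufferSize entries are dropped.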
func (e *eventDebouncer) debounce(frame frame) {
	e.mu.Lock()
	e.timer.Reset(eventDebounceTime)

	// TODO: we probably need a warning to track whether this threshold is too low
	if len(e.events) < eventBufferSize {
		e.events = append(e.events, frame)
	} else {
		log.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
	}

	e.mu.Unlock()
}

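// handleNodeEvent receives a debounced batch of topology and status change
// frames and coalesces them into at most one event per host, keeping the
// most recent change, before dispatching on its type.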
func (s *Session) handleNodeEvent(frames []frame) {
	type nodeEvent struct {
		change string
		host   net.IP
		port   int
	}

	events := make(map[string]*nodeEvent)

	for _, frame := range frames {
		// TODO: can we be sure the order of events in the buffer is correct?
		switch f := frame.(type) {
		case *topologyChangeEventFrame:
			event, ok := events[f.host.String()]
			if !ok {
				event = &nodeEvent{change: f.change, host: f.host, port: f.port}
				events[f.host.String()] = event
			}
			event.change = f.change

		case *statusChangeEventFrame:
			event, ok := events[f.host.String()]
			if !ok {
				event = &nodeEvent{change: f.change, host: f.host, port: f.port}
				events[f.host.String()] = event
			}
			event.change = f.change
		}
	}

	for _, f := range events {
		switch f.change {
		case "NEW_NODE":
			s.handleNewNode(f.host, f.port, true)
		case "REMOVED_NODE":
			s.handleRemovedNode(f.host, f.port)
		case "MOVED_NODE":
			// java-driver handles this, not mentioned in the spec
			// TODO(zariel): refresh token map
		case "UP":
			s.handleNodeUp(f.host, f.port, true)
		case "DOWN":
			s.handleNodeDown(f.host, f.port)
		}
	}
}

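// handleEvent parses a server-pushed EVENT frame, routes topology and status
// changes to the node event debouncer, and currently ignores schema change
// frames.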
func (s *Session) handleEvent(framer *framer) {
	// TODO(zariel): need to debounce event frames, and possibly also events
	defer framerPool.Put(framer)

	frame, err := framer.parseFrame()
	if err != nil {
		// TODO: logger
		log.Printf("gocql: unable to parse event frame: %v\n", err)
		return
	}

	// TODO: handle metadata events
	switch f := frame.(type) {
	case *schemaChangeKeyspace:
	case *schemaChangeFunction:
	case *schemaChangeTable:
	case *topologyChangeEventFrame, *statusChangeEventFrame:
		s.nodeEvents.debounce(frame)
	default:
		log.Printf("gocql: invalid event frame (%T): %v\n", f, f)
	}
}

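// handleNewNode adds a newly announced node to the ring and connection pool,
// fetching its host info via the control connection when one is available,
// applying the configured host filters, and optionally waiting for the
// node's binary protocol port to come up.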
func (s *Session) handleNewNode(host net.IP, port int, waitForBinary bool) {
	// TODO(zariel): need to be able to filter discovered nodes
	var hostInfo *HostInfo
	if s.control != nil {
		var err error
		hostInfo, err = s.control.fetchHostInfo(host, port)
		if err != nil {
			log.Printf("gocql: events: unable to fetch host info for %v: %v\n", host, err)
			return
		}
	} else {
		hostInfo = &HostInfo{peer: host.String(), port: port, state: NodeUp}
	}

	addr := host.String()
	if s.cfg.IgnorePeerAddr && hostInfo.Peer() != addr {
		hostInfo.setPeer(addr)
	}

	if s.cfg.HostFilter != nil {
		if !s.cfg.HostFilter.Accept(hostInfo) {
			return
		}
	} else if !s.cfg.Discovery.matchFilter(hostInfo) {
		// TODO: remove this when the host selection policy is more sophisticated
		return
	}

	if t := hostInfo.Version().nodeUpDelay(); t > 0 && waitForBinary {
		time.Sleep(t)
	}

	// should this handle token moving?
	if existing, ok := s.ring.addHostIfMissing(hostInfo); ok {
		existing.update(hostInfo)
		hostInfo = existing
	}

	s.pool.addHost(hostInfo)
	hostInfo.setState(NodeUp)

	if s.control != nil {
		s.hostSource.refreshRing()
	}
}

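// handleRemovedNode marks the host down, removes it from the pool and the
// ring, and then refreshes the ring metadata.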
func (s *Session) handleRemovedNode(ip net.IP, port int) {
	// we remove all nodes but only add ones which pass the filter
	addr := ip.String()
	host := s.ring.getHost(addr)
	if host == nil {
		host = &HostInfo{peer: addr}
	}

	if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
		return
	}

	host.setState(NodeDown)
	s.pool.removeHost(addr)
	s.ring.removeHost(addr)
	s.hostSource.refreshRing()
}

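// handleNodeUp transitions a known host back to NodeUp, optionally waiting
// for its binary protocol port; hosts not yet in the ring are treated as
// new nodes.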
func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
	addr := ip.String()
	host := s.ring.getHost(addr)
	if host != nil {
		if s.cfg.IgnorePeerAddr && host.Peer() != addr {
			host.setPeer(addr)
		}

		if s.cfg.HostFilter != nil {
			if !s.cfg.HostFilter.Accept(host) {
				return
			}
		} else if !s.cfg.Discovery.matchFilter(host) {
			// TODO: remove this when the host selection policy is more sophisticated
			return
		}

		if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary {
			time.Sleep(t)
		}

		s.pool.hostUp(host)
		host.setState(NodeUp)
		return
	}

	s.handleNewNode(ip, port, waitForBinary)
}

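// handleNodeDown marks the host as down in both the ring and the pool.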
func (s *Session) handleNodeDown(ip net.IP, port int) {
	addr := ip.String()
	host := s.ring.getHost(addr)
	if host == nil {
		host = &HostInfo{peer: addr}
	}

	if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
		return
	}

	host.setState(NodeDown)
	s.pool.hostDown(addr)
}