events.go

package gocql

import (
	"net"
	"sync"
	"time"
)

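// eventDebouncer batches incoming event frames and delivers them to a
// callback as a single slice once the debounce timer fires.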
type eventDebouncer struct {
	name   string
	timer  *time.Timer
	mu     sync.Mutex
	events []frame

	callback func([]frame)
	quit     chan struct{}
}

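// newEventDebouncer starts a debouncer whose flusher goroutine invokes
// eventHandler with the buffered frames each time the debounce timer fires.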
func newEventDebouncer(name string, eventHandler func([]frame)) *eventDebouncer {
	e := &eventDebouncer{
		name:     name,
		quit:     make(chan struct{}),
		timer:    time.NewTimer(eventDebounceTime),
		callback: eventHandler,
	}
	e.timer.Stop()

	go e.flusher()

	return e
}

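// stop sends on quit to synchronize with the flusher goroutine, so the
// flusher has stopped selecting before the channel is closed.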
func (e *eventDebouncer) stop() {
	e.quit <- struct{}{} // sync with flusher
	close(e.quit)
}

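// flusher drains the buffered events whenever the debounce timer fires and
// exits when stop is called.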
func (e *eventDebouncer) flusher() {
	for {
		select {
		case <-e.timer.C:
			e.mu.Lock()
			e.flush()
			e.mu.Unlock()
		case <-e.quit:
			return
		}
	}
}

const (
	eventBufferSize   = 1000
	eventDebounceTime = 1 * time.Second
)

// flush must be called with mu locked
func (e *eventDebouncer) flush() {
	if len(e.events) == 0 {
		return
	}

	// if the flush interval is faster than the callback then we will end up calling
	// the callback multiple times, probably a bad idea. In this case we could drop
	// frames?
	go e.callback(e.events)
	e.events = make([]frame, 0, eventBufferSize)
}

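// debounce appends frame to the buffer and resets the debounce timer; if the
// buffer is already full the frame is dropped and logged.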
func (e *eventDebouncer) debounce(frame frame) {
	e.mu.Lock()
	e.timer.Reset(eventDebounceTime)

	// TODO: probably need a warning to track if this threshold is too low
	if len(e.events) < eventBufferSize {
		e.events = append(e.events, frame)
	} else {
		Logger.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
	}

	e.mu.Unlock()
}

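// handleEvent parses a server-pushed event frame and routes it to the schema
// or node event debouncer; unknown event types are logged and ignored.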
func (s *Session) handleEvent(framer *framer) {
	defer framerPool.Put(framer)

	frame, err := framer.parseFrame()
	if err != nil {
		// TODO: logger
		Logger.Printf("gocql: unable to parse event frame: %v\n", err)
		return
	}

	if gocqlDebug {
		Logger.Printf("gocql: handling frame: %v\n", frame)
	}

	switch f := frame.(type) {
	case *schemaChangeKeyspace, *schemaChangeFunction,
		*schemaChangeTable, *schemaChangeAggregate, *schemaChangeType:
		s.schemaEvents.debounce(frame)
	case *topologyChangeEventFrame, *statusChangeEventFrame:
		s.nodeEvents.debounce(frame)
	default:
		Logger.Printf("gocql: invalid event frame (%T): %v\n", f, f)
	}
}

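// handleSchemaEvent processes a batch of debounced schema change frames,
// clearing the cached schema for each affected keyspace and handling
// keyspace-level changes.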
func (s *Session) handleSchemaEvent(frames []frame) {
	// TODO: debounce events
	for _, frame := range frames {
		switch f := frame.(type) {
		case *schemaChangeKeyspace:
			s.schemaDescriber.clearSchema(f.keyspace)
			s.handleKeyspaceChange(f.keyspace, f.change)
		case *schemaChangeTable:
			s.schemaDescriber.clearSchema(f.keyspace)
		case *schemaChangeAggregate:
			s.schemaDescriber.clearSchema(f.keyspace)
		case *schemaChangeFunction:
			s.schemaDescriber.clearSchema(f.keyspace)
		case *schemaChangeType:
			s.schemaDescriber.clearSchema(f.keyspace)
		}
	}
}

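// handleKeyspaceChange waits for schema agreement and then notifies the host
// selection policy of the keyspace change.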
func (s *Session) handleKeyspaceChange(keyspace, change string) {
	s.control.awaitSchemaAgreement()
	s.policy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace, Change: change})
}

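// handleNodeEvent processes a batch of debounced topology and status change
// frames, coalescing them per host so that only the most recent change for
// each host is acted upon.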
func (s *Session) handleNodeEvent(frames []frame) {
	type nodeEvent struct {
		change string
		host   net.IP
		port   int
	}

	events := make(map[string]*nodeEvent)

	for _, frame := range frames {
		// TODO: can we be sure the order of events in the buffer is correct?
		switch f := frame.(type) {
		case *topologyChangeEventFrame:
			event, ok := events[f.host.String()]
			if !ok {
				event = &nodeEvent{change: f.change, host: f.host, port: f.port}
				events[f.host.String()] = event
			}
			event.change = f.change

		case *statusChangeEventFrame:
			event, ok := events[f.host.String()]
			if !ok {
				event = &nodeEvent{change: f.change, host: f.host, port: f.port}
				events[f.host.String()] = event
			}
			event.change = f.change
		}
	}

	for _, f := range events {
		if gocqlDebug {
			Logger.Printf("gocql: dispatching event: %+v\n", f)
		}

		switch f.change {
		case "NEW_NODE":
			s.handleNewNode(f.host, f.port, true)
		case "REMOVED_NODE":
			s.handleRemovedNode(f.host, f.port)
		case "MOVED_NODE":
			// java-driver handles this, not mentioned in the spec
			// TODO(zariel): refresh token map
		case "UP":
			s.handleNodeUp(f.host, f.port, true)
		case "DOWN":
			s.handleNodeDown(f.host, f.port)
		}
	}
}

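// addNewNode marks the host as up and registers it with the connection pool
// and the host selection policy, unless it is rejected by the host filter.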
func (s *Session) addNewNode(host *HostInfo) {
	if s.cfg.filterHost(host) {
		return
	}

	host.setState(NodeUp)
	s.pool.addHost(host)
	s.policy.AddHost(host)
}

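// handleNewNode fetches host info for a newly announced node, adds it to the
// ring and pool, and then triggers a ring refresh unless peer addresses are
// ignored.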
func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) {
	if gocqlDebug {
		Logger.Printf("gocql: Session.handleNewNode: %s:%d\n", ip.String(), port)
	}

	ip, port = s.cfg.translateAddressPort(ip, port)

	// Get host info and apply any filters to the host
	hostInfo, err := s.hostSource.getHostInfo(ip, port)
	if err != nil {
		Logger.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err)
		return
	} else if hostInfo == nil {
		// If hostInfo is nil, this host was filtered out by cfg.HostFilter
		return
	}

	if t := hostInfo.Version().nodeUpDelay(); t > 0 && waitForBinary {
		time.Sleep(t)
	}

	// should this handle token moving?
	hostInfo = s.ring.addOrUpdate(hostInfo)

	s.addNewNode(hostInfo)

	if s.control != nil && !s.cfg.IgnorePeerAddr {
		// TODO(zariel): debounce ring refresh
		s.hostSource.refreshRing()
	}
}

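// handleRemovedNode removes the host from the policy, pool, and ring, and
// then refreshes the ring unless peer addresses are ignored.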
func (s *Session) handleRemovedNode(ip net.IP, port int) {
	if gocqlDebug {
		Logger.Printf("gocql: Session.handleRemovedNode: %s:%d\n", ip.String(), port)
	}

	ip, port = s.cfg.translateAddressPort(ip, port)

	// we remove all nodes but only add ones which pass the filter
	host := s.ring.getHost(ip)
	if host == nil {
		host = &HostInfo{connectAddress: ip, port: port}
	}

	if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
		return
	}

	host.setState(NodeDown)
	s.policy.RemoveHost(host)
	s.pool.removeHost(ip)
	s.ring.removeHost(ip)

	if !s.cfg.IgnorePeerAddr {
		s.hostSource.refreshRing()
	}
}

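// handleNodeUp marks a known host as up and re-adds it to the pool and
// policy; if the host is not yet in the ring it is treated as a new node.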
func (s *Session) handleNodeUp(eventIp net.IP, eventPort int, waitForBinary bool) {
	if gocqlDebug {
		Logger.Printf("gocql: Session.handleNodeUp: %s:%d\n", eventIp.String(), eventPort)
	}

	ip, _ := s.cfg.translateAddressPort(eventIp, eventPort)

	host := s.ring.getHost(ip)
	if host == nil {
		// TODO(zariel): avoid the need to translate twice in this
		// case
		s.handleNewNode(eventIp, eventPort, waitForBinary)
		return
	}

	if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
		return
	}

	if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary {
		time.Sleep(t)
	}

	s.addNewNode(host)
}

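// handleNodeDown marks the host as down and notifies the policy and pool,
// without removing the host from the ring.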
func (s *Session) handleNodeDown(ip net.IP, port int) {
	if gocqlDebug {
		Logger.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port)
	}

	host := s.ring.getHost(ip)
	if host == nil {
		host = &HostInfo{connectAddress: ip, port: port}
	}

	if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
		return
	}

	host.setState(NodeDown)
	s.policy.HostDown(host)
	s.pool.hostDown(ip)
}