// events.go — event frame debouncing and node/schema event handling.
package gocql

import (
	"log"
	"net"
	"sync"
	"time"
)
  8. type eventDeouncer struct {
  9. name string
  10. timer *time.Timer
  11. mu sync.Mutex
  12. events []frame
  13. callback func([]frame)
  14. quit chan struct{}
  15. }
  16. func newEventDeouncer(name string, eventHandler func([]frame)) *eventDeouncer {
  17. e := &eventDeouncer{
  18. name: name,
  19. quit: make(chan struct{}),
  20. timer: time.NewTimer(eventDebounceTime),
  21. callback: eventHandler,
  22. }
  23. e.timer.Stop()
  24. go e.flusher()
  25. return e
  26. }
  27. func (e *eventDeouncer) stop() {
  28. e.quit <- struct{}{} // sync with flusher
  29. close(e.quit)
  30. }
  31. func (e *eventDeouncer) flusher() {
  32. for {
  33. select {
  34. case <-e.timer.C:
  35. e.mu.Lock()
  36. e.flush()
  37. e.mu.Unlock()
  38. case <-e.quit:
  39. return
  40. }
  41. }
  42. }
  43. const (
  44. eventBufferSize = 1000
  45. eventDebounceTime = 1 * time.Second
  46. )
  47. // flush must be called with mu locked
  48. func (e *eventDeouncer) flush() {
  49. if len(e.events) == 0 {
  50. return
  51. }
  52. // if the flush interval is faster than the callback then we will end up calling
  53. // the callback multiple times, probably a bad idea. In this case we could drop
  54. // frames?
  55. go e.callback(e.events)
  56. e.events = make([]frame, 0, eventBufferSize)
  57. }
  58. func (e *eventDeouncer) debounce(frame frame) {
  59. e.mu.Lock()
  60. e.timer.Reset(eventDebounceTime)
  61. // TODO: probably need a warning to track if this threshold is too low
  62. if len(e.events) < eventBufferSize {
  63. e.events = append(e.events, frame)
  64. } else {
  65. log.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
  66. }
  67. e.mu.Unlock()
  68. }
  69. func (s *Session) handleEvent(framer *framer) {
  70. // TODO(zariel): need to debounce events frames, and possible also events
  71. defer framerPool.Put(framer)
  72. frame, err := framer.parseFrame()
  73. if err != nil {
  74. // TODO: logger
  75. log.Printf("gocql: unable to parse event frame: %v\n", err)
  76. return
  77. }
  78. if gocqlDebug {
  79. log.Printf("gocql: handling frame: %v\n", frame)
  80. }
  81. // TODO: handle medatadata events
  82. switch f := frame.(type) {
  83. case *schemaChangeKeyspace, *schemaChangeFunction, *schemaChangeTable:
  84. s.schemaEvents.debounce(frame)
  85. case *topologyChangeEventFrame, *statusChangeEventFrame:
  86. s.nodeEvents.debounce(frame)
  87. default:
  88. log.Printf("gocql: invalid event frame (%T): %v\n", f, f)
  89. }
  90. }
  91. func (s *Session) handleSchemaEvent(frames []frame) {
  92. if s.schemaDescriber == nil {
  93. return
  94. }
  95. for _, frame := range frames {
  96. switch f := frame.(type) {
  97. case *schemaChangeKeyspace:
  98. s.schemaDescriber.clearSchema(f.keyspace)
  99. case *schemaChangeTable:
  100. s.schemaDescriber.clearSchema(f.keyspace)
  101. }
  102. }
  103. }
  104. func (s *Session) handleNodeEvent(frames []frame) {
  105. type nodeEvent struct {
  106. change string
  107. host net.IP
  108. port int
  109. }
  110. events := make(map[string]*nodeEvent)
  111. for _, frame := range frames {
  112. // TODO: can we be sure the order of events in the buffer is correct?
  113. switch f := frame.(type) {
  114. case *topologyChangeEventFrame:
  115. event, ok := events[f.host.String()]
  116. if !ok {
  117. event = &nodeEvent{change: f.change, host: f.host, port: f.port}
  118. events[f.host.String()] = event
  119. }
  120. event.change = f.change
  121. case *statusChangeEventFrame:
  122. event, ok := events[f.host.String()]
  123. if !ok {
  124. event = &nodeEvent{change: f.change, host: f.host, port: f.port}
  125. events[f.host.String()] = event
  126. }
  127. event.change = f.change
  128. }
  129. }
  130. for _, f := range events {
  131. if gocqlDebug {
  132. log.Printf("gocql: dispatching event: %+v\n", f)
  133. }
  134. switch f.change {
  135. case "NEW_NODE":
  136. s.handleNewNode(f.host, f.port, true)
  137. case "REMOVED_NODE":
  138. s.handleRemovedNode(f.host, f.port)
  139. case "MOVED_NODE":
  140. // java-driver handles this, not mentioned in the spec
  141. // TODO(zariel): refresh token map
  142. case "UP":
  143. s.handleNodeUp(f.host, f.port, true)
  144. case "DOWN":
  145. s.handleNodeDown(f.host, f.port)
  146. }
  147. }
  148. }
  149. func (s *Session) handleNewNode(host net.IP, port int, waitForBinary bool) {
  150. // TODO(zariel): need to be able to filter discovered nodes
  151. var hostInfo *HostInfo
  152. if s.control != nil && !s.cfg.IgnorePeerAddr {
  153. var err error
  154. hostInfo, err = s.control.fetchHostInfo(host, port)
  155. if err != nil {
  156. log.Printf("gocql: events: unable to fetch host info for %v: %v\n", host, err)
  157. return
  158. }
  159. } else {
  160. hostInfo = &HostInfo{peer: host.String(), port: port, state: NodeUp}
  161. }
  162. addr := host.String()
  163. if s.cfg.IgnorePeerAddr && hostInfo.Peer() != addr {
  164. hostInfo.setPeer(addr)
  165. }
  166. if s.cfg.HostFilter != nil {
  167. if !s.cfg.HostFilter.Accept(hostInfo) {
  168. return
  169. }
  170. } else if !s.cfg.Discovery.matchFilter(hostInfo) {
  171. // TODO: remove this when the host selection policy is more sophisticated
  172. return
  173. }
  174. if t := hostInfo.Version().nodeUpDelay(); t > 0 && waitForBinary {
  175. time.Sleep(t)
  176. }
  177. // should this handle token moving?
  178. if existing, ok := s.ring.addHostIfMissing(hostInfo); ok {
  179. existing.update(hostInfo)
  180. hostInfo = existing
  181. }
  182. s.pool.addHost(hostInfo)
  183. s.policy.AddHost(hostInfo)
  184. hostInfo.setState(NodeUp)
  185. if s.control != nil && !s.cfg.IgnorePeerAddr {
  186. s.hostSource.refreshRing()
  187. }
  188. }
  189. func (s *Session) handleRemovedNode(ip net.IP, port int) {
  190. // we remove all nodes but only add ones which pass the filter
  191. addr := ip.String()
  192. host := s.ring.getHost(addr)
  193. if host == nil {
  194. host = &HostInfo{peer: addr}
  195. }
  196. if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
  197. return
  198. }
  199. host.setState(NodeDown)
  200. s.policy.RemoveHost(addr)
  201. s.pool.removeHost(addr)
  202. s.ring.removeHost(addr)
  203. if !s.cfg.IgnorePeerAddr {
  204. s.hostSource.refreshRing()
  205. }
  206. }
  207. func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
  208. if gocqlDebug {
  209. log.Printf("gocql: Session.handleNodeUp: %s:%d\n", ip.String(), port)
  210. }
  211. addr := ip.String()
  212. host := s.ring.getHost(addr)
  213. if host != nil {
  214. if s.cfg.IgnorePeerAddr && host.Peer() != addr {
  215. host.setPeer(addr)
  216. }
  217. if s.cfg.HostFilter != nil {
  218. if !s.cfg.HostFilter.Accept(host) {
  219. return
  220. }
  221. } else if !s.cfg.Discovery.matchFilter(host) {
  222. // TODO: remove this when the host selection policy is more sophisticated
  223. return
  224. }
  225. if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary {
  226. time.Sleep(t)
  227. }
  228. host.setPort(port)
  229. s.pool.hostUp(host)
  230. s.policy.HostUp(host)
  231. host.setState(NodeUp)
  232. return
  233. }
  234. s.handleNewNode(ip, port, waitForBinary)
  235. }
  236. func (s *Session) handleNodeDown(ip net.IP, port int) {
  237. if gocqlDebug {
  238. log.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port)
  239. }
  240. addr := ip.String()
  241. host := s.ring.getHost(addr)
  242. if host == nil {
  243. host = &HostInfo{peer: addr}
  244. }
  245. if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
  246. return
  247. }
  248. host.setState(NodeDown)
  249. s.policy.HostDown(addr)
  250. s.pool.hostDown(addr)
  251. }