events.go

package gocql

import (
	"log"
	"net"
	"sync"
	"time"
)
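// eventDebouncer collects incoming event frames and delivers them to its
// callback in a single batch once eventDebounceTime has elapsed, so a burst
// of cluster events results in one coalesced handler invocation.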
type eventDebouncer struct {
	name     string
	timer    *time.Timer
	mu       sync.Mutex
	events   []frame
	callback func([]frame)
	quit     chan struct{}
}
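// newEventDebouncer starts a background flusher goroutine for the returned
// debouncer; call stop to terminate it. The session presumably constructs
// its s.nodeEvents field (used in handleEvent below) with handleNodeEvent
// as the callback.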
func newEventDebouncer(name string, eventHandler func([]frame)) *eventDebouncer {
	e := &eventDebouncer{
		name:     name,
		quit:     make(chan struct{}),
		timer:    time.NewTimer(eventDebounceTime),
		callback: eventHandler,
	}
	// the timer only starts counting once the first event is debounced
	e.timer.Stop()

	go e.flusher()
	return e
}
func (e *eventDebouncer) stop() {
	e.quit <- struct{}{} // sync with flusher
	close(e.quit)
}
func (e *eventDebouncer) flusher() {
	for {
		select {
		case <-e.timer.C:
			e.mu.Lock()
			e.flush()
			e.mu.Unlock()
		case <-e.quit:
			return
		}
	}
}
const (
	// maximum number of events buffered before further frames are dropped
	eventBufferSize = 1000
	// quiet period before buffered events are flushed to the callback
	eventDebounceTime = 1 * time.Second
)
// flush must be called with mu locked
func (e *eventDebouncer) flush() {
	log.Printf("%s: flushing %d events\n", e.name, len(e.events))
	if len(e.events) == 0 {
		return
	}

	// if the flush interval is faster than the callback then we will end up calling
	// the callback multiple times, probably a bad idea. In this case we could drop
	// frames?
	go e.callback(e.events)
	e.events = make([]frame, 0, eventBufferSize)
}
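// debounce buffers an event frame and restarts the quiet-period timer;
// once the buffer holds eventBufferSize frames, further frames are dropped.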
func (e *eventDebouncer) debounce(frame frame) {
	e.mu.Lock()
	e.timer.Reset(eventDebounceTime)

	// TODO: probably need a warning to track if this threshold is too low
	if len(e.events) < eventBufferSize {
		log.Printf("%s: buffering event: %v", e.name, frame)
		e.events = append(e.events, frame)
	} else {
		log.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
	}
	e.mu.Unlock()
}
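// handleNodeEvent processes a debounced batch of topology and status change
// frames, collapsing them so only the most recent change per host is acted on.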
func (s *Session) handleNodeEvent(frames []frame) {
	type nodeEvent struct {
		change string
		host   net.IP
		port   int
	}

	events := make(map[string]*nodeEvent)

	for _, frame := range frames {
		// TODO: can we be sure the order of events in the buffer is correct?
		switch f := frame.(type) {
		case *topologyChangeEventFrame:
			event, ok := events[f.host.String()]
			if !ok {
				event = &nodeEvent{change: f.change, host: f.host, port: f.port}
				events[f.host.String()] = event
			}
			event.change = f.change
		case *statusChangeEventFrame:
			event, ok := events[f.host.String()]
			if !ok {
				event = &nodeEvent{change: f.change, host: f.host, port: f.port}
				events[f.host.String()] = event
			}
			event.change = f.change
		}
	}

	for addr, f := range events {
		log.Printf("NodeEvent: handling debounced event: %q => %s", addr, f.change)

		switch f.change {
		case "NEW_NODE":
			s.handleNewNode(f.host, f.port)
		case "REMOVED_NODE":
			s.handleRemovedNode(f.host, f.port)
		case "MOVED_NODE":
			// java-driver handles this, not mentioned in the spec
			// TODO(zariel): refresh token map
		case "UP":
			s.handleNodeUp(f.host, f.port)
		case "DOWN":
			s.handleNodeDown(f.host, f.port)
		}
	}
}
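// handleEvent parses an incoming event frame and dispatches it; topology and
// status changes are debounced via s.nodeEvents, while schema changes are
// currently ignored.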
func (s *Session) handleEvent(framer *framer) {
	// TODO(zariel): need to debounce event frames, and possibly other events as well
	defer framerPool.Put(framer)

	frame, err := framer.parseFrame()
	if err != nil {
		// TODO: logger
		log.Printf("gocql: unable to parse event frame: %v\n", err)
		return
	}

	log.Println(frame)

	// TODO: handle metadata events
	switch f := frame.(type) {
	case *schemaChangeKeyspace:
	case *schemaChangeFunction:
	case *schemaChangeTable:
	case *topologyChangeEventFrame, *statusChangeEventFrame:
		s.nodeEvents.debounce(frame)
	default:
		log.Printf("gocql: invalid event frame (%T): %v\n", f, f)
	}
}
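// handleNewNode fetches host information over the control connection and adds
// the host to the ring and pool; hosts already present are treated as up.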
func (s *Session) handleNewNode(host net.IP, port int) {
	// TODO(zariel): need to be able to filter discovered nodes
	if s.control == nil {
		return
	}

	hostInfo, err := s.control.fetchHostInfo(host, port)
	if err != nil {
		log.Printf("gocql: unable to fetch host info for %v: %v\n", host, err)
		return
	}

	// should this handle token moving?
	if !s.ring.addHostIfMissing(hostInfo) {
		s.handleNodeUp(host, port)
		return
	}

	s.pool.addHost(hostInfo)
}
func (s *Session) handleRemovedNode(ip net.IP, port int) {
	// we remove all nodes but only add ones which pass the filter
	addr := ip.String()
	s.pool.removeHost(addr)
	s.ring.removeHost(addr)
}
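// handleNodeUp marks a known host as up in the ring and pool; hosts not yet
// in the ring are handled as newly discovered nodes.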
func (s *Session) handleNodeUp(ip net.IP, port int) {
	addr := ip.String()
	host := s.ring.getHost(addr)
	if host != nil {
		host.setState(NodeUp)
		s.pool.hostUp(host)
		return
	}

	// TODO: this could loop indefinitely
	s.handleNewNode(ip, port)
}
func (s *Session) handleNodeDown(ip net.IP, port int) {
	addr := ip.String()
	host := s.ring.getHost(addr)
	if host != nil {
		host.setState(NodeDown)
	}

	s.pool.hostDown(addr)
}