events_ccm_test.go

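// Integration tests for the driver's cluster event handling (node discovery,
// node down and node up notifications). They require a local ccm (Cassandra
// Cluster Manager) cluster and are only built when the "ccm" tag is set.
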
// +build ccm

package gocql

import (
	"log"
	"testing"
	"time"

	"github.com/gocql/gocql/ccm_test"
)
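
// TestEventDiscovery checks that, once the ccm cluster is up, every node
// reported by ccm status has a corresponding entry in the session's host
// connection pools.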
func TestEventDiscovery(t *testing.T) {
	if err := ccm.AllUp(); err != nil {
		t.Fatal(err)
	}

	session := createSession(t)
	defer session.Close()

	status, err := ccm.Status()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("status=%+v\n", status)

	session.pool.mu.RLock()
	poolHosts := session.pool.hostConnPools // TODO: replace with session.ring
	t.Logf("poolhosts=%+v\n", poolHosts)

	// check we discovered all the nodes in the ring
	for _, host := range status {
		if _, ok := poolHosts[host.Addr]; !ok {
			t.Errorf("did not discover %q", host.Addr)
		}
	}
	session.pool.mu.RUnlock()

	if t.Failed() {
		t.FailNow()
	}
}
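
// TestEventNodeDownControl takes down node1, which is expected to carry the
// control connection, and verifies the node is removed from the host
// connection pools once the down event has been handled.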
func TestEventNodeDownControl(t *testing.T) {
	const targetNode = "node1"
	t.Log("marking " + targetNode + " as down")

	if err := ccm.AllUp(); err != nil {
		t.Fatal(err)
	}

	session := createSession(t)
	defer session.Close()

	if err := ccm.NodeDown(targetNode); err != nil {
		t.Fatal(err)
	}

	status, err := ccm.Status()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("status=%+v\n", status)
	t.Logf("marking node %q down: %v\n", targetNode, status[targetNode])

	// give the driver a moment to receive and process the down event
	time.Sleep(5 * time.Second)

	session.pool.mu.RLock()
	poolHosts := session.pool.hostConnPools
	node := status[targetNode]
	t.Logf("poolhosts=%+v\n", poolHosts)
	if _, ok := poolHosts[node.Addr]; ok {
		session.pool.mu.RUnlock()
		t.Fatal("node not removed after remove event")
	}
	session.pool.mu.RUnlock()
}
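
// TestEventNodeDown takes down node3 and verifies the node is removed from
// the host connection pools once the down event has been handled.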
func TestEventNodeDown(t *testing.T) {
	const targetNode = "node3"

	if err := ccm.AllUp(); err != nil {
		t.Fatal(err)
	}

	session := createSession(t)
	defer session.Close()

	if err := ccm.NodeDown(targetNode); err != nil {
		t.Fatal(err)
	}

	status, err := ccm.Status()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("status=%+v\n", status)
	t.Logf("marking node %q down: %v\n", targetNode, status[targetNode])

	// give the driver a moment to receive and process the down event
	time.Sleep(5 * time.Second)

	session.pool.mu.RLock()
	defer session.pool.mu.RUnlock()

	poolHosts := session.pool.hostConnPools
	node := status[targetNode]
	t.Logf("poolhosts=%+v\n", poolHosts)
	if _, ok := poolHosts[node.Addr]; ok {
		t.Fatal("node not removed after remove event")
	}
}
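
// TestEventNodeUp bounces node2: the node is taken down and should drop out
// of the host connection pools, then brought back up and should reappear
// once the up event has been handled.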
func TestEventNodeUp(t *testing.T) {
	if err := ccm.AllUp(); err != nil {
		t.Fatal(err)
	}

	status, err := ccm.Status()
	if err != nil {
		t.Fatal(err)
	}
	log.Printf("status=%+v\n", status)

	session := createSession(t)
	defer session.Close()

	poolHosts := session.pool.hostConnPools

	const targetNode = "node2"

	session.pool.mu.RLock()
	_, ok := poolHosts[status[targetNode].Addr]
	session.pool.mu.RUnlock()
	if !ok {
		session.pool.mu.RLock()
		t.Errorf("target pool not in connection pool: addr=%q pools=%v", status[targetNode].Addr, poolHosts)
		session.pool.mu.RUnlock()
		t.FailNow()
	}

	if err := ccm.NodeDown(targetNode); err != nil {
		t.Fatal(err)
	}

	// wait for the down event to be processed
	time.Sleep(5 * time.Second)

	session.pool.mu.RLock()
	log.Printf("poolhosts=%+v\n", poolHosts)
	node := status[targetNode]
	if _, ok := poolHosts[node.Addr]; ok {
		session.pool.mu.RUnlock()
		t.Fatal("node not removed after remove event")
	}
	session.pool.mu.RUnlock()

	if err := ccm.NodeUp(targetNode); err != nil {
		t.Fatal(err)
	}

	// cassandra < 2.2 needs 10 seconds to start up the binary service
	time.Sleep(10 * time.Second)

	session.pool.mu.RLock()
	log.Printf("poolhosts=%+v\n", poolHosts)
	if _, ok := poolHosts[node.Addr]; !ok {
		session.pool.mu.RUnlock()
		t.Fatal("node not added after node added event")
	}
	session.pool.mu.RUnlock()
}