policies_test.go

// Copyright (c) 2015 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocql

import (
	"fmt"
	"net"
	"testing"
	"time"

	"github.com/hailocab/go-hostpool"
)

// Tests of the round-robin host selection policy implementation
func TestRoundRobinHostPolicy(t *testing.T) {
	policy := RoundRobinHostPolicy()

	hosts := [...]*HostInfo{
		{hostId: "0", connectAddress: net.IPv4(0, 0, 0, 1)},
		{hostId: "1", connectAddress: net.IPv4(0, 0, 0, 2)},
	}
	for _, host := range hosts {
		policy.AddHost(host)
	}

	// interleaved iteration should always increment the host
	iterA := policy.Pick(nil)
	if actual := iterA(); actual.Info() != hosts[0] {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID())
	}

	iterB := policy.Pick(nil)
	if actual := iterB(); actual.Info() != hosts[1] {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID())
	}
	if actual := iterB(); actual.Info() != hosts[0] {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID())
	}
	if actual := iterA(); actual.Info() != hosts[1] {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID())
	}

	iterC := policy.Pick(nil)
	if actual := iterC(); actual.Info() != hosts[0] {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID())
	}
	if actual := iterC(); actual.Info() != hosts[1] {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID())
	}
}
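
// A minimal wiring sketch (not part of the test): in application code a host
// selection policy is typically set on the cluster config before the session
// is created; the contact point below is a placeholder.
//
//	cluster := NewCluster("127.0.0.1")
//	cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()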

// Tests of the token-aware host selection policy implementation with a
// round-robin host selection policy fallback.
func TestTokenAwareHostPolicy(t *testing.T) {
	policy := TokenAwareHostPolicy(RoundRobinHostPolicy())

	query := &Query{}

	iter := policy.Pick(nil)
	if iter == nil {
		t.Fatal("host iterator was nil")
	}
	actual := iter()
	if actual != nil {
		t.Fatalf("expected nil from iterator, but was %v", actual)
	}

	// set the hosts
	hosts := [...]*HostInfo{
		{connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{"00"}},
		{connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{"25"}},
		{connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{"50"}},
		{connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{"75"}},
	}
	for _, host := range hosts {
		policy.AddHost(host)
	}

	// the token ring is not set up without the partitioner, but the fallback
	// should work
	if actual := policy.Pick(nil)(); !actual.Info().ConnectAddress().Equal(hosts[0].ConnectAddress()) {
		t.Errorf("Expected peer 0 but was %s", actual.Info().ConnectAddress())
	}

	query.RoutingKey([]byte("30"))
	if actual := policy.Pick(query)(); !actual.Info().ConnectAddress().Equal(hosts[1].ConnectAddress()) {
		t.Errorf("Expected peer 1 but was %s", actual.Info().ConnectAddress())
	}

	policy.SetPartitioner("OrderedPartitioner")

	// now the token ring is configured
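	// With the ordered partitioner the owning host for a key is the one whose
	// token is the first at or after the key, so routing key "20" should land
	// on the host with token "25" (hosts[1]).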
	query.RoutingKey([]byte("20"))

	iter = policy.Pick(query)
	if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[1].ConnectAddress()) {
		t.Errorf("Expected peer 1 but was %s", actual.Info().ConnectAddress())
	}
	// rest are round robin
	if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[2].ConnectAddress()) {
		t.Errorf("Expected peer 2 but was %s", actual.Info().ConnectAddress())
	}
	if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[3].ConnectAddress()) {
		t.Errorf("Expected peer 3 but was %s", actual.Info().ConnectAddress())
	}
	if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[0].ConnectAddress()) {
		t.Errorf("Expected peer 0 but was %s", actual.Info().ConnectAddress())
	}
}

// Tests of the host pool host selection policy implementation
func TestHostPoolHostPolicy(t *testing.T) {
	policy := HostPoolHostPolicy(hostpool.New(nil))

	hosts := []*HostInfo{
		{hostId: "0", connectAddress: net.IPv4(10, 0, 0, 0)},
		{hostId: "1", connectAddress: net.IPv4(10, 0, 0, 1)},
	}

	// Use SetHosts to control the ordering of the hosts: calling AddHost
	// iterates a map, which would result in an unpredictable ordering.
	policy.(*hostPoolHostPolicy).SetHosts(hosts)

	// interleaved iteration should always increment the host
	iter := policy.Pick(nil)
	actualA := iter()
	if actualA.Info().HostID() != "0" {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actualA.Info().HostID())
	}
	actualA.Mark(nil)

	actualB := iter()
	if actualB.Info().HostID() != "1" {
		t.Errorf("Expected hosts[1] but was hosts[%s]", actualB.Info().HostID())
	}
	actualB.Mark(fmt.Errorf("error"))
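
	// Marking an error should take host "1" out of rotation for the pool's
	// retry window, so the following picks are expected to fall back to "0".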
	actualC := iter()
	if actualC.Info().HostID() != "0" {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actualC.Info().HostID())
	}
	actualC.Mark(nil)

	actualD := iter()
	if actualD.Info().HostID() != "0" {
		t.Errorf("Expected hosts[0] but was hosts[%s]", actualD.Info().HostID())
	}
	actualD.Mark(nil)
}
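
// hostpool.New returns the package's standard pool, which round-robins across
// hosts that have not been marked down; the epsilon-greedy variant
// (hostpool.NewEpsilonGreedy) can be handed to HostPoolHostPolicy in the same
// way if weighted selection is wanted.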

func TestRoundRobinNilHostInfo(t *testing.T) {
	policy := RoundRobinHostPolicy()

	host := &HostInfo{hostId: "host-1"}
	policy.AddHost(host)

	iter := policy.Pick(nil)
	next := iter()
	if next == nil {
		t.Fatal("got nil host")
	} else if v := next.Info(); v == nil {
		t.Fatal("got nil HostInfo")
	} else if v.HostID() != host.HostID() {
		t.Fatalf("expected host %v got %v", host, v)
	}

	next = iter()
	if next != nil {
		t.Errorf("expected to get nil host got %+v", next)
		if next.Info() == nil {
			t.Fatalf("HostInfo is nil")
		}
	}
}

func TestTokenAwareNilHostInfo(t *testing.T) {
	policy := TokenAwareHostPolicy(RoundRobinHostPolicy())

	hosts := [...]*HostInfo{
		{connectAddress: net.IPv4(10, 0, 0, 0), tokens: []string{"00"}},
		{connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{"25"}},
		{connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{"50"}},
		{connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{"75"}},
	}
	for _, host := range hosts {
		policy.AddHost(host)
	}
	policy.SetPartitioner("OrderedPartitioner")

	query := &Query{}
	query.RoutingKey([]byte("20"))

	iter := policy.Pick(query)
	next := iter()
	if next == nil {
		t.Fatal("got nil host")
	} else if v := next.Info(); v == nil {
		t.Fatal("got nil HostInfo")
	} else if !v.ConnectAddress().Equal(hosts[1].ConnectAddress()) {
		t.Fatalf("expected peer 1 got %v", v.ConnectAddress())
	}

	// Empty the hosts to trigger the panic when using the fallback.
	for _, host := range hosts {
		policy.RemoveHost(host)
	}

	next = iter()
	if next != nil {
		t.Errorf("expected to get nil host got %+v", next)
		if next.Info() == nil {
			t.Fatalf("HostInfo is nil")
		}
	}
}
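
// cowHostList is, per its name, a copy-on-write host list; the test below only
// exercises add and get, checking that previously unseen addresses are accepted
// and that they all show up in the snapshot returned by get.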
func TestCOWList_Add(t *testing.T) {
	var cow cowHostList

	toAdd := [...]net.IP{net.IPv4(0, 0, 0, 0), net.IPv4(1, 0, 0, 0), net.IPv4(2, 0, 0, 0)}

	for _, addr := range toAdd {
		if !cow.add(&HostInfo{connectAddress: addr}) {
			t.Fatal("did not add peer which was not in the set")
		}
	}

	hosts := cow.get()
	if len(hosts) != len(toAdd) {
		t.Fatalf("expected to have %d hosts got %d", len(toAdd), len(hosts))
	}

	set := make(map[string]bool)
	for _, host := range hosts {
		set[string(host.ConnectAddress())] = true
	}

	for _, addr := range toAdd {
		if !set[string(addr)] {
			t.Errorf("addr was not in the host list: %q", addr)
		}
	}
}

// TestSimpleRetryPolicy makes sure that we only allow 1 + NumRetries attempts
func TestSimpleRetryPolicy(t *testing.T) {
	q := &Query{}

	// this should allow a total of 3 tries.
	rt := &SimpleRetryPolicy{NumRetries: 2}

	cases := []struct {
		attempts int
		allow    bool
	}{
		{0, true},
		{1, true},
		{2, true},
		{3, false},
		{4, false},
		{5, false},
	}

	for _, c := range cases {
		q.attempts = c.attempts
		if c.allow && !rt.Attempt(q) {
			t.Fatalf("should allow retry after %d attempts", c.attempts)
		}
		if !c.allow && rt.Attempt(q) {
			t.Fatalf("should not allow retry after %d attempts", c.attempts)
		}
	}
}
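
// A minimal wiring sketch (not part of the test): a retry policy is typically
// attached cluster-wide or per query, e.g.
//
//	cluster.RetryPolicy = &SimpleRetryPolicy{NumRetries: 2}
//	query.RetryPolicy(&SimpleRetryPolicy{NumRetries: 2})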

func TestExponentialBackoffPolicy(t *testing.T) {
	// test with defaults
	sut := &ExponentialBackoffRetryPolicy{NumRetries: 2}

	cases := []struct {
		attempts int
		delay    time.Duration
	}{
		{1, 100 * time.Millisecond},
		{2, (2) * 100 * time.Millisecond},
		{3, (2 * 2) * 100 * time.Millisecond},
		{4, (2 * 2 * 2) * 100 * time.Millisecond},
	}
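
	// With the default 100ms minimum the expected delay doubles per attempt,
	// and the observed value should stay within half the minimum on either
	// side of it, which is the jitter window asserted below.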
	for _, c := range cases {
		// test 100 times for each case
		for i := 0; i < 100; i++ {
			d := sut.napTime(c.attempts)
			if d < c.delay-(100*time.Millisecond)/2 {
				t.Fatalf("Delay %d less than jitter min of %d", d, c.delay-100*time.Millisecond/2)
			}
			if d > c.delay+(100*time.Millisecond)/2 {
				t.Fatalf("Delay %d greater than jitter max of %d", d, c.delay+100*time.Millisecond/2)
			}
		}
	}
}