policies.go 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854
  1. // Copyright (c) 2012 The gocql Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. //This file will be the future home for more policies
  5. package gocql
  6. import (
  7. "fmt"
  8. "math"
  9. "math/rand"
  10. "net"
  11. "sync"
  12. "sync/atomic"
  13. "time"
  14. "github.com/hailocab/go-hostpool"
  15. )
// cowHostList implements a copy on write host list, its equivalent type is []*HostInfo.
// Readers call get without taking mu; writers serialize on mu and publish a
// fresh slice through the atomic.Value so readers never see a partial update.
type cowHostList struct {
	list atomic.Value
	mu   sync.Mutex
}

func (c *cowHostList) String() string {
	return fmt.Sprintf("%+v", c.get())
}

// get returns the current snapshot of the host list (nil until the first
// set/add stores one).
func (c *cowHostList) get() []*HostInfo {
	// TODO(zariel): should we replace this with []*HostInfo?
	l, ok := c.list.Load().(*[]*HostInfo)
	if !ok {
		return nil
	}
	return *l
}

// set replaces the entire list with the given slice.
func (c *cowHostList) set(list []*HostInfo) {
	c.mu.Lock()
	c.list.Store(&list)
	c.mu.Unlock()
}

// add appends host if it is not already in the list and reports whether it
// was added.
func (c *cowHostList) add(host *HostInfo) bool {
	c.mu.Lock()
	l := c.get()
	if n := len(l); n == 0 {
		l = []*HostInfo{host}
	} else {
		// copy into a fresh slice, bailing out if the host is already present
		newL := make([]*HostInfo, n+1)
		for i := 0; i < n; i++ {
			if host.Equal(l[i]) {
				c.mu.Unlock()
				return false
			}
			newL[i] = l[i]
		}
		newL[n] = host
		l = newL
	}
	c.list.Store(&l)
	c.mu.Unlock()
	return true
}

// update replaces the stored entry that is Equal to host with host itself,
// publishing a new snapshot only when a match was found.
func (c *cowHostList) update(host *HostInfo) {
	c.mu.Lock()
	l := c.get()
	if len(l) == 0 {
		c.mu.Unlock()
		return
	}
	found := false
	newL := make([]*HostInfo, len(l))
	for i := range l {
		if host.Equal(l[i]) {
			newL[i] = host
			found = true
		} else {
			newL[i] = l[i]
		}
	}
	if found {
		c.list.Store(&newL)
	}
	c.mu.Unlock()
}

// remove deletes the host whose connect address equals ip and reports
// whether anything was removed.
func (c *cowHostList) remove(ip net.IP) bool {
	c.mu.Lock()
	l := c.get()
	size := len(l)
	if size == 0 {
		c.mu.Unlock()
		return false
	}
	found := false
	newL := make([]*HostInfo, 0, size)
	for i := 0; i < len(l); i++ {
		if !l[i].ConnectAddress().Equal(ip) {
			newL = append(newL, l[i])
		} else {
			found = true
		}
	}
	if !found {
		c.mu.Unlock()
		return false
	}
	// NOTE(review): the three-index re-slice assumes at most one entry
	// matched ip; if duplicates existed, len(newL) < size-1 and this would
	// expose a nil tail slot — confirm addresses are unique in practice.
	newL = newL[:size-1 : size-1]
	c.list.Store(&newL)
	c.mu.Unlock()
	return true
}
// RetryableQuery is an interface that represents a query or batch statement that
// exposes the correct functions for the retry policy logic to evaluate correctly.
type RetryableQuery interface {
	// Attempts returns how many times the statement has already been executed.
	Attempts() int
	SetConsistency(c Consistency)
	GetConsistency() Consistency
}

// RetryType tells the query executor what to do after a retryable error.
type RetryType uint16

const (
	Retry         RetryType = 0x00 // retry on same connection
	RetryNextHost RetryType = 0x01 // retry on another connection
	Ignore        RetryType = 0x02 // ignore error and return result
	Rethrow       RetryType = 0x03 // raise error and stop retrying
)

// RetryPolicy interface is used by gocql to determine if a query can be attempted
// again after a retryable error has been received. The interface allows gocql
// users to implement their own logic to determine if a query can be attempted
// again.
//
// See SimpleRetryPolicy as an example of implementing and using a RetryPolicy
// interface.
type RetryPolicy interface {
	Attempt(RetryableQuery) bool
	GetRetryType(error) RetryType
}
  132. // SimpleRetryPolicy has simple logic for attempting a query a fixed number of times.
  133. //
  134. // See below for examples of usage:
  135. //
  136. // //Assign to the cluster
  137. // cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: 3}
  138. //
  139. // //Assign to a query
  140. // query.RetryPolicy(&gocql.SimpleRetryPolicy{NumRetries: 1})
  141. //
  142. type SimpleRetryPolicy struct {
  143. NumRetries int //Number of times to retry a query
  144. }
  145. // Attempt tells gocql to attempt the query again based on query.Attempts being less
  146. // than the NumRetries defined in the policy.
  147. func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool {
  148. return q.Attempts() <= s.NumRetries
  149. }
  150. func (s *SimpleRetryPolicy) GetRetryType(err error) RetryType {
  151. return RetryNextHost
  152. }
  153. // ExponentialBackoffRetryPolicy sleeps between attempts
  154. type ExponentialBackoffRetryPolicy struct {
  155. NumRetries int
  156. Min, Max time.Duration
  157. }
  158. func (e *ExponentialBackoffRetryPolicy) Attempt(q RetryableQuery) bool {
  159. if q.Attempts() > e.NumRetries {
  160. return false
  161. }
  162. time.Sleep(e.napTime(q.Attempts()))
  163. return true
  164. }
  165. // used to calculate exponentially growing time
  166. func getExponentialTime(min time.Duration, max time.Duration, attempts int) time.Duration {
  167. if min <= 0 {
  168. min = 100 * time.Millisecond
  169. }
  170. if max <= 0 {
  171. max = 10 * time.Second
  172. }
  173. minFloat := float64(min)
  174. napDuration := minFloat * math.Pow(2, float64(attempts-1))
  175. // add some jitter
  176. napDuration += rand.Float64()*minFloat - (minFloat / 2)
  177. if napDuration > float64(max) {
  178. return time.Duration(max)
  179. }
  180. return time.Duration(napDuration)
  181. }
// GetRetryType always directs the retry to another host.
func (e *ExponentialBackoffRetryPolicy) GetRetryType(err error) RetryType {
	return RetryNextHost
}
  185. // DowngradingConsistencyRetryPolicy: Next retry will be with the next consistency level
  186. // provided in the slice
  187. //
  188. // On a read timeout: the operation is retried with the next provided consistency
  189. // level.
  190. //
  191. // On a write timeout: if the operation is an :attr:`~.UNLOGGED_BATCH`
  192. // and at least one replica acknowledged the write, the operation is
  193. // retried with the next consistency level. Furthermore, for other
  194. // write types, if at least one replica acknowledged the write, the
  195. // timeout is ignored.
  196. //
  197. // On an unavailable exception: if at least one replica is alive, the
  198. // operation is retried with the next provided consistency level.
  199. type DowngradingConsistencyRetryPolicy struct {
  200. ConsistencyLevelsToTry []Consistency
  201. }
  202. func (d *DowngradingConsistencyRetryPolicy) Attempt(q RetryableQuery) bool {
  203. currentAttempt := q.Attempts()
  204. if currentAttempt > len(d.ConsistencyLevelsToTry) {
  205. return false
  206. } else if currentAttempt > 0 {
  207. q.SetConsistency(d.ConsistencyLevelsToTry[currentAttempt-1])
  208. if gocqlDebug {
  209. Logger.Printf("%T: set consistency to %q\n",
  210. d,
  211. d.ConsistencyLevelsToTry[currentAttempt-1])
  212. }
  213. }
  214. return true
  215. }
  216. func (d *DowngradingConsistencyRetryPolicy) GetRetryType(err error) RetryType {
  217. switch t := err.(type) {
  218. case *RequestErrUnavailable:
  219. if t.Alive > 0 {
  220. return Retry
  221. }
  222. return Rethrow
  223. case *RequestErrWriteTimeout:
  224. if t.WriteType == "SIMPLE" || t.WriteType == "BATCH" || t.WriteType == "COUNTER" {
  225. if t.Received > 0 {
  226. return Ignore
  227. }
  228. return Rethrow
  229. }
  230. if t.WriteType == "UNLOGGED_BATCH" {
  231. return Retry
  232. }
  233. return Rethrow
  234. case *RequestErrReadTimeout:
  235. return Retry
  236. default:
  237. return RetryNextHost
  238. }
  239. }
// napTime computes the backoff to sleep before the given attempt using the
// policy's Min/Max bounds.
func (e *ExponentialBackoffRetryPolicy) napTime(attempts int) time.Duration {
	return getExponentialTime(e.Min, e.Max, attempts)
}
// HostStateNotifier receives notifications about cluster topology and host
// state changes.
type HostStateNotifier interface {
	AddHost(host *HostInfo)
	RemoveHost(host *HostInfo)
	HostUp(host *HostInfo)
	HostDown(host *HostInfo)
}

// KeyspaceUpdateEvent describes a schema change to a keyspace.
type KeyspaceUpdateEvent struct {
	Keyspace string
	Change   string
}

// HostSelectionPolicy is an interface for selecting
// the most appropriate host to execute a given query.
type HostSelectionPolicy interface {
	HostStateNotifier
	SetPartitioner
	KeyspaceChanged(KeyspaceUpdateEvent)
	Init(*Session)
	IsLocal(host *HostInfo) bool
	// Pick returns an iteration function over selected hosts
	Pick(ExecutableQuery) NextHost
}

// SelectedHost is an interface returned when picking a host from a host
// selection policy.
type SelectedHost interface {
	Info() *HostInfo
	Mark(error)
}

// selectedHost is the trivial SelectedHost implementation: a bare HostInfo
// whose Mark is a no-op.
type selectedHost HostInfo

func (host *selectedHost) Info() *HostInfo {
	return (*HostInfo)(host)
}

func (host *selectedHost) Mark(err error) {}

// NextHost is an iteration function over picked hosts
type NextHost func() SelectedHost
// RoundRobinHostPolicy is a round-robin load balancing policy, where each host
// is tried sequentially for each query.
func RoundRobinHostPolicy() HostSelectionPolicy {
	return &roundRobinHostPolicy{}
}

type roundRobinHostPolicy struct {
	hosts cowHostList
	pos   uint32       // monotonically increasing rotation cursor, advanced atomically
	mu    sync.RWMutex // NOTE(review): appears unused within this file — confirm before removing
}

// IsLocal treats every host as local; this policy has no datacenter awareness.
func (r *roundRobinHostPolicy) IsLocal(*HostInfo) bool { return true }

func (r *roundRobinHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {}
func (r *roundRobinHostPolicy) SetPartitioner(partitioner string)   {}
func (r *roundRobinHostPolicy) Init(*Session)                       {}
  291. func (r *roundRobinHostPolicy) Pick(qry ExecutableQuery) NextHost {
  292. // i is used to limit the number of attempts to find a host
  293. // to the number of hosts known to this policy
  294. var i int
  295. return func() SelectedHost {
  296. hosts := r.hosts.get()
  297. if len(hosts) == 0 {
  298. return nil
  299. }
  300. // always increment pos to evenly distribute traffic in case of
  301. // failures
  302. pos := atomic.AddUint32(&r.pos, 1) - 1
  303. if i >= len(hosts) {
  304. return nil
  305. }
  306. host := hosts[(pos)%uint32(len(hosts))]
  307. i++
  308. return (*selectedHost)(host)
  309. }
  310. }
func (r *roundRobinHostPolicy) AddHost(host *HostInfo) {
	r.hosts.add(host)
}

func (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) {
	r.hosts.remove(host.ConnectAddress())
}

// HostUp/HostDown simply add to and remove from the rotation.
func (r *roundRobinHostPolicy) HostUp(host *HostInfo) {
	r.AddHost(host)
}

func (r *roundRobinHostPolicy) HostDown(host *HostInfo) {
	r.RemoveHost(host)
}

// ShuffleReplicas is a TokenAwareHostPolicy option that randomizes the order
// of the replicas served for a token instead of always preferring the same one.
func ShuffleReplicas() func(*tokenAwareHostPolicy) {
	return func(t *tokenAwareHostPolicy) {
		t.shuffleReplicas = true
	}
}
// TokenAwareHostPolicy is a token aware host selection policy, where hosts are
// selected based on the partition key, so queries are sent to the host which
// owns the partition. Fallback is used when routing information is not available.
func TokenAwareHostPolicy(fallback HostSelectionPolicy, opts ...func(*tokenAwareHostPolicy)) HostSelectionPolicy {
	p := &tokenAwareHostPolicy{fallback: fallback}
	for _, opt := range opts {
		opt(p)
	}
	return p
}

// keyspaceMeta caches, per keyspace, the replica hosts owning each token.
type keyspaceMeta struct {
	replicas map[string]map[token][]*HostInfo
}

type tokenAwareHostPolicy struct {
	hosts           cowHostList
	mu              sync.RWMutex // guards partitioner
	partitioner     string
	fallback        HostSelectionPolicy
	session         *Session
	tokenRing       atomic.Value // *tokenRing
	keyspaces       atomic.Value // *keyspaceMeta
	shuffleReplicas bool
}

// Init stores the session so keyspace metadata can be fetched later.
func (t *tokenAwareHostPolicy) Init(s *Session) {
	t.session = s
}

// IsLocal defers locality decisions to the fallback policy.
func (t *tokenAwareHostPolicy) IsLocal(host *HostInfo) bool {
	return t.fallback.IsLocal(host)
}
  357. func (t *tokenAwareHostPolicy) KeyspaceChanged(update KeyspaceUpdateEvent) {
  358. meta, _ := t.keyspaces.Load().(*keyspaceMeta)
  359. var size = 1
  360. if meta != nil {
  361. size = len(meta.replicas)
  362. }
  363. newMeta := &keyspaceMeta{
  364. replicas: make(map[string]map[token][]*HostInfo, size),
  365. }
  366. ks, err := t.session.KeyspaceMetadata(update.Keyspace)
  367. if err == nil {
  368. strat := getStrategy(ks)
  369. tr := t.tokenRing.Load().(*tokenRing)
  370. if tr != nil {
  371. newMeta.replicas[update.Keyspace] = strat.replicaMap(t.hosts.get(), tr.tokens)
  372. }
  373. }
  374. if meta != nil {
  375. for ks, replicas := range meta.replicas {
  376. if ks != update.Keyspace {
  377. newMeta.replicas[ks] = replicas
  378. }
  379. }
  380. }
  381. t.keyspaces.Store(newMeta)
  382. }
  383. func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) {
  384. t.mu.Lock()
  385. defer t.mu.Unlock()
  386. if t.partitioner != partitioner {
  387. t.fallback.SetPartitioner(partitioner)
  388. t.partitioner = partitioner
  389. t.resetTokenRing(partitioner)
  390. }
  391. }
  392. func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) {
  393. t.hosts.add(host)
  394. t.fallback.AddHost(host)
  395. t.mu.RLock()
  396. partitioner := t.partitioner
  397. t.mu.RUnlock()
  398. t.resetTokenRing(partitioner)
  399. }
  400. func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) {
  401. t.hosts.remove(host.ConnectAddress())
  402. t.fallback.RemoveHost(host)
  403. t.mu.RLock()
  404. partitioner := t.partitioner
  405. t.mu.RUnlock()
  406. t.resetTokenRing(partitioner)
  407. }
  408. func (t *tokenAwareHostPolicy) HostUp(host *HostInfo) {
  409. // TODO: need to avoid doing all the work on AddHost on hostup/down
  410. // because it now expensive to calculate the replica map for each
  411. // token
  412. t.AddHost(host)
  413. }
  414. func (t *tokenAwareHostPolicy) HostDown(host *HostInfo) {
  415. t.RemoveHost(host)
  416. }
// resetTokenRing rebuilds the token ring from the current host list; it is a
// no-op until a partitioner is known, and leaves the old ring in place when
// construction fails.
func (t *tokenAwareHostPolicy) resetTokenRing(partitioner string) {
	if partitioner == "" {
		// partitioner not yet set
		return
	}

	// create a new token ring
	hosts := t.hosts.get()
	tokenRing, err := newTokenRing(partitioner, hosts)
	if err != nil {
		Logger.Printf("Unable to update the token ring due to error: %s", err)
		return
	}

	// replace the token ring
	t.tokenRing.Store(tokenRing)
}

// getReplicas looks up the cached replica hosts for the keyspace/token pair;
// ok is false when no cache entry exists yet.
func (t *tokenAwareHostPolicy) getReplicas(keyspace string, token token) ([]*HostInfo, bool) {
	meta, _ := t.keyspaces.Load().(*keyspaceMeta)
	if meta == nil {
		return nil, false
	}
	tokens, ok := meta.replicas[keyspace][token]
	return tokens, ok
}
// Pick returns an iterator that first yields the replicas owning the query's
// partition (only hosts that are up and local per the fallback policy), then
// lazily switches to the fallback policy's iterator, skipping hosts that were
// already served. Any missing routing information defers entirely to the
// fallback policy.
func (t *tokenAwareHostPolicy) Pick(qry ExecutableQuery) NextHost {
	if qry == nil {
		return t.fallback.Pick(qry)
	}

	// token routing needs a routing key
	routingKey, err := qry.GetRoutingKey()
	if err != nil {
		return t.fallback.Pick(qry)
	} else if routingKey == nil {
		return t.fallback.Pick(qry)
	}

	tr, _ := t.tokenRing.Load().(*tokenRing)
	if tr == nil {
		return t.fallback.Pick(qry)
	}

	token := tr.partitioner.Hash(routingKey)
	primaryEndpoint := tr.GetHostForToken(token)
	if primaryEndpoint == nil || token == nil {
		return t.fallback.Pick(qry)
	}

	replicas, ok := t.getReplicas(qry.Keyspace(), token)
	if !ok {
		// no cached replica map for the keyspace: just use the token owner
		replicas = []*HostInfo{primaryEndpoint}
	} else if t.shuffleReplicas {
		replicas = shuffleHosts(replicas)
	}

	var (
		fallbackIter NextHost
		i            int
	)

	// used records replicas already returned so the fallback phase does not
	// yield them a second time
	used := make(map[*HostInfo]bool, len(replicas))
	return func() SelectedHost {
		for i < len(replicas) {
			h := replicas[i]
			i++

			if h.IsUp() && t.fallback.IsLocal(h) {
				used[h] = true
				return (*selectedHost)(h)
			}
		}

		if fallbackIter == nil {
			// fallback iterator is created lazily, only once the replicas
			// are exhausted
			fallbackIter = t.fallback.Pick(qry)
		}

		// filter the token aware selected hosts from the fallback hosts
		for fallbackHost := fallbackIter(); fallbackHost != nil; fallbackHost = fallbackIter() {
			if !used[fallbackHost.Info()] {
				return fallbackHost
			}
		}

		return nil
	}
}
// HostPoolHostPolicy is a host policy which uses the bitly/go-hostpool library
// to distribute queries between hosts and prevent sending queries to
// unresponsive hosts. When creating the host pool that is passed to the policy
// use an empty slice of hosts as the hostpool will be populated later by gocql.
// See below for examples of usage:
//
//	// Create host selection policy using a simple host pool
//	cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(hostpool.New(nil))
//
//	// Create host selection policy using an epsilon greedy pool
//	cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(
//	    hostpool.NewEpsilonGreedy(nil, 0, &hostpool.LinearEpsilonValueCalculator{}),
//	)
func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy {
	return &hostPoolHostPolicy{hostMap: map[string]*HostInfo{}, hp: hp}
}

type hostPoolHostPolicy struct {
	hp      hostpool.HostPool
	mu      sync.RWMutex         // guards hp and hostMap
	hostMap map[string]*HostInfo // connect address string -> host
}

func (r *hostPoolHostPolicy) Init(*Session)                       {}
func (r *hostPoolHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {}
func (r *hostPoolHostPolicy) SetPartitioner(string)               {}

// IsLocal treats every host as local; this policy has no datacenter awareness.
func (r *hostPoolHostPolicy) IsLocal(*HostInfo) bool { return true }
  518. func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) {
  519. peers := make([]string, len(hosts))
  520. hostMap := make(map[string]*HostInfo, len(hosts))
  521. for i, host := range hosts {
  522. ip := host.ConnectAddress().String()
  523. peers[i] = ip
  524. hostMap[ip] = host
  525. }
  526. r.mu.Lock()
  527. r.hp.SetHosts(peers)
  528. r.hostMap = hostMap
  529. r.mu.Unlock()
  530. }
// AddHost registers a previously unknown host and pushes the updated peer
// list to the underlying host pool.
func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
	ip := host.ConnectAddress().String()
	r.mu.Lock()
	defer r.mu.Unlock()

	// If the host addr is present and isn't nil return
	if h, ok := r.hostMap[ip]; ok && h != nil {
		return
	}
	// otherwise, add the host to the map
	r.hostMap[ip] = host
	// and construct a new peer list to give to the HostPool
	hosts := make([]string, 0, len(r.hostMap))
	for addr := range r.hostMap {
		hosts = append(hosts, addr)
	}

	r.hp.SetHosts(hosts)
}
  548. func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) {
  549. ip := host.ConnectAddress().String()
  550. r.mu.Lock()
  551. defer r.mu.Unlock()
  552. if _, ok := r.hostMap[ip]; !ok {
  553. return
  554. }
  555. delete(r.hostMap, ip)
  556. hosts := make([]string, 0, len(r.hostMap))
  557. for _, host := range r.hostMap {
  558. hosts = append(hosts, host.ConnectAddress().String())
  559. }
  560. r.hp.SetHosts(hosts)
  561. }
// HostUp/HostDown map directly to add/remove.
func (r *hostPoolHostPolicy) HostUp(host *HostInfo) {
	r.AddHost(host)
}

func (r *hostPoolHostPolicy) HostDown(host *HostInfo) {
	r.RemoveHost(host)
}

// Pick returns an iterator that delegates host choice to the host pool. Each
// call asks the pool for a host and wraps it with the pool's response handle
// so Mark can feed the query outcome back to the pool.
func (r *hostPoolHostPolicy) Pick(qry ExecutableQuery) NextHost {
	return func() SelectedHost {
		r.mu.RLock()
		defer r.mu.RUnlock()

		if len(r.hostMap) == 0 {
			return nil
		}

		hostR := r.hp.Get()
		host, ok := r.hostMap[hostR.Host()]
		if !ok {
			return nil
		}

		return selectedHostPoolHost{
			policy: r,
			info:   host,
			hostR:  hostR,
		}
	}
}
// selectedHostPoolHost is a host returned by the hostPoolHostPolicy and
// implements the SelectedHost interface
type selectedHostPoolHost struct {
	policy *hostPoolHostPolicy
	info   *HostInfo
	hostR  hostpool.HostPoolResponse
}

func (host selectedHostPoolHost) Info() *HostInfo {
	return host.info
}

// Mark reports the query outcome back to the host pool, unless the host was
// removed from the policy between Pick and Mark.
func (host selectedHostPoolHost) Mark(err error) {
	ip := host.info.ConnectAddress().String()

	host.policy.mu.RLock()
	defer host.policy.mu.RUnlock()

	if _, ok := host.policy.hostMap[ip]; !ok {
		// host was removed between pick and mark
		return
	}

	host.hostR.Mark(err)
}
// dcAwareRR keeps separate round-robin host lists for the local datacenter
// and for everywhere else.
type dcAwareRR struct {
	local       string
	pos         uint32       // rotation cursor, advanced atomically
	mu          sync.RWMutex // NOTE(review): appears unused within this file — confirm before removing
	localHosts  cowHostList
	remoteHosts cowHostList
}

// DCAwareRoundRobinPolicy is a host selection policy which will prioritize and
// return hosts which are in the local datacenter before returning hosts in all
// other datacenters
func DCAwareRoundRobinPolicy(localDC string) HostSelectionPolicy {
	return &dcAwareRR{local: localDC}
}

func (d *dcAwareRR) Init(*Session)                       {}
func (d *dcAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {}
func (d *dcAwareRR) SetPartitioner(p string)             {}

// IsLocal reports whether host belongs to the configured local datacenter.
func (d *dcAwareRR) IsLocal(host *HostInfo) bool {
	return host.DataCenter() == d.local
}
  626. func (d *dcAwareRR) AddHost(host *HostInfo) {
  627. if host.DataCenter() == d.local {
  628. d.localHosts.add(host)
  629. } else {
  630. d.remoteHosts.add(host)
  631. }
  632. }
  633. func (d *dcAwareRR) RemoveHost(host *HostInfo) {
  634. if host.DataCenter() == d.local {
  635. d.localHosts.remove(host.ConnectAddress())
  636. } else {
  637. d.remoteHosts.remove(host.ConnectAddress())
  638. }
  639. }
  640. func (d *dcAwareRR) HostUp(host *HostInfo) { d.AddHost(host) }
  641. func (d *dcAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) }
// Pick returns an iterator that serves local-datacenter hosts round-robin,
// falling back to remote hosts only while no local host is known.
func (d *dcAwareRR) Pick(q ExecutableQuery) NextHost {
	var i int
	return func() SelectedHost {
		var hosts []*HostInfo
		localHosts := d.localHosts.get()
		remoteHosts := d.remoteHosts.get()
		if len(localHosts) != 0 {
			hosts = localHosts
		} else {
			hosts = remoteHosts
		}
		if len(hosts) == 0 {
			return nil
		}

		// always increment pos to evenly distribute traffic in case of
		// failures
		pos := atomic.AddUint32(&d.pos, 1) - 1
		// NOTE(review): the attempt limit counts local+remote hosts even
		// though only one of the two lists is ever served, so hosts from the
		// served list may be yielded more than once — confirm intended.
		if i >= len(localHosts)+len(remoteHosts) {
			return nil
		}
		host := hosts[(pos)%uint32(len(hosts))]
		i++
		return (*selectedHost)(host)
	}
}
// ConvictionPolicy interface is used by gocql to determine if a host should be
// marked as DOWN based on the error and host info
type ConvictionPolicy interface {
	// Implementations should return `true` if the host should be convicted, `false` otherwise.
	AddFailure(error error, host *HostInfo) bool
	// Implementations should clear out any convictions or state regarding the host.
	Reset(host *HostInfo)
}

// SimpleConvictionPolicy implements a ConvictionPolicy which convicts all hosts
// regardless of error
type SimpleConvictionPolicy struct {
}

// AddFailure always convicts the host.
func (e *SimpleConvictionPolicy) AddFailure(error error, host *HostInfo) bool {
	return true
}

// Reset is a no-op; this policy keeps no per-host state.
func (e *SimpleConvictionPolicy) Reset(host *HostInfo) {}

// ReconnectionPolicy interface is used by gocql to determine if reconnection
// can be attempted after connection error. The interface allows gocql users
// to implement their own logic to determine how to attempt reconnection.
type ReconnectionPolicy interface {
	GetInterval(currentRetry int) time.Duration
	GetMaxRetries() int
}
  691. // ConstantReconnectionPolicy has simple logic for returning a fixed reconnection interval.
  692. //
  693. // Examples of usage:
  694. //
  695. // cluster.ReconnectionPolicy = &gocql.ConstantReconnectionPolicy{MaxRetries: 10, Interval: 8 * time.Second}
  696. //
  697. type ConstantReconnectionPolicy struct {
  698. MaxRetries int
  699. Interval time.Duration
  700. }
  701. func (c *ConstantReconnectionPolicy) GetInterval(currentRetry int) time.Duration {
  702. return c.Interval
  703. }
  704. func (c *ConstantReconnectionPolicy) GetMaxRetries() int {
  705. return c.MaxRetries
  706. }
  707. // ExponentialReconnectionPolicy returns a growing reconnection interval.
  708. type ExponentialReconnectionPolicy struct {
  709. MaxRetries int
  710. InitialInterval time.Duration
  711. }
  712. func (e *ExponentialReconnectionPolicy) GetInterval(currentRetry int) time.Duration {
  713. return getExponentialTime(e.InitialInterval, math.MaxInt16*time.Second, e.GetMaxRetries())
  714. }
  715. func (e *ExponentialReconnectionPolicy) GetMaxRetries() int {
  716. return e.MaxRetries
  717. }