policies.go 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865
  1. // Copyright (c) 2012 The gocql Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. //This file will be the future home for more policies
  5. package gocql
  6. import (
  7. "context"
  8. "fmt"
  9. "math"
  10. "math/rand"
  11. "net"
  12. "sync"
  13. "sync/atomic"
  14. "time"
  15. "github.com/hailocab/go-hostpool"
  16. )
// cowHostList implements a copy on write host list, its equivalent type is []*HostInfo.
// Readers call get() lock-free via the atomic.Value; writers serialize on mu and
// publish a fresh slice, never mutating one already published.
type cowHostList struct {
	list atomic.Value // holds *[]*HostInfo
	mu   sync.Mutex   // serializes writers (add/update/remove/set)
}
  22. func (c *cowHostList) String() string {
  23. return fmt.Sprintf("%+v", c.get())
  24. }
  25. func (c *cowHostList) get() []*HostInfo {
  26. // TODO(zariel): should we replace this with []*HostInfo?
  27. l, ok := c.list.Load().(*[]*HostInfo)
  28. if !ok {
  29. return nil
  30. }
  31. return *l
  32. }
  33. func (c *cowHostList) set(list []*HostInfo) {
  34. c.mu.Lock()
  35. c.list.Store(&list)
  36. c.mu.Unlock()
  37. }
  38. // add will add a host if it not already in the list
  39. func (c *cowHostList) add(host *HostInfo) bool {
  40. c.mu.Lock()
  41. l := c.get()
  42. if n := len(l); n == 0 {
  43. l = []*HostInfo{host}
  44. } else {
  45. newL := make([]*HostInfo, n+1)
  46. for i := 0; i < n; i++ {
  47. if host.Equal(l[i]) {
  48. c.mu.Unlock()
  49. return false
  50. }
  51. newL[i] = l[i]
  52. }
  53. newL[n] = host
  54. l = newL
  55. }
  56. c.list.Store(&l)
  57. c.mu.Unlock()
  58. return true
  59. }
  60. func (c *cowHostList) update(host *HostInfo) {
  61. c.mu.Lock()
  62. l := c.get()
  63. if len(l) == 0 {
  64. c.mu.Unlock()
  65. return
  66. }
  67. found := false
  68. newL := make([]*HostInfo, len(l))
  69. for i := range l {
  70. if host.Equal(l[i]) {
  71. newL[i] = host
  72. found = true
  73. } else {
  74. newL[i] = l[i]
  75. }
  76. }
  77. if found {
  78. c.list.Store(&newL)
  79. }
  80. c.mu.Unlock()
  81. }
  82. func (c *cowHostList) remove(ip net.IP) bool {
  83. c.mu.Lock()
  84. l := c.get()
  85. size := len(l)
  86. if size == 0 {
  87. c.mu.Unlock()
  88. return false
  89. }
  90. found := false
  91. newL := make([]*HostInfo, 0, size)
  92. for i := 0; i < len(l); i++ {
  93. if !l[i].ConnectAddress().Equal(ip) {
  94. newL = append(newL, l[i])
  95. } else {
  96. found = true
  97. }
  98. }
  99. if !found {
  100. c.mu.Unlock()
  101. return false
  102. }
  103. newL = newL[: size-1 : size-1]
  104. c.list.Store(&newL)
  105. c.mu.Unlock()
  106. return true
  107. }
// RetryableQuery is an interface that represents a query or batch statement that
// exposes the correct functions for the retry policy logic to evaluate correctly.
type RetryableQuery interface {
	// Attempts returns how many times the query has been tried so far.
	Attempts() int
	// SetConsistency overrides the consistency level for subsequent attempts.
	SetConsistency(c Consistency)
	// GetConsistency returns the consistency level currently in effect.
	GetConsistency() Consistency
	// Context returns the context governing the query's lifetime.
	Context() context.Context
}
// RetryType describes what a RetryPolicy wants done after a failed attempt.
type RetryType uint16

const (
	Retry         RetryType = 0x00 // retry on same connection
	RetryNextHost RetryType = 0x01 // retry on another connection
	Ignore        RetryType = 0x02 // ignore error and return result
	Rethrow       RetryType = 0x03 // raise error and stop retrying
)
// RetryPolicy interface is used by gocql to determine if a query can be attempted
// again after a retryable error has been received. The interface allows gocql
// users to implement their own logic to determine if a query can be attempted
// again.
//
// See SimpleRetryPolicy as an example of implementing and using a RetryPolicy
// interface.
type RetryPolicy interface {
	// Attempt reports whether the query should be retried at all.
	Attempt(RetryableQuery) bool
	// GetRetryType decides how the retry should be performed for the given error.
	GetRetryType(error) RetryType
}
// RetryPolicyWithAttemptTimeout is an optional interface retry policies can implement
// in order to control the duration to use before a query attempt is considered
// as a timeout and will potentially be retried.
// It's not part of the RetryPolicy interface to remain backwards compatible.
type RetryPolicyWithAttemptTimeout interface {
	// AttemptTimeout returns the per-attempt timeout duration.
	AttemptTimeout() time.Duration
	RetryPolicy
}
// SimpleRetryPolicy has simple logic for attempting a query a fixed number of times.
//
// See below for examples of usage:
//
//	//Assign to the cluster
//	cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: 3}
//
//	//Assign to a query
//	query.RetryPolicy(&gocql.SimpleRetryPolicy{NumRetries: 1})
type SimpleRetryPolicy struct {
	NumRetries int //Number of times to retry a query
}
  155. // Attempt tells gocql to attempt the query again based on query.Attempts being less
  156. // than the NumRetries defined in the policy.
  157. func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool {
  158. return q.Attempts() <= s.NumRetries
  159. }
  160. func (s *SimpleRetryPolicy) GetRetryType(err error) RetryType {
  161. return RetryNextHost
  162. }
// ExponentialBackoffRetryPolicy sleeps between attempts, with the sleep time
// growing exponentially from Min up to Max (see getExponentialTime).
type ExponentialBackoffRetryPolicy struct {
	NumRetries int           // maximum number of retries
	Min, Max   time.Duration // lower/upper bounds for the backoff interval
}
  168. func (e *ExponentialBackoffRetryPolicy) Attempt(q RetryableQuery) bool {
  169. if q.Attempts() > e.NumRetries {
  170. return false
  171. }
  172. time.Sleep(e.napTime(q.Attempts()))
  173. return true
  174. }
  175. // used to calculate exponentially growing time
  176. func getExponentialTime(min time.Duration, max time.Duration, attempts int) time.Duration {
  177. if min <= 0 {
  178. min = 100 * time.Millisecond
  179. }
  180. if max <= 0 {
  181. max = 10 * time.Second
  182. }
  183. minFloat := float64(min)
  184. napDuration := minFloat * math.Pow(2, float64(attempts-1))
  185. // add some jitter
  186. napDuration += rand.Float64()*minFloat - (minFloat / 2)
  187. if napDuration > float64(max) {
  188. return time.Duration(max)
  189. }
  190. return time.Duration(napDuration)
  191. }
  192. func (e *ExponentialBackoffRetryPolicy) GetRetryType(err error) RetryType {
  193. return RetryNextHost
  194. }
// DowngradingConsistencyRetryPolicy: Next retry will be with the next consistency level
// provided in the slice
//
// On a read timeout: the operation is retried with the next provided consistency
// level.
//
// On a write timeout: if the operation is an :attr:`~.UNLOGGED_BATCH`
// and at least one replica acknowledged the write, the operation is
// retried with the next consistency level. Furthermore, for other
// write types, if at least one replica acknowledged the write, the
// timeout is ignored.
//
// On an unavailable exception: if at least one replica is alive, the
// operation is retried with the next provided consistency level.
type DowngradingConsistencyRetryPolicy struct {
	// ConsistencyLevelsToTry lists the consistency levels used on successive
	// retries; its length also bounds the number of attempts.
	ConsistencyLevelsToTry []Consistency
}
  212. func (d *DowngradingConsistencyRetryPolicy) Attempt(q RetryableQuery) bool {
  213. currentAttempt := q.Attempts()
  214. if currentAttempt > len(d.ConsistencyLevelsToTry) {
  215. return false
  216. } else if currentAttempt > 0 {
  217. q.SetConsistency(d.ConsistencyLevelsToTry[currentAttempt-1])
  218. if gocqlDebug {
  219. Logger.Printf("%T: set consistency to %q\n",
  220. d,
  221. d.ConsistencyLevelsToTry[currentAttempt-1])
  222. }
  223. }
  224. return true
  225. }
  226. func (d *DowngradingConsistencyRetryPolicy) GetRetryType(err error) RetryType {
  227. switch t := err.(type) {
  228. case *RequestErrUnavailable:
  229. if t.Alive > 0 {
  230. return Retry
  231. }
  232. return Rethrow
  233. case *RequestErrWriteTimeout:
  234. if t.WriteType == "SIMPLE" || t.WriteType == "BATCH" || t.WriteType == "COUNTER" {
  235. if t.Received > 0 {
  236. return Ignore
  237. }
  238. return Rethrow
  239. }
  240. if t.WriteType == "UNLOGGED_BATCH" {
  241. return Retry
  242. }
  243. return Rethrow
  244. case *RequestErrReadTimeout:
  245. return Retry
  246. default:
  247. return RetryNextHost
  248. }
  249. }
  250. func (e *ExponentialBackoffRetryPolicy) napTime(attempts int) time.Duration {
  251. return getExponentialTime(e.Min, e.Max, attempts)
  252. }
// HostStateNotifier receives cluster topology and host liveness changes.
type HostStateNotifier interface {
	AddHost(host *HostInfo)
	RemoveHost(host *HostInfo)
	HostUp(host *HostInfo)
	HostDown(host *HostInfo)
}
// KeyspaceUpdateEvent describes a schema change to a keyspace.
type KeyspaceUpdateEvent struct {
	Keyspace string // name of the keyspace that changed
	Change   string // kind of change reported by the server
}
// HostSelectionPolicy is an interface for selecting
// the most appropriate host to execute a given query.
type HostSelectionPolicy interface {
	HostStateNotifier
	SetPartitioner
	KeyspaceChanged(KeyspaceUpdateEvent)
	// Init is called once with the session before the policy is used.
	Init(*Session)
	// IsLocal reports whether the host is considered local to this policy.
	IsLocal(host *HostInfo) bool
	//Pick returns an iteration function over selected hosts
	Pick(ExecutableQuery) NextHost
}
// SelectedHost is an interface returned when picking a host from a host
// selection policy.
type SelectedHost interface {
	// Info returns the underlying host information.
	Info() *HostInfo
	// Mark records the outcome of using this host (nil for success).
	Mark(error)
}
// selectedHost is a minimal SelectedHost adapter around HostInfo.
type selectedHost HostInfo

// Info returns the wrapped host information.
func (host *selectedHost) Info() *HostInfo {
	return (*HostInfo)(host)
}

// Mark is a no-op; this adapter does not track per-host outcomes.
func (host *selectedHost) Mark(err error) {}
// NextHost is an iteration function over picked hosts; it returns nil when
// no further hosts are available.
type NextHost func() SelectedHost
  287. // RoundRobinHostPolicy is a round-robin load balancing policy, where each host
  288. // is tried sequentially for each query.
  289. func RoundRobinHostPolicy() HostSelectionPolicy {
  290. return &roundRobinHostPolicy{}
  291. }
// roundRobinHostPolicy cycles through all known hosts in order.
type roundRobinHostPolicy struct {
	hosts cowHostList
	pos   uint32 // monotonically increasing pick counter (accessed atomically)
	mu    sync.RWMutex
}
// IsLocal treats every host as local; round-robin is DC-agnostic.
func (r *roundRobinHostPolicy) IsLocal(*HostInfo) bool { return true }

// KeyspaceChanged is a no-op; this policy keeps no keyspace state.
func (r *roundRobinHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {}

// SetPartitioner is a no-op; this policy is not token aware.
func (r *roundRobinHostPolicy) SetPartitioner(partitioner string) {}

// Init is a no-op; this policy needs no session.
func (r *roundRobinHostPolicy) Init(*Session) {}
  301. func (r *roundRobinHostPolicy) Pick(qry ExecutableQuery) NextHost {
  302. // i is used to limit the number of attempts to find a host
  303. // to the number of hosts known to this policy
  304. var i int
  305. return func() SelectedHost {
  306. hosts := r.hosts.get()
  307. if len(hosts) == 0 {
  308. return nil
  309. }
  310. // always increment pos to evenly distribute traffic in case of
  311. // failures
  312. pos := atomic.AddUint32(&r.pos, 1) - 1
  313. if i >= len(hosts) {
  314. return nil
  315. }
  316. host := hosts[(pos)%uint32(len(hosts))]
  317. i++
  318. return (*selectedHost)(host)
  319. }
  320. }
// AddHost registers a new host with the policy.
func (r *roundRobinHostPolicy) AddHost(host *HostInfo) {
	r.hosts.add(host)
}

// RemoveHost removes the host, matched by connect address.
func (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) {
	r.hosts.remove(host.ConnectAddress())
}

// HostUp re-adds a host that has come back up.
func (r *roundRobinHostPolicy) HostUp(host *HostInfo) {
	r.AddHost(host)
}

// HostDown removes a host that went down.
func (r *roundRobinHostPolicy) HostDown(host *HostInfo) {
	r.RemoveHost(host)
}
// ShuffleReplicas is an option for TokenAwareHostPolicy that makes the policy
// shuffle the replica set before iterating it, spreading load across replicas.
func ShuffleReplicas() func(*tokenAwareHostPolicy) {
	return func(t *tokenAwareHostPolicy) {
		t.shuffleReplicas = true
	}
}
  338. // TokenAwareHostPolicy is a token aware host selection policy, where hosts are
  339. // selected based on the partition key, so queries are sent to the host which
  340. // owns the partition. Fallback is used when routing information is not available.
  341. func TokenAwareHostPolicy(fallback HostSelectionPolicy, opts ...func(*tokenAwareHostPolicy)) HostSelectionPolicy {
  342. p := &tokenAwareHostPolicy{fallback: fallback}
  343. for _, opt := range opts {
  344. opt(p)
  345. }
  346. return p
  347. }
// keyspaceMeta caches, per keyspace, the replica hosts owning each token.
type keyspaceMeta struct {
	replicas map[string]map[token][]*HostInfo
}
// tokenAwareHostPolicy routes queries to the replicas owning the routing key's
// token, deferring to the wrapped fallback policy when routing info is missing.
type tokenAwareHostPolicy struct {
	hosts       cowHostList
	mu          sync.RWMutex // guards partitioner
	partitioner string
	fallback    HostSelectionPolicy
	session     *Session

	tokenRing atomic.Value // *tokenRing
	keyspaces atomic.Value // *keyspaceMeta

	shuffleReplicas bool // set via the ShuffleReplicas option
}
// Init stores the session, needed later for keyspace metadata lookups.
func (t *tokenAwareHostPolicy) Init(s *Session) {
	t.session = s
}

// IsLocal delegates the locality decision to the fallback policy.
func (t *tokenAwareHostPolicy) IsLocal(host *HostInfo) bool {
	return t.fallback.IsLocal(host)
}
  367. func (t *tokenAwareHostPolicy) KeyspaceChanged(update KeyspaceUpdateEvent) {
  368. meta, _ := t.keyspaces.Load().(*keyspaceMeta)
  369. var size = 1
  370. if meta != nil {
  371. size = len(meta.replicas)
  372. }
  373. newMeta := &keyspaceMeta{
  374. replicas: make(map[string]map[token][]*HostInfo, size),
  375. }
  376. ks, err := t.session.KeyspaceMetadata(update.Keyspace)
  377. if err == nil {
  378. strat := getStrategy(ks)
  379. tr := t.tokenRing.Load().(*tokenRing)
  380. if tr != nil {
  381. newMeta.replicas[update.Keyspace] = strat.replicaMap(t.hosts.get(), tr.tokens)
  382. }
  383. }
  384. if meta != nil {
  385. for ks, replicas := range meta.replicas {
  386. if ks != update.Keyspace {
  387. newMeta.replicas[ks] = replicas
  388. }
  389. }
  390. }
  391. t.keyspaces.Store(newMeta)
  392. }
  393. func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) {
  394. t.mu.Lock()
  395. defer t.mu.Unlock()
  396. if t.partitioner != partitioner {
  397. t.fallback.SetPartitioner(partitioner)
  398. t.partitioner = partitioner
  399. t.resetTokenRing(partitioner)
  400. }
  401. }
// AddHost registers the host with this policy and the fallback, then rebuilds
// the token ring to include it.
func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) {
	t.hosts.add(host)
	t.fallback.AddHost(host)

	// snapshot the partitioner under the read lock before rebuilding
	t.mu.RLock()
	partitioner := t.partitioner
	t.mu.RUnlock()
	t.resetTokenRing(partitioner)
}
// RemoveHost drops the host from this policy and the fallback, then rebuilds
// the token ring without it.
func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) {
	t.hosts.remove(host.ConnectAddress())
	t.fallback.RemoveHost(host)

	// snapshot the partitioner under the read lock before rebuilding
	t.mu.RLock()
	partitioner := t.partitioner
	t.mu.RUnlock()
	t.resetTokenRing(partitioner)
}
// HostUp re-adds a host that came back up, which also rebuilds the ring.
func (t *tokenAwareHostPolicy) HostUp(host *HostInfo) {
	// TODO: need to avoid doing all the work on AddHost on hostup/down
	// because it now expensive to calculate the replica map for each
	// token
	t.AddHost(host)
}

// HostDown removes a host that went down, which also rebuilds the ring.
func (t *tokenAwareHostPolicy) HostDown(host *HostInfo) {
	t.RemoveHost(host)
}
  427. func (t *tokenAwareHostPolicy) resetTokenRing(partitioner string) {
  428. if partitioner == "" {
  429. // partitioner not yet set
  430. return
  431. }
  432. // create a new token ring
  433. hosts := t.hosts.get()
  434. tokenRing, err := newTokenRing(partitioner, hosts)
  435. if err != nil {
  436. Logger.Printf("Unable to update the token ring due to error: %s", err)
  437. return
  438. }
  439. // replace the token ring
  440. t.tokenRing.Store(tokenRing)
  441. }
  442. func (t *tokenAwareHostPolicy) getReplicas(keyspace string, token token) ([]*HostInfo, bool) {
  443. meta, _ := t.keyspaces.Load().(*keyspaceMeta)
  444. if meta == nil {
  445. return nil, false
  446. }
  447. tokens, ok := meta.replicas[keyspace][token]
  448. return tokens, ok
  449. }
// Pick returns an iterator that yields the up, local replicas owning the
// query's routing-key token first, then (lazily) hosts from the fallback
// policy, skipping any replica it already returned.
func (t *tokenAwareHostPolicy) Pick(qry ExecutableQuery) NextHost {
	if qry == nil {
		return t.fallback.Pick(qry)
	}

	// without a routing key or a token ring we cannot route; defer to fallback
	routingKey, err := qry.GetRoutingKey()
	if err != nil {
		return t.fallback.Pick(qry)
	} else if routingKey == nil {
		return t.fallback.Pick(qry)
	}

	tr, _ := t.tokenRing.Load().(*tokenRing)
	if tr == nil {
		return t.fallback.Pick(qry)
	}

	token := tr.partitioner.Hash(routingKey)
	primaryEndpoint := tr.GetHostForToken(token)
	if primaryEndpoint == nil || token == nil {
		return t.fallback.Pick(qry)
	}

	replicas, ok := t.getReplicas(qry.Keyspace(), token)
	if !ok {
		// no cached replica map for this keyspace: use the ring owner only
		replicas = []*HostInfo{primaryEndpoint}
	} else if t.shuffleReplicas {
		replicas = shuffleHosts(replicas)
	}

	var (
		fallbackIter NextHost
		i            int
	)

	// used tracks replicas already returned so the fallback pass can skip them
	used := make(map[*HostInfo]bool, len(replicas))
	return func() SelectedHost {
		for i < len(replicas) {
			h := replicas[i]
			i++
			// only hand out replicas that are up and local to the fallback policy
			if h.IsUp() && t.fallback.IsLocal(h) {
				used[h] = true
				return (*selectedHost)(h)
			}
		}

		if fallbackIter == nil {
			// fallback
			fallbackIter = t.fallback.Pick(qry)
		}

		// filter the token aware selected hosts from the fallback hosts
		for fallbackHost := fallbackIter(); fallbackHost != nil; fallbackHost = fallbackIter() {
			if !used[fallbackHost.Info()] {
				return fallbackHost
			}
		}
		return nil
	}
}
// HostPoolHostPolicy is a host policy which uses the bitly/go-hostpool library
// to distribute queries between hosts and prevent sending queries to
// unresponsive hosts. When creating the host pool that is passed to the policy
// use an empty slice of hosts as the hostpool will be populated later by gocql.
// See below for examples of usage:
//
//	// Create host selection policy using a simple host pool
//	cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(hostpool.New(nil))
//
//	// Create host selection policy using an epsilon greedy pool
//	cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(
//	    hostpool.NewEpsilonGreedy(nil, 0, &hostpool.LinearEpsilonValueCalculator{}),
//	)
func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy {
	return &hostPoolHostPolicy{hostMap: map[string]*HostInfo{}, hp: hp}
}
// hostPoolHostPolicy delegates host choice to a hostpool.HostPool, keeping a
// map from connect-address string to HostInfo alongside it.
type hostPoolHostPolicy struct {
	hp      hostpool.HostPool
	mu      sync.RWMutex // guards hostMap
	hostMap map[string]*HostInfo
}
// Init is a no-op; this policy needs no session.
func (r *hostPoolHostPolicy) Init(*Session) {}

// KeyspaceChanged is a no-op; this policy keeps no keyspace state.
func (r *hostPoolHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {}

// SetPartitioner is a no-op; this policy is not token aware.
func (r *hostPoolHostPolicy) SetPartitioner(string) {}

// IsLocal treats every host as local.
func (r *hostPoolHostPolicy) IsLocal(*HostInfo) bool { return true }
  528. func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) {
  529. peers := make([]string, len(hosts))
  530. hostMap := make(map[string]*HostInfo, len(hosts))
  531. for i, host := range hosts {
  532. ip := host.ConnectAddress().String()
  533. peers[i] = ip
  534. hostMap[ip] = host
  535. }
  536. r.mu.Lock()
  537. r.hp.SetHosts(peers)
  538. r.hostMap = hostMap
  539. r.mu.Unlock()
  540. }
  541. func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
  542. ip := host.ConnectAddress().String()
  543. r.mu.Lock()
  544. defer r.mu.Unlock()
  545. // If the host addr is present and isn't nil return
  546. if h, ok := r.hostMap[ip]; ok && h != nil {
  547. return
  548. }
  549. // otherwise, add the host to the map
  550. r.hostMap[ip] = host
  551. // and construct a new peer list to give to the HostPool
  552. hosts := make([]string, 0, len(r.hostMap))
  553. for addr := range r.hostMap {
  554. hosts = append(hosts, addr)
  555. }
  556. r.hp.SetHosts(hosts)
  557. }
  558. func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) {
  559. ip := host.ConnectAddress().String()
  560. r.mu.Lock()
  561. defer r.mu.Unlock()
  562. if _, ok := r.hostMap[ip]; !ok {
  563. return
  564. }
  565. delete(r.hostMap, ip)
  566. hosts := make([]string, 0, len(r.hostMap))
  567. for _, host := range r.hostMap {
  568. hosts = append(hosts, host.ConnectAddress().String())
  569. }
  570. r.hp.SetHosts(hosts)
  571. }
// HostUp re-adds a host that has come back up.
func (r *hostPoolHostPolicy) HostUp(host *HostInfo) {
	r.AddHost(host)
}

// HostDown removes a host that went down.
func (r *hostPoolHostPolicy) HostDown(host *HostInfo) {
	r.RemoveHost(host)
}
  578. func (r *hostPoolHostPolicy) Pick(qry ExecutableQuery) NextHost {
  579. return func() SelectedHost {
  580. r.mu.RLock()
  581. defer r.mu.RUnlock()
  582. if len(r.hostMap) == 0 {
  583. return nil
  584. }
  585. hostR := r.hp.Get()
  586. host, ok := r.hostMap[hostR.Host()]
  587. if !ok {
  588. return nil
  589. }
  590. return selectedHostPoolHost{
  591. policy: r,
  592. info: host,
  593. hostR: hostR,
  594. }
  595. }
  596. }
// selectedHostPoolHost is a host returned by the hostPoolHostPolicy and
// implements the SelectedHost interface
type selectedHostPoolHost struct {
	policy *hostPoolHostPolicy
	info   *HostInfo
	hostR  hostpool.HostPoolResponse // pool handle used to report the outcome
}
// Info returns the picked host's information.
func (host selectedHostPoolHost) Info() *HostInfo {
	return host.info
}

// Mark reports the query outcome back to the host pool, unless the host has
// been removed from the policy since it was picked.
func (host selectedHostPoolHost) Mark(err error) {
	ip := host.info.ConnectAddress().String()

	host.policy.mu.RLock()
	defer host.policy.mu.RUnlock()

	if _, ok := host.policy.hostMap[ip]; !ok {
		// host was removed between pick and mark
		return
	}

	host.hostR.Mark(err)
}
// dcAwareRR round-robins over hosts in the local datacenter, falling back to
// remote hosts only when no local host is known.
type dcAwareRR struct {
	local       string // name of the local datacenter
	pos         uint32 // monotonically increasing pick counter (accessed atomically)
	mu          sync.RWMutex
	localHosts  cowHostList
	remoteHosts cowHostList
}
// DCAwareRoundRobinPolicy is a host selection policy which will prioritize and
// return hosts which are in the local datacenter before returning hosts in all
// other datacenters
func DCAwareRoundRobinPolicy(localDC string) HostSelectionPolicy {
	return &dcAwareRR{local: localDC}
}
// Init is a no-op; this policy needs no session.
func (d *dcAwareRR) Init(*Session) {}

// KeyspaceChanged is a no-op; this policy keeps no keyspace state.
func (d *dcAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {}

// SetPartitioner is a no-op; this policy is not token aware.
func (d *dcAwareRR) SetPartitioner(p string) {}

// IsLocal reports whether the host belongs to the configured local datacenter.
func (d *dcAwareRR) IsLocal(host *HostInfo) bool {
	return host.DataCenter() == d.local
}
  636. func (d *dcAwareRR) AddHost(host *HostInfo) {
  637. if host.DataCenter() == d.local {
  638. d.localHosts.add(host)
  639. } else {
  640. d.remoteHosts.add(host)
  641. }
  642. }
  643. func (d *dcAwareRR) RemoveHost(host *HostInfo) {
  644. if host.DataCenter() == d.local {
  645. d.localHosts.remove(host.ConnectAddress())
  646. } else {
  647. d.remoteHosts.remove(host.ConnectAddress())
  648. }
  649. }
// HostUp re-adds a host that has come back up.
func (d *dcAwareRR) HostUp(host *HostInfo) { d.AddHost(host) }

// HostDown removes a host that went down.
func (d *dcAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) }
// Pick returns an iterator that round-robins over the local-DC hosts, using
// remote hosts only when no local host is known at call time.
func (d *dcAwareRR) Pick(q ExecutableQuery) NextHost {
	var i int
	return func() SelectedHost {
		var hosts []*HostInfo
		localHosts := d.localHosts.get()
		remoteHosts := d.remoteHosts.get()
		if len(localHosts) != 0 {
			hosts = localHosts
		} else {
			hosts = remoteHosts
		}
		if len(hosts) == 0 {
			return nil
		}

		// always increment pos to evenly distribute traffic in case of
		// failures
		pos := atomic.AddUint32(&d.pos, 1) - 1
		// NOTE(review): the iteration bound counts both lists, but only one
		// list is ever drawn from, so hosts can be yielded more than once
		// before the iterator stops — confirm this is intended.
		if i >= len(localHosts)+len(remoteHosts) {
			return nil
		}
		host := hosts[(pos)%uint32(len(hosts))]
		i++
		return (*selectedHost)(host)
	}
}
// ConvictionPolicy interface is used by gocql to determine if a host should be
// marked as DOWN based on the error and host info
type ConvictionPolicy interface {
	// Implementations should return `true` if the host should be convicted, `false` otherwise.
	AddFailure(error error, host *HostInfo) bool
	//Implementations should clear out any convictions or state regarding the host.
	Reset(host *HostInfo)
}
// SimpleConvictionPolicy implements a ConvictionPolicy which convicts all hosts
// regardless of error
type SimpleConvictionPolicy struct {
}

// AddFailure always convicts the host, whatever the error.
func (e *SimpleConvictionPolicy) AddFailure(error error, host *HostInfo) bool {
	return true
}

// Reset is a no-op; this policy keeps no per-host state.
func (e *SimpleConvictionPolicy) Reset(host *HostInfo) {}
// ReconnectionPolicy interface is used by gocql to determine if reconnection
// can be attempted after connection error. The interface allows gocql users
// to implement their own logic to determine how to attempt reconnection.
type ReconnectionPolicy interface {
	// GetInterval returns how long to wait before the given retry attempt.
	GetInterval(currentRetry int) time.Duration
	// GetMaxRetries returns the maximum number of reconnection attempts.
	GetMaxRetries() int
}
// ConstantReconnectionPolicy has simple logic for returning a fixed reconnection interval.
//
// Examples of usage:
//
//	cluster.ReconnectionPolicy = &gocql.ConstantReconnectionPolicy{MaxRetries: 10, Interval: 8 * time.Second}
type ConstantReconnectionPolicy struct {
	MaxRetries int
	Interval   time.Duration
}

// GetInterval returns the fixed interval, independent of the retry number.
func (c *ConstantReconnectionPolicy) GetInterval(currentRetry int) time.Duration {
	return c.Interval
}

// GetMaxRetries returns the configured maximum number of retries.
func (c *ConstantReconnectionPolicy) GetMaxRetries() int {
	return c.MaxRetries
}
// ExponentialReconnectionPolicy returns a growing reconnection interval.
type ExponentialReconnectionPolicy struct {
	MaxRetries      int
	InitialInterval time.Duration // base interval the backoff grows from
}
  722. func (e *ExponentialReconnectionPolicy) GetInterval(currentRetry int) time.Duration {
  723. return getExponentialTime(e.InitialInterval, math.MaxInt16*time.Second, e.GetMaxRetries())
  724. }
// GetMaxRetries returns the configured maximum number of retries.
func (e *ExponentialReconnectionPolicy) GetMaxRetries() int {
	return e.MaxRetries
}