package gocql

import (
	"fmt"
	"strconv"
	"strings"
)
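
// placementStrategy abstracts a keyspace replication strategy: it reports the
// replication factor for a data center and maps each token to its replica hosts.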
type placementStrategy interface {
	replicaMap(hosts []*HostInfo, tokens []hostToken) map[token][]*HostInfo
	replicationFactor(dc string) int
}
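
// getReplicationFactorFromOpts parses a replication_factor strategy option,
// which may arrive as an int or a numeric string, and panics if it is invalid.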
func getReplicationFactorFromOpts(keyspace string, val interface{}) int {
	// TODO: we don't really want to panic here, but it is better
	// than spamming
	switch v := val.(type) {
	case int:
		if v <= 0 {
			panic(fmt.Sprintf("invalid replication_factor %d. Is the %q keyspace configured correctly?", v, keyspace))
		}
		return v
	case string:
		n, err := strconv.Atoi(v)
		if err != nil {
			panic(fmt.Sprintf("invalid replication_factor. Is the %q keyspace configured correctly? %v", keyspace, err))
		} else if n <= 0 {
			panic(fmt.Sprintf("invalid replication_factor %d. Is the %q keyspace configured correctly?", n, keyspace))
		}
		return n
	default:
		panic(fmt.Sprintf("unknown replication_factor type %T", v))
	}
}
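
// getStrategy builds a placementStrategy from keyspace metadata; it understands
// SimpleStrategy and NetworkTopologyStrategy and panics on anything else.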
func getStrategy(ks *KeyspaceMetadata) placementStrategy {
	switch {
	case strings.Contains(ks.StrategyClass, "SimpleStrategy"):
		return &simpleStrategy{rf: getReplicationFactorFromOpts(ks.Name, ks.StrategyOptions["replication_factor"])}
	case strings.Contains(ks.StrategyClass, "NetworkTopologyStrategy"):
		dcs := make(map[string]int)
		for dc, rf := range ks.StrategyOptions {
			if dc == "class" {
				continue
			}
			dcs[dc] = getReplicationFactorFromOpts(ks.Name+":dc="+dc, rf)
		}
		return &networkTopology{dcs: dcs}
	default:
		// TODO: handle unknown replicas and just return the primary host for a token
		panic(fmt.Sprintf("unsupported strategy class: %v", ks.StrategyClass))
	}
}
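
// simpleStrategy places replicas on consecutive ring positions, ignoring data
// center and rack placement.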
type simpleStrategy struct {
	rf int
}
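
// replicationFactor returns the same factor for every data center.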
func (s *simpleStrategy) replicationFactor(dc string) int {
	return s.rf
}
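
// replicaMap assigns each token the hosts owning the next rf ring positions,
// starting at the token itself and walking clockwise.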
func (s *simpleStrategy) replicaMap(_ []*HostInfo, tokens []hostToken) map[token][]*HostInfo {
	tokenRing := make(map[token][]*HostInfo, len(tokens))

	for i, th := range tokens {
		replicas := make([]*HostInfo, 0, s.rf)
		for j := 0; j < len(tokens) && len(replicas) < s.rf; j++ {
			// TODO: need to ensure we don't add the same hosts twice
			h := tokens[(i+j)%len(tokens)]
			replicas = append(replicas, h.host)
		}
		tokenRing[th.token] = replicas
	}

	return tokenRing
}
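
// networkTopology holds the per-data-center replication factors configured for
// a NetworkTopologyStrategy keyspace.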
type networkTopology struct {
	dcs map[string]int
}
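
// replicationFactor returns the factor configured for dc, or zero if the data
// center is not present in the strategy options.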
func (n *networkTopology) replicationFactor(dc string) int {
	return n.dcs[dc]
}
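
// haveRF reports whether every data center has reached its configured
// replication factor.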
func (n *networkTopology) haveRF(replicaCounts map[string]int) bool {
	if len(replicaCounts) != len(n.dcs) {
		return false
	}

	for dc, rf := range n.dcs {
		if rf != replicaCounts[dc] {
			return false
		}
	}

	return true
}
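
// replicaMap walks the ring once per token and picks replicas per data center,
// spreading them across racks where possible.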
func (n *networkTopology) replicaMap(hosts []*HostInfo, tokens []hostToken) map[token][]*HostInfo {
	dcRacks := make(map[string]map[string]struct{})
	for _, h := range hosts {
		dc := h.DataCenter()
		rack := h.Rack()

		racks, ok := dcRacks[dc]
		if !ok {
			racks = make(map[string]struct{})
			dcRacks[dc] = racks
		}
		racks[rack] = struct{}{}
	}

	tokenRing := make(map[token][]*HostInfo, len(tokens))

	var totalRF int
	for _, rf := range n.dcs {
		totalRF += rf
	}
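
	// For each token, walk the ring clockwise from that token and collect hosts
	// until every data center has reached its replication factor.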
	for i, th := range tokens {
		// number of replicas per dc
		// TODO: recycle these
		replicasInDC := make(map[string]int, len(n.dcs))
		// dc -> racks
		seenDCRacks := make(map[string]map[string]struct{}, len(n.dcs))
		// skipped hosts in a dc
		skipped := make(map[string][]*HostInfo, len(n.dcs))

		replicas := make([]*HostInfo, 0, totalRF)
		for j := 0; j < len(tokens) && !n.haveRF(replicasInDC); j++ {
			// TODO: ensure we don't add the same host twice
			h := tokens[(i+j)%len(tokens)].host

			dc := h.DataCenter()
			rack := h.Rack()

			rf, ok := n.dcs[dc]
			if !ok {
				// skip this DC, we don't know about it
				continue
			} else if replicasInDC[dc] >= rf {
				if replicasInDC[dc] > rf {
					panic(fmt.Sprintf("replica overflow. rf=%d have=%d in dc %q", rf, replicasInDC[dc], dc))
				}

				// have enough replicas in this DC
				continue
			} else if _, ok := dcRacks[dc][rack]; !ok {
				// don't know about this rack
				continue
			} else if len(replicas) >= totalRF {
				if len(replicas) > totalRF {
  132. panic(fmt.Sprintf("replica overflow. total rf=%d have=%d", totalRF, len(replicas)))
  133. }
  134. // we now have enough replicas
  135. break
  136. }
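
			// Prefer racks not yet used for this data center; hold back hosts
			// from already-seen racks in case the data center runs out of new
			// racks before reaching its replication factor.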
			racks := seenDCRacks[dc]
			if _, ok := racks[rack]; ok && len(racks) == len(dcRacks[dc]) {
				// we have been through all the racks and don't have RF yet, add this
				replicas = append(replicas, h)
				replicasInDC[dc]++
			} else if !ok {
				if racks == nil {
					racks = make(map[string]struct{}, 1)
					seenDCRacks[dc] = racks
				}

				// new rack
				racks[rack] = struct{}{}
				replicas = append(replicas, h)
				replicasInDC[dc]++

				if len(racks) == len(dcRacks[dc]) {
					// if we have been through all the racks, drain the rest of the
					// skipped hosts until we have RF. The next iteration will skip
					// in the block above.
					skippedHosts := skipped[dc]
					var k int
					for ; k < len(skippedHosts) && replicasInDC[dc] < rf; k++ {
						sh := skippedHosts[k]
						replicas = append(replicas, sh)
						replicasInDC[dc]++
					}
					skipped[dc] = skippedHosts[k:]
				}
			} else {
				// already seen this rack, keep hold of this host in case
				// we don't get enough for rf
				skipped[dc] = append(skipped[dc], h)
			}
		}

		if len(replicas) == 0 || replicas[0] != th.host {
			panic("first replica is not the primary replica for the token")
		}

		tokenRing[th.token] = replicas
	}

	if len(tokenRing) != len(tokens) {
		panic(fmt.Sprintf("token map different size to token ring: got %d expected %d", len(tokenRing), len(tokens)))
	}

	return tokenRing
}