topology.go 5.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208
  1. package gocql
  2. import (
  3. "fmt"
  4. "strconv"
  5. "strings"
  6. )
// placementStrategy computes, for a given token ring, which hosts hold
// replicas of each token, mirroring a Cassandra replication strategy.
type placementStrategy interface {
	// replicaMap returns, for every token in the ring, the list of hosts
	// holding a replica of that token; the first entry is the primary.
	replicaMap(hosts []*HostInfo, tokens []hostToken) map[token][]*HostInfo
	// replicationFactor returns the replication factor configured for the
	// given datacenter (dc may be ignored by DC-agnostic strategies).
	replicationFactor(dc string) int
}
  11. func getReplicationFactorFromOpts(keyspace string, val interface{}) int {
  12. // TODO: dont really want to panic here, but is better
  13. // than spamming
  14. switch v := val.(type) {
  15. case int:
  16. if v <= 0 {
  17. panic(fmt.Sprintf("invalid replication_factor %d. Is the %q keyspace configured correctly?", v, keyspace))
  18. }
  19. return v
  20. case string:
  21. n, err := strconv.Atoi(v)
  22. if err != nil {
  23. panic(fmt.Sprintf("invalid replication_factor. Is the %q keyspace configured correctly? %v", keyspace, err))
  24. } else if n <= 0 {
  25. panic(fmt.Sprintf("invalid replication_factor %d. Is the %q keyspace configured correctly?", n, keyspace))
  26. }
  27. return n
  28. default:
  29. panic(fmt.Sprintf("unkown replication_factor type %T", v))
  30. }
  31. }
  32. func getStrategy(ks *KeyspaceMetadata) placementStrategy {
  33. switch {
  34. case strings.Contains(ks.StrategyClass, "SimpleStrategy"):
  35. return &simpleStrategy{rf: getReplicationFactorFromOpts(ks.Name, ks.StrategyOptions["replication_factor"])}
  36. case strings.Contains(ks.StrategyClass, "NetworkTopologyStrategy"):
  37. dcs := make(map[string]int)
  38. for dc, rf := range ks.StrategyOptions {
  39. dcs[dc] = getReplicationFactorFromOpts(ks.Name+":dc="+dc, rf)
  40. }
  41. return &networkTopology{dcs: dcs}
  42. default:
  43. // TODO: handle unknown replicas and just return the primary host for a token
  44. panic(fmt.Sprintf("unsupported strategy class: %v", ks.StrategyClass))
  45. }
  46. }
// simpleStrategy mirrors Cassandra's SimpleStrategy: replicas for a token
// are taken from consecutive ring positions, ignoring DC and rack.
type simpleStrategy struct {
	rf int // replication factor applied to every token
}
// replicationFactor returns the keyspace-wide replication factor; dc is
// ignored because SimpleStrategy is topology-unaware.
func (s *simpleStrategy) replicationFactor(dc string) int {
	return s.rf
}
  53. func (s *simpleStrategy) replicaMap(_ []*HostInfo, tokens []hostToken) map[token][]*HostInfo {
  54. tokenRing := make(map[token][]*HostInfo, len(tokens))
  55. for i, th := range tokens {
  56. replicas := make([]*HostInfo, 0, s.rf)
  57. for j := 0; j < len(tokens) && len(replicas) < s.rf; j++ {
  58. // TODO: need to ensure we dont add the same hosts twice
  59. h := tokens[(i+j)%len(tokens)]
  60. replicas = append(replicas, h.host)
  61. }
  62. tokenRing[th.token] = replicas
  63. }
  64. return tokenRing
  65. }
// networkTopology mirrors Cassandra's NetworkTopologyStrategy: each
// datacenter carries its own replication factor, and replicas within a DC
// are spread across distinct racks where possible.
type networkTopology struct {
	dcs map[string]int // datacenter name -> replication factor
}
// replicationFactor returns the replication factor configured for dc, or 0
// when the datacenter is unknown to this strategy.
func (n *networkTopology) replicationFactor(dc string) int {
	return n.dcs[dc]
}
  72. func (n *networkTopology) haveRF(replicaCounts map[string]int) bool {
  73. if len(replicaCounts) != len(n.dcs) {
  74. return false
  75. }
  76. for dc, rf := range n.dcs {
  77. if rf != replicaCounts[dc] {
  78. return false
  79. }
  80. }
  81. return true
  82. }
  83. func (n *networkTopology) replicaMap(hosts []*HostInfo, tokens []hostToken) map[token][]*HostInfo {
  84. dcRacks := make(map[string]map[string]struct{})
  85. for _, h := range hosts {
  86. dc := h.DataCenter()
  87. rack := h.Rack()
  88. racks, ok := dcRacks[dc]
  89. if !ok {
  90. racks = make(map[string]struct{})
  91. dcRacks[dc] = racks
  92. }
  93. racks[rack] = struct{}{}
  94. }
  95. tokenRing := make(map[token][]*HostInfo, len(tokens))
  96. var totalRF int
  97. for _, rf := range n.dcs {
  98. totalRF += rf
  99. }
  100. for i, th := range tokens {
  101. // number of replicas per dc
  102. // TODO: recycle these
  103. replicasInDC := make(map[string]int, len(n.dcs))
  104. // dc -> racks
  105. seenDCRacks := make(map[string]map[string]struct{}, len(n.dcs))
  106. // skipped hosts in a dc
  107. skipped := make(map[string][]*HostInfo, len(n.dcs))
  108. replicas := make([]*HostInfo, 0, totalRF)
  109. for j := 0; j < len(tokens) && !n.haveRF(replicasInDC); j++ {
  110. // TODO: ensure we dont add the same host twice
  111. h := tokens[(i+j)%len(tokens)].host
  112. dc := h.DataCenter()
  113. rack := h.Rack()
  114. rf, ok := n.dcs[dc]
  115. if !ok {
  116. // skip this DC, dont know about it
  117. continue
  118. } else if replicasInDC[dc] >= rf {
  119. if replicasInDC[dc] > rf {
  120. panic(fmt.Sprintf("replica overflow. rf=%d have=%d in dc %q", rf, replicasInDC[dc], dc))
  121. }
  122. // have enough replicas in this DC
  123. continue
  124. } else if _, ok := dcRacks[dc][rack]; !ok {
  125. // dont know about this rack
  126. continue
  127. } else if len(replicas) >= totalRF {
  128. if replicasInDC[dc] > rf {
  129. panic(fmt.Sprintf("replica overflow. total rf=%d have=%d", totalRF, len(replicas)))
  130. }
  131. // we now have enough replicas
  132. break
  133. }
  134. racks := seenDCRacks[dc]
  135. if _, ok := racks[rack]; ok && len(racks) == len(dcRacks[dc]) {
  136. // we have been through all the racks and dont have RF yet, add this
  137. replicas = append(replicas, h)
  138. replicasInDC[dc]++
  139. } else if !ok {
  140. if racks == nil {
  141. racks = make(map[string]struct{}, 1)
  142. seenDCRacks[dc] = racks
  143. }
  144. // new rack
  145. racks[rack] = struct{}{}
  146. replicas = append(replicas, h)
  147. replicasInDC[dc]++
  148. if len(racks) == len(dcRacks[dc]) {
  149. // if we have been through all the racks, drain the rest of the skipped
  150. // hosts until we have RF. The next iteration will skip in the block
  151. // above
  152. skippedHosts := skipped[dc]
  153. var k int
  154. for ; k < len(skippedHosts) && replicasInDC[dc] < rf; k++ {
  155. sh := skippedHosts[k]
  156. replicas = append(replicas, sh)
  157. replicasInDC[dc]++
  158. }
  159. skipped[dc] = skippedHosts[k:]
  160. }
  161. } else {
  162. // already seen this rack, keep hold of this host incase
  163. // we dont get enough for rf
  164. skipped[dc] = append(skipped[dc], h)
  165. }
  166. }
  167. if len(replicas) == 0 || replicas[0] != th.host {
  168. panic("first replica is not the primary replica for the token")
  169. }
  170. tokenRing[th.token] = replicas
  171. }
  172. if len(tokenRing) != len(tokens) {
  173. panic(fmt.Sprintf("token map different size to token ring: got %d expected %d", len(tokenRing), len(tokens)))
  174. }
  175. return tokenRing
  176. }