connectionpool.go

// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocql

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net"
	"sync"
	"time"
)

// SetHosts is implemented by components that need to be told about the
// current set of hosts in the cluster.
type SetHosts interface {
	SetHosts(hosts []HostInfo)
}

// SetPartitioner is implemented by components that need to be told which
// partitioner the cluster is using.
type SetPartitioner interface {
	SetPartitioner(partitioner string)
}

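// Both interfaces carry cluster metadata to interested components; in this
// file the HostSelectionPolicy held by policyConnPool receives both calls
// (see SetHosts and SetPartitioner on policyConnPool below). A minimal no-op
// implementation, shown only as a hypothetical sketch, needs just the two
// methods:
//
//	type noopMetadataSink struct{}
//
//	func (noopMetadataSink) SetHosts(hosts []HostInfo)         {}
//	func (noopMetadataSink) SetPartitioner(partitioner string) {}
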
func setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) {
	// the CA cert is optional
	if sslOpts.CaPath != "" {
		if sslOpts.RootCAs == nil {
			sslOpts.RootCAs = x509.NewCertPool()
		}

		pem, err := ioutil.ReadFile(sslOpts.CaPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to open CA certs: %v", err)
		}

		if !sslOpts.RootCAs.AppendCertsFromPEM(pem) {
			return nil, errors.New("connectionpool: failed parsing CA certs")
		}
	}

	if sslOpts.CertPath != "" || sslOpts.KeyPath != "" {
		mycert, err := tls.LoadX509KeyPair(sslOpts.CertPath, sslOpts.KeyPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to load X509 key pair: %v", err)
		}
		sslOpts.Certificates = append(sslOpts.Certificates, mycert)
	}

	sslOpts.InsecureSkipVerify = !sslOpts.EnableHostVerification

	return &sslOpts.Config, nil
}

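// Illustrative sketch (not part of this file's logic): a cluster configured
// roughly as below makes connConfig call setupTLSConfig and attach the
// resulting *tls.Config to every connection; the certificate paths are
// placeholders.
//
//	cluster := NewCluster("192.168.1.1")
//	cluster.SslOpts = &SslOptions{
//		CertPath:               "/path/to/client.crt",
//		KeyPath:                "/path/to/client.key",
//		CaPath:                 "/path/to/ca.crt",
//		EnableHostVerification: true,
//	}
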
type policyConnPool struct {
	session       *Session
	port          int
	numConns      int
	connCfg       *ConnConfig
	keyspace      string
	mu            sync.RWMutex
	hostPolicy    HostSelectionPolicy
	connPolicy    func() ConnSelectionPolicy
	hostConnPools map[string]*hostConnPool
}

func connConfig(session *Session) (*ConnConfig, error) {
	cfg := session.cfg

	var (
		err       error
		tlsConfig *tls.Config
	)

	// TODO(zariel): move tls config setup into session init.
	if cfg.SslOpts != nil {
		tlsConfig, err = setupTLSConfig(cfg.SslOpts)
		if err != nil {
			return nil, err
		}
	}

	return &ConnConfig{
		ProtoVersion:  cfg.ProtoVersion,
		CQLVersion:    cfg.CQLVersion,
		Timeout:       cfg.Timeout,
		NumStreams:    cfg.NumStreams,
		Compressor:    cfg.Compressor,
		Authenticator: cfg.Authenticator,
		Keepalive:     cfg.SocketKeepalive,
		tlsConfig:     tlsConfig,
	}, nil
}

func newPolicyConnPool(session *Session, hostPolicy HostSelectionPolicy,
	connPolicy func() ConnSelectionPolicy) (*policyConnPool, error) {

	connCfg, err := connConfig(session)
	if err != nil {
		return nil, err
	}

	// create the pool
	pool := &policyConnPool{
		session:       session,
		port:          session.cfg.Port,
		numConns:      session.cfg.NumConns,
		connCfg:       connCfg,
		keyspace:      session.cfg.Keyspace,
		hostPolicy:    hostPolicy,
		connPolicy:    connPolicy,
		hostConnPools: map[string]*hostConnPool{},
	}

	// TODO(zariel): fetch this from session metadata.
	hosts := make([]HostInfo, len(session.cfg.Hosts))
	for i, hostAddr := range session.cfg.Hosts {
		hosts[i].Peer = hostAddr
	}

	pool.SetHosts(hosts)

	return pool, nil
}

func (p *policyConnPool) SetHosts(hosts []HostInfo) {
	p.mu.Lock()
	defer p.mu.Unlock()

	toRemove := make(map[string]struct{})
	for addr := range p.hostConnPools {
		toRemove[addr] = struct{}{}
	}

	// TODO connect to hosts in parallel, but wait for pools to be
	// created before returning
	for i := range hosts {
		pool, exists := p.hostConnPools[hosts[i].Peer]
		if !exists {
			// create a connection pool for the host
			pool = newHostConnPool(
				p.session,
				hosts[i].Peer,
				p.port,
				p.numConns,
				p.connCfg,
				p.keyspace,
				p.connPolicy(),
			)
			p.hostConnPools[hosts[i].Peer] = pool
		} else {
			// still have this host, so don't remove it
			delete(toRemove, hosts[i].Peer)
		}
	}

	for addr := range toRemove {
		pool := p.hostConnPools[addr]
		delete(p.hostConnPools, addr)
		pool.Close()
	}

	// update the policy
	p.hostPolicy.SetHosts(hosts)
}

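// The reconciliation above is a plain set-difference pass: every existing
// pool is first marked for removal, hosts that are still present are
// unmarked (or get a new pool), and whatever stays marked is closed. For
// example, with existing pools for {A, B} and an incoming host list of
// {B, C}: B is unmarked, a pool is created for C, and the final sweep closes
// and drops A.
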
func (p *policyConnPool) SetPartitioner(partitioner string) {
	p.hostPolicy.SetPartitioner(partitioner)
}

func (p *policyConnPool) Size() int {
	p.mu.RLock()
	count := 0
	for _, pool := range p.hostConnPools {
		count += pool.Size()
	}
	p.mu.RUnlock()

	return count
}

func (p *policyConnPool) Pick(qry *Query) (SelectedHost, *Conn) {
	nextHost := p.hostPolicy.Pick(qry)

	var (
		host SelectedHost
		conn *Conn
	)

	p.mu.RLock()
	defer p.mu.RUnlock()
	for conn == nil {
		host = nextHost()
		if host == nil {
			break
		} else if host.Info() == nil {
			panic(fmt.Sprintf("policy %T returned no host info: %+v", p.hostPolicy, host))
		}

		pool, ok := p.hostConnPools[host.Info().Peer]
		if !ok {
			continue
		}

		conn = pool.Pick(qry)
	}
	return host, conn
}

func (p *policyConnPool) Close() {
	p.mu.Lock()
	defer p.mu.Unlock()

	// remove the hosts from the policy
	p.hostPolicy.SetHosts([]HostInfo{})

	// close the pools
	for addr, pool := range p.hostConnPools {
		delete(p.hostConnPools, addr)
		pool.Close()
	}
}

func (p *policyConnPool) addHost(host *HostInfo) {
	p.mu.Lock()
	defer p.mu.Unlock()

	pool, ok := p.hostConnPools[host.Peer]
	if ok {
		return
	}

	pool = newHostConnPool(
		p.session,
		host.Peer,
		p.port,
		p.numConns,
		p.connCfg,
		p.keyspace,
		p.connPolicy(),
	)

	p.hostConnPools[host.Peer] = pool
}

func (p *policyConnPool) removeHost(addr string) {
	p.hostPolicy.RemoveHost(addr)

	p.mu.Lock()
	pool, ok := p.hostConnPools[addr]
	if !ok {
		p.mu.Unlock()
		return
	}

	delete(p.hostConnPools, addr)
	p.mu.Unlock()

	pool.Close()
}

func (p *policyConnPool) hostUp(host *HostInfo) {
	// TODO(zariel): have a set of up hosts and down hosts, we can internally
	// detect down hosts, then try to reconnect to them.
	p.addHost(host)
}

func (p *policyConnPool) hostDown(addr string) {
	// TODO(zariel): mark the host as down so we can try to connect to it later; for
	// now just treat it as removed.
	p.removeHost(addr)
}

// hostConnPool is a connection pool for a single host.
// Connection selection is based on a provided ConnSelectionPolicy.
type hostConnPool struct {
	session  *Session
	host     string
	port     int
	addr     string
	size     int
	connCfg  *ConnConfig
	keyspace string
	policy   ConnSelectionPolicy
	// protection for conns, closed, filling
	mu      sync.RWMutex
	conns   []*Conn
	closed  bool
	filling bool
}

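// The ConnSelectionPolicy above is exercised through exactly two calls in
// this file: SetConns whenever the conns slice changes, and Pick when a query
// needs a connection. A minimal round-robin sketch (hypothetical, not the
// policy shipped with the package) could look like:
//
//	type rrConnPolicy struct {
//		mu    sync.Mutex
//		conns []*Conn
//		pos   int
//	}
//
//	func (r *rrConnPolicy) SetConns(conns []*Conn) {
//		r.mu.Lock()
//		r.conns = conns
//		r.mu.Unlock()
//	}
//
//	func (r *rrConnPolicy) Pick(qry *Query) *Conn {
//		r.mu.Lock()
//		defer r.mu.Unlock()
//		if len(r.conns) == 0 {
//			return nil
//		}
//		conn := r.conns[r.pos%len(r.conns)]
//		r.pos++
//		return conn
//	}
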
func newHostConnPool(session *Session, host string, port, size int, connCfg *ConnConfig,
	keyspace string, policy ConnSelectionPolicy) *hostConnPool {

	pool := &hostConnPool{
		session:  session,
		host:     host,
		port:     port,
		addr:     JoinHostPort(host, port),
		size:     size,
		connCfg:  connCfg,
		keyspace: keyspace,
		policy:   policy,
		conns:    make([]*Conn, 0, size),
		filling:  false,
		closed:   false,
	}

	// fill the pool with the initial connections before returning
	pool.fill()

	return pool
}

// Pick a connection from this connection pool for the given query.
func (pool *hostConnPool) Pick(qry *Query) *Conn {
	pool.mu.RLock()
	if pool.closed {
		pool.mu.RUnlock()
		return nil
	}

	empty := len(pool.conns) == 0
	pool.mu.RUnlock()

	if empty {
		// try to fill the empty pool
		go pool.fill()
		return nil
	}

	return pool.policy.Pick(qry)
}

// Size returns the number of connections currently active in the pool.
func (pool *hostConnPool) Size() int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return len(pool.conns)
}

// Close the connection pool.
func (pool *hostConnPool) Close() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		return
	}
	pool.closed = true

	// drain, but don't wait
	go pool.drain()
}

// Fill the connection pool.
func (pool *hostConnPool) fill() {
	pool.mu.RLock()
	// avoid filling a closed pool, or concurrent filling
	if pool.closed || pool.filling {
		pool.mu.RUnlock()
		return
	}

	// determine the filling work to be done
	startCount := len(pool.conns)
	fillCount := pool.size - startCount

	// avoid filling a full (or overfull) pool
	if fillCount <= 0 {
		pool.mu.RUnlock()
		return
	}

	// switch from read to write lock
	pool.mu.RUnlock()
	pool.mu.Lock()

	// double check everything since the lock was released
	startCount = len(pool.conns)
	fillCount = pool.size - startCount
	if pool.closed || pool.filling || fillCount <= 0 {
		// looks like another goroutine already beat this
		// goroutine to the filling
		pool.mu.Unlock()
		return
	}

	// ok, fill the pool
	pool.filling = true

	// allow others to access the pool while filling
	pool.mu.Unlock()
	// only this goroutine should make calls to fill/empty the pool at this
	// point, until this goroutine (or one it starts) calls fillingStopped

	// fill only the first connection synchronously
	if startCount == 0 {
		err := pool.connect()
		pool.logConnectErr(err)

		if err != nil {
			// probably unreachable host
			go pool.fillingStopped()
			return
		}

		// filled one
		fillCount--

		// connect the remaining connections to this host synchronously
		for fillCount > 0 {
			err := pool.connect()
			pool.logConnectErr(err)

			// decrement, even on error
			fillCount--
		}

		go pool.fillingStopped()
		return
	}

	// fill the rest of the pool asynchronously
	go func() {
		for fillCount > 0 {
			err := pool.connect()
			pool.logConnectErr(err)

			// decrement, even on error
			fillCount--
		}

		// mark the end of filling
		pool.fillingStopped()
	}()
}

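// The locking at the top of fill, in isolation (a minimal sketch with a
// hypothetical guard flag, not additional pool state): sync.RWMutex offers no
// atomic upgrade from a read lock to a write lock, so everything checked
// under RLock has to be re-checked once the write lock is held.
//
//	mu.RLock()
//	if busy { // cheap check, shared with other readers
//		mu.RUnlock()
//		return
//	}
//	mu.RUnlock()
//
//	mu.Lock()
//	if busy { // another goroutine may have claimed the work in between
//		mu.Unlock()
//		return
//	}
//	busy = true // claim the work, then release the lock while doing it
//	mu.Unlock()
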
func (pool *hostConnPool) logConnectErr(err error) {
	if opErr, ok := err.(*net.OpError); ok && (opErr.Op == "dial" || opErr.Op == "read") {
		// connection refused
		// these are typical during a node outage so avoid log spam.
	} else if err != nil {
		// unexpected error
		log.Printf("error: failed to connect to %s due to error: %v", pool.addr, err)
	}
}

// transition back to a not-filling state.
func (pool *hostConnPool) fillingStopped() {
	// wait for some time to avoid back-to-back filling
	// this provides some time between failed attempts
	// to fill the pool for the host to recover
	time.Sleep(time.Duration(rand.Int31n(100)+31) * time.Millisecond)

	pool.mu.Lock()
	pool.filling = false
	pool.mu.Unlock()
}

// create a new connection to the host and add it to the pool
func (pool *hostConnPool) connect() error {
	// try to connect
	conn, err := Connect(pool.addr, pool.connCfg, pool, pool.session)
	if err != nil {
		return err
	}

	if pool.keyspace != "" {
		// set the keyspace
		if err := conn.UseKeyspace(pool.keyspace); err != nil {
			conn.Close()
			return err
		}
	}

	// add the Conn to the pool
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		conn.Close()
		return nil
	}

	pool.conns = append(pool.conns, conn)
	pool.policy.SetConns(pool.conns)
	return nil
}

// handle any error from a Conn
func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
	if !closed {
		// still an open connection, so continue using it
		return
	}

	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		// pool closed
		return
	}

	// find the connection index
	for i, candidate := range pool.conns {
		if candidate == conn {
			// remove the connection, not preserving order
			pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1]

			// update the policy
			pool.policy.SetConns(pool.conns)

			// lost a connection, so fill the pool
			go pool.fill()
			break
		}
	}
}

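// The slice manipulation in HandleError is the usual "swap with the last
// element, then shrink" removal idiom; order is not preserved, which is fine
// here because the policy is handed the whole slice again via SetConns. In
// isolation (hypothetical slice s and index i):
//
//	s[i] = s[len(s)-1] // overwrite the removed slot with the last element
//	s = s[:len(s)-1]   // drop the now-duplicated last element
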
// removes and closes all connections from the pool
func (pool *hostConnPool) drain() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// empty the pool
	conns := pool.conns
	pool.conns = pool.conns[:0]

	// update the policy
	pool.policy.SetConns(pool.conns)

	// close the connections
	for _, conn := range conns {
		conn.Close()
	}
}