connectionpool.go

// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocql

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net"
	"sync"
	"time"
)

// SetHosts is the interface to implement to receive the host information.
type SetHosts interface {
	SetHosts(hosts []*HostInfo)
}

// SetPartitioner is the interface to implement to receive the partitioner value.
type SetPartitioner interface {
	SetPartitioner(partitioner string)
}
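
// setupTLSConfig builds a *tls.Config from the given SslOptions. The CA
// cert is optional; a client cert/key pair is loaded only when paths are
// provided. Note that this mutates sslOpts, including deriving
// InsecureSkipVerify from EnableHostVerification.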
func setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) {
	// ca cert is optional
	if sslOpts.CaPath != "" {
		if sslOpts.RootCAs == nil {
			sslOpts.RootCAs = x509.NewCertPool()
		}

		pem, err := ioutil.ReadFile(sslOpts.CaPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to open CA certs: %v", err)
		}

		if !sslOpts.RootCAs.AppendCertsFromPEM(pem) {
			return nil, errors.New("connectionpool: failed parsing of CA certs")
		}
	}

	if sslOpts.CertPath != "" || sslOpts.KeyPath != "" {
		mycert, err := tls.LoadX509KeyPair(sslOpts.CertPath, sslOpts.KeyPath)
		if err != nil {
			return nil, fmt.Errorf("connectionpool: unable to load X509 key pair: %v", err)
		}
		sslOpts.Certificates = append(sslOpts.Certificates, mycert)
	}

	sslOpts.InsecureSkipVerify = !sslOpts.EnableHostVerification

	return &sslOpts.Config, nil
}
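
// policyConnPool is the top-level connection pool for a session. It keeps
// one hostConnPool per known host, delegating host selection to a
// HostSelectionPolicy and per-host connection selection to
// ConnSelectionPolicy instances created via connPolicy.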
type policyConnPool struct {
	session *Session

	port     int
	numConns int
	keyspace string

	mu            sync.RWMutex
	hostPolicy    HostSelectionPolicy
	connPolicy    func() ConnSelectionPolicy
	hostConnPools map[string]*hostConnPool

	endpoints []string
}
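
// connConfig translates the session's cluster configuration into the
// ConnConfig used when dialing individual hosts, building the TLS config
// on demand when SslOpts is set.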
func connConfig(session *Session) (*ConnConfig, error) {
	cfg := session.cfg

	var (
		err       error
		tlsConfig *tls.Config
	)

	// TODO(zariel): move tls config setup into session init.
	if cfg.SslOpts != nil {
		tlsConfig, err = setupTLSConfig(cfg.SslOpts)
		if err != nil {
			return nil, err
		}
	}

	return &ConnConfig{
		ProtoVersion:  cfg.ProtoVersion,
		CQLVersion:    cfg.CQLVersion,
		Timeout:       cfg.Timeout,
		Compressor:    cfg.Compressor,
		Authenticator: cfg.Authenticator,
		Keepalive:     cfg.SocketKeepalive,
		tlsConfig:     tlsConfig,
	}, nil
}
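
// newPolicyConnPool creates an empty policyConnPool from the session
// configuration; per-host pools are created later via SetHosts/addHost.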
func newPolicyConnPool(session *Session, hostPolicy HostSelectionPolicy,
	connPolicy func() ConnSelectionPolicy) *policyConnPool {

	// create the pool
	pool := &policyConnPool{
		session:       session,
		port:          session.cfg.Port,
		numConns:      session.cfg.NumConns,
		keyspace:      session.cfg.Keyspace,
		hostPolicy:    hostPolicy,
		connPolicy:    connPolicy,
		hostConnPools: map[string]*hostConnPool{},
	}

	pool.endpoints = make([]string, len(session.cfg.Hosts))
	copy(pool.endpoints, session.cfg.Hosts)

	return pool
}
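
// SetHosts diffs the incoming host list against the existing per-host
// pools: new up hosts get a fresh hostConnPool, hosts that remain are
// kept, and pools for hosts no longer in the list are closed. Finally the
// host policy is updated with the new list.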
func (p *policyConnPool) SetHosts(hosts []*HostInfo) {
	p.mu.Lock()
	defer p.mu.Unlock()

	toRemove := make(map[string]struct{})
	for addr := range p.hostConnPools {
		toRemove[addr] = struct{}{}
	}

	// TODO connect to hosts in parallel, but wait for pools to be
	// created before returning
	for _, host := range hosts {
		pool, exists := p.hostConnPools[host.Peer()]
		if !exists && host.IsUp() {
			// create a connection pool for the host
			pool = newHostConnPool(
				p.session,
				host,
				p.port,
				p.numConns,
				p.keyspace,
				p.connPolicy(),
			)
			p.hostConnPools[host.Peer()] = pool
		} else {
			// still have this host, so don't remove it
			delete(toRemove, host.Peer())
		}
	}

	for addr := range toRemove {
		pool := p.hostConnPools[addr]
		delete(p.hostConnPools, addr)
		pool.Close()
	}

	// update the policy
	p.hostPolicy.SetHosts(hosts)
}

func (p *policyConnPool) SetPartitioner(partitioner string) {
	p.hostPolicy.SetPartitioner(partitioner)
}

func (p *policyConnPool) Size() int {
	p.mu.RLock()
	count := 0
	for _, pool := range p.hostConnPools {
		count += pool.Size()
	}
	p.mu.RUnlock()

	return count
}
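
// Pick asks the host policy for candidate hosts and returns the first
// host whose per-host pool yields a connection. It returns a nil Conn
// once the policy runs out of hosts.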
func (p *policyConnPool) Pick(qry *Query) (SelectedHost, *Conn) {
	nextHost := p.hostPolicy.Pick(qry)

	var (
		host SelectedHost
		conn *Conn
	)

	p.mu.RLock()
	defer p.mu.RUnlock()
	for conn == nil {
		host = nextHost()
		if host == nil {
			break
		} else if host.Info() == nil {
			panic(fmt.Sprintf("policy %T returned no host info: %+v", p.hostPolicy, host))
		}

		pool, ok := p.hostConnPools[host.Info().Peer()]
		if !ok {
			continue
		}

		conn = pool.Pick(qry)
	}
	return host, conn
}

func (p *policyConnPool) Close() {
	p.mu.Lock()
	defer p.mu.Unlock()

	// remove the hosts from the policy
	p.hostPolicy.SetHosts(nil)

	// close the pools
	for addr, pool := range p.hostConnPools {
		delete(p.hostConnPools, addr)
		pool.Close()
	}
}
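
// addHost ensures a pool exists for the given host, triggering an
// asynchronous fill if one is already present.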
func (p *policyConnPool) addHost(host *HostInfo) {
	p.mu.Lock()
	defer p.mu.Unlock()

	pool, ok := p.hostConnPools[host.Peer()]
	if ok {
		go pool.fill()
		return
	}

	pool = newHostConnPool(
		p.session,
		host,
		p.port,
		p.numConns,
		p.keyspace,
		p.connPolicy(),
	)

	p.hostConnPools[host.Peer()] = pool
}
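
// removeHost drops the host from the policy and closes its pool, if any.
// The pool is closed after the lock is released so other callers are not
// blocked on connection teardown.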
func (p *policyConnPool) removeHost(addr string) {
	p.hostPolicy.RemoveHost(addr)

	p.mu.Lock()
	pool, ok := p.hostConnPools[addr]
	if !ok {
		p.mu.Unlock()
		return
	}

	delete(p.hostConnPools, addr)
	p.mu.Unlock()

	pool.Close()
}

func (p *policyConnPool) hostUp(host *HostInfo) {
	// TODO(zariel): have a set of up hosts and down hosts, we can internally
	// detect down hosts, then try to reconnect to them.
	p.addHost(host)
}

func (p *policyConnPool) hostDown(addr string) {
	// TODO(zariel): mark host as down so we can try to connect to it later, for
	// now just treat it as removed.
	p.removeHost(addr)
}

// hostConnPool is a connection pool for a single host.
// Connection selection is based on a provided ConnSelectionPolicy.
type hostConnPool struct {
	session  *Session
	host     *HostInfo
	port     int
	addr     string
	size     int
	keyspace string
	policy   ConnSelectionPolicy

	// protection for conns, closed, filling
	mu      sync.RWMutex
	conns   []*Conn
	closed  bool
	filling bool
}
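
// newHostConnPool creates a pool for a single host and synchronously fills
// it with the initial connections before returning. A sketch of how the
// parent pool invokes it (mirroring SetHosts above):
//
//	pool := newHostConnPool(p.session, host, p.port, p.numConns,
//		p.keyspace, p.connPolicy())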
func newHostConnPool(session *Session, host *HostInfo, port, size int,
	keyspace string, policy ConnSelectionPolicy) *hostConnPool {

	pool := &hostConnPool{
		session:  session,
		host:     host,
		port:     port,
		addr:     JoinHostPort(host.Peer(), port),
		size:     size,
		keyspace: keyspace,
		policy:   policy,
		conns:    make([]*Conn, 0, size),
		filling:  false,
		closed:   false,
	}

	// fill the pool with the initial connections before returning
	pool.fill()

	return pool
}

// Pick a connection from this connection pool for the given query.
func (pool *hostConnPool) Pick(qry *Query) *Conn {
	pool.mu.RLock()
	if pool.closed {
		pool.mu.RUnlock()
		return nil
	}

	size := len(pool.conns)
	pool.mu.RUnlock()

	if size < pool.size {
		// try to fill the pool
		go pool.fill()
		if size == 0 {
			return nil
		}
	}

	return pool.policy.Pick(qry)
}

// Size returns the number of connections currently active in the pool.
func (pool *hostConnPool) Size() int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return len(pool.conns)
}

// Close the connection pool.
func (pool *hostConnPool) Close() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		return
	}
	pool.closed = true

	// drain, but don't wait
	go pool.drain()
}

// Fill the connection pool.
func (pool *hostConnPool) fill() {
	pool.mu.RLock()
	// avoid filling a closed pool, or concurrent filling
	if pool.closed || pool.filling {
		pool.mu.RUnlock()
		return
	}

	// determine the filling work to be done
	startCount := len(pool.conns)
	fillCount := pool.size - startCount

	// avoid filling a full (or overfull) pool
	if fillCount <= 0 {
		pool.mu.RUnlock()
		return
	}

	// switch from read to write lock
	pool.mu.RUnlock()
	pool.mu.Lock()

	// double check everything since the lock was released
	startCount = len(pool.conns)
	fillCount = pool.size - startCount
	if pool.closed || pool.filling || fillCount <= 0 {
		// looks like another goroutine already beat this
		// goroutine to the filling
		pool.mu.Unlock()
		return
	}

	// ok, fill the pool
	pool.filling = true

	// allow others to access the pool while filling
	pool.mu.Unlock()
	// only this goroutine should make calls to fill/empty the pool at this
	// point until after this routine or its subordinates calls
	// fillingStopped

	// fill only the first connection synchronously
	if startCount == 0 {
		err := pool.connect()
		pool.logConnectErr(err)

		if err != nil {
			// probably unreachable host
			go pool.fillingStopped()

			// this is called with the connection pool mutex held, this call will
			// then recursively try to lock it again. FIXME
			go pool.session.handleNodeDown(net.ParseIP(pool.host.Peer()), pool.port)
			return
		}

		// filled one
		fillCount--

		// connect all remaining connections to this host synchronously
		for fillCount > 0 {
			err := pool.connect()
			pool.logConnectErr(err)

			// decrement, even on error
			fillCount--
		}

		go pool.fillingStopped()
		return
	}

	// fill the rest of the pool asynchronously
	go func() {
		for fillCount > 0 {
			err := pool.connect()
			pool.logConnectErr(err)

			// decrement, even on error
			fillCount--
		}

		// mark the end of filling
		pool.fillingStopped()
	}()
}
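
// logConnectErr logs unexpected connection errors, suppressing dial/read
// failures that are typical during a node outage to avoid log spam.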
func (pool *hostConnPool) logConnectErr(err error) {
	if opErr, ok := err.(*net.OpError); ok && (opErr.Op == "dial" || opErr.Op == "read") {
		// connection refused
		// these are typical during a node outage so avoid log spam.
	} else if err != nil {
		// unexpected error
		log.Printf("error: failed to connect to %s due to error: %v", pool.addr, err)
	}
}

// transition back to a not-filling state.
func (pool *hostConnPool) fillingStopped() {
	// wait for some time to avoid back-to-back filling
	// this provides some time between failed attempts
	// to fill the pool for the host to recover
	time.Sleep(time.Duration(rand.Int31n(100)+31) * time.Millisecond)

	pool.mu.Lock()
	pool.filling = false
	pool.mu.Unlock()
}

// create a new connection to the host and add it to the pool
func (pool *hostConnPool) connect() error {
	// try to connect
	conn, err := pool.session.connect(pool.addr, pool)
	if err != nil {
		return err
	}

	if pool.keyspace != "" {
		// set the keyspace
		if err := conn.UseKeyspace(pool.keyspace); err != nil {
			conn.Close()
			return err
		}
	}

	// add the Conn to the pool
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		conn.Close()
		return nil
	}

	pool.conns = append(pool.conns, conn)

	conns := make([]*Conn, len(pool.conns))
	copy(conns, pool.conns)
	pool.policy.SetConns(conns)

	return nil
}

// handle any error from a Conn
func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
	if !closed {
		// still an open connection, so continue using it
		return
	}

	// TODO: track the number of errors per host and detect when a host is dead,
	// then also have something which can detect when a host comes back.
	pool.mu.Lock()
	defer pool.mu.Unlock()

	if pool.closed {
		// pool closed
		return
	}

	// find the connection index
	for i, candidate := range pool.conns {
		if candidate == conn {
			// remove the connection, not preserving order
			pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1]

			// update the policy
			conns := make([]*Conn, len(pool.conns))
			copy(conns, pool.conns)
			pool.policy.SetConns(conns)

			// lost a connection, so fill the pool
			go pool.fill()
			break
		}
	}
}

// removes and closes all connections from the pool
func (pool *hostConnPool) drain() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// empty the pool
	conns := pool.conns
	pool.conns = pool.conns[:0:0]

	// update the policy
	pool.policy.SetConns(nil)

	// close the connections
	for _, conn := range conns {
		conn.Close()
	}
}