// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build all unit

package gocql

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/gocql/gocql/internal/streams"
)

const (
	defaultProto = protoVersion2
)

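// TestApprove verifies that approve accepts the known authenticator class
// names and rejects unknown ones.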
func TestApprove(t *testing.T) {
	tests := map[bool]bool{
		approve("org.apache.cassandra.auth.PasswordAuthenticator"): true,
		approve("com.instaclustr.cassandra.auth.SharedSecretAuthenticator"): true,
		approve("com.datastax.bdp.cassandra.auth.DseAuthenticator"): true,
		approve("com.apache.cassandra.auth.FakeAuthenticator"): false,
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}

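// TestJoinHostPort checks that JoinHostPort brackets IPv6 literals and leaves
// addresses that already contain a port untouched.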
func TestJoinHostPort(t *testing.T) {
	tests := map[string]string{
		"127.0.0.1:0": JoinHostPort("127.0.0.1", 0),
		"127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142),
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}

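// testCluster returns a ClusterConfig for the given addresses with the control
// connection disabled, so tests talk only to the in-process test servers.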
func testCluster(proto protoVersion, addresses ...string) *ClusterConfig {
	cluster := NewCluster(addresses...)
	cluster.ProtoVersion = int(proto)
	cluster.disableControlConn = true
	return cluster
}

func TestSimple(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

func TestSSLSimple(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

func TestSSLSimpleNoClientCert(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

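// createTestSslCluster builds a cluster config that trusts the test CA in
// testdata/pki and, when useClientCert is set, presents the test client
// certificate as well.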
func createTestSslCluster(addr string, proto protoVersion, useClientCert bool) *ClusterConfig {
	cluster := testCluster(proto, addr)
	sslOpts := &SslOptions{
		CaPath:                 "testdata/pki/ca.crt",
		EnableHostVerification: false,
	}
	if useClientCert {
		sslOpts.CertPath = "testdata/pki/gocql.crt"
		sslOpts.KeyPath = "testdata/pki/gocql.key"
	}
	cluster.SslOpts = sslOpts
	return cluster
}

func TestClosed(t *testing.T) {
	t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis")
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	session, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	session.Close()
	if err := session.Query("void").Exec(); err != ErrSessionClosed {
		t.Fatalf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err)
	}
}

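// newTestSession creates a session against the given test server addresses
// using testCluster.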
func newTestSession(proto protoVersion, addresses ...string) (*Session, error) {
	return testCluster(proto, addresses...).CreateSession()
}

func TestDNSLookupConnected(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Override the default DNS resolver and restore at the end
	failDNS = true
	defer func() { failDNS = false }()
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := NewCluster("cassandra1.invalid", srv.Address, "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve the DNS name "cassandraX.invalid"
	// and fail, but continue to connect via srv.Address
	_, err := cluster.CreateSession()
	if err != nil {
		t.Fatal("CreateSession() should have connected")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
}

func TestDNSLookupError(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Override the default DNS resolver and restore at the end
	failDNS = true
	defer func() { failDNS = false }()
	cluster := NewCluster("cassandra1.invalid", "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve each DNS name "cassandraX.invalid"
	// and fail since it could not resolve any dns entries
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned an error")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
	if err.Error() != "gocql: unable to create session: failed to resolve any of the provided hostnames" {
		t.Fatalf("Expected CreateSession() to fail with message - got '%s' instead", err.Error())
	}
}

func TestStartupTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	// Tell the server to never respond to Startup frame
	atomic.StoreInt32(&srv.TimeoutOnStartup, 1)
	startTime := time.Now()
	cluster := NewCluster(srv.Address)
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// Set very long query connection timeout
	// so we know CreateSession() is using the ConnectTimeout
	cluster.Timeout = time.Second * 5
	// Create session should timeout during connect attempt
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned a timeout error")
	}
	elapsed := time.Since(startTime)
	if elapsed > time.Second*5 {
		t.Fatal("ConnectTimeout is not respected")
	}
	if !strings.Contains(err.Error(), "no connections were made when creating the session") {
		t.Fatalf("Expected to receive no connections error - got '%s'", err)
	}
	if !strings.Contains(log.String(), "no response to connection startup within timeout") {
		t.Fatalf("Expected to receive timeout log message - got '%s'", log.String())
	}
	cancel()
}

func TestTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		select {
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		case <-ctx.Done():
		}
	}()
	if err := db.Query("kill").WithContext(ctx).Exec(); err == nil {
		t.Fatal("expected error got nil")
	}
	cancel()
	wg.Wait()
}

func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 1 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	qry := db.Query("timeout")
	// Make sure we finish the query without leftovers
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		if err := qry.Exec(); err != context.Canceled {
			t.Fatalf("expected to get context cancel error: '%v', got '%v'", context.Canceled, err)
		}
		wg.Done()
	}()
	// The query will time out after about 1 second, so cancel it after a short pause
	time.AfterFunc(20*time.Millisecond, qry.Cancel)
	wg.Wait()
}

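// testQueryObserver records the query metrics reported for each host so tests
// can compare them against the metrics tracked on the query itself.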
type testQueryObserver struct {
	metrics map[string]*queryMetrics
	verbose bool
}

func (o *testQueryObserver) ObserveQuery(ctx context.Context, q ObservedQuery) {
	host := q.Host.ConnectAddress().String()
	o.metrics[host] = q.Metrics
	if o.verbose {
		Logger.Printf("Observed query %q. Returned %v rows, took %v on host %q with %v attempts and total latency %v. Error: %q\n",
			q.Statement, q.Rows, q.End.Sub(q.Start), host, q.Metrics.Attempts, q.Metrics.TotalLatency, q.Err)
	}
}

func (o *testQueryObserver) GetMetrics(host *HostInfo) *queryMetrics {
	return o.metrics[host.ConnectAddress().String()]
}

// TestQueryRetry makes sure that gocql executes exactly the number of retry
// queries designated by the user.
func TestQueryRetry(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		}
	}()
	rt := &SimpleRetryPolicy{NumRetries: 1}
	qry := db.Query("kill").RetryPolicy(rt)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	requests := atomic.LoadInt64(&srv.nKillReq)
	attempts := qry.Attempts()
	if requests != int64(attempts) {
		t.Fatalf("expected requests %v to match query attempts %v", requests, attempts)
	}
	// the query will only be attempted once, but is being retried
	if requests != int64(rt.NumRetries) {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, requests-1)
	}
}

func TestQueryMultinodeWithMetrics(t *testing.T) {
	// Build a 3 node cluster to test host metric mapping
	var nodes []*TestServer
	var addresses = []string{
		"127.0.0.1",
		"127.0.0.2",
		"127.0.0.3",
	}
	// One context is enough for all servers
	ctx := context.Background()
	for _, ip := range addresses {
		srv := NewTestServerWithAddress(ip+":0", t, defaultProto, ctx)
		defer srv.Stop()
		nodes = append(nodes, srv)
	}
	db, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// 1 retry per host
	rt := &SimpleRetryPolicy{NumRetries: 3}
	observer := &testQueryObserver{metrics: make(map[string]*queryMetrics), verbose: false}
	qry := db.Query("kill").RetryPolicy(rt).Observer(observer)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	for i, ip := range addresses {
		host := &HostInfo{connectAddress: net.ParseIP(ip)}
		observedMetrics := observer.GetMetrics(host)
		requests := int(atomic.LoadInt64(&nodes[i].nKillReq))
		hostAttempts := qry.metrics[ip].Attempts
		if requests != hostAttempts {
			t.Fatalf("expected requests %v to match query attempts %v", requests, hostAttempts)
		}
		if hostAttempts != observedMetrics.Attempts {
			t.Fatalf("expected observed attempts %v to match query attempts %v on host %v", observedMetrics.Attempts, hostAttempts, ip)
		}
		hostLatency := qry.metrics[ip].TotalLatency
		observedLatency := observedMetrics.TotalLatency
		if hostLatency != observedLatency {
			t.Fatalf("expected observed latency %v to match query latency %v on host %v", observedLatency, hostLatency, ip)
		}
	}
	// the query will only be attempted once, but is being retried
	attempts := qry.Attempts()
	if attempts != rt.NumRetries {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, attempts)
	}
}

func TestStreams_Protocol1(t *testing.T) {
	srv := NewTestServer(t, protoVersion1, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion1, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	for i := 1; i < 128; i++ {
		// here we're just validating that if we send NumStream requests we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := db.Query("void").Exec(); err != nil {
				t.Error(err)
			}
		}()
	}
	wg.Wait()
}

func TestStreams_Protocol3(t *testing.T) {
	srv := NewTestServer(t, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion3, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	for i := 1; i < 32768; i++ {
		// the test server processes each conn synchronously
		// here we're just validating that if we send NumStream requests we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		if err = db.Query("void").Exec(); err != nil {
			t.Fatal(err)
		}
	}
}

func BenchmarkProtocolV3(b *testing.B) {
	srv := NewTestServer(b, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if err = db.Query("void").Exec(); err != nil {
			b.Fatal(err)
		}
	}
}

// This tests that the policy connection pool handles SSL correctly
func TestPolicyConnPoolSSL(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := createTestSslCluster(srv.Address, defaultProto, true)
	cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("failed to create new session: %v", err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("query failed due to error: %v", err)
	}
	db.Close()
	// wait for the pool to drain
	time.Sleep(100 * time.Millisecond)
	size := db.pool.Size()
	if size != 0 {
		t.Fatalf("connection pool did not drain, still contains %d connections", size)
	}
}

func TestQueryTimeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	ch := make(chan error, 1)
	go func() {
		err := db.Query("timeout").Exec()
		if err != nil {
			ch <- err
			return
		}
		t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout)
	}()
	select {
	case err := <-ch:
		if err != ErrTimeoutNoResponse {
			t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err)
		}
	case <-time.After(10*time.Millisecond + db.cfg.Timeout):
		// ensure that the query goroutines have been scheduled
		t.Fatalf("query did not timeout after %v", db.cfg.Timeout)
	}
}

func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3, context.Background())
	defer srv.Stop()
	cluster := testCluster(3, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}

func TestQueryTimeoutReuseStream(t *testing.T) {
	t.Skip("no longer tests anything")
	// TODO(zariel): move this to conn test, we really just want to check what
	// happens when a conn is
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	db.Query("slow").Exec()
	err = db.Query("void").Exec()
	if err != nil {
		t.Fatal(err)
	}
}

func TestQueryTimeoutClose(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1000 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	ch := make(chan error)
	go func() {
		err := db.Query("timeout").Exec()
		ch <- err
	}()
	// ensure that the above goroutine gets scheduled
	time.Sleep(50 * time.Millisecond)
	db.Close()
	select {
	case err = <-ch:
	case <-time.After(1 * time.Second):
		t.Fatal("timed out waiting to get a response once cluster is closed")
	}
	if err != ErrConnectionClosed {
		t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err)
	}
}

func TestStream0(t *testing.T) {
	// TODO: replace this with type check
	const expErr = "gocql: received unexpected frame on stream 0"
	var buf bytes.Buffer
	f := newFramer(nil, &buf, nil, protoVersion4)
	f.writeHeader(0, opResult, 0)
	f.writeInt(resultKindVoid)
	f.wbuf[0] |= 0x80
	if err := f.finishWrite(); err != nil {
		t.Fatal(err)
	}
	conn := &Conn{
		r:       bufio.NewReader(&buf),
		streams: streams.New(protoVersion4),
	}
	err := conn.recv()
	if err == nil {
		t.Fatal("expected to get an error on stream 0")
	} else if !strings.HasPrefix(err.Error(), expErr) {
		t.Fatalf("expected to get error prefix %q got %q", expErr, err.Error())
	}
}

func TestConnClosedBlocked(t *testing.T) {
	t.Skip("FLAKE: skipping test flake see https://github.com/gocql/gocql/issues/1088")
	// issue 664
	const proto = 3
	srv := NewTestServer(t, proto, context.Background())
	defer srv.Stop()
	errorHandler := connErrorHandlerFn(func(conn *Conn, err error, closed bool) {
		t.Log(err)
	})
	s, err := srv.session()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	conn, err := s.connect(srv.host(), errorHandler)
	if err != nil {
		t.Fatal(err)
	}
	if err := conn.conn.Close(); err != nil {
		t.Fatal(err)
	}
	// This will block indefinitely if #664 is not fixed
	err = conn.executeQuery(&Query{stmt: "void"}).Close()
	if !strings.HasSuffix(err.Error(), "use of closed network connection") {
		t.Fatalf("expected to get use of closed network connection error got: %v\n", err)
	}
}

func TestContext_Timeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 5 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err = db.Query("timeout").WithContext(ctx).Exec()
	if err != context.Canceled {
		t.Fatalf("expected to get context cancel error: %v got %v", context.Canceled, err)
	}
}

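// TestWriteCoalescing checks that two concurrent writes are held in the
// coalescer's buffer list and written out together when flush is called.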
func TestWriteCoalescing(t *testing.T) {
	var buf bytes.Buffer
	w := &writeCoalescer{
		w:     &buf,
		cond:  sync.NewCond(&sync.Mutex{}),
		fcond: sync.NewCond(&sync.Mutex{}),
	}
	go func() {
		if _, err := w.Write([]byte("one")); err != nil {
			t.Error(err)
		}
	}()
	go func() {
		if _, err := w.Write([]byte("two")); err != nil {
			t.Error(err)
		}
	}()
	if buf.Len() != 0 {
		t.Fatalf("expected buffer to be empty have: %v", buf.String())
	}
	for {
		w.cond.L.Lock()
		if len(w.buffers) == 2 {
			w.cond.L.Unlock()
			break
		}
		w.cond.L.Unlock()
	}
	w.flush()
	if got := buf.String(); got != "onetwo" && got != "twoone" {
		t.Fatalf("expected to get %q got %q", "onetwo or twoone", got)
	}
}

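// recordingFrameHeaderObserver collects every frame header it observes so a
// test can assert on the frames a connection received.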
type recordingFrameHeaderObserver struct {
	t      *testing.T
	mu     sync.Mutex
	frames []ObservedFrameHeader
}

func (r *recordingFrameHeaderObserver) ObserveFrameHeader(ctx context.Context, frm ObservedFrameHeader) {
	r.mu.Lock()
	r.frames = append(r.frames, frm)
	r.mu.Unlock()
}

func (r *recordingFrameHeaderObserver) getFrames() []ObservedFrameHeader {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.frames
}

func TestFrameHeaderObserver(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.NumConns = 1
	observer := &recordingFrameHeaderObserver{t: t}
	cluster.FrameHeaderObserver = observer
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatal(err)
	}
	frames := observer.getFrames()
	if len(frames) != 2 {
		t.Fatalf("Expected to receive 2 frames, instead received %d", len(frames))
	}
	readyFrame := frames[0]
	if readyFrame.Opcode != frameOp(opReady) {
		t.Fatalf("Expected to receive ready frame, instead received frame of opcode %d", readyFrame.Opcode)
	}
	voidResultFrame := frames[1]
	if voidResultFrame.Opcode != frameOp(opResult) {
		t.Fatalf("Expected to receive result frame, instead received frame of opcode %d", voidResultFrame.Opcode)
	}
	if voidResultFrame.Length != int32(4) {
		t.Fatalf("Expected to receive frame with body length 4, instead received body length %d", voidResultFrame.Length)
	}
}

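// NewTestServerWithAddress starts an in-process fake CQL server listening on
// addr; it shuts down when the context is cancelled or Stop is called.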
func NewTestServerWithAddress(addr string, t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	laddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		t.Fatal(err)
	}
	listen, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		t.Fatal(err)
	}
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}
	ctx, cancel := context.WithCancel(ctx)
	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		ctx:        ctx,
		cancel:     cancel,
	}
	go srv.closeWatch()
	go srv.serve()
	return srv
}

func NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	return NewTestServerWithAddress("127.0.0.1:0", t, protocol, ctx)
}

func NewSSLTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
	if err != nil {
		t.Fatalf("could not read ca.crt: %v", err)
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(pem) {
		t.Fatalf("Failed parsing or appending certs")
	}
	mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
	if err != nil {
		t.Fatalf("could not load cert")
	}
	config := &tls.Config{
		Certificates: []tls.Certificate{mycert},
		RootCAs:      certPool,
	}
	listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
	if err != nil {
		t.Fatal(err)
	}
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}
	ctx, cancel := context.WithCancel(ctx)
	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		ctx:        ctx,
		cancel:     cancel,
	}
	go srv.closeWatch()
	go srv.serve()
	return srv
}

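// TestServer is a minimal in-process CQL server used by the tests in this
// file. It speaks just enough of the protocol to answer the canned statements
// handled in process.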
type TestServer struct {
	Address          string
	TimeoutOnStartup int32
	t                testing.TB
	nreq             uint64
	listen           net.Listener
	nKillReq         int64
	compressor       Compressor
	protocol         byte
	headerSize       int
	ctx              context.Context
	cancel           context.CancelFunc
	quit             chan struct{}
	mu               sync.Mutex
	closed           bool
}

func (srv *TestServer) session() (*Session, error) {
	return testCluster(protoVersion(srv.protocol), srv.Address).CreateSession()
}

func (srv *TestServer) host() *HostInfo {
	hosts, err := hostInfo(srv.Address, 9042)
	if err != nil {
		srv.t.Fatal(err)
	}
	return hosts[0]
}

func (srv *TestServer) closeWatch() {
	<-srv.ctx.Done()
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeLocked()
}

func (srv *TestServer) serve() {
	defer srv.listen.Close()
	for !srv.isClosed() {
		conn, err := srv.listen.Accept()
		if err != nil {
			break
		}
		go func(conn net.Conn) {
			defer conn.Close()
			for !srv.isClosed() {
				framer, err := srv.readFrame(conn)
				if err != nil {
					if err == io.EOF {
						return
					}
					srv.errorLocked(err)
					return
				}
				atomic.AddUint64(&srv.nreq, 1)
				go srv.process(framer)
			}
		}(conn)
	}
}

func (srv *TestServer) isClosed() bool {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	return srv.closed
}

func (srv *TestServer) closeLocked() {
	if srv.closed {
		return
	}
	srv.closed = true
	srv.listen.Close()
	srv.cancel()
}

func (srv *TestServer) Stop() {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeLocked()
}

func (srv *TestServer) errorLocked(err interface{}) {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	if srv.closed {
		return
	}
	srv.t.Error(err)
}

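// process answers a single request frame: STARTUP and OPTIONS get canned
// responses, and QUERY is dispatched on the first word of the statement
// ("kill", "use", "void", "timeout", "slow", ...).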
func (srv *TestServer) process(f *framer) {
	head := f.header
	if head == nil {
		srv.errorLocked("process frame with a nil header")
		return
	}
	switch head.op {
	case opStartup:
		if atomic.LoadInt32(&srv.TimeoutOnStartup) > 0 {
			// Do not respond to startup command
			// wait until we get a cancel signal
			select {
			case <-srv.ctx.Done():
				return
			}
		}
		f.writeHeader(0, opReady, head.stream)
	case opOptions:
		f.writeHeader(0, opSupported, head.stream)
		f.writeShort(0)
	case opQuery:
		query := f.readLongString()
		first := query
		if n := strings.Index(query, " "); n > 0 {
			first = first[:n]
		}
		switch strings.ToLower(first) {
		case "kill":
			atomic.AddInt64(&srv.nKillReq, 1)
			f.writeHeader(0, opError, head.stream)
			f.writeInt(0x1001)
			f.writeString("query killed")
		case "use":
			f.writeInt(resultKindKeyspace)
			f.writeString(strings.TrimSpace(query[3:]))
		case "void":
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		case "timeout":
			<-srv.ctx.Done()
			return
		case "slow":
			go func() {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.wbuf[0] = srv.protocol | 0x80
				select {
				case <-srv.ctx.Done():
					return
				case <-time.After(50 * time.Millisecond):
					f.finishWrite()
				}
			}()
			return
		default:
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		}
	case opError:
		f.writeHeader(0, opError, head.stream)
		f.wbuf = append(f.wbuf, f.rbuf...)
	default:
		f.writeHeader(0, opError, head.stream)
		f.writeInt(0)
		f.writeString("not supported")
	}
	f.wbuf[0] = srv.protocol | 0x80
	if err := f.finishWrite(); err != nil {
		srv.errorLocked(err)
	}
}

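// readFrame reads a single request frame from conn and returns a framer
// positioned at its body, rejecting response frames and frames whose protocol
// version does not match the server's.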
func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {
	buf := make([]byte, srv.headerSize)
	head, err := readHeader(conn, buf)
	if err != nil {
		return nil, err
	}
	framer := newFramer(conn, conn, nil, srv.protocol)
	err = framer.readFrame(&head)
	if err != nil {
		return nil, err
	}
	// should be a request frame
	if head.version.response() {
		return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version)
	} else if head.version.version() != srv.protocol {
		return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version())
	}
	return framer, nil
}