conn_test.go 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216
  1. // Copyright (c) 2012 The gocql Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
// +build all unit

  5. package gocql
  6. import (
  7. "bufio"
  8. "bytes"
  9. "context"
  10. "crypto/tls"
  11. "crypto/x509"
  12. "fmt"
  13. "io"
  14. "io/ioutil"
  15. "math/rand"
  16. "net"
  17. "os"
  18. "strings"
  19. "sync"
  20. "sync/atomic"
  21. "testing"
  22. "time"
  23. "github.com/gocql/gocql/internal/streams"
  24. )
const (
	// defaultProto is the protocol version used by tests that do not
	// exercise a specific protocol version.
	defaultProto = protoVersion2
)
  28. func TestApprove(t *testing.T) {
  29. tests := map[bool]bool{
  30. approve("org.apache.cassandra.auth.PasswordAuthenticator"): true,
  31. approve("com.instaclustr.cassandra.auth.SharedSecretAuthenticator"): true,
  32. approve("com.datastax.bdp.cassandra.auth.DseAuthenticator"): true,
  33. approve("com.apache.cassandra.auth.FakeAuthenticator"): false,
  34. }
  35. for k, v := range tests {
  36. if k != v {
  37. t.Fatalf("expected '%v', got '%v'", k, v)
  38. }
  39. }
  40. }
  41. func TestJoinHostPort(t *testing.T) {
  42. tests := map[string]string{
  43. "127.0.0.1:0": JoinHostPort("127.0.0.1", 0),
  44. "127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142),
  45. "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0),
  46. "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142),
  47. }
  48. for k, v := range tests {
  49. if k != v {
  50. t.Fatalf("expected '%v', got '%v'", k, v)
  51. }
  52. }
  53. }
  54. func testCluster(proto protoVersion, addresses ...string) *ClusterConfig {
  55. cluster := NewCluster(addresses...)
  56. cluster.ProtoVersion = int(proto)
  57. cluster.disableControlConn = true
  58. return cluster
  59. }
  60. func TestSimple(t *testing.T) {
  61. srv := NewTestServer(t, defaultProto, context.Background())
  62. defer srv.Stop()
  63. cluster := testCluster(defaultProto, srv.Address)
  64. db, err := cluster.CreateSession()
  65. if err != nil {
  66. t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
  67. }
  68. if err := db.Query("void").Exec(); err != nil {
  69. t.Fatalf("0x%x: %v", defaultProto, err)
  70. }
  71. }
  72. func TestSSLSimple(t *testing.T) {
  73. srv := NewSSLTestServer(t, defaultProto, context.Background())
  74. defer srv.Stop()
  75. db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()
  76. if err != nil {
  77. t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
  78. }
  79. if err := db.Query("void").Exec(); err != nil {
  80. t.Fatalf("0x%x: %v", defaultProto, err)
  81. }
  82. }
  83. func TestSSLSimpleNoClientCert(t *testing.T) {
  84. srv := NewSSLTestServer(t, defaultProto, context.Background())
  85. defer srv.Stop()
  86. db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()
  87. if err != nil {
  88. t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
  89. }
  90. if err := db.Query("void").Exec(); err != nil {
  91. t.Fatalf("0x%x: %v", defaultProto, err)
  92. }
  93. }
  94. func createTestSslCluster(addr string, proto protoVersion, useClientCert bool) *ClusterConfig {
  95. cluster := testCluster(proto, addr)
  96. sslOpts := &SslOptions{
  97. CaPath: "testdata/pki/ca.crt",
  98. EnableHostVerification: false,
  99. }
  100. if useClientCert {
  101. sslOpts.CertPath = "testdata/pki/gocql.crt"
  102. sslOpts.KeyPath = "testdata/pki/gocql.key"
  103. }
  104. cluster.SslOpts = sslOpts
  105. return cluster
  106. }
  107. func TestClosed(t *testing.T) {
  108. t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis")
  109. srv := NewTestServer(t, defaultProto, context.Background())
  110. defer srv.Stop()
  111. session, err := newTestSession(defaultProto, srv.Address)
  112. if err != nil {
  113. t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
  114. }
  115. session.Close()
  116. if err := session.Query("void").Exec(); err != ErrSessionClosed {
  117. t.Fatalf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err)
  118. }
  119. }
// newTestSession creates a session against the given addresses using
// testCluster's defaults (control connection disabled).
func newTestSession(proto protoVersion, addresses ...string) (*Session, error) {
	return testCluster(proto, addresses...).CreateSession()
}
// TestDNSLookupConnected ensures a session is still created when some,
// but not all, of the configured hostnames fail DNS resolution: the
// unresolvable hosts are logged and skipped.
func TestDNSLookupConnected(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Force DNS resolution to fail and restore the default at the end
	failDNS = true
	defer func() { failDNS = false }()
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := NewCluster("cassandra1.invalid", srv.Address, "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve the DNS name "cassandraX.invalid"
	// and fail, but continue to connect via srv.Address
	_, err := cluster.CreateSession()
	if err != nil {
		t.Fatal("CreateSession() should have connected")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
}
// TestDNSLookupError ensures CreateSession fails with a descriptive
// error when none of the configured hostnames can be resolved, and that
// each resolution failure is logged.
func TestDNSLookupError(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Force DNS resolution to fail and restore the default at the end
	failDNS = true
	defer func() { failDNS = false }()
	cluster := NewCluster("cassandra1.invalid", "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve each DNS name "cassandraX.invalid"
	// and fail since it could not resolve any dns entries
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned an error")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
	if err.Error() != "gocql: unable to create session: failed to resolve any of the provided hostnames" {
		t.Fatalf("Expected CreateSession() to fail with message - got '%s' instead", err.Error())
	}
}
// TestStartupTimeout checks that CreateSession gives up within the
// connect timeout (not the much larger query timeout) when the server
// never answers the Startup frame, and that the failure is logged.
func TestStartupTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	// Tell the server to never respond to Startup frame
	atomic.StoreInt32(&srv.TimeoutOnStartup, 1)
	startTime := time.Now()
	cluster := NewCluster(srv.Address)
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// Set very long query connection timeout
	// so we know CreateSession() is using the ConnectTimeout
	cluster.Timeout = time.Second * 5
	// Create session should timeout during connect attempt
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned a timeout error")
	}
	// If CreateSession took the full 5s it was (incorrectly) bounded by
	// the query timeout rather than the connect timeout.
	elapsed := time.Since(startTime)
	if elapsed > time.Second*5 {
		t.Fatal("ConnectTimeout is not respected")
	}
	if !strings.Contains(err.Error(), "no connections were made when creating the session") {
		t.Fatalf("Expected to receive no connections error - got '%s'", err)
	}
	if !strings.Contains(log.String(), "no response to connection startup within timeout") {
		t.Fatalf("Expected to receive timeout log message - got '%s'", log.String())
	}
	cancel()
}
// TestTimeout runs a query the mock server never answers ("kill") and
// expects the query to fail; a watchdog goroutine fails the test if
// nothing has happened within 5 seconds.
func TestTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		select {
		case <-time.After(5 * time.Second):
			// watchdog fired before the query returned
			t.Errorf("no timeout")
		case <-ctx.Done():
		}
	}()
	if err := db.Query("kill").WithContext(ctx).Exec(); err == nil {
		t.Fatal("expected error got nil")
	}
	// release the watchdog goroutine and wait for it to exit
	cancel()
	wg.Wait()
}
  232. func TestCancel(t *testing.T) {
  233. ctx, cancel := context.WithCancel(context.Background())
  234. defer cancel()
  235. srv := NewTestServer(t, defaultProto, ctx)
  236. defer srv.Stop()
  237. cluster := testCluster(defaultProto, srv.Address)
  238. cluster.Timeout = 1 * time.Second
  239. db, err := cluster.CreateSession()
  240. if err != nil {
  241. t.Fatalf("NewCluster: %v", err)
  242. }
  243. defer db.Close()
  244. qry := db.Query("timeout").WithContext(ctx)
  245. // Make sure we finish the query without leftovers
  246. var wg sync.WaitGroup
  247. wg.Add(1)
  248. go func() {
  249. if err := qry.Exec(); err != context.Canceled {
  250. t.Fatalf("expected to get context cancel error: '%v', got '%v'", context.Canceled, err)
  251. }
  252. wg.Done()
  253. }()
  254. // The query will timeout after about 1 seconds, so cancel it after a short pause
  255. time.AfterFunc(20*time.Millisecond, cancel)
  256. wg.Wait()
  257. }
// testQueryObserver records per-host query metrics so tests can compare
// what the observer saw against the query's own bookkeeping.
type testQueryObserver struct {
	metrics map[string]*hostMetrics // keyed by host connect address
	verbose bool                    // when set, log every observed query
}

// ObserveQuery stores the metrics of the observed query, keyed by the
// host it ran against, optionally logging the details.
func (o *testQueryObserver) ObserveQuery(ctx context.Context, q ObservedQuery) {
	host := q.Host.ConnectAddress().String()
	o.metrics[host] = q.Metrics
	if o.verbose {
		Logger.Printf("Observed query %q. Returned %v rows, took %v on host %q with %v attempts and total latency %v. Error: %q\n",
			q.Statement, q.Rows, q.End.Sub(q.Start), host, q.Metrics.Attempts, q.Metrics.TotalLatency, q.Err)
	}
}

// GetMetrics returns the recorded metrics for the given host, or nil if
// the host was never observed.
func (o *testQueryObserver) GetMetrics(host *HostInfo) *hostMetrics {
	return o.metrics[host.ConnectAddress().String()]
}
// TestQueryRetry will test to make sure that gocql will execute
// the exact amount of retry queries designated by the user.
func TestQueryRetry(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// Watchdog: fail the test if the query has not finished in 5s.
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		}
	}()
	rt := &SimpleRetryPolicy{NumRetries: 1}
	qry := db.Query("kill").RetryPolicy(rt)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	// The server counts each "kill" request it received; this must match
	// the attempts recorded on the query.
	requests := atomic.LoadInt64(&srv.nKillReq)
	attempts := qry.Attempts()
	if requests != int64(attempts) {
		t.Fatalf("expected requests %v to match query attempts %v", requests, attempts)
	}
	// the query will only be attempted once, but is being retried
	if requests != int64(rt.NumRetries) {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, requests-1)
	}
}
// TestQueryMultinodeWithMetrics runs a failing query with retries over a
// 3-node mock cluster and checks that the per-host attempt and latency
// numbers agree between the server, the query's own metrics and a query
// observer.
func TestQueryMultinodeWithMetrics(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
		os.Stdout.WriteString(log.String())
	}()
	// Build a 3 node cluster to test host metric mapping
	var nodes []*TestServer
	var addresses = []string{
		"127.0.0.1",
		"127.0.0.2",
		"127.0.0.3",
	}
	// Can do with 1 context for all servers
	ctx := context.Background()
	for _, ip := range addresses {
		srv := NewTestServerWithAddress(ip+":0", t, defaultProto, ctx)
		defer srv.Stop()
		nodes = append(nodes, srv)
	}
	db, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// 1 retry per host
	rt := &SimpleRetryPolicy{NumRetries: 3}
	observer := &testQueryObserver{metrics: make(map[string]*hostMetrics), verbose: false}
	qry := db.Query("kill").RetryPolicy(rt).Observer(observer)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	// Per host: requests seen by the server, attempts recorded on the
	// query and attempts seen by the observer must all agree.
	for i, ip := range addresses {
		host := &HostInfo{connectAddress: net.ParseIP(ip)}
		queryMetric := qry.getHostMetrics(host)
		observedMetrics := observer.GetMetrics(host)
		requests := int(atomic.LoadInt64(&nodes[i].nKillReq))
		hostAttempts := queryMetric.Attempts
		if requests != hostAttempts {
			t.Fatalf("expected requests %v to match query attempts %v", requests, hostAttempts)
		}
		if hostAttempts != observedMetrics.Attempts {
			t.Fatalf("expected observed attempts %v to match query attempts %v on host %v", observedMetrics.Attempts, hostAttempts, ip)
		}
		hostLatency := queryMetric.TotalLatency
		observedLatency := observedMetrics.TotalLatency
		if hostLatency != observedLatency {
			t.Fatalf("expected observed latency %v to match query latency %v on host %v", observedLatency, hostLatency, ip)
		}
	}
	// the query will only be attempted once, but is being retried
	attempts := qry.Attempts()
	if attempts != rt.NumRetries {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, attempts)
	}
}
// testRetryPolicy allows a query to be attempted until its attempt
// count exceeds NumRetries, regardless of the error.
type testRetryPolicy struct {
	NumRetries int
}

// Attempt reports whether the query may be tried (again).
func (t *testRetryPolicy) Attempt(qry RetryableQuery) bool {
	return qry.Attempts() <= t.NumRetries
}

// GetRetryType ignores the error and always asks for a plain retry.
func (t *testRetryPolicy) GetRetryType(err error) RetryType {
	return Retry
}
// TestSpeculativeExecution runs a query with one additional speculative
// execution over a 3-node mock cluster and verifies, from the per-node
// request counters, that only the expected number of nodes and attempts
// were used.
func TestSpeculativeExecution(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
		os.Stdout.WriteString(log.String())
	}()
	// Build a 3 node cluster
	var nodes []*TestServer
	var addresses = []string{
		"127.0.0.1",
		"127.0.0.2",
		"127.0.0.3",
	}
	// Can do with 1 context for all servers
	ctx := context.Background()
	for _, ip := range addresses {
		srv := NewTestServerWithAddress(ip+":0", t, defaultProto, ctx)
		defer srv.Stop()
		nodes = append(nodes, srv)
	}
	db, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// Create a test retry policy, 6 retries will cover 2 executions
	rt := &testRetryPolicy{NumRetries: 8}
	// test Speculative policy with 1 additional execution
	sp := &SimpleSpeculativeExecution{NumAttempts: 1, TimeoutDelay: 200 * time.Millisecond}
	// Build the query
	qry := db.Query("speculative").RetryPolicy(rt).SetSpeculativeExecutionPolicy(sp).Idempotent(true)
	// Execute the query and close, check that it doesn't error out
	if err := qry.Exec(); err != nil {
		t.Errorf("The query failed with '%v'!\n", err)
	}
	requests1 := atomic.LoadInt64(&nodes[0].nKillReq)
	requests2 := atomic.LoadInt64(&nodes[1].nKillReq)
	requests3 := atomic.LoadInt64(&nodes[2].nKillReq)
	// Spec Attempts == 1, so expecting to see only 1 regular + 1 speculative = 2 nodes attempted
	if requests1 != 0 && requests2 != 0 && requests3 != 0 {
		t.Error("error: all 3 nodes were attempted, should have been only 2")
	}
	// Only the 4th request will generate results, so
	if requests1 != 4 && requests2 != 4 && requests3 != 4 {
		t.Error("error: none of 3 nodes was attempted 4 times!")
	}
	// "speculative" query will succeed on one arbitrary node after 4 attempts, so
	// expecting to see 4 (on successful node) + not more than 2 (as cancelled on another node) == 6
	if requests1+requests2+requests3 > 6 {
		t.Errorf("error: expected to see 6 attempts, got %v\n", requests1+requests2+requests3)
	}
}
// TestStreams_Protocol1 fires 127 concurrent queries (protocol 1 has a
// small stream-id space) over a single connection to verify each stream
// gets its own response.
func TestStreams_Protocol1(t *testing.T) {
	srv := NewTestServer(t, protoVersion1, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion1, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	for i := 1; i < 128; i++ {
		// here were just validating that if we send NumStream request we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := db.Query("void").Exec(); err != nil {
				t.Error(err)
			}
		}()
	}
	wg.Wait()
}
  455. func TestStreams_Protocol3(t *testing.T) {
  456. srv := NewTestServer(t, protoVersion3, context.Background())
  457. defer srv.Stop()
  458. // TODO: these are more like session tests and should instead operate
  459. // on a single Conn
  460. cluster := testCluster(protoVersion3, srv.Address)
  461. cluster.NumConns = 1
  462. cluster.ProtoVersion = 3
  463. db, err := cluster.CreateSession()
  464. if err != nil {
  465. t.Fatal(err)
  466. }
  467. defer db.Close()
  468. for i := 1; i < 32768; i++ {
  469. // the test server processes each conn synchronously
  470. // here were just validating that if we send NumStream request we get
  471. // a response for every stream and the lengths for the queries are set
  472. // correctly.
  473. if err = db.Query("void").Exec(); err != nil {
  474. t.Fatal(err)
  475. }
  476. }
  477. }
// BenchmarkProtocolV3 measures the round-trip time of a trivial query
// over a single protocol-3 connection to the mock server.
func BenchmarkProtocolV3(b *testing.B) {
	srv := NewTestServer(b, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()
	// exclude session setup from the measurement
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if err = db.Query("void").Exec(); err != nil {
			b.Fatal(err)
		}
	}
}
// This tests that the policy connection pool handles SSL correctly
func TestPolicyConnPoolSSL(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := createTestSslCluster(srv.Address, defaultProto, true)
	cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("failed to create new session: %v", err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("query failed due to error: %v", err)
	}
	db.Close()
	// wait for the pool to drain
	time.Sleep(100 * time.Millisecond)
	// after Close, all pooled connections should have been released
	size := db.pool.Size()
	if size != 0 {
		t.Fatalf("connection pool did not drain, still contains %d connections", size)
	}
}
// TestQueryTimeout runs a query the server answers too slowly
// ("timeout") with a very small query timeout and expects
// ErrTimeoutNoResponse well before the outer deadline.
func TestQueryTimeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	ch := make(chan error, 1)
	go func() {
		err := db.Query("timeout").Exec()
		if err != nil {
			ch <- err
			return
		}
		t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout)
	}()
	select {
	case err := <-ch:
		if err != ErrTimeoutNoResponse {
			t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err)
		}
	case <-time.After(40*time.Millisecond + db.cfg.Timeout):
		// ensure that the query goroutines have been scheduled
		t.Fatalf("query did not timeout after %v", db.cfg.Timeout)
	}
}
// BenchmarkSingleConn measures parallel query throughput over a single
// connection to the mock server.
func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3, context.Background())
	defer srv.Stop()
	cluster := testCluster(3, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// exclude session setup from the measurement
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}
// TestQueryTimeoutReuseStream (currently skipped) was meant to check
// that a stream abandoned by a timed-out query can be reused by a
// subsequent query.
func TestQueryTimeoutReuseStream(t *testing.T) {
	t.Skip("no longer tests anything")
	// TODO(zariel): move this to conn test, we really just want to check what
	// happens when a conn is
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// error deliberately ignored: the query exists only to occupy (and
	// time out on) a stream
	db.Query("slow").Exec()
	err = db.Query("void").Exec()
	if err != nil {
		t.Fatal(err)
	}
}
// TestQueryTimeoutClose closes the session while a query is in flight
// and expects the query to fail promptly with ErrConnectionClosed.
func TestQueryTimeoutClose(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1000 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	ch := make(chan error)
	go func() {
		err := db.Query("timeout").Exec()
		ch <- err
	}()
	// ensure that the above goroutine gets sheduled
	time.Sleep(50 * time.Millisecond)
	db.Close()
	select {
	case err = <-ch:
	case <-time.After(1 * time.Second):
		t.Fatal("timedout waiting to get a response once cluster is closed")
	}
	if err != ErrConnectionClosed {
		t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err)
	}
}
// TestStream0 verifies that a response frame arriving on stream 0
// is rejected by Conn.recv with the expected error.
func TestStream0(t *testing.T) {
	// TODO: replace this with type check
	const expErr = "gocql: received unexpected frame on stream 0"
	var buf bytes.Buffer
	// hand-build a void RESULT frame on stream 0
	f := newFramer(nil, &buf, nil, protoVersion4)
	f.writeHeader(0, opResult, 0)
	f.writeInt(resultKindVoid)
	// set the high bit of the version byte so the frame reads as a
	// server response — TODO confirm against the framer's direction flag
	f.wbuf[0] |= 0x80
	if err := f.finishWrite(); err != nil {
		t.Fatal(err)
	}
	conn := &Conn{
		r:       bufio.NewReader(&buf),
		streams: streams.New(protoVersion4),
	}
	err := conn.recv()
	if err == nil {
		t.Fatal("expected to get an error on stream 0")
	} else if !strings.HasPrefix(err.Error(), expErr) {
		t.Fatalf("expected to get error prefix %q got %q", expErr, err.Error())
	}
}
// TestContext_Timeout runs a query with an already-cancelled context
// and expects it to fail immediately with context.Canceled.
func TestContext_Timeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 5 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	// cancel the query's context before it is even issued
	ctx, cancel = context.WithCancel(ctx)
	cancel()
	err = db.Query("timeout").WithContext(ctx).Exec()
	if err != context.Canceled {
		t.Fatalf("expected to get context cancel error: %v got %v", context.Canceled, err)
	}
}
  667. func TestWriteCoalescing(t *testing.T) {
  668. ctx, cancel := context.WithCancel(context.Background())
  669. defer cancel()
  670. var buf bytes.Buffer
  671. w := &writeCoalescer{
  672. w: &buf,
  673. writeCh: make(chan struct{}),
  674. cond: sync.NewCond(&sync.Mutex{}),
  675. quit: ctx.Done(),
  676. running: true,
  677. }
  678. go func() {
  679. if _, err := w.Write([]byte("one")); err != nil {
  680. t.Error(err)
  681. }
  682. }()
  683. go func() {
  684. if _, err := w.Write([]byte("two")); err != nil {
  685. t.Error(err)
  686. }
  687. }()
  688. if buf.Len() != 0 {
  689. t.Fatalf("expected buffer to be empty have: %v", buf.String())
  690. }
  691. for true {
  692. w.cond.L.Lock()
  693. if len(w.buffers) == 2 {
  694. w.cond.L.Unlock()
  695. break
  696. }
  697. w.cond.L.Unlock()
  698. }
  699. w.flush()
  700. if got := buf.String(); got != "onetwo" && got != "twoone" {
  701. t.Fatalf("expected to get %q got %q", "onetwo or twoone", got)
  702. }
  703. }
// TestWriteCoalescing_WriteAfterClose checks that writes issued after
// the coalescer's quit channel fires fail with io.EOF.
func TestWriteCoalescing_WriteAfterClose(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var buf bytes.Buffer
	w := newWriteCoalescer(&buf, 5*time.Millisecond, ctx.Done())
	// ensure 1 write works
	if _, err := w.Write([]byte("one")); err != nil {
		t.Fatal(err)
	}
	if v := buf.String(); v != "one" {
		t.Fatalf("expected buffer to be %q got %q", "one", v)
	}
	// now close and do a write, we should error
	cancel()
	if _, err := w.Write([]byte("two")); err == nil {
		t.Fatal("expected to get error for write after closing")
	} else if err != io.EOF {
		t.Fatalf("expected to get EOF got %v", err)
	}
}
// recordingFrameHeaderObserver collects every observed frame header,
// guarded by a mutex since observations may arrive concurrently.
type recordingFrameHeaderObserver struct {
	t      *testing.T
	mu     sync.Mutex
	frames []ObservedFrameHeader
}

// ObserveFrameHeader appends the observed frame header to the record.
func (r *recordingFrameHeaderObserver) ObserveFrameHeader(ctx context.Context, frm ObservedFrameHeader) {
	r.mu.Lock()
	r.frames = append(r.frames, frm)
	r.mu.Unlock()
}

// getFrames returns the frames recorded so far.
func (r *recordingFrameHeaderObserver) getFrames() []ObservedFrameHeader {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.frames
}
// TestFrameHeaderObserver checks that a FrameHeaderObserver sees the
// expected sequence of frames (SUPPORTED, READY, RESULT) for a session
// setup plus one void query, with the right body length on the result.
func TestFrameHeaderObserver(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.NumConns = 1
	observer := &recordingFrameHeaderObserver{t: t}
	cluster.FrameHeaderObserver = observer
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatal(err)
	}
	frames := observer.getFrames()
	expFrames := []frameOp{opSupported, opReady, opResult}
	if len(frames) != len(expFrames) {
		t.Fatalf("Expected to receive %d frames, instead received %d", len(expFrames), len(frames))
	}
	for i, op := range expFrames {
		if op != frames[i].Opcode {
			t.Fatalf("expected frame %d to be %v got %v", i, op, frames[i])
		}
	}
	voidResultFrame := frames[2]
	if voidResultFrame.Length != int32(4) {
		t.Fatalf("Expected to receive frame with body length 4, instead received body length %d", voidResultFrame.Length)
	}
}
  768. func NewTestServerWithAddress(addr string, t testing.TB, protocol uint8, ctx context.Context) *TestServer {
  769. laddr, err := net.ResolveTCPAddr("tcp", addr)
  770. if err != nil {
  771. t.Fatal(err)
  772. }
  773. listen, err := net.ListenTCP("tcp", laddr)
  774. if err != nil {
  775. t.Fatal(err)
  776. }
  777. headerSize := 8
  778. if protocol > protoVersion2 {
  779. headerSize = 9
  780. }
  781. ctx, cancel := context.WithCancel(ctx)
  782. srv := &TestServer{
  783. Address: listen.Addr().String(),
  784. listen: listen,
  785. t: t,
  786. protocol: protocol,
  787. headerSize: headerSize,
  788. ctx: ctx,
  789. cancel: cancel,
  790. }
  791. go srv.closeWatch()
  792. go srv.serve()
  793. return srv
  794. }
  795. func NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
  796. return NewTestServerWithAddress("127.0.0.1:0", t, protocol, ctx)
  797. }
  798. func NewSSLTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
  799. pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
  800. certPool := x509.NewCertPool()
  801. if !certPool.AppendCertsFromPEM(pem) {
  802. t.Fatalf("Failed parsing or appending certs")
  803. }
  804. mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
  805. if err != nil {
  806. t.Fatalf("could not load cert")
  807. }
  808. config := &tls.Config{
  809. Certificates: []tls.Certificate{mycert},
  810. RootCAs: certPool,
  811. }
  812. listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
  813. if err != nil {
  814. t.Fatal(err)
  815. }
  816. headerSize := 8
  817. if protocol > protoVersion2 {
  818. headerSize = 9
  819. }
  820. ctx, cancel := context.WithCancel(ctx)
  821. srv := &TestServer{
  822. Address: listen.Addr().String(),
  823. listen: listen,
  824. t: t,
  825. protocol: protocol,
  826. headerSize: headerSize,
  827. ctx: ctx,
  828. cancel: cancel,
  829. }
  830. go srv.closeWatch()
  831. go srv.serve()
  832. return srv
  833. }
// TestServer is a minimal in-process fake Cassandra server used by the
// connection tests. It speaks just enough of the native protocol to answer
// startup/options/query frames.
type TestServer struct {
	Address          string // host:port the server is listening on
	TimeoutOnStartup int32  // when non-zero, STARTUP frames are never answered (read atomically)
	t                testing.TB
	nreq             uint64 // number of request frames received (updated atomically)
	listen           net.Listener
	nKillReq         int64 // counter for "kill"/"speculative" queries (updated atomically)
	compressor       Compressor
	protocol         byte // native protocol version the server speaks
	headerSize       int  // frame header size: 8 bytes (<= v2) or 9 bytes (>= v3)
	ctx              context.Context
	cancel           context.CancelFunc
	quit             chan struct{}
	mu               sync.Mutex // guards closed
	closed           bool
}
  850. func (srv *TestServer) session() (*Session, error) {
  851. return testCluster(protoVersion(srv.protocol), srv.Address).CreateSession()
  852. }
// host returns a HostInfo for this test server's address.
// NOTE(review): the 9042 here appears to be a default port passed to
// hostInfo alongside srv.Address — confirm against hostInfo's semantics.
func (srv *TestServer) host() *HostInfo {
	hosts, err := hostInfo(srv.Address, 9042)
	if err != nil {
		srv.t.Fatal(err)
	}
	return hosts[0]
}
  860. func (srv *TestServer) closeWatch() {
  861. <-srv.ctx.Done()
  862. srv.mu.Lock()
  863. defer srv.mu.Unlock()
  864. srv.closeLocked()
  865. }
  866. func (srv *TestServer) serve() {
  867. defer srv.listen.Close()
  868. for !srv.isClosed() {
  869. conn, err := srv.listen.Accept()
  870. if err != nil {
  871. break
  872. }
  873. go func(conn net.Conn) {
  874. defer conn.Close()
  875. for !srv.isClosed() {
  876. framer, err := srv.readFrame(conn)
  877. if err != nil {
  878. if err == io.EOF {
  879. return
  880. }
  881. srv.errorLocked(err)
  882. return
  883. }
  884. atomic.AddUint64(&srv.nreq, 1)
  885. go srv.process(framer)
  886. }
  887. }(conn)
  888. }
  889. }
  890. func (srv *TestServer) isClosed() bool {
  891. srv.mu.Lock()
  892. defer srv.mu.Unlock()
  893. return srv.closed
  894. }
  895. func (srv *TestServer) closeLocked() {
  896. if srv.closed {
  897. return
  898. }
  899. srv.closed = true
  900. srv.listen.Close()
  901. srv.cancel()
  902. }
  903. func (srv *TestServer) Stop() {
  904. srv.mu.Lock()
  905. defer srv.mu.Unlock()
  906. srv.closeLocked()
  907. }
  908. func (srv *TestServer) errorLocked(err interface{}) {
  909. srv.mu.Lock()
  910. defer srv.mu.Unlock()
  911. if srv.closed {
  912. return
  913. }
  914. srv.t.Error(err)
  915. }
// process answers a single request frame, writing the response into the same
// framer and flushing it with finishWrite. Several query keywords ("kill",
// "void", "timeout", "slow", "speculative", ...) trigger canned behaviors
// used by the connection tests.
func (srv *TestServer) process(f *framer) {
	head := f.header
	if head == nil {
		srv.errorLocked("process frame with a nil header")
		return
	}
	switch head.op {
	case opStartup:
		if atomic.LoadInt32(&srv.TimeoutOnStartup) > 0 {
			// Do not respond to startup command
			// wait until we get a cancel signal
			select {
			case <-srv.ctx.Done():
				return
			}
		}
		f.writeHeader(0, opReady, head.stream)
	case opOptions:
		// SUPPORTED response with an empty option map (zero entries).
		f.writeHeader(0, opSupported, head.stream)
		f.writeShort(0)
	case opQuery:
		// Dispatch on the first word of the query string, lowercased.
		query := f.readLongString()
		first := query
		if n := strings.Index(query, " "); n > 0 {
			first = first[:n]
		}
		switch strings.ToLower(first) {
		case "kill":
			atomic.AddInt64(&srv.nKillReq, 1)
			f.writeHeader(0, opError, head.stream)
			f.writeInt(0x1001)
			f.writeString("query killed")
		case "use":
			// NOTE(review): unlike the other branches, no writeHeader call
			// here before the result body — confirm this is intentional.
			f.writeInt(resultKindKeyspace)
			f.writeString(strings.TrimSpace(query[3:]))
		case "void":
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		case "timeout":
			// Never respond; hold the stream open until server shutdown.
			<-srv.ctx.Done()
			return
		case "slow":
			// Respond asynchronously after 50ms (or never, if the server is
			// shut down first). This goroutine owns the flush, hence the
			// early return below skips the shared finishWrite at the bottom.
			go func() {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.wbuf[0] = srv.protocol | 0x80
				select {
				case <-srv.ctx.Done():
					return
				case <-time.After(50 * time.Millisecond):
					f.finishWrite()
				}
			}()
			return
		case "speculative":
			// Fail (slowly) the first 3 attempts, succeed afterwards, so
			// speculative-execution tests can observe a late success.
			atomic.AddInt64(&srv.nKillReq, 1)
			if atomic.LoadInt64(&srv.nKillReq) > 3 {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.writeString("speculative query success on the node " + srv.Address)
			} else {
				f.writeHeader(0, opError, head.stream)
				f.writeInt(0x1001)
				f.writeString("speculative error")
				rand.Seed(time.Now().UnixNano())
				<-time.After(time.Millisecond * 120)
			}
		default:
			// Any other query gets an empty void result.
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		}
	case opError:
		// Echo the request body back inside an ERROR frame.
		f.writeHeader(0, opError, head.stream)
		f.wbuf = append(f.wbuf, f.rbuf...)
	default:
		f.writeHeader(0, opError, head.stream)
		f.writeInt(0)
		f.writeString("not supported")
	}
	// Stamp the response version byte (0x80 marks a response frame) and flush.
	f.wbuf[0] = srv.protocol | 0x80
	if err := f.finishWrite(); err != nil {
		srv.errorLocked(err)
	}
}
  1000. func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {
  1001. buf := make([]byte, srv.headerSize)
  1002. head, err := readHeader(conn, buf)
  1003. if err != nil {
  1004. return nil, err
  1005. }
  1006. framer := newFramer(conn, conn, nil, srv.protocol)
  1007. err = framer.readFrame(&head)
  1008. if err != nil {
  1009. return nil, err
  1010. }
  1011. // should be a request frame
  1012. if head.version.response() {
  1013. return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version)
  1014. } else if head.version.version() != srv.protocol {
  1015. return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version())
  1016. }
  1017. return framer, nil
  1018. }