// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build all unit

package gocql

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/gocql/gocql/internal/streams"
)

const (
	defaultProto = protoVersion2
)

func TestApprove(t *testing.T) {
	tests := map[bool]bool{
		approve("org.apache.cassandra.auth.PasswordAuthenticator"):          true,
		approve("com.instaclustr.cassandra.auth.SharedSecretAuthenticator"): true,
		approve("com.datastax.bdp.cassandra.auth.DseAuthenticator"):         true,
		approve("com.apache.cassandra.auth.FakeAuthenticator"):              false,
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}

func TestJoinHostPort(t *testing.T) {
	tests := map[string]string{
		"127.0.0.1:0": JoinHostPort("127.0.0.1", 0),
		"127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142),
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}

func testCluster(proto protoVersion, addresses ...string) *ClusterConfig {
	cluster := NewCluster(addresses...)
	cluster.ProtoVersion = int(proto)
	cluster.disableControlConn = true
	return cluster
}

func TestSimple(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

func TestSSLSimple(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

func TestSSLSimpleNoClientCert(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

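// createTestSslCluster returns a ClusterConfig pointed at addr with TLS options
// for the test CA in testdata/pki, optionally including a client certificate.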
func createTestSslCluster(addr string, proto protoVersion, useClientCert bool) *ClusterConfig {
	cluster := testCluster(proto, addr)
	sslOpts := &SslOptions{
		CaPath:                 "testdata/pki/ca.crt",
		EnableHostVerification: false,
	}
	if useClientCert {
		sslOpts.CertPath = "testdata/pki/gocql.crt"
		sslOpts.KeyPath = "testdata/pki/gocql.key"
	}
	cluster.SslOpts = sslOpts
	return cluster
}

func TestClosed(t *testing.T) {
	t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis")
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	session, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	session.Close()
	if err := session.Query("void").Exec(); err != ErrSessionClosed {
		t.Fatalf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err)
	}
}

func newTestSession(proto protoVersion, addresses ...string) (*Session, error) {
	return testCluster(proto, addresses...).CreateSession()
}

func TestDNSLookupConnected(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Override the default DNS resolver and restore at the end
	failDNS = true
	defer func() { failDNS = false }()
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := NewCluster("cassandra1.invalid", srv.Address, "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve the DNS name "cassandraX.invalid"
	// and fail, but continue to connect via srv.Address
	_, err := cluster.CreateSession()
	if err != nil {
		t.Fatal("CreateSession() should have connected")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
}

func TestDNSLookupError(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Override the default DNS resolver and restore at the end
	failDNS = true
	defer func() { failDNS = false }()
	cluster := NewCluster("cassandra1.invalid", "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve each DNS name "cassandraX.invalid"
	// and fail since it could not resolve any dns entries
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned an error")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
	if err.Error() != "gocql: unable to create session: failed to resolve any of the provided hostnames" {
		t.Fatalf("Expected CreateSession() to fail with message - got '%s' instead", err.Error())
	}
}

func TestStartupTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	// Tell the server to never respond to Startup frame
	atomic.StoreInt32(&srv.TimeoutOnStartup, 1)
	startTime := time.Now()
	cluster := NewCluster(srv.Address)
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// Set very long query connection timeout
	// so we know CreateSession() is using the ConnectTimeout
	cluster.Timeout = time.Second * 5
	// Create session should timeout during connect attempt
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned a timeout error")
	}
	elapsed := time.Since(startTime)
	if elapsed > time.Second*5 {
		t.Fatal("ConnectTimeout is not respected")
	}
	if !strings.Contains(err.Error(), "no connections were made when creating the session") {
		t.Fatalf("Expected to receive no connections error - got '%s'", err)
	}
	if !strings.Contains(log.String(), "no response to connection startup within timeout") {
		t.Fatalf("Expected to receive timeout log message - got '%s'", log.String())
	}
	cancel()
}

func TestTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		select {
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		case <-ctx.Done():
		}
	}()
	if err := db.Query("kill").WithContext(ctx).Exec(); err == nil {
		t.Fatal("expected error got nil")
	}
	cancel()
	wg.Wait()
}

func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 1 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	qry := db.Query("timeout")
	// Make sure we finish the query without leftovers
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		if err := qry.Exec(); err != context.Canceled {
			t.Fatalf("expected to get context cancel error: '%v', got '%v'", context.Canceled, err)
		}
		wg.Done()
	}()
	// The query will time out after about 1 second, so cancel it after a short pause
	time.AfterFunc(20*time.Millisecond, qry.Cancel)
	wg.Wait()
}

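// testQueryObserver records per-host query metrics so tests can compare them
// against the metrics tracked on the query itself.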
type testQueryObserver struct {
	metrics map[string]*hostMetrics
	verbose bool
}

func (o *testQueryObserver) ObserveQuery(ctx context.Context, q ObservedQuery) {
	host := q.Host.ConnectAddress().String()
	o.metrics[host] = q.Metrics
	if o.verbose {
		Logger.Printf("Observed query %q. Returned %v rows, took %v on host %q with %v attempts and total latency %v. Error: %q\n",
			q.Statement, q.Rows, q.End.Sub(q.Start), host, q.Metrics.Attempts, q.Metrics.TotalLatency, q.Err)
	}
}

func (o *testQueryObserver) GetMetrics(host *HostInfo) *hostMetrics {
	return o.metrics[host.ConnectAddress().String()]
}

// TestQueryRetry will test to make sure that gocql will execute
// the exact number of retry queries designated by the user.
func TestQueryRetry(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		}
	}()
	rt := &SimpleRetryPolicy{NumRetries: 1}
	qry := db.Query("kill").RetryPolicy(rt)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	requests := atomic.LoadInt64(&srv.nKillReq)
	attempts := qry.Attempts()
	if requests != int64(attempts) {
		t.Fatalf("expected requests %v to match query attempts %v", requests, attempts)
	}
	// the query will only be attempted once, but is being retried
	if requests != int64(rt.NumRetries) {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, requests-1)
	}
}

func TestQueryMultinodeWithMetrics(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
		os.Stdout.WriteString(log.String())
	}()
	// Build a 3 node cluster to test host metric mapping
	var nodes []*TestServer
	var addresses = []string{
		"127.0.0.1",
		"127.0.0.2",
		"127.0.0.3",
	}
	// Can do with 1 context for all servers
	ctx := context.Background()
	for _, ip := range addresses {
		srv := NewTestServerWithAddress(ip+":0", t, defaultProto, ctx)
		defer srv.Stop()
		nodes = append(nodes, srv)
	}
	db, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// 1 retry per host
	rt := &SimpleRetryPolicy{NumRetries: 3}
	observer := &testQueryObserver{metrics: make(map[string]*hostMetrics), verbose: false}
	qry := db.Query("kill").RetryPolicy(rt).Observer(observer)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	for i, ip := range addresses {
		host := &HostInfo{connectAddress: net.ParseIP(ip)}
		queryMetric := qry.getHostMetrics(host)
		observedMetrics := observer.GetMetrics(host)
		requests := int(atomic.LoadInt64(&nodes[i].nKillReq))
		hostAttempts := queryMetric.Attempts
		if requests != hostAttempts {
			t.Fatalf("expected requests %v to match query attempts %v", requests, hostAttempts)
		}
		if hostAttempts != observedMetrics.Attempts {
			t.Fatalf("expected observed attempts %v to match query attempts %v on host %v", observedMetrics.Attempts, hostAttempts, ip)
		}
		hostLatency := queryMetric.TotalLatency
		observedLatency := observedMetrics.TotalLatency
		if hostLatency != observedLatency {
			t.Fatalf("expected observed latency %v to match query latency %v on host %v", observedLatency, hostLatency, ip)
		}
	}
	// the query will only be attempted once, but is being retried
	attempts := qry.Attempts()
	if attempts != rt.NumRetries {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, attempts)
	}
}

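// testRetryPolicy allows a retry as long as the number of attempts made so far
// is at or below NumRetries, and always reports a retry type of Retry.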
type testRetryPolicy struct {
	NumRetries int
}

func (t *testRetryPolicy) Attempt(qry RetryableQuery) bool {
	return qry.Attempts() <= t.NumRetries
}

func (t *testRetryPolicy) GetRetryType(err error) RetryType {
	return Retry
}

func TestSpeculativeExecution(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
		os.Stdout.WriteString(log.String())
	}()
	// Build a 3 node cluster
	var nodes []*TestServer
	var addresses = []string{
		"127.0.0.1",
		"127.0.0.2",
		"127.0.0.3",
	}
	// Can do with 1 context for all servers
	ctx := context.Background()
	for _, ip := range addresses {
		srv := NewTestServerWithAddress(ip+":0", t, defaultProto, ctx)
		defer srv.Stop()
		nodes = append(nodes, srv)
	}
	db, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// Create a test retry policy, 6 retries will cover 2 executions
	rt := &testRetryPolicy{NumRetries: 8}
	// test Speculative policy with 1 additional execution
	sp := &SimpleSpeculativeExecution{NumAttempts: 1, TimeoutDelay: 200 * time.Millisecond}
	// Build the query
	qry := db.Query("speculative").RetryPolicy(rt).SetSpeculativeExecutionPolicy(sp).Idempotent(true)
	// Execute the query and close, check that it doesn't error out
	if err := qry.Exec(); err != nil {
		t.Errorf("The query failed with '%v'!\n", err)
	}
	requests1 := atomic.LoadInt64(&nodes[0].nKillReq)
	requests2 := atomic.LoadInt64(&nodes[1].nKillReq)
	requests3 := atomic.LoadInt64(&nodes[2].nKillReq)
	// Spec Attempts == 1, so expecting to see only 1 regular + 1 speculative = 2 nodes attempted
	if requests1 != 0 && requests2 != 0 && requests3 != 0 {
		t.Error("error: all 3 nodes were attempted, should have been only 2")
	}
	// Only the 4th request will generate results, so
	if requests1 != 4 && requests2 != 4 && requests3 != 4 {
		t.Error("error: none of 3 nodes was attempted 4 times!")
	}
	// "speculative" query will succeed on one arbitrary node after 4 attempts, so
	// expecting to see 4 (on successful node) + not more than 2 (as cancelled on another node) == 6
	if requests1+requests2+requests3 > 6 {
		t.Errorf("error: expected to see 6 attempts, got %v\n", requests1+requests2+requests3)
	}
}

func TestStreams_Protocol1(t *testing.T) {
	srv := NewTestServer(t, protoVersion1, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion1, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	for i := 1; i < 128; i++ {
		// here we're just validating that if we send NumStream requests we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := db.Query("void").Exec(); err != nil {
				t.Error(err)
			}
		}()
	}
	wg.Wait()
}

func TestStreams_Protocol3(t *testing.T) {
	srv := NewTestServer(t, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion3, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	for i := 1; i < 32768; i++ {
		// the test server processes each conn synchronously
		// here we're just validating that if we send NumStream requests we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		if err = db.Query("void").Exec(); err != nil {
			t.Fatal(err)
		}
	}
}

func BenchmarkProtocolV3(b *testing.B) {
	srv := NewTestServer(b, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if err = db.Query("void").Exec(); err != nil {
			b.Fatal(err)
		}
	}
}

// This tests that the policy connection pool handles SSL correctly
func TestPolicyConnPoolSSL(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := createTestSslCluster(srv.Address, defaultProto, true)
	cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("failed to create new session: %v", err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("query failed due to error: %v", err)
	}
	db.Close()
	// wait for the pool to drain
	time.Sleep(100 * time.Millisecond)
	size := db.pool.Size()
	if size != 0 {
		t.Fatalf("connection pool did not drain, still contains %d connections", size)
	}
}

func TestQueryTimeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	ch := make(chan error, 1)
	go func() {
		err := db.Query("timeout").Exec()
		if err != nil {
			ch <- err
			return
		}
		t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout)
	}()
	select {
	case err := <-ch:
		if err != ErrTimeoutNoResponse {
			t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err)
		}
	case <-time.After(40*time.Millisecond + db.cfg.Timeout):
		// ensure that the query goroutines have been scheduled
		t.Fatalf("query did not timeout after %v", db.cfg.Timeout)
	}
}

func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3, context.Background())
	defer srv.Stop()
	cluster := testCluster(3, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}

func TestQueryTimeoutReuseStream(t *testing.T) {
	t.Skip("no longer tests anything")
	// TODO(zariel): move this to conn test, we really just want to check what
	// happens when a conn is
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	db.Query("slow").Exec()
	err = db.Query("void").Exec()
	if err != nil {
		t.Fatal(err)
	}
}

func TestQueryTimeoutClose(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1000 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	ch := make(chan error)
	go func() {
		err := db.Query("timeout").Exec()
		ch <- err
	}()
	// ensure that the above goroutine gets scheduled
	time.Sleep(50 * time.Millisecond)
	db.Close()
	select {
	case err = <-ch:
	case <-time.After(1 * time.Second):
		t.Fatal("timed out waiting to get a response once cluster is closed")
	}
	if err != ErrConnectionClosed {
		t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err)
	}
}

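// TestStream0 hand-builds a response frame on stream 0 and checks that
// Conn.recv rejects it, since the driver never issues requests on stream 0.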
func TestStream0(t *testing.T) {
	// TODO: replace this with type check
	const expErr = "gocql: received unexpected frame on stream 0"
	var buf bytes.Buffer
	f := newFramer(nil, &buf, nil, protoVersion4)
	f.writeHeader(0, opResult, 0)
	f.writeInt(resultKindVoid)
	f.wbuf[0] |= 0x80
	if err := f.finishWrite(); err != nil {
		t.Fatal(err)
	}
	conn := &Conn{
		r:       bufio.NewReader(&buf),
		streams: streams.New(protoVersion4),
	}
	err := conn.recv()
	if err == nil {
		t.Fatal("expected to get an error on stream 0")
	} else if !strings.HasPrefix(err.Error(), expErr) {
		t.Fatalf("expected to get error prefix %q got %q", expErr, err.Error())
	}
}

func TestConnClosedBlocked(t *testing.T) {
	t.Skip("FLAKE: skipping test flake see https://github.com/gocql/gocql/issues/1088")
	// issue 664
	const proto = 3
	srv := NewTestServer(t, proto, context.Background())
	defer srv.Stop()
	errorHandler := connErrorHandlerFn(func(conn *Conn, err error, closed bool) {
		t.Log(err)
	})
	s, err := srv.session()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	conn, err := s.connect(srv.host(), errorHandler)
	if err != nil {
		t.Fatal(err)
	}
	if err := conn.conn.Close(); err != nil {
		t.Fatal(err)
	}
	// This will block indefinitely if #664 is not fixed
	err = conn.executeQuery(&Query{stmt: "void"}).Close()
	if !strings.HasSuffix(err.Error(), "use of closed network connection") {
		t.Fatalf("expected to get use of closed network connection error got: %v\n", err)
	}
}

func TestContext_Timeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 5 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err = db.Query("timeout").WithContext(ctx).Exec()
	if err != context.Canceled {
		t.Fatalf("expected to get context cancel error: %v got %v", context.Canceled, err)
	}
}

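// TestWriteCoalescing checks that writes queued on a writeCoalescer are held
// back until flush and then written out to the underlying writer as one batch.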
func TestWriteCoalescing(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var buf bytes.Buffer
	w := &writeCoalescer{
		w:       &buf,
		writeCh: make(chan struct{}),
		cond:    sync.NewCond(&sync.Mutex{}),
		quit:    ctx.Done(),
		running: true,
	}
	go func() {
		if _, err := w.Write([]byte("one")); err != nil {
			t.Error(err)
		}
	}()
	go func() {
		if _, err := w.Write([]byte("two")); err != nil {
			t.Error(err)
		}
	}()
	if buf.Len() != 0 {
		t.Fatalf("expected buffer to be empty have: %v", buf.String())
	}
	// busy-wait until both writes have been queued on the coalescer
	for {
		w.cond.L.Lock()
		if len(w.buffers) == 2 {
			w.cond.L.Unlock()
			break
		}
		w.cond.L.Unlock()
	}
	w.flush()
	if got := buf.String(); got != "onetwo" && got != "twoone" {
		t.Fatalf("expected to get %q got %q", "onetwo or twoone", got)
	}
}

func TestWriteCoalescing_WriteAfterClose(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var buf bytes.Buffer
	w := newWriteCoalescer(&buf, 5*time.Millisecond, ctx.Done())
	// ensure 1 write works
	if _, err := w.Write([]byte("one")); err != nil {
		t.Fatal(err)
	}
	if v := buf.String(); v != "one" {
		t.Fatalf("expected buffer to be %q got %q", "one", v)
	}
	// now close and do a write, we should error
	cancel()
	if _, err := w.Write([]byte("two")); err == nil {
		t.Fatal("expected to get error for write after closing")
	} else if err != io.EOF {
		t.Fatalf("expected to get EOF got %v", err)
	}
}

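// recordingFrameHeaderObserver collects every observed frame header so tests
// can assert on the sequence of frames a connection received.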
type recordingFrameHeaderObserver struct {
	t      *testing.T
	mu     sync.Mutex
	frames []ObservedFrameHeader
}

func (r *recordingFrameHeaderObserver) ObserveFrameHeader(ctx context.Context, frm ObservedFrameHeader) {
	r.mu.Lock()
	r.frames = append(r.frames, frm)
	r.mu.Unlock()
}

func (r *recordingFrameHeaderObserver) getFrames() []ObservedFrameHeader {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.frames
}

func TestFrameHeaderObserver(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.NumConns = 1
	observer := &recordingFrameHeaderObserver{t: t}
	cluster.FrameHeaderObserver = observer
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatal(err)
	}
	frames := observer.getFrames()
	expFrames := []frameOp{opSupported, opReady, opResult}
	if len(frames) != len(expFrames) {
		t.Fatalf("Expected to receive %d frames, instead received %d", len(expFrames), len(frames))
	}
	for i, op := range expFrames {
		if op != frames[i].Opcode {
			t.Fatalf("expected frame %d to be %v got %v", i, op, frames[i])
		}
	}
	voidResultFrame := frames[2]
	if voidResultFrame.Length != int32(4) {
		t.Fatalf("Expected to receive frame with body length 4, instead received body length %d", voidResultFrame.Length)
	}
}

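// NewTestServerWithAddress starts a TestServer listening on the given address
// (use port 0 to pick a free port) and begins serving in the background.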
func NewTestServerWithAddress(addr string, t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	laddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		t.Fatal(err)
	}
	listen, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		t.Fatal(err)
	}
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}
	ctx, cancel := context.WithCancel(ctx)
	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		ctx:        ctx,
		cancel:     cancel,
	}
	go srv.closeWatch()
	go srv.serve()
	return srv
}

func NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	return NewTestServerWithAddress("127.0.0.1:0", t, protocol, ctx)
}

func NewSSLTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
	if err != nil {
		t.Fatalf("could not read ca.crt: %v", err)
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(pem) {
		t.Fatalf("Failed parsing or appending certs")
	}
	mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
	if err != nil {
		t.Fatalf("could not load cert")
	}
	config := &tls.Config{
		Certificates: []tls.Certificate{mycert},
		RootCAs:      certPool,
	}
	listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
	if err != nil {
		t.Fatal(err)
	}
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}
	ctx, cancel := context.WithCancel(ctx)
	srv := &TestServer{
		Address:    listen.Addr().String(),
		listen:     listen,
		t:          t,
		protocol:   protocol,
		headerSize: headerSize,
		ctx:        ctx,
		cancel:     cancel,
	}
	go srv.closeWatch()
	go srv.serve()
	return srv
}

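// TestServer is a minimal in-process fake of a Cassandra node: it accepts
// connections and answers each request frame with a canned response keyed off
// the query text (see process below).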
type TestServer struct {
	Address          string
	TimeoutOnStartup int32
	t                testing.TB
	nreq             uint64
	listen           net.Listener
	nKillReq         int64
	compressor       Compressor
	protocol         byte
	headerSize       int
	ctx              context.Context
	cancel           context.CancelFunc
	quit             chan struct{}
	mu               sync.Mutex
	closed           bool
}

func (srv *TestServer) session() (*Session, error) {
	return testCluster(protoVersion(srv.protocol), srv.Address).CreateSession()
}

func (srv *TestServer) host() *HostInfo {
	hosts, err := hostInfo(srv.Address, 9042)
	if err != nil {
		srv.t.Fatal(err)
	}
	return hosts[0]
}

func (srv *TestServer) closeWatch() {
	<-srv.ctx.Done()
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeLocked()
}

func (srv *TestServer) serve() {
	defer srv.listen.Close()
	for !srv.isClosed() {
		conn, err := srv.listen.Accept()
		if err != nil {
			break
		}
		go func(conn net.Conn) {
			defer conn.Close()
			for !srv.isClosed() {
				framer, err := srv.readFrame(conn)
				if err != nil {
					if err == io.EOF {
						return
					}
					srv.errorLocked(err)
					return
				}
				atomic.AddUint64(&srv.nreq, 1)
				go srv.process(framer)
			}
		}(conn)
	}
}

func (srv *TestServer) isClosed() bool {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	return srv.closed
}

func (srv *TestServer) closeLocked() {
	if srv.closed {
		return
	}
	srv.closed = true
	srv.listen.Close()
	srv.cancel()
}

func (srv *TestServer) Stop() {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeLocked()
}

func (srv *TestServer) errorLocked(err interface{}) {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	if srv.closed {
		return
	}
	srv.t.Error(err)
}

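// process writes a canned response for a single request frame. For opQuery the
// first word of the statement ("kill", "use", "void", "timeout", "slow",
// "speculative", ...) selects the behaviour being simulated.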
func (srv *TestServer) process(f *framer) {
	head := f.header
	if head == nil {
		srv.errorLocked("process frame with a nil header")
		return
	}
	switch head.op {
	case opStartup:
		if atomic.LoadInt32(&srv.TimeoutOnStartup) > 0 {
			// Do not respond to startup command
			// wait until we get a cancel signal
			select {
			case <-srv.ctx.Done():
				return
			}
		}
		f.writeHeader(0, opReady, head.stream)
	case opOptions:
		f.writeHeader(0, opSupported, head.stream)
		f.writeShort(0)
	case opQuery:
		query := f.readLongString()
		first := query
		if n := strings.Index(query, " "); n > 0 {
			first = first[:n]
		}
		switch strings.ToLower(first) {
		case "kill":
			atomic.AddInt64(&srv.nKillReq, 1)
			f.writeHeader(0, opError, head.stream)
			f.writeInt(0x1001)
			f.writeString("query killed")
		case "use":
			f.writeInt(resultKindKeyspace)
			f.writeString(strings.TrimSpace(query[3:]))
		case "void":
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		case "timeout":
			<-srv.ctx.Done()
			return
		case "slow":
			go func() {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.wbuf[0] = srv.protocol | 0x80
				select {
				case <-srv.ctx.Done():
					return
				case <-time.After(50 * time.Millisecond):
					f.finishWrite()
				}
			}()
			return
		case "speculative":
			atomic.AddInt64(&srv.nKillReq, 1)
			if atomic.LoadInt64(&srv.nKillReq) > 3 {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.writeString("speculative query success on the node " + srv.Address)
			} else {
				f.writeHeader(0, opError, head.stream)
				f.writeInt(0x1001)
				f.writeString("speculative error")
				rand.Seed(time.Now().UnixNano())
				<-time.After(time.Millisecond * 120)
			}
		default:
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		}
	case opError:
		f.writeHeader(0, opError, head.stream)
		f.wbuf = append(f.wbuf, f.rbuf...)
	default:
		f.writeHeader(0, opError, head.stream)
		f.writeInt(0)
		f.writeString("not supported")
	}
	f.wbuf[0] = srv.protocol | 0x80
	if err := f.finishWrite(); err != nil {
		srv.errorLocked(err)
	}
}

func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {
	buf := make([]byte, srv.headerSize)
	head, err := readHeader(conn, buf)
	if err != nil {
		return nil, err
	}
	framer := newFramer(conn, conn, nil, srv.protocol)
	err = framer.readFrame(&head)
	if err != nil {
		return nil, err
	}
	// should be a request frame
	if head.version.response() {
		return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version)
	} else if head.version.version() != srv.protocol {
		return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version())
	}
	return framer, nil
}