conn_test.go

// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build all unit

package gocql

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/gocql/gocql/internal/streams"
)

const (
	defaultProto = protoVersion2
)

func TestApprove(t *testing.T) {
	tests := map[bool]bool{
		approve("org.apache.cassandra.auth.PasswordAuthenticator"): true,
		approve("com.instaclustr.cassandra.auth.SharedSecretAuthenticator"): true,
		approve("com.datastax.bdp.cassandra.auth.DseAuthenticator"): true,
		approve("com.apache.cassandra.auth.FakeAuthenticator"): false,
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}

func TestJoinHostPort(t *testing.T) {
	tests := map[string]string{
		"127.0.0.1:0": JoinHostPort("127.0.0.1", 0),
		"127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0),
		"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142),
	}
	for k, v := range tests {
		if k != v {
			t.Fatalf("expected '%v', got '%v'", k, v)
		}
	}
}
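// testCluster returns a ClusterConfig for the given addresses with the
// control connection disabled, so tests only talk to the in-process
// TestServer instances.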
func testCluster(proto protoVersion, addresses ...string) *ClusterConfig {
	cluster := NewCluster(addresses...)
	cluster.ProtoVersion = int(proto)
	cluster.disableControlConn = true
	return cluster
}

func TestSimple(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

func TestSSLSimple(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}

func TestSSLSimpleNoClientCert(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("0x%x: %v", defaultProto, err)
	}
}
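// createTestSslCluster builds a cluster config that trusts the test CA under
// testdata/pki and, if useClientCert is set, presents the gocql client
// certificate as well.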
func createTestSslCluster(addr string, proto protoVersion, useClientCert bool) *ClusterConfig {
	cluster := testCluster(proto, addr)
	sslOpts := &SslOptions{
		CaPath: "testdata/pki/ca.crt",
		EnableHostVerification: false,
	}
	if useClientCert {
		sslOpts.CertPath = "testdata/pki/gocql.crt"
		sslOpts.KeyPath = "testdata/pki/gocql.key"
	}
	cluster.SslOpts = sslOpts
	return cluster
}

func TestClosed(t *testing.T) {
	t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis")
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	session, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("0x%x: NewCluster: %v", defaultProto, err)
	}
	session.Close()
	if err := session.Query("void").Exec(); err != ErrSessionClosed {
		t.Fatalf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err)
	}
}
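// newTestSession creates a session against the given addresses using the
// default test cluster configuration.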
func newTestSession(proto protoVersion, addresses ...string) (*Session, error) {
	return testCluster(proto, addresses...).CreateSession()
}

func TestDNSLookupConnected(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Override the default DNS resolver and restore at the end
	failDNS = true
	defer func() { failDNS = false }()
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := NewCluster("cassandra1.invalid", srv.Address, "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve the DNS name "cassandraX.invalid"
	// and fail, but continue to connect via srv.Address
	_, err := cluster.CreateSession()
	if err != nil {
		t.Fatal("CreateSession() should have connected")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
}

func TestDNSLookupError(t *testing.T) {
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	// Override the default DNS resolver and restore at the end
	failDNS = true
	defer func() { failDNS = false }()
	cluster := NewCluster("cassandra1.invalid", "cassandra2.invalid")
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// CreateSession() should attempt to resolve each DNS name "cassandraX.invalid"
	// and fail since it could not resolve any dns entries
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned an error")
	}
	if !strings.Contains(log.String(), "gocql: dns error") {
		t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String())
	}
	if err.Error() != "gocql: unable to create session: failed to resolve any of the provided hostnames" {
		t.Fatalf("Expected CreateSession() to fail with message - got '%s' instead", err.Error())
	}
}

func TestStartupTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	log := &testLogger{}
	Logger = log
	defer func() {
		Logger = &defaultLogger{}
	}()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	// Tell the server to never respond to Startup frame
	atomic.StoreInt32(&srv.TimeoutOnStartup, 1)
	startTime := time.Now()
	cluster := NewCluster(srv.Address)
	cluster.ProtoVersion = int(defaultProto)
	cluster.disableControlConn = true
	// Set very long query connection timeout
	// so we know CreateSession() is using the ConnectTimeout
	cluster.Timeout = time.Second * 5
	// Create session should timeout during connect attempt
	_, err := cluster.CreateSession()
	if err == nil {
		t.Fatal("CreateSession() should have returned a timeout error")
	}
	elapsed := time.Since(startTime)
	if elapsed > time.Second*5 {
		t.Fatal("ConnectTimeout is not respected")
	}
	if !strings.Contains(err.Error(), "no connections were made when creating the session") {
		t.Fatalf("Expected to receive no connections error - got '%s'", err)
	}
	if !strings.Contains(log.String(), "no response to connection startup within timeout") {
		t.Fatalf("Expected to receive timeout log message - got '%s'", log.String())
	}
	cancel()
}

func TestTimeout(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		select {
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		case <-ctx.Done():
		}
	}()
	if err := db.Query("kill").WithContext(ctx).Exec(); err == nil {
		t.Fatal("expected error got nil")
	}
	cancel()
	wg.Wait()
}

func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 1 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	qry := db.Query("timeout")
	// Make sure we finish the query without leftovers
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := qry.Exec(); err != context.Canceled {
			t.Errorf("expected to get context cancel error: '%v', got '%v'", context.Canceled, err)
		}
	}()
	// The query will timeout after about 1 second, so cancel it after a short pause
	time.AfterFunc(20*time.Millisecond, qry.Cancel)
	wg.Wait()
}
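// testQueryObserver records the latest query metrics per host so tests can
// compare them against the metrics tracked by the query itself.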
type testQueryObserver struct {
	metrics map[string]*queryMetrics
	verbose bool
}

func (o *testQueryObserver) ObserveQuery(ctx context.Context, q ObservedQuery) {
	host := q.Host.ConnectAddress().String()
	o.metrics[host] = q.Metrics
	if o.verbose {
		Logger.Printf("Observed query %q. Returned %v rows, took %v on host %q with %v attempts and total latency %v. Error: %q\n",
			q.Statement, q.Rows, q.End.Sub(q.Start), host, q.Metrics.Attempts, q.Metrics.TotalLatency, q.Err)
	}
}

func (o *testQueryObserver) GetMetrics(host *HostInfo) *queryMetrics {
	return o.metrics[host.ConnectAddress().String()]
}

// TestQueryRetry will test to make sure that gocql will execute
// the exact number of retries designated by the user.
func TestQueryRetry(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	srv := NewTestServer(t, defaultProto, ctx)
	defer srv.Stop()
	db, err := newTestSession(defaultProto, srv.Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	go func() {
		select {
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Second):
			t.Errorf("no timeout")
		}
	}()
	rt := &SimpleRetryPolicy{NumRetries: 1}
	qry := db.Query("kill").RetryPolicy(rt)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	requests := atomic.LoadInt64(&srv.nKillReq)
	attempts := qry.Attempts()
	if requests != int64(attempts) {
		t.Fatalf("expected requests %v to match query attempts %v", requests, attempts)
	}
	// the query will only be attempted once, but is being retried
	if requests != int64(rt.NumRetries) {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, requests-1)
	}
}

func TestQueryMultinodeWithMetrics(t *testing.T) {
	// Build a 3 node cluster to test host metric mapping
	var nodes []*TestServer
	var addresses = []string{
		"127.0.0.1",
		"127.0.0.2",
		"127.0.0.3",
	}
	// Can do with 1 context for all servers
	ctx := context.Background()
	for _, ip := range addresses {
		srv := NewTestServerWithAddress(ip+":0", t, defaultProto, ctx)
		defer srv.Stop()
		nodes = append(nodes, srv)
	}
	db, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	// 1 retry per host
	rt := &SimpleRetryPolicy{NumRetries: 3}
	observer := &testQueryObserver{metrics: make(map[string]*queryMetrics), verbose: false}
	qry := db.Query("kill").RetryPolicy(rt).Observer(observer)
	if err := qry.Exec(); err == nil {
		t.Fatalf("expected error")
	}
	for i, ip := range addresses {
		host := &HostInfo{connectAddress: net.ParseIP(ip)}
		observedMetrics := observer.GetMetrics(host)
		requests := int(atomic.LoadInt64(&nodes[i].nKillReq))
		hostAttempts := qry.metrics[ip].Attempts
		if requests != hostAttempts {
			t.Fatalf("expected requests %v to match query attempts %v", requests, hostAttempts)
		}
		if hostAttempts != observedMetrics.Attempts {
			t.Fatalf("expected observed attempts %v to match query attempts %v on host %v", observedMetrics.Attempts, hostAttempts, ip)
		}
		hostLatency := qry.metrics[ip].TotalLatency
		observedLatency := observedMetrics.TotalLatency
		if hostLatency != observedLatency {
			t.Fatalf("expected observed latency %v to match query latency %v on host %v", observedLatency, hostLatency, ip)
		}
	}
	// the query will only be attempted once, but is being retried
	attempts := qry.Attempts()
	if attempts != rt.NumRetries {
		t.Fatalf("failed to retry the query %v time(s). Query executed %v times", rt.NumRetries, attempts)
	}
}

func TestStreams_Protocol1(t *testing.T) {
	srv := NewTestServer(t, protoVersion1, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion1, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	var wg sync.WaitGroup
	for i := 1; i < 128; i++ {
		// here we're just validating that if we send NumStreams requests we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := db.Query("void").Exec(); err != nil {
				t.Error(err)
			}
		}()
	}
	wg.Wait()
}

func TestStreams_Protocol3(t *testing.T) {
	srv := NewTestServer(t, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := testCluster(protoVersion3, srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	for i := 1; i < 32768; i++ {
		// the test server processes each conn synchronously
		// here we're just validating that if we send NumStreams requests we get
		// a response for every stream and the lengths for the queries are set
		// correctly.
		if err = db.Query("void").Exec(); err != nil {
			t.Fatal(err)
		}
	}
}

func BenchmarkProtocolV3(b *testing.B) {
	srv := NewTestServer(b, protoVersion3, context.Background())
	defer srv.Stop()
	// TODO: these are more like session tests and should instead operate
	// on a single Conn
	cluster := NewCluster(srv.Address)
	cluster.NumConns = 1
	cluster.ProtoVersion = 3
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if err = db.Query("void").Exec(); err != nil {
			b.Fatal(err)
		}
	}
}

// This tests that the policy connection pool handles SSL correctly
func TestPolicyConnPoolSSL(t *testing.T) {
	srv := NewSSLTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := createTestSslCluster(srv.Address, defaultProto, true)
	cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("failed to create new session: %v", err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatalf("query failed due to error: %v", err)
	}
	db.Close()
	// wait for the pool to drain
	time.Sleep(100 * time.Millisecond)
	size := db.pool.Size()
	if size != 0 {
		t.Fatalf("connection pool did not drain, still contains %d connections", size)
	}
}

func TestQueryTimeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	ch := make(chan error, 1)
	go func() {
		err := db.Query("timeout").Exec()
		if err != nil {
			ch <- err
			return
		}
		t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout)
	}()
	select {
	case err := <-ch:
		if err != ErrTimeoutNoResponse {
			t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err)
		}
	case <-time.After(10*time.Millisecond + db.cfg.Timeout):
		// ensure that the query goroutines have been scheduled
		t.Fatalf("query did not timeout after %v", db.cfg.Timeout)
	}
}

func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3, context.Background())
	defer srv.Stop()
	cluster := testCluster(3, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}

func TestQueryTimeoutReuseStream(t *testing.T) {
	t.Skip("no longer tests anything")
	// TODO(zariel): move this to conn test, we really just want to check what
	// happens when a conn is
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()
	db.Query("slow").Exec()
	err = db.Query("void").Exec()
	if err != nil {
		t.Fatal(err)
	}
}

func TestQueryTimeoutClose(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 1000 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatalf("NewCluster: %v", err)
	}
	ch := make(chan error)
	go func() {
		err := db.Query("timeout").Exec()
		ch <- err
	}()
	// ensure that the above goroutine gets scheduled
	time.Sleep(50 * time.Millisecond)
	db.Close()
	select {
	case err = <-ch:
	case <-time.After(1 * time.Second):
		t.Fatal("timed out waiting to get a response once cluster is closed")
	}
	if err != ErrConnectionClosed {
		t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err)
	}
}

func TestStream0(t *testing.T) {
	// TODO: replace this with type check
	const expErr = "gocql: received unexpected frame on stream 0"
	var buf bytes.Buffer
	f := newFramer(nil, &buf, nil, protoVersion4)
	f.writeHeader(0, opResult, 0)
	f.writeInt(resultKindVoid)
	f.wbuf[0] |= 0x80
	if err := f.finishWrite(); err != nil {
		t.Fatal(err)
	}
	conn := &Conn{
		r: bufio.NewReader(&buf),
		streams: streams.New(protoVersion4),
	}
	err := conn.recv()
	if err == nil {
		t.Fatal("expected to get an error on stream 0")
	} else if !strings.HasPrefix(err.Error(), expErr) {
		t.Fatalf("expected to get error prefix %q got %q", expErr, err.Error())
	}
}

func TestConnClosedBlocked(t *testing.T) {
	t.Skip("FLAKE: skipping test flake see https://github.com/gocql/gocql/issues/1088")
	// issue 664
	const proto = 3
	srv := NewTestServer(t, proto, context.Background())
	defer srv.Stop()
	errorHandler := connErrorHandlerFn(func(conn *Conn, err error, closed bool) {
		t.Log(err)
	})
	s, err := srv.session()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	conn, err := s.connect(srv.host(), errorHandler)
	if err != nil {
		t.Fatal(err)
	}
	if err := conn.conn.Close(); err != nil {
		t.Fatal(err)
	}
	// This will block indefinitely if #664 is not fixed
	err = conn.executeQuery(&Query{stmt: "void"}).Close()
	if !strings.HasSuffix(err.Error(), "use of closed network connection") {
		t.Fatalf("expected to get use of closed network connection error got: %v\n", err)
	}
}

func TestContext_Timeout(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.Timeout = 5 * time.Second
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err = db.Query("timeout").WithContext(ctx).Exec()
	if err != context.Canceled {
		t.Fatalf("expected to get context cancel error: %v got %v", context.Canceled, err)
	}
}
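// TestWriteCoalescing verifies that writes issued to the writeCoalescer are
// buffered and only reach the underlying writer once flush() is called.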
func TestWriteCoalescing(t *testing.T) {
	var buf bytes.Buffer
	w := &writeCoalescer{
		w: &buf,
		cond: sync.NewCond(&sync.Mutex{}),
	}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		wg.Done()
		if _, err := w.Write([]byte("one")); err != nil {
			t.Error(err)
		}
	}()
	wg.Wait()
	wg.Add(1)
	go func() {
		wg.Done()
		if _, err := w.Write([]byte("two")); err != nil {
			t.Error(err)
		}
	}()
	wg.Wait()
	if buf.Len() != 0 {
		t.Fatalf("expected buffer to be empty have: %v", buf.String())
	}
	w.flush()
	if got := buf.String(); got != "onetwo" {
		t.Fatalf("expected to get %q got %q", "onetwo", got)
	}
}
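// recordingFrameHeaderObserver collects every observed frame header so tests
// can assert on the frames exchanged with the test server.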
type recordingFrameHeaderObserver struct {
	t *testing.T
	mu sync.Mutex
	frames []ObservedFrameHeader
}

func (r *recordingFrameHeaderObserver) ObserveFrameHeader(ctx context.Context, frm ObservedFrameHeader) {
	r.mu.Lock()
	r.frames = append(r.frames, frm)
	r.mu.Unlock()
}

func (r *recordingFrameHeaderObserver) getFrames() []ObservedFrameHeader {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.frames
}

func TestFrameHeaderObserver(t *testing.T) {
	srv := NewTestServer(t, defaultProto, context.Background())
	defer srv.Stop()
	cluster := testCluster(defaultProto, srv.Address)
	cluster.NumConns = 1
	observer := &recordingFrameHeaderObserver{t: t}
	cluster.FrameHeaderObserver = observer
	db, err := cluster.CreateSession()
	if err != nil {
		t.Fatal(err)
	}
	if err := db.Query("void").Exec(); err != nil {
		t.Fatal(err)
	}
	frames := observer.getFrames()
	if len(frames) != 2 {
		t.Fatalf("Expected to receive 2 frames, instead received %d", len(frames))
	}
	readyFrame := frames[0]
	if readyFrame.Opcode != frameOp(opReady) {
		t.Fatalf("Expected to receive ready frame, instead received frame of opcode %d", readyFrame.Opcode)
	}
	voidResultFrame := frames[1]
	if voidResultFrame.Opcode != frameOp(opResult) {
		t.Fatalf("Expected to receive result frame, instead received frame of opcode %d", voidResultFrame.Opcode)
	}
	if voidResultFrame.Length != int32(4) {
		t.Fatalf("Expected to receive frame with body length 4, instead received body length %d", voidResultFrame.Length)
	}
}
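// NewTestServerWithAddress starts a fake Cassandra server listening on the
// given address and speaking the requested protocol version. The server shuts
// down when the supplied context is cancelled or Stop is called.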
func NewTestServerWithAddress(addr string, t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	laddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		t.Fatal(err)
	}
	listen, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		t.Fatal(err)
	}
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}
	ctx, cancel := context.WithCancel(ctx)
	srv := &TestServer{
		Address: listen.Addr().String(),
		listen: listen,
		t: t,
		protocol: protocol,
		headerSize: headerSize,
		ctx: ctx,
		cancel: cancel,
	}
	go srv.closeWatch()
	go srv.serve()
	return srv
}
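// NewTestServer starts a test server bound to 127.0.0.1 on a random port.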
func NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	return NewTestServerWithAddress("127.0.0.1:0", t, protocol, ctx)
}
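// NewSSLTestServer is like NewTestServer but wraps the listener in TLS using
// the server certificate and CA under testdata/pki.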
func NewSSLTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {
	pem, err := ioutil.ReadFile("testdata/pki/ca.crt")
	if err != nil {
		t.Fatalf("could not read ca cert: %v", err)
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(pem) {
		t.Fatalf("Failed parsing or appending certs")
	}
	mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key")
	if err != nil {
		t.Fatalf("could not load cert")
	}
	config := &tls.Config{
		Certificates: []tls.Certificate{mycert},
		RootCAs: certPool,
	}
	listen, err := tls.Listen("tcp", "127.0.0.1:0", config)
	if err != nil {
		t.Fatal(err)
	}
	headerSize := 8
	if protocol > protoVersion2 {
		headerSize = 9
	}
	ctx, cancel := context.WithCancel(ctx)
	srv := &TestServer{
		Address: listen.Addr().String(),
		listen: listen,
		t: t,
		protocol: protocol,
		headerSize: headerSize,
		ctx: ctx,
		cancel: cancel,
	}
	go srv.closeWatch()
	go srv.serve()
	return srv
}
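// TestServer is a minimal in-process stand-in for a Cassandra node: it accepts
// connections, reads native protocol frames and replies with canned responses
// (see process). A typical test in this file uses it roughly like this
// (sketch, built from the helpers defined above):
//
//	srv := NewTestServer(t, defaultProto, context.Background())
//	defer srv.Stop()
//	db, err := testCluster(defaultProto, srv.Address).CreateSession()
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer db.Close()
//	// "void" returns an empty result; "kill", "timeout" and "slow"
//	// exercise the error and timeout paths used by the tests above.
//	err = db.Query("void").Exec()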
type TestServer struct {
	Address string
	TimeoutOnStartup int32
	t testing.TB
	nreq uint64
	listen net.Listener
	nKillReq int64
	compressor Compressor
	protocol byte
	headerSize int
	ctx context.Context
	cancel context.CancelFunc
	quit chan struct{}
	mu sync.Mutex
	closed bool
}

func (srv *TestServer) session() (*Session, error) {
	return testCluster(protoVersion(srv.protocol), srv.Address).CreateSession()
}

func (srv *TestServer) host() *HostInfo {
	hosts, err := hostInfo(srv.Address, 9042)
	if err != nil {
		srv.t.Fatal(err)
	}
	return hosts[0]
}

func (srv *TestServer) closeWatch() {
	<-srv.ctx.Done()
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeLocked()
}
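// serve accepts connections until the server is closed, reading frames from
// each connection and handling every request frame on its own goroutine.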
func (srv *TestServer) serve() {
	defer srv.listen.Close()
	for !srv.isClosed() {
		conn, err := srv.listen.Accept()
		if err != nil {
			break
		}
		go func(conn net.Conn) {
			defer conn.Close()
			for !srv.isClosed() {
				framer, err := srv.readFrame(conn)
				if err != nil {
					if err == io.EOF {
						return
					}
					srv.errorLocked(err)
					return
				}
				atomic.AddUint64(&srv.nreq, 1)
				go srv.process(framer)
			}
		}(conn)
	}
}

func (srv *TestServer) isClosed() bool {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	return srv.closed
}

func (srv *TestServer) closeLocked() {
	if srv.closed {
		return
	}
	srv.closed = true
	srv.listen.Close()
	srv.cancel()
}

func (srv *TestServer) Stop() {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeLocked()
}

func (srv *TestServer) errorLocked(err interface{}) {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	if srv.closed {
		return
	}
	srv.t.Error(err)
}
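// process answers a single request frame. Queries are dispatched on their
// first word: "kill" bumps nKillReq and returns a server error, "use" replies
// with the keyspace name, "void" and unknown statements return a void result,
// "timeout" never responds, and "slow" responds after a 50ms delay.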
func (srv *TestServer) process(f *framer) {
	head := f.header
	if head == nil {
		srv.errorLocked("process frame with a nil header")
		return
	}
	switch head.op {
	case opStartup:
		if atomic.LoadInt32(&srv.TimeoutOnStartup) > 0 {
			// Do not respond to startup command
			// wait until we get a cancel signal
			select {
			case <-srv.ctx.Done():
				return
			}
		}
		f.writeHeader(0, opReady, head.stream)
	case opOptions:
		f.writeHeader(0, opSupported, head.stream)
		f.writeShort(0)
	case opQuery:
		query := f.readLongString()
		first := query
		if n := strings.Index(query, " "); n > 0 {
			first = first[:n]
		}
		switch strings.ToLower(first) {
		case "kill":
			atomic.AddInt64(&srv.nKillReq, 1)
			f.writeHeader(0, opError, head.stream)
			f.writeInt(0x1001)
			f.writeString("query killed")
		case "use":
			f.writeInt(resultKindKeyspace)
			f.writeString(strings.TrimSpace(query[3:]))
		case "void":
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		case "timeout":
			<-srv.ctx.Done()
			return
		case "slow":
			go func() {
				f.writeHeader(0, opResult, head.stream)
				f.writeInt(resultKindVoid)
				f.wbuf[0] = srv.protocol | 0x80
				select {
				case <-srv.ctx.Done():
					return
				case <-time.After(50 * time.Millisecond):
					f.finishWrite()
				}
			}()
			return
		default:
			f.writeHeader(0, opResult, head.stream)
			f.writeInt(resultKindVoid)
		}
	case opError:
		f.writeHeader(0, opError, head.stream)
		f.wbuf = append(f.wbuf, f.rbuf...)
	default:
		f.writeHeader(0, opError, head.stream)
		f.writeInt(0)
		f.writeString("not supported")
	}
	f.wbuf[0] = srv.protocol | 0x80
	if err := f.finishWrite(); err != nil {
		srv.errorLocked(err)
	}
}
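// readFrame reads one frame from the connection and verifies that it is a
// request frame using the protocol version the server was started with.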
func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {
	buf := make([]byte, srv.headerSize)
	head, err := readHeader(conn, buf)
	if err != nil {
		return nil, err
	}
	framer := newFramer(conn, conn, nil, srv.protocol)
	err = framer.readFrame(&head)
	if err != nil {
		return nil, err
	}
	// should be a request frame
	if head.version.response() {
		return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version)
	} else if head.version.version() != srv.protocol {
		return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version())
	}
	return framer, nil
}