// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocql

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"
	"unicode"
)

// Session is the interface used by users to interact with the database.
//
// It's safe for concurrent use by multiple goroutines and a typical usage
// scenario is to have one global session object to interact with the
// whole Cassandra cluster.
//
// This type extends the Node interface by adding a convenient query builder
// and automatically sets a default consistency level on all operations
// that do not have a consistency level set.
type Session struct {
	Pool     ConnectionPool
	cons     Consistency
	pageSize int
	prefetch float64
	trace    Tracer
	mu       sync.RWMutex
	cfg      ClusterConfig
	closeMu  sync.RWMutex
	isClosed bool
}

// NewSession wraps an existing Node.
func NewSession(p ConnectionPool, c ClusterConfig) *Session {
	return &Session{Pool: p, cons: Quorum, prefetch: 0.25, cfg: c}
}

// SetConsistency sets the default consistency level for this session. This
// setting can also be changed on a per-query basis and the default value
// is Quorum.
func (s *Session) SetConsistency(cons Consistency) {
	s.mu.Lock()
	s.cons = cons
	s.mu.Unlock()
}

// SetPageSize sets the default page size for this session. A value <= 0 will
// disable paging. This setting can also be changed on a per-query basis.
func (s *Session) SetPageSize(n int) {
	s.mu.Lock()
	s.pageSize = n
	s.mu.Unlock()
}

// SetPrefetch sets the default threshold for pre-fetching new pages. If
// there are only p*pageSize rows remaining, the next page will be requested
// automatically. This value can also be changed on a per-query basis and
// the default value is 0.25.
func (s *Session) SetPrefetch(p float64) {
	s.mu.Lock()
	s.prefetch = p
	s.mu.Unlock()
}

// SetTrace sets the default tracer for this session. This setting can also
// be changed on a per-query basis.
func (s *Session) SetTrace(trace Tracer) {
	s.mu.Lock()
	s.trace = trace
	s.mu.Unlock()
}
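
// exampleConfigureSessionDefaults is an illustrative sketch added for this
// edit, not part of the original gocql API: it assumes a pool and cluster
// config were constructed elsewhere and shows how the session-wide defaults
// above can be tuned before issuing queries.
func exampleConfigureSessionDefaults(pool ConnectionPool, cfg ClusterConfig) *Session {
	s := NewSession(pool, cfg)
	s.SetConsistency(One) // default consistency for all queries on this session
	s.SetPageSize(100)    // fetch results in pages of 100 rows
	s.SetPrefetch(0.5)    // request the next page once half of a page has been consumed
	return s
}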

// Query generates a new query object for interacting with the database.
// Further details of the query may be tweaked using the resulting query
// value before the query is executed. Query is automatically prepared
// if it has not previously been executed.
func (s *Session) Query(stmt string, values ...interface{}) *Query {
	s.mu.RLock()
	qry := &Query{stmt: stmt, values: values, cons: s.cons,
		session: s, pageSize: s.pageSize, trace: s.trace,
		prefetch: s.prefetch, rt: s.cfg.RetryPolicy}
	s.mu.RUnlock()
	return qry
}
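
// exampleExec is an illustrative sketch added for this edit, not part of the
// original file: it builds a query with the fluent API above and executes it
// without reading rows back. The table and columns are hypothetical.
func exampleExec(s *Session) error {
	return s.Query(`INSERT INTO users (id, name) VALUES (?, ?)`,
		42, "alice").Consistency(Quorum).Exec()
}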

// Bind generates a new query object that obtains its values at execution
// time by calling the supplied binding function.
func (s *Session) Bind(stmt string, b func(q *QueryInfo) []interface{}) *Query {
	s.mu.RLock()
	qry := &Query{stmt: stmt, binding: b, cons: s.cons,
		session: s, pageSize: s.pageSize, trace: s.trace,
		prefetch: s.prefetch, rt: s.cfg.RetryPolicy}
	s.mu.RUnlock()
	return qry
}
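
// exampleBind is an illustrative sketch added for this edit, not part of the
// original file: Bind defers value resolution until the query is executed by
// invoking the supplied binding function. The statement and values are
// hypothetical.
func exampleBind(s *Session) error {
	return s.Bind(`INSERT INTO events (id, payload) VALUES (?, ?)`,
		func(_ *QueryInfo) []interface{} {
			// Values are produced only when the query is actually executed.
			return []interface{}{123, "ping"}
		}).Exec()
}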

// Close closes all connections. The session is unusable after this
// operation.
func (s *Session) Close() {
	s.closeMu.Lock()
	defer s.closeMu.Unlock()
	if s.isClosed {
		return
	}
	s.isClosed = true
	s.Pool.Close()
}

// Closed reports whether the session has been closed.
func (s *Session) Closed() bool {
	s.closeMu.RLock()
	closed := s.isClosed
	s.closeMu.RUnlock()
	return closed
}

func (s *Session) executeQuery(qry *Query) *Iter {
	// fail fast
	if s.Closed() {
		return &Iter{err: ErrSessionClosed}
	}

	var iter *Iter
	for count := 0; count <= qry.rt.NumRetries; count++ {
		conn := s.Pool.Pick(qry)

		// report the unavailability through the iterator
		if conn == nil {
			iter = &Iter{err: ErrNoConnections}
			break
		}

		iter = conn.executeQuery(qry)

		// exit the retry loop if the query was successful
		if iter.err == nil {
			break
		}
	}
	return iter
}

// ExecuteBatch executes a batch operation and returns nil if successful;
// otherwise an error is returned describing the failure.
func (s *Session) ExecuteBatch(batch *Batch) error {
	// fail fast
	if s.Closed() {
		return ErrSessionClosed
	}

	// Prevent the execution of the batch if it exceeds the limit.
	// Currently batches have a limit of 65535 statements.
	// https://datastax-oss.atlassian.net/browse/JAVA-229
	if batch.Size() > BatchSizeMaximum {
		return ErrTooManyStmts
	}

	var err error
	for count := 0; count <= batch.rt.NumRetries; count++ {
		conn := s.Pool.Pick(nil)

		// report the unavailability and break out of the retry loop
		if conn == nil {
			err = ErrNoConnections
			break
		}

		err = conn.executeBatch(batch)

		// exit the retry loop if the batch executed correctly
		if err == nil {
			return nil
		}
	}
	return err
}
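
// exampleExecuteBatch is an illustrative sketch added for this edit, not part
// of the original file: it composes a logged batch with the session defaults
// and executes it. The table and values are hypothetical.
func exampleExecuteBatch(s *Session) error {
	b := s.NewBatch(LoggedBatch)
	b.Query(`INSERT INTO users (id, name) VALUES (?, ?)`, 1, "alice")
	b.Query(`INSERT INTO users (id, name) VALUES (?, ?)`, 2, "bob")
	return s.ExecuteBatch(b)
}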

// Query represents a CQL statement that can be executed.
type Query struct {
	stmt      string
	values    []interface{}
	cons      Consistency
	pageSize  int
	pageState []byte
	prefetch  float64
	trace     Tracer
	session   *Session
	rt        RetryPolicy
	binding   func(q *QueryInfo) []interface{}
}

// Consistency sets the consistency level for this query. If no consistency
// level has been set, the default consistency level of the cluster
// is used.
func (q *Query) Consistency(c Consistency) *Query {
	q.cons = c
	return q
}

// Trace enables tracing of this query. Look at the documentation of the
// Tracer interface to learn more about tracing.
func (q *Query) Trace(trace Tracer) *Query {
	q.trace = trace
	return q
}

// PageSize will tell the iterator to fetch the result in pages of size n.
// This is useful for iterating over large result sets, but setting the
// page size too low might decrease performance. This feature is only
// available in Cassandra 2 and onwards.
func (q *Query) PageSize(n int) *Query {
	q.pageSize = n
	return q
}

func (q *Query) shouldPrepare() bool {
	stmt := strings.TrimLeftFunc(strings.TrimRightFunc(q.stmt, func(r rune) bool {
		return unicode.IsSpace(r) || r == ';'
	}), unicode.IsSpace)

	var stmtType string
	if n := strings.IndexFunc(stmt, unicode.IsSpace); n >= 0 {
		stmtType = strings.ToLower(stmt[:n])
	}
	if stmtType == "begin" {
		if n := strings.LastIndexFunc(stmt, unicode.IsSpace); n >= 0 {
			stmtType = strings.ToLower(stmt[n+1:])
		}
	}
	switch stmtType {
	case "select", "insert", "update", "delete", "batch":
		return true
	}
	return false
}

// Prefetch sets the threshold for pre-fetching new pages for this query. If
// there are only p*pageSize rows remaining, the next page will be requested
// automatically.
func (q *Query) Prefetch(p float64) *Query {
	q.prefetch = p
	return q
}

// RetryPolicy sets the policy to use when retrying the query.
func (q *Query) RetryPolicy(r RetryPolicy) *Query {
	q.rt = r
	return q
}

// Exec executes the query without returning any rows.
func (q *Query) Exec() error {
	iter := q.Iter()
	return iter.err
}

// Iter executes the query and returns an iterator capable of iterating
// over all results.
func (q *Query) Iter() *Iter {
	if strings.HasPrefix(strings.ToLower(q.stmt), "use") {
		return &Iter{err: ErrUseStmt}
	}
	return q.session.executeQuery(q)
}

// Scan executes the query, copies the columns of the first selected
// row into the values pointed at by dest and discards the rest. If no rows
// were selected, ErrNotFound is returned.
func (q *Query) Scan(dest ...interface{}) error {
	iter := q.Iter()
	if iter.err != nil {
		return iter.err
	}
	if len(iter.rows) == 0 {
		return ErrNotFound
	}
	iter.Scan(dest...)
	return iter.Close()
}
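
// exampleScan is an illustrative sketch added for this edit, not part of the
// original file: it reads a single row with Scan; ErrNotFound is returned
// when the query selects no rows. The table and columns are hypothetical.
func exampleScan(s *Session, id int) (string, error) {
	var name string
	err := s.Query(`SELECT name FROM users WHERE id = ?`, id).Scan(&name)
	return name, err
}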

// ScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
// statement containing an IF clause). If the transaction fails because
// the existing values did not match, the previous values will be stored
// in dest.
func (q *Query) ScanCAS(dest ...interface{}) (applied bool, err error) {
	iter := q.Iter()
	if iter.err != nil {
		return false, iter.err
	}
	if len(iter.rows) == 0 {
		return false, ErrNotFound
	}
	if len(iter.Columns()) > 1 {
		dest = append([]interface{}{&applied}, dest...)
		iter.Scan(dest...)
	} else {
		iter.Scan(&applied)
	}
	return applied, iter.Close()
}
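
// exampleScanCAS is an illustrative sketch added for this edit, not part of
// the original file: it runs a lightweight transaction (INSERT ... IF NOT
// EXISTS). When the transaction is not applied, the existing row's columns
// are written into the dest arguments, so dest should cover them all. The
// table and columns are hypothetical.
func exampleScanCAS(s *Session) (bool, error) {
	var (
		existingID   int
		existingName string
	)
	return s.Query(`INSERT INTO users (id, name) VALUES (?, ?) IF NOT EXISTS`,
		1, "alice").ScanCAS(&existingID, &existingName)
}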

// Iter represents an iterator that can be used to iterate over all rows that
// were returned by a query. The iterator might send additional queries to the
// database during the iteration if paging was enabled.
type Iter struct {
	err     error
	pos     int
	rows    [][][]byte
	columns []ColumnInfo
	next    *nextIter
}

// Columns returns the name and type of the selected columns.
func (iter *Iter) Columns() []ColumnInfo {
	return iter.columns
}

// Scan consumes the next row of the iterator and copies the columns of the
// current row into the values pointed at by dest. Use nil as a dest value
// to skip the corresponding column. Scan might send additional queries
// to the database to retrieve the next set of rows if paging was enabled.
//
// Scan returns true if the row was successfully unmarshaled or false if the
// end of the result set was reached or if an error occurred. Close should
// be called afterwards to retrieve any potential errors.
func (iter *Iter) Scan(dest ...interface{}) bool {
	if iter.err != nil {
		return false
	}
	if iter.pos >= len(iter.rows) {
		if iter.next != nil {
			*iter = *iter.next.fetch()
			return iter.Scan(dest...)
		}
		return false
	}
	if iter.next != nil && iter.pos == iter.next.pos {
		go iter.next.fetch()
	}
	if len(dest) != len(iter.columns) {
		iter.err = errors.New("count mismatch")
		return false
	}
	for i := 0; i < len(iter.columns); i++ {
		if dest[i] == nil {
			continue
		}
		err := Unmarshal(iter.columns[i].TypeInfo, iter.rows[iter.pos][i], dest[i])
		if err != nil {
			iter.err = err
			return false
		}
	}
	iter.pos++
	return true
}
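
// exampleIterate is an illustrative sketch added for this edit, not part of
// the original file: it shows the canonical Scan/Close loop for walking a
// paged result set. The table and columns are hypothetical.
func exampleIterate(s *Session) ([]string, error) {
	var (
		names []string
		name  string
	)
	iter := s.Query(`SELECT name FROM users`).PageSize(100).Iter()
	for iter.Scan(&name) {
		names = append(names, name)
	}
	// Close reports any error that occurred during the query or iteration.
	return names, iter.Close()
}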

// Close closes the iterator and returns any errors that happened during
// the query or the iteration.
func (iter *Iter) Close() error {
	return iter.err
}

// nextIter holds the query for the next page of results and fetches it at
// most once.
type nextIter struct {
	qry  Query
	pos  int
	once sync.Once
	next *Iter
}

func (n *nextIter) fetch() *Iter {
	n.once.Do(func() {
		n.next = n.qry.session.executeQuery(&n.qry)
	})
	return n.next
}

// Batch is a batch of CQL statements that are executed as a single operation.
type Batch struct {
	Type    BatchType
	Entries []BatchEntry
	Cons    Consistency
	rt      RetryPolicy
}

// NewBatch creates a new batch operation without defaults from the cluster.
func NewBatch(typ BatchType) *Batch {
	return &Batch{Type: typ}
}

// NewBatch creates a new batch operation using defaults defined in the cluster.
func (s *Session) NewBatch(typ BatchType) *Batch {
	return &Batch{Type: typ, rt: s.cfg.RetryPolicy}
}

// Query adds the query to the batch operation.
func (b *Batch) Query(stmt string, args ...interface{}) {
	b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args})
}

// Bind adds the query to the batch operation with a binding callback that
// supplies the values at execution time.
func (b *Batch) Bind(stmt string, bind func(q *QueryInfo) []interface{}) {
	b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, binding: bind})
}

// RetryPolicy sets the retry policy to use when executing the batch operation.
func (b *Batch) RetryPolicy(r RetryPolicy) *Batch {
	b.rt = r
	return b
}

// Size returns the number of batch statements to be executed by the batch operation.
func (b *Batch) Size() int {
	return len(b.Entries)
}

type BatchType int

const (
	LoggedBatch   BatchType = 0
	UnloggedBatch BatchType = 1
	CounterBatch  BatchType = 2
)

type BatchEntry struct {
	Stmt    string
	Args    []interface{}
	binding func(q *QueryInfo) []interface{}
}

type Consistency int

const (
	Any Consistency = 1 + iota
	One
	Two
	Three
	Quorum
	All
	LocalQuorum
	EachQuorum
	Serial
	LocalSerial
)

var ConsistencyNames = []string{
	0:           "default",
	Any:         "any",
	One:         "one",
	Two:         "two",
	Three:       "three",
	Quorum:      "quorum",
	All:         "all",
	LocalQuorum: "localquorum",
	EachQuorum:  "eachquorum",
	Serial:      "serial",
	LocalSerial: "localserial",
}

func (c Consistency) String() string {
	return ConsistencyNames[c]
}

type ColumnInfo struct {
	Keyspace string
	Table    string
	Name     string
	TypeInfo *TypeInfo
}

// Tracer is the interface implemented by query tracers. Tracers have the
// ability to obtain a detailed event log of all events that happened during
// the execution of a query from Cassandra. Gathering this information might
// be essential for debugging and optimizing queries, but this feature should
// not be used on production systems with very high load.
type Tracer interface {
	Trace(traceId []byte)
}

type traceWriter struct {
	session *Session
	w       io.Writer
	mu      sync.Mutex
}

// NewTraceWriter returns a simple Tracer implementation that outputs
// the event log in a textual format.
func NewTraceWriter(session *Session, w io.Writer) Tracer {
	// Return a pointer so the mutex guarding the writer is shared between calls.
	return &traceWriter{session: session, w: w}
}
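
// exampleTrace is an illustrative sketch added for this edit, not part of the
// original file: it attaches the textual trace writer to a single query so
// its event log is written to w. The query is hypothetical.
func exampleTrace(s *Session, w io.Writer) error {
	tracer := NewTraceWriter(s, w)
	return s.Query(`SELECT name FROM users WHERE id = ?`, 1).Trace(tracer).Exec()
}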

func (t *traceWriter) Trace(traceId []byte) {
	var (
		coordinator string
		duration    int
	)
	t.session.Query(`SELECT coordinator, duration
			FROM system_traces.sessions
			WHERE session_id = ?`, traceId).
		Consistency(One).Scan(&coordinator, &duration)

	iter := t.session.Query(`SELECT event_id, activity, source, source_elapsed
			FROM system_traces.events
			WHERE session_id = ?`, traceId).
		Consistency(One).Iter()

	var (
		timestamp time.Time
		activity  string
		source    string
		elapsed   int
	)

	t.mu.Lock()
	defer t.mu.Unlock()

	fmt.Fprintf(t.w, "Tracing session %016x (coordinator: %s, duration: %v):\n",
		traceId, coordinator, time.Duration(duration)*time.Microsecond)
	for iter.Scan(&timestamp, &activity, &source, &elapsed) {
		fmt.Fprintf(t.w, "%s: %s (source: %s, elapsed: %d)\n",
			timestamp.Format("2006/01/02 15:04:05.999999"), activity, source, elapsed)
	}
	if err := iter.Close(); err != nil {
		fmt.Fprintln(t.w, "Error:", err)
	}
}

type Error struct {
	Code    int
	Message string
}

func (e Error) Error() string {
	return e.Message
}

var (
	ErrNotFound      = errors.New("not found")
	ErrUnavailable   = errors.New("unavailable")
	ErrUnsupported   = errors.New("feature not supported")
	ErrTooManyStmts  = errors.New("too many statements")
	ErrUseStmt       = errors.New("use statements aren't supported. Please see https://github.com/gocql/gocql for explanation.")
	ErrSessionClosed = errors.New("session has been closed")
	ErrNoConnections = errors.New("no connections available")
)

type ErrProtocol struct{ error }

func NewErrProtocol(format string, args ...interface{}) error {
	return ErrProtocol{fmt.Errorf(format, args...)}
}

// BatchSizeMaximum is the maximum number of statements a batch operation can have.
// This limit is set by Cassandra and could change in the future.
const BatchSizeMaximum = 65535