// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocql

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"
	"unicode"
)

// Session is the interface used by users to interact with the database.
//
// It's safe for concurrent use by multiple goroutines and a typical usage
// scenario is to have one global session object to interact with the
// whole Cassandra cluster.
//
// This type extends the Node interface by adding a convenient query builder
// and automatically sets a default consistency level on all operations
// that do not have a consistency level set.
type Session struct {
	Node     Node
	cons     Consistency
	pageSize int
	prefetch float64
	trace    Tracer
	mu       sync.RWMutex
	cfg      ClusterConfig
}

// NewSession wraps an existing Node.
func NewSession(c *clusterImpl) *Session {
	return &Session{Node: c, cons: Quorum, prefetch: 0.25, cfg: c.cfg}
}
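
// Illustrative usage sketch (not part of this file's API): a single Session is
// typically created once from the cluster configuration and shared by all
// goroutines. This assumes the NewCluster and CreateSession helpers from
// cluster.go; keyspace name and hosts are made up, error handling abbreviated.
//
//	cluster := NewCluster("192.168.1.1", "192.168.1.2")
//	cluster.Keyspace = "example"
//	session, err := cluster.CreateSession()
//	if err != nil {
//		// handle connection error
//	}
//	defer session.Close()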
// SetConsistency sets the default consistency level for this session. This
// setting can also be changed on a per-query basis and the default value
// is Quorum.
func (s *Session) SetConsistency(cons Consistency) {
	s.mu.Lock()
	s.cons = cons
	s.mu.Unlock()
}

// SetPageSize sets the default page size for this session. A value <= 0 will
// disable paging. This setting can also be changed on a per-query basis.
func (s *Session) SetPageSize(n int) {
	s.mu.Lock()
	s.pageSize = n
	s.mu.Unlock()
}

// SetPrefetch sets the default threshold for pre-fetching new pages. If
// there are only p*pageSize rows remaining, the next page will be requested
// automatically. This value can also be changed on a per-query basis and
// the default value is 0.25.
func (s *Session) SetPrefetch(p float64) {
	s.mu.Lock()
	s.prefetch = p
	s.mu.Unlock()
}

// SetTrace sets the default tracer for this session. This setting can also
// be changed on a per-query basis.
func (s *Session) SetTrace(trace Tracer) {
	s.mu.Lock()
	s.trace = trace
	s.mu.Unlock()
}
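
// Illustrative sketch of configuring session-wide defaults using the setters
// above; os.Stdout via NewTraceWriter is just one possible tracer destination:
//
//	session.SetConsistency(LocalQuorum)
//	session.SetPageSize(500)
//	session.SetPrefetch(0.5)
//	session.SetTrace(NewTraceWriter(session, os.Stdout))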
// Query generates a new query object for interacting with the database.
// Further details of the query may be tweaked using the resulting query
// value before the query is executed.
func (s *Session) Query(stmt string, values ...interface{}) *Query {
	s.mu.RLock()
	qry := &Query{stmt: stmt, values: values, cons: s.cons,
		session: s, pageSize: s.pageSize, trace: s.trace,
		prefetch: s.prefetch, rt: s.cfg.RetryPolicy}
	s.mu.RUnlock()
	return qry
}

// Close closes all connections. The session is unusable after this
// operation.
func (s *Session) Close() {
	s.Node.Close()
}

func (s *Session) executeQuery(qry *Query) *Iter {
	var itr *Iter
	count := 0
	for count <= qry.rt.NumRetries {
		conn := s.Node.Pick(nil)
		// Assign the error unavailable to the iterator
		if conn == nil {
			itr = &Iter{err: ErrUnavailable}
			break
		}
		itr = conn.executeQuery(qry)
		// Exit the loop if the query was successful
		if itr.err == nil {
			break
		}
		count++
	}
	return itr
}

// ExecuteBatch executes a batch operation and returns nil if successful,
// otherwise an error is returned describing the failure.
func (s *Session) ExecuteBatch(batch *Batch) error {
	// Prevent the execution of the batch if it is greater than the limit.
	// Currently batches have a limit of 65535 statements.
	// https://datastax-oss.atlassian.net/browse/JAVA-229
	if batch.Size() > BatchSizeMaximum {
		return ErrTooManyStmts
	}
	var err error
	count := 0
	for count <= batch.rt.NumRetries {
		conn := s.Node.Pick(nil)
		// Assign the error unavailable and break the loop
		if conn == nil {
			err = ErrUnavailable
			break
		}
		err = conn.executeBatch(batch)
		// Exit the loop if the operation executed correctly
		if err == nil {
			break
		}
		count++
	}
	return err
}
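
// Illustrative batch usage sketch (NewBatch, Query and ExecuteBatch are
// defined in this file; the statement text and values are made up):
//
//	batch := session.NewBatch(LoggedBatch)
//	batch.Query(`INSERT INTO events (id, payload) VALUES (?, ?)`, id1, p1)
//	batch.Query(`INSERT INTO events (id, payload) VALUES (?, ?)`, id2, p2)
//	if err := session.ExecuteBatch(batch); err != nil {
//		// handle ErrTooManyStmts, ErrUnavailable, etc.
//	}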
// Query represents a CQL statement that can be executed.
type Query struct {
	stmt      string
	values    []interface{}
	cons      Consistency
	pageSize  int
	pageState []byte
	prefetch  float64
	trace     Tracer
	session   *Session
	rt        RetryPolicy
}

// Consistency sets the consistency level for this query. If no consistency
// level has been set, the default consistency level of the cluster
// is used.
func (q *Query) Consistency(c Consistency) *Query {
	q.cons = c
	return q
}

// Trace enables tracing of this query. Look at the documentation of the
// Tracer interface to learn more about tracing.
func (q *Query) Trace(trace Tracer) *Query {
	q.trace = trace
	return q
}

// PageSize will tell the iterator to fetch the result in pages of size n.
// This is useful for iterating over large result sets, but setting the
// page size too low might decrease the performance. This feature is only
// available in Cassandra 2 and onwards.
func (q *Query) PageSize(n int) *Query {
	q.pageSize = n
	return q
}

func (q *Query) ShouldPrepare() bool {
	stmt := strings.TrimLeftFunc(strings.TrimRightFunc(q.stmt, func(r rune) bool {
		return unicode.IsSpace(r) || r == ';'
	}), unicode.IsSpace)
	var stmtType string
	if n := strings.IndexFunc(stmt, unicode.IsSpace); n >= 0 {
		stmtType = strings.ToLower(stmt[:n])
	}
	if stmtType == "begin" {
		if n := strings.LastIndexFunc(stmt, unicode.IsSpace); n >= 0 {
			stmtType = strings.ToLower(stmt[n+1:])
		}
	}
	switch stmtType {
	case "select", "insert", "update", "delete", "batch":
		return true
	}
	return false
}

// Prefetch sets the default threshold for pre-fetching new pages. If
// there are only p*pageSize rows remaining, the next page will be requested
// automatically.
func (q *Query) Prefetch(p float64) *Query {
	q.prefetch = p
	return q
}
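
// Illustrative paging sketch (PageSize, Prefetch and Iter are defined in this
// file; the statement is made up). With paging enabled, Scan transparently
// fetches the next page once only prefetch*pageSize rows remain buffered:
//
//	iter := session.Query(`SELECT id, payload FROM events`).
//		PageSize(1000).
//		Prefetch(0.25).
//		Iter()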
// RetryPolicy sets the policy to use when retrying the query.
func (q *Query) RetryPolicy(r RetryPolicy) *Query {
	q.rt = r
	return q
}

// Exec executes the query without returning any rows.
func (q *Query) Exec() error {
	iter := q.Iter()
	return iter.err
}

// Iter executes the query and returns an iterator capable of iterating
// over all results.
func (q *Query) Iter() *Iter {
	if strings.HasPrefix(strings.ToLower(q.stmt), "use") {
		return &Iter{err: ErrUseStmt}
	}
	return q.session.executeQuery(q)
}

// Scan executes the query, copies the columns of the first selected
// row into the values pointed at by dest and discards the rest. If no rows
// were selected, ErrNotFound is returned.
func (q *Query) Scan(dest ...interface{}) error {
	iter := q.Iter()
	if iter.err != nil {
		return iter.err
	}
	if len(iter.rows) == 0 {
		return ErrNotFound
	}
	iter.Scan(dest...)
	return iter.Close()
}
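
// Illustrative single-row lookup sketch (Scan and ErrNotFound are defined in
// this file; the table and column names are made up):
//
//	var name string
//	var age int
//	err := session.Query(`SELECT name, age FROM users WHERE id = ?`, id).Scan(&name, &age)
//	if err == ErrNotFound {
//		// no matching row
//	} else if err != nil {
//		// query or unmarshaling error
//	}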
// ScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
// statement containing an IF clause). If the transaction fails because
// the existing values did not match, the previous values will be stored
// in dest.
func (q *Query) ScanCAS(dest ...interface{}) (applied bool, err error) {
	iter := q.Iter()
	if iter.err != nil {
		return false, iter.err
	}
	if len(iter.rows) == 0 {
		return false, ErrNotFound
	}
	if len(iter.Columns()) > 1 {
		dest = append([]interface{}{&applied}, dest...)
		iter.Scan(dest...)
	} else {
		iter.Scan(&applied)
	}
	return applied, iter.Close()
}
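
// Illustrative lightweight-transaction sketch (ScanCAS is defined above; the
// statement and column names are made up). When the condition fails, the
// value currently stored is written into the dest arguments:
//
//	var oldName string
//	applied, err := session.Query(
//		`UPDATE users SET name = ? WHERE id = ? IF name = ?`,
//		newName, id, expectedName,
//	).ScanCAS(&oldName)
//	if err != nil {
//		// handle error
//	} else if !applied {
//		// condition failed; oldName holds the value currently stored
//	}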
// Iter represents an iterator that can be used to iterate over all rows that
// were returned by a query. The iterator might send additional queries to the
// database during the iteration if paging was enabled.
type Iter struct {
	err     error
	pos     int
	rows    [][][]byte
	columns []ColumnInfo
	next    *nextIter
}

// Columns returns the name and type of the selected columns.
func (iter *Iter) Columns() []ColumnInfo {
	return iter.columns
}

// Scan consumes the next row of the iterator and copies the columns of the
// current row into the values pointed at by dest. Use nil as a dest value
// to skip the corresponding column. Scan might send additional queries
// to the database to retrieve the next set of rows if paging was enabled.
//
// Scan returns true if the row was successfully unmarshaled or false if the
// end of the result set was reached or if an error occurred. Close should
// be called afterwards to retrieve any potential errors.
func (iter *Iter) Scan(dest ...interface{}) bool {
	if iter.err != nil {
		return false
	}
	if iter.pos >= len(iter.rows) {
		if iter.next != nil {
			*iter = *iter.next.fetch()
			return iter.Scan(dest...)
		}
		return false
	}
	if iter.next != nil && iter.pos == iter.next.pos {
		go iter.next.fetch()
	}
	if len(dest) != len(iter.columns) {
		iter.err = errors.New("count mismatch")
		return false
	}
	for i := 0; i < len(iter.columns); i++ {
		if dest[i] == nil {
			continue
		}
		err := Unmarshal(iter.columns[i].TypeInfo, iter.rows[iter.pos][i], dest[i])
		if err != nil {
			iter.err = err
			return false
		}
	}
	iter.pos++
	return true
}
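
// Illustrative iteration sketch (Scan and Close are defined in this file; the
// statement is made up). Scan is called until it returns false, then Close
// reports any error that ended the iteration:
//
//	iter := session.Query(`SELECT id, name FROM users`).Iter()
//	var id int
//	var name string
//	for iter.Scan(&id, &name) {
//		// use id and name
//	}
//	if err := iter.Close(); err != nil {
//		// handle error
//	}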
// Close closes the iterator and returns any errors that happened during
// the query or the iteration.
func (iter *Iter) Close() error {
	return iter.err
}

type nextIter struct {
	qry  Query
	pos  int
	once sync.Once
	next *Iter
}

func (n *nextIter) fetch() *Iter {
	n.once.Do(func() {
		n.next = n.qry.session.executeQuery(&n.qry)
	})
	return n.next
}

type Batch struct {
	Type    BatchType
	Entries []BatchEntry
	Cons    Consistency
	rt      RetryPolicy
}

// NewBatch creates a new batch operation without defaults from the cluster.
func NewBatch(typ BatchType) *Batch {
	return &Batch{Type: typ}
}

// NewBatch creates a new batch operation using defaults defined in the cluster.
func (s *Session) NewBatch(typ BatchType) *Batch {
	return &Batch{Type: typ, rt: s.cfg.RetryPolicy}
}

// Query adds the query to the batch operation.
func (b *Batch) Query(stmt string, args ...interface{}) {
	b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args})
}

// RetryPolicy sets the retry policy to use when executing the batch operation.
func (b *Batch) RetryPolicy(r RetryPolicy) *Batch {
	b.rt = r
	return b
}

// Size returns the number of batch statements to be executed by the batch operation.
func (b *Batch) Size() int {
	return len(b.Entries)
}

type BatchType int

const (
	LoggedBatch   BatchType = 0
	UnloggedBatch BatchType = 1
	CounterBatch  BatchType = 2
)

type BatchEntry struct {
	Stmt string
	Args []interface{}
}

type Consistency int

const (
	Any Consistency = 1 + iota
	One
	Two
	Three
	Quorum
	All
	LocalQuorum
	EachQuorum
	Serial
	LocalSerial
)
var consistencyNames = []string{
	0:           "default",
	Any:         "any",
	One:         "one",
	Two:         "two",
	Three:       "three",
	Quorum:      "quorum",
	All:         "all",
	LocalQuorum: "localquorum",
	EachQuorum:  "eachquorum",
	Serial:      "serial",
	LocalSerial: "localserial",
}

func (c Consistency) String() string {
	return consistencyNames[c]
}

type ColumnInfo struct {
	Keyspace string
	Table    string
	Name     string
	TypeInfo *TypeInfo
}

// Tracer is the interface implemented by query tracers. Tracers have the
// ability to obtain a detailed event log of all events that happened during
// the execution of a query from Cassandra. Gathering this information might
// be essential for debugging and optimizing queries, but this feature should
// not be used on production systems with very high load.
type Tracer interface {
	Trace(traceId []byte)
}

type traceWriter struct {
	session *Session
	w       io.Writer
	mu      sync.Mutex
}

// NewTraceWriter returns a simple Tracer implementation that outputs
// the event log in a textual format.
func NewTraceWriter(session *Session, w io.Writer) Tracer {
	return traceWriter{session: session, w: w}
}
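
// Illustrative tracing sketch (NewTraceWriter and the per-query Trace method
// are defined in this file; os.Stdout is just one possible destination). The
// event log of every execution of qry is written to the given io.Writer:
//
//	tracer := NewTraceWriter(session, os.Stdout)
//	qry := session.Query(`SELECT name FROM users WHERE id = ?`, id).Trace(tracer)
//	if err := qry.Exec(); err != nil {
//		// handle error
//	}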
func (t traceWriter) Trace(traceId []byte) {
	var (
		coordinator string
		duration    int
	)
	t.session.Query(`SELECT coordinator, duration
			FROM system_traces.sessions
			WHERE session_id = ?`, traceId).
		Consistency(One).Scan(&coordinator, &duration)

	iter := t.session.Query(`SELECT event_id, activity, source, source_elapsed
			FROM system_traces.events
			WHERE session_id = ?`, traceId).
		Consistency(One).Iter()
	var (
		timestamp time.Time
		activity  string
		source    string
		elapsed   int
	)
	t.mu.Lock()
	defer t.mu.Unlock()
	fmt.Fprintf(t.w, "Tracing session %016x (coordinator: %s, duration: %v):\n",
		traceId, coordinator, time.Duration(duration)*time.Microsecond)
	for iter.Scan(&timestamp, &activity, &source, &elapsed) {
		fmt.Fprintf(t.w, "%s: %s (source: %s, elapsed: %d)\n",
			timestamp.Format("2006/01/02 15:04:05.999999"), activity, source, elapsed)
	}
	if err := iter.Close(); err != nil {
		fmt.Fprintln(t.w, "Error:", err)
	}
}

type Error struct {
	Code    int
	Message string
}

func (e Error) Error() string {
	return e.Message
}

var (
	ErrNotFound     = errors.New("not found")
	ErrUnavailable  = errors.New("unavailable")
	ErrProtocol     = errors.New("protocol error")
	ErrUnsupported  = errors.New("feature not supported")
	ErrTooManyStmts = errors.New("too many statements")
	ErrUseStmt      = errors.New("use statements aren't supported. Please see https://github.com/gocql/gocql for explanation.")
)

// BatchSizeMaximum is the maximum number of statements a batch operation can have.
// This limit is set by Cassandra and could change in the future.
const BatchSizeMaximum = 65535