// batch_tx.go
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package backend

import (
	"bytes"
	"math"
	"sync"
	"sync/atomic"
	"time"

	bolt "github.com/coreos/bbolt"
)
  23. type BatchTx interface {
  24. ReadTx
  25. UnsafeCreateBucket(name []byte)
  26. UnsafePut(bucketName []byte, key []byte, value []byte)
  27. UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
  28. UnsafeDelete(bucketName []byte, key []byte)
  29. // Commit commits a previous tx and begins a new writable one.
  30. Commit()
  31. // CommitAndStop commits the previous tx and does not create a new one.
  32. CommitAndStop()
  33. }
  34. type batchTx struct {
  35. sync.Mutex
  36. tx *bolt.Tx
  37. backend *backend
  38. pending int
  39. }
  40. func (t *batchTx) UnsafeCreateBucket(name []byte) {
  41. _, err := t.tx.CreateBucket(name)
  42. if err != nil && err != bolt.ErrBucketExists {
  43. plog.Fatalf("cannot create bucket %s (%v)", name, err)
  44. }
  45. t.pending++
  46. }
  47. // UnsafePut must be called holding the lock on the tx.
  48. func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
  49. t.unsafePut(bucketName, key, value, false)
  50. }
  51. // UnsafeSeqPut must be called holding the lock on the tx.
  52. func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
  53. t.unsafePut(bucketName, key, value, true)
  54. }
  55. func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
  56. bucket := t.tx.Bucket(bucketName)
  57. if bucket == nil {
  58. plog.Fatalf("bucket %s does not exist", bucketName)
  59. }
  60. if seq {
  61. // it is useful to increase fill percent when the workloads are mostly append-only.
  62. // this can delay the page split and reduce space usage.
  63. bucket.FillPercent = 0.9
  64. }
  65. if err := bucket.Put(key, value); err != nil {
  66. plog.Fatalf("cannot put key into bucket (%v)", err)
  67. }
  68. t.pending++
  69. }
  70. // UnsafeRange must be called holding the lock on the tx.
  71. func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
  72. bucket := t.tx.Bucket(bucketName)
  73. if bucket == nil {
  74. plog.Fatalf("bucket %s does not exist", bucketName)
  75. }
  76. return unsafeRange(bucket.Cursor(), key, endKey, limit)
  77. }
  78. func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
  79. if limit <= 0 {
  80. limit = math.MaxInt64
  81. }
  82. var isMatch func(b []byte) bool
  83. if len(endKey) > 0 {
  84. isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
  85. } else {
  86. isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
  87. limit = 1
  88. }
  89. for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
  90. vs = append(vs, cv)
  91. keys = append(keys, ck)
  92. if limit == int64(len(keys)) {
  93. break
  94. }
  95. }
  96. return keys, vs
  97. }
  98. // UnsafeDelete must be called holding the lock on the tx.
  99. func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
  100. bucket := t.tx.Bucket(bucketName)
  101. if bucket == nil {
  102. plog.Fatalf("bucket %s does not exist", bucketName)
  103. }
  104. err := bucket.Delete(key)
  105. if err != nil {
  106. plog.Fatalf("cannot delete key from bucket (%v)", err)
  107. }
  108. t.pending++
  109. }
  110. // UnsafeForEach must be called holding the lock on the tx.
  111. func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
  112. return unsafeForEach(t.tx, bucketName, visitor)
  113. }
  114. func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
  115. if b := tx.Bucket(bucket); b != nil {
  116. return b.ForEach(visitor)
  117. }
  118. return nil
  119. }
  120. // Commit commits a previous tx and begins a new writable one.
  121. func (t *batchTx) Commit() {
  122. t.Lock()
  123. t.commit(false)
  124. t.Unlock()
  125. }
  126. // CommitAndStop commits the previous tx and does not create a new one.
  127. func (t *batchTx) CommitAndStop() {
  128. t.Lock()
  129. t.commit(true)
  130. t.Unlock()
  131. }
  132. func (t *batchTx) Unlock() {
  133. if t.pending >= t.backend.batchLimit {
  134. t.commit(false)
  135. }
  136. t.Mutex.Unlock()
  137. }
  138. func (t *batchTx) commit(stop bool) {
  139. // commit the last tx
  140. if t.tx != nil {
  141. if t.pending == 0 && !stop {
  142. return
  143. }
  144. start := time.Now()
  145. // gofail: var beforeCommit struct{}
  146. err := t.tx.Commit()
  147. // gofail: var afterCommit struct{}
  148. commitDurations.Observe(time.Since(start).Seconds())
  149. atomic.AddInt64(&t.backend.commits, 1)
  150. t.pending = 0
  151. if err != nil {
  152. plog.Fatalf("cannot commit tx (%s)", err)
  153. }
  154. }
  155. if !stop {
  156. t.tx = t.backend.begin(true)
  157. }
  158. }
  159. type batchTxBuffered struct {
  160. batchTx
  161. buf txWriteBuffer
  162. }
  163. func newBatchTxBuffered(backend *backend) *batchTxBuffered {
  164. tx := &batchTxBuffered{
  165. batchTx: batchTx{backend: backend},
  166. buf: txWriteBuffer{
  167. txBuffer: txBuffer{make(map[string]*bucketBuffer)},
  168. seq: true,
  169. },
  170. }
  171. tx.Commit()
  172. return tx
  173. }
  174. func (t *batchTxBuffered) Unlock() {
  175. if t.pending != 0 {
  176. t.backend.readTx.mu.Lock()
  177. t.buf.writeback(&t.backend.readTx.buf)
  178. t.backend.readTx.mu.Unlock()
  179. if t.pending >= t.backend.batchLimit {
  180. t.commit(false)
  181. }
  182. }
  183. t.batchTx.Unlock()
  184. }
  185. func (t *batchTxBuffered) Commit() {
  186. t.Lock()
  187. t.commit(false)
  188. t.Unlock()
  189. }
  190. func (t *batchTxBuffered) CommitAndStop() {
  191. t.Lock()
  192. t.commit(true)
  193. t.Unlock()
  194. }
  195. func (t *batchTxBuffered) commit(stop bool) {
  196. // all read txs must be closed to acquire boltdb commit rwlock
  197. t.backend.readTx.mu.Lock()
  198. t.unsafeCommit(stop)
  199. t.backend.readTx.mu.Unlock()
  200. }
  201. func (t *batchTxBuffered) unsafeCommit(stop bool) {
  202. if t.backend.readTx.tx != nil {
  203. if err := t.backend.readTx.tx.Rollback(); err != nil {
  204. plog.Fatalf("cannot rollback tx (%s)", err)
  205. }
  206. t.backend.readTx.reset()
  207. }
  208. t.batchTx.commit(stop)
  209. if !stop {
  210. t.backend.readTx.tx = t.backend.begin(false)
  211. }
  212. }
  213. func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
  214. t.batchTx.UnsafePut(bucketName, key, value)
  215. t.buf.put(bucketName, key, value)
  216. }
  217. func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
  218. t.batchTx.UnsafeSeqPut(bucketName, key, value)
  219. t.buf.putSeq(bucketName, key, value)
  220. }