batch_tx.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import (
	"bytes"
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"github.com/boltdb/bolt"
)
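
// BatchTx is a ReadTx that can also mutate the backend. The Unsafe* methods
// must be called while holding the tx lock (Lock/Unlock).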
type BatchTx interface {
	ReadTx
	UnsafeCreateBucket(name []byte)
	UnsafePut(bucketName []byte, key []byte, value []byte)
	UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
	UnsafeDelete(bucketName []byte, key []byte)
	// Commit commits a previous tx and begins a new writable one.
	Commit()
	// CommitAndStop commits the previous tx and does not create a new one.
	CommitAndStop()
}
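
// batchTx wraps a writable bolt.Tx and tracks the number of pending
// (uncommitted) changes; Unlock commits automatically once pending
// reaches the backend's batch limit.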
type batchTx struct {
	sync.Mutex
	tx      *bolt.Tx
	backend *backend
	pending int
}

func (t *batchTx) UnsafeCreateBucket(name []byte) {
	_, err := t.tx.CreateBucket(name)
	if err != nil && err != bolt.ErrBucketExists {
		plog.Fatalf("cannot create bucket %s (%v)", name, err)
	}
	t.pending++
}

// UnsafePut must be called holding the lock on the tx.
func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
	t.unsafePut(bucketName, key, value, false)
}

// UnsafeSeqPut must be called holding the lock on the tx.
func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
	t.unsafePut(bucketName, key, value, true)
}

func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
	bucket := t.tx.Bucket(bucketName)
	if bucket == nil {
		plog.Fatalf("bucket %s does not exist", bucketName)
	}
	if seq {
		// it is useful to increase fill percent when the workloads are mostly append-only.
		// this can delay the page split and reduce space usage.
		bucket.FillPercent = 0.9
	}
	if err := bucket.Put(key, value); err != nil {
		plog.Fatalf("cannot put key into bucket (%v)", err)
	}
	t.pending++
}

// UnsafeRange must be called holding the lock on the tx.
func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
	k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit)
	if err != nil {
		plog.Fatal(err)
	}
	return k, v
}

func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) {
	bucket := tx.Bucket(bucketName)
	if bucket == nil {
		return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName)
	}
	if len(endKey) == 0 {
		if v := bucket.Get(key); v != nil {
			return append(keys, key), append(vs, v), nil
		}
		return nil, nil, nil
	}
	if limit <= 0 {
		limit = math.MaxInt64
	}
	c := bucket.Cursor()
	for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() {
		vs = append(vs, cv)
		keys = append(keys, ck)
		if limit == int64(len(keys)) {
			break
		}
	}
	return keys, vs, nil
}

// UnsafeDelete must be called holding the lock on the tx.
func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
	bucket := t.tx.Bucket(bucketName)
	if bucket == nil {
		plog.Fatalf("bucket %s does not exist", bucketName)
	}
	err := bucket.Delete(key)
	if err != nil {
		plog.Fatalf("cannot delete key from bucket (%v)", err)
	}
	t.pending++
}

// UnsafeForEach must be called holding the lock on the tx.
func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
	return unsafeForEach(t.tx, bucketName, visitor)
}

func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
	if b := tx.Bucket(bucket); b != nil {
		return b.ForEach(visitor)
	}
	return nil
}

// Commit commits a previous tx and begins a new writable one.
func (t *batchTx) Commit() {
	t.Lock()
	defer t.Unlock()
	t.commit(false)
}

// CommitAndStop commits the previous tx and does not create a new one.
func (t *batchTx) CommitAndStop() {
	t.Lock()
	defer t.Unlock()
	t.commit(true)
}
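
// Unlock releases the tx lock; if the number of pending changes has reached
// the backend's batch limit, the current tx is committed first.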
func (t *batchTx) Unlock() {
	if t.pending >= t.backend.batchLimit {
		t.commit(false)
	}
	t.Mutex.Unlock()
}
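
// commit commits the current tx, if any, and unless stop is true begins a
// new writable one.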
func (t *batchTx) commit(stop bool) {
	// commit the last tx
	if t.tx != nil {
		if t.pending == 0 && !stop {
			t.backend.mu.RLock()
			defer t.backend.mu.RUnlock()

			// t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)',
			// which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size().
			// Server must make sure 'batchTx.commit(false)' does not follow
			// 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call).
			atomic.StoreInt64(&t.backend.size, t.tx.Size())
			return
		}

		start := time.Now()

		// gofail: var beforeCommit struct{}
		err := t.tx.Commit()
		// gofail: var afterCommit struct{}

		commitDurations.Observe(time.Since(start).Seconds())
		atomic.AddInt64(&t.backend.commits, 1)

		t.pending = 0
		if err != nil {
			plog.Fatalf("cannot commit tx (%s)", err)
		}
	}
	if !stop {
		t.tx = t.backend.begin(true)
	}
}
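
// batchTxBuffered is a batchTx that additionally keeps its writes in an
// in-memory buffer; on Unlock the buffered writes are copied into the
// backend's read buffer so concurrent read txs can observe the still
// uncommitted changes.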
type batchTxBuffered struct {
	batchTx
	buf txWriteBuffer
}

func newBatchTxBuffered(backend *backend) *batchTxBuffered {
	tx := &batchTxBuffered{
		batchTx: batchTx{backend: backend},
		buf: txWriteBuffer{
			txBuffer: txBuffer{make(map[string]*bucketBuffer)},
			seq:      true,
		},
	}
	tx.Commit()
	return tx
}
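
// Unlock writes any buffered changes back into the backend's read buffer so
// in-progress reads can observe them, commits if the batch limit has been
// reached, and then releases the lock.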
func (t *batchTxBuffered) Unlock() {
	if t.pending != 0 {
		t.backend.readTx.mu.Lock()
		t.buf.writeback(&t.backend.readTx.buf)
		t.backend.readTx.mu.Unlock()
		if t.pending >= t.backend.batchLimit {
			t.commit(false)
		}
	}
	t.batchTx.Unlock()
}

func (t *batchTxBuffered) Commit() {
	t.Lock()
	defer t.Unlock()
	t.commit(false)
}

func (t *batchTxBuffered) CommitAndStop() {
	t.Lock()
	defer t.Unlock()
	t.commit(true)
}

func (t *batchTxBuffered) commit(stop bool) {
	// all read txs must be closed to acquire boltdb commit rwlock
	t.backend.readTx.mu.Lock()
	defer t.backend.readTx.mu.Unlock()
	t.unsafeCommit(stop)
}
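
// unsafeCommit must be called while holding the read tx lock: it rolls back
// the outstanding read tx, resets the read buffer, commits the batch tx, and,
// unless stopping, opens fresh read and write txs.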
func (t *batchTxBuffered) unsafeCommit(stop bool) {
	if t.backend.readTx.tx != nil {
		if err := t.backend.readTx.tx.Rollback(); err != nil {
			plog.Fatalf("cannot rollback tx (%s)", err)
		}
		t.backend.readTx.buf.reset()
		t.backend.readTx.tx = nil
	}

	t.batchTx.commit(stop)

	if !stop {
		t.backend.readTx.tx = t.backend.begin(false)
	}
}

func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
	t.batchTx.UnsafePut(bucketName, key, value)
	t.buf.put(bucketName, key, value)
}

func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
	t.batchTx.UnsafeSeqPut(bucketName, key, value)
	t.buf.putSeq(bucketName, key, value)
}