batch_tx.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import (
	"bytes"
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"github.com/boltdb/bolt"
)
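
// BatchTx extends ReadTx with write operations; changes accumulate in the
// current bolt transaction and are flushed to disk in batches. All Unsafe*
// methods must be called while holding the lock on the tx, while Commit and
// CommitAndStop take the lock themselves.
//
// A rough usage sketch (assuming a backend value b that exposes its BatchTx,
// as done elsewhere in this package):
//
//	tx := b.BatchTx()
//	tx.Lock()
//	tx.UnsafePut(bucketName, key, value)
//	tx.Unlock() // Unlock may commit once enough writes are pending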
type BatchTx interface {
	ReadTx
	UnsafeCreateBucket(name []byte)
	UnsafePut(bucketName []byte, key []byte, value []byte)
	UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
	UnsafeDelete(bucketName []byte, key []byte)
	// Commit commits a previous tx and begins a new writable one.
	Commit()
	// CommitAndStop commits the previous tx and does not create a new one.
	CommitAndStop()
}
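
// batchTx wraps a writable bolt.Tx. The embedded mutex must be held for all
// Unsafe* operations; pending counts operations applied since the last commit
// and is used by Unlock to decide when to commit the batch.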
type batchTx struct {
	sync.Mutex
	tx      *bolt.Tx
	backend *backend
	pending int
}
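
// UnsafeCreateBucket must be called holding the lock on the tx. It creates
// the bucket if it does not already exist.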
func (t *batchTx) UnsafeCreateBucket(name []byte) {
	_, err := t.tx.CreateBucket(name)
	if err != nil && err != bolt.ErrBucketExists {
		plog.Fatalf("cannot create bucket %s (%v)", name, err)
	}
	t.pending++
}

// UnsafePut must be called holding the lock on the tx.
func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
	t.unsafePut(bucketName, key, value, false)
}

// UnsafeSeqPut must be called holding the lock on the tx.
func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
	t.unsafePut(bucketName, key, value, true)
}

func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
	bucket := t.tx.Bucket(bucketName)
	if bucket == nil {
		plog.Fatalf("bucket %s does not exist", bucketName)
	}
	if seq {
		// it is useful to increase fill percent when the workloads are mostly append-only.
		// this can delay the page split and reduce space usage.
		bucket.FillPercent = 0.9
	}
	if err := bucket.Put(key, value); err != nil {
		plog.Fatalf("cannot put key into bucket (%v)", err)
	}
	t.pending++
}

// UnsafeRange must be called holding the lock on the tx.
func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
	k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit)
	if err != nil {
		plog.Fatal(err)
	}
	return k, v
}

func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) {
	bucket := tx.Bucket(bucketName)
	if bucket == nil {
		return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName)
	}
	if len(endKey) == 0 {
		if v := bucket.Get(key); v != nil {
			return append(keys, key), append(vs, v), nil
		}
		return nil, nil, nil
	}
	if limit <= 0 {
		limit = math.MaxInt64
	}
	c := bucket.Cursor()
	for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() {
		vs = append(vs, cv)
		keys = append(keys, ck)
		if limit == int64(len(keys)) {
			break
		}
	}
	return keys, vs, nil
}

// UnsafeDelete must be called holding the lock on the tx.
func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
	bucket := t.tx.Bucket(bucketName)
	if bucket == nil {
		plog.Fatalf("bucket %s does not exist", bucketName)
	}
	err := bucket.Delete(key)
	if err != nil {
		plog.Fatalf("cannot delete key from bucket (%v)", err)
	}
	t.pending++
}

// UnsafeForEach must be called holding the lock on the tx.
func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
	return unsafeForEach(t.tx, bucketName, visitor)
}

func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
	if b := tx.Bucket(bucket); b != nil {
		return b.ForEach(visitor)
	}
	return nil
}

// Commit commits a previous tx and begins a new writable one.
func (t *batchTx) Commit() {
	t.Lock()
	defer t.Unlock()
	t.commit(false)
}

// CommitAndStop commits the previous tx and does not create a new one.
func (t *batchTx) CommitAndStop() {
	t.Lock()
	defer t.Unlock()
	t.commit(true)
}
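
// Unlock commits the batch if the number of pending operations has reached
// the backend's batch limit, then releases the lock.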
func (t *batchTx) Unlock() {
	if t.pending >= t.backend.batchLimit {
		t.commit(false)
	}
	t.Mutex.Unlock()
}
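
// commit flushes the current bolt tx and, unless stop is set, immediately
// begins a new writable one. When nothing is pending and stop is not set,
// it only refreshes the recorded database size.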
func (t *batchTx) commit(stop bool) {
	// commit the last tx
	if t.tx != nil {
		if t.pending == 0 && !stop {
			t.backend.mu.RLock()
			defer t.backend.mu.RUnlock()

			// batchTx.commit(true) calls *bolt.Tx.Commit, which
			// sets *bolt.Tx.db and *bolt.Tx.meta to nil, so a
			// subsequent *bolt.Tx.Size() call panics.
			//
			// This nil pointer dereference panic happens when:
			// 1. batchTx.commit(false) from newBatchTx
			// 2. batchTx.commit(true) from stopping backend
			// 3. batchTx.commit(false) from inflight mvcc Hash call
			//
			// Check if db is nil to prevent this panic
			if t.tx.DB() != nil {
				atomic.StoreInt64(&t.backend.size, t.tx.Size())
			}
			return
		}

		start := time.Now()

		// gofail: var beforeCommit struct{}
		err := t.tx.Commit()
		// gofail: var afterCommit struct{}

		commitDurations.Observe(time.Since(start).Seconds())
		atomic.AddInt64(&t.backend.commits, 1)

		t.pending = 0
		if err != nil {
			plog.Fatalf("cannot commit tx (%s)", err)
		}
	}
	if !stop {
		t.tx = t.backend.begin(true)
	}
}
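
// batchTxBuffered is a batchTx that additionally mirrors its writes into an
// in-memory buffer. On Unlock the buffer is written back into the backend's
// read buffer so that reads can observe not-yet-committed writes.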
type batchTxBuffered struct {
	batchTx
	buf txWriteBuffer
}

func newBatchTxBuffered(backend *backend) *batchTxBuffered {
	tx := &batchTxBuffered{
		batchTx: batchTx{backend: backend},
		buf: txWriteBuffer{
			txBuffer: txBuffer{make(map[string]*bucketBuffer)},
			seq:      true,
		},
	}
	tx.Commit()
	return tx
}
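
// Unlock writes pending changes back into the backend's read buffer before
// releasing the lock, committing first if the batch limit has been reached.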
func (t *batchTxBuffered) Unlock() {
	if t.pending != 0 {
		t.backend.readTx.mu.Lock()
		t.buf.writeback(&t.backend.readTx.buf)
		t.backend.readTx.mu.Unlock()
		if t.pending >= t.backend.batchLimit {
			t.commit(false)
		}
	}
	t.batchTx.Unlock()
}

func (t *batchTxBuffered) Commit() {
	t.Lock()
	defer t.Unlock()
	t.commit(false)
}

func (t *batchTxBuffered) CommitAndStop() {
	t.Lock()
	defer t.Unlock()
	t.commit(true)
}

func (t *batchTxBuffered) commit(stop bool) {
	// all read txs must be closed to acquire boltdb commit rwlock
	t.backend.readTx.mu.Lock()
	defer t.backend.readTx.mu.Unlock()
	t.unsafeCommit(stop)
}
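
// unsafeCommit rolls back the backend's open read-only tx, resets the read
// buffer, commits the batch tx, and, unless stopping, opens a fresh read-only
// tx. It must be called with readTx.mu held.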
func (t *batchTxBuffered) unsafeCommit(stop bool) {
	if t.backend.readTx.tx != nil {
		if err := t.backend.readTx.tx.Rollback(); err != nil {
			plog.Fatalf("cannot rollback tx (%s)", err)
		}
		t.backend.readTx.buf.reset()
		t.backend.readTx.tx = nil
	}

	t.batchTx.commit(stop)

	if !stop {
		t.backend.readTx.tx = t.backend.begin(false)
	}
}

func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
	t.batchTx.UnsafePut(bucketName, key, value)
	t.buf.put(bucketName, key, value)
}

func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
	t.batchTx.UnsafeSeqPut(bucketName, key, value)
	t.buf.putSeq(bucketName, key, value)
}