backend.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import (
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"github.com/boltdb/bolt"
	"github.com/coreos/pkg/capnslog"
)

var (
	defaultBatchLimit    = 10000
	defaultBatchInterval = 100 * time.Millisecond

	defragLimit = 10000

	// initialMmapSize is the initial size of the mmapped region. Setting this
	// larger than the potential max db size can prevent the writer from
	// blocking readers. This only works on Linux.
	initialMmapSize = uint64(10 * 1024 * 1024 * 1024)

	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")
)

// Backend describes the persistent storage layer that backs the MVCC store.
type Backend interface {
	// ReadTx returns a read transaction.
	ReadTx() ReadTx
	// BatchTx returns the batched write transaction.
	BatchTx() BatchTx
	// Snapshot returns a point-in-time snapshot of the backend.
	Snapshot() Snapshot
	// Hash hashes the bucket contents, skipping the given keys.
	Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
	// Size returns the current size of the backend.
	Size() int64
	// Defrag rewrites the database file to reclaim unused space.
	Defrag() error
	// ForceCommit forces the current batching tx to commit.
	ForceCommit()
	// Close flushes pending writes and closes the database.
	Close() error
}
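
// A minimal lifecycle sketch (illustrative, not part of the original file),
// using the constructors defined later in this file; the path is hypothetical:
//
//	b := backend.NewDefaultBackend("/path/to/db")
//	defer b.Close()
//
//	// ... writes go through b.BatchTx() ...
//	b.ForceCommit()     // flush the batch tx now instead of waiting
//	size := b.Size()    // current database size in bytes
//	if err := b.Defrag(); err != nil {
//		// handle defragmentation failure
//	}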

// Snapshot is a point-in-time view of the backend that can be written out.
type Snapshot interface {
	// Size gets the size of the snapshot.
	Size() int64
	// WriteTo writes the snapshot into the given writer.
	WriteTo(w io.Writer) (n int64, err error)
	// Close closes the snapshot.
	Close() error
}

type backend struct {
	// size and commits are used with atomic operations so they must be
	// 64-bit aligned, otherwise 32-bit tests will crash

	// size is the number of bytes in the backend
	size int64
	// commits counts number of commits since start
	commits int64

	mu sync.RWMutex
	db *bolt.DB

	batchInterval time.Duration
	batchLimit    int
	batchTx       *batchTxBuffered

	readTx *readTx

	stopc chan struct{}
	donec chan struct{}
}

// BackendConfig holds the tunable parameters for opening a backend.
type BackendConfig struct {
	// Path is the file path to the backend file.
	Path string
	// BatchInterval is the maximum time before flushing the BatchTx.
	BatchInterval time.Duration
	// BatchLimit is the maximum puts before flushing the BatchTx.
	BatchLimit int
	// MmapSize is the number of bytes to mmap for the backend.
	MmapSize uint64
}

func DefaultBackendConfig() BackendConfig {
	return BackendConfig{
		BatchInterval: defaultBatchInterval,
		BatchLimit:    defaultBatchLimit,
		MmapSize:      initialMmapSize,
	}
}

func New(bcfg BackendConfig) Backend {
	return newBackend(bcfg)
}

func NewDefaultBackend(path string) Backend {
	bcfg := DefaultBackendConfig()
	bcfg.Path = path
	return newBackend(bcfg)
}
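
// Illustrative sketch (not part of the original file): opening a backend with
// a custom flush policy; the path and values here are hypothetical.
//
//	bcfg := backend.DefaultBackendConfig()
//	bcfg.Path = "/var/lib/etcd/member/snap/db"
//	bcfg.BatchInterval = 50 * time.Millisecond
//	bcfg.BatchLimit = 5000
//	b := backend.New(bcfg)
//	defer b.Close()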

func newBackend(bcfg BackendConfig) *backend {
	bopts := &bolt.Options{}
	if boltOpenOptions != nil {
		*bopts = *boltOpenOptions
	}
	bopts.InitialMmapSize = int(bcfg.MmapSize)

	db, err := bolt.Open(bcfg.Path, 0600, bopts)
	if err != nil {
		plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
	}

	// In future, may want to make buffering optional for low-concurrency systems
	// or dynamically swap between buffered/non-buffered depending on workload.
	b := &backend{
		db: db,

		batchInterval: bcfg.BatchInterval,
		batchLimit:    bcfg.BatchLimit,

		readTx: &readTx{buf: txReadBuffer{
			txBuffer: txBuffer{make(map[string]*bucketBuffer)}},
		},

		stopc: make(chan struct{}),
		donec: make(chan struct{}),
	}
	b.batchTx = newBatchTxBuffered(b)
	go b.run()
	return b
}

// BatchTx returns the current batch tx in coalescer. The tx can be used for read and
// write operations. The write result can be retrieved within the same tx immediately.
// The write result is isolated from other txs until the current one gets committed.
func (b *backend) BatchTx() BatchTx {
	return b.batchTx
}

func (b *backend) ReadTx() ReadTx { return b.readTx }
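
// Illustrative sketch of the read-your-writes behavior described above (not
// part of the original file). UnsafeCreateBucket, UnsafePut and UnsafeRange
// are assumed from this package's batch_tx.go and read_tx.go; the bucket and
// key names are made up.
//
//	tx := b.BatchTx()
//	tx.Lock()
//	tx.UnsafeCreateBucket([]byte("test"))
//	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
//	// the write is visible within this tx before any commit...
//	_, vals := tx.UnsafeRange([]byte("test"), []byte("foo"), nil, 0)
//	tx.Unlock()
//	// ...and becomes visible to other txs only after a commit.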

// ForceCommit forces the current batching tx to commit.
func (b *backend) ForceCommit() {
	b.batchTx.Commit()
}

func (b *backend) Snapshot() Snapshot {
	b.batchTx.Commit()

	b.mu.RLock()
	defer b.mu.RUnlock()
	tx, err := b.db.Begin(false)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}
	return &snapshot{tx}
}
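
// Illustrative sketch (not part of the original file): streaming a snapshot
// to a file; the destination path is hypothetical.
//
//	snap := b.Snapshot()
//	defer snap.Close()
//	f, err := os.Create("/path/to/db.snapshot")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if _, err := snap.WriteTo(f); err != nil {
//		return err
//	}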

// IgnoreKey identifies a bucket/key pair to skip when hashing the backend.
type IgnoreKey struct {
	Bucket string
	Key    string
}

func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	b.mu.RLock()
	defer b.mu.RUnlock()
	err := b.db.View(func(tx *bolt.Tx) error {
		c := tx.Cursor()
		for next, _ := c.First(); next != nil; next, _ = c.Next() {
			b := tx.Bucket(next)
			if b == nil {
				return fmt.Errorf("cannot get hash of bucket %s", string(next))
			}
			h.Write(next)
			b.ForEach(func(k, v []byte) error {
				bk := IgnoreKey{Bucket: string(next), Key: string(k)}
				if _, ok := ignores[bk]; !ok {
					h.Write(k)
					h.Write(v)
				}
				return nil
			})
		}
		return nil
	})

	if err != nil {
		return 0, err
	}

	return h.Sum32(), nil
}
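
// Illustrative sketch (not part of the original file): hashing the store
// while skipping a key that is expected to differ between members. The
// bucket/key pair mirrors etcd's consistent-index bookkeeping but is shown
// here only as an example.
//
//	ignores := map[backend.IgnoreKey]struct{}{
//		{Bucket: "meta", Key: "consistent_index"}: {},
//	}
//	h, err := b.Hash(ignores)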

func (b *backend) Size() int64 {
	return atomic.LoadInt64(&b.size)
}

// run periodically commits the batch tx until the backend is stopped.
func (b *backend) run() {
	defer close(b.donec)
	t := time.NewTimer(b.batchInterval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
		case <-b.stopc:
			b.batchTx.CommitAndStop()
			return
		}
		b.batchTx.Commit()
		t.Reset(b.batchInterval)
	}
}

func (b *backend) Close() error {
	close(b.stopc)
	<-b.donec
	return b.db.Close()
}

// Commits returns the total number of commits since start.
func (b *backend) Commits() int64 {
	return atomic.LoadInt64(&b.commits)
}

func (b *backend) Defrag() error {
	err := b.defrag()
	if err != nil {
		return err
	}

	// commit to update metadata like db.size
	b.batchTx.Commit()

	return nil
}

func (b *backend) defrag() error {
	// TODO: make this non-blocking?
	// lock batchTx to ensure nobody is using previous tx, and then
	// close previous ongoing tx.
	b.batchTx.Lock()
	defer b.batchTx.Unlock()

	// lock database after lock tx to avoid deadlock.
	b.mu.Lock()
	defer b.mu.Unlock()

	// block concurrent read requests while resetting tx
	b.readTx.mu.Lock()
	defer b.readTx.mu.Unlock()

	b.batchTx.unsafeCommit(true)
	b.batchTx.tx = nil

	tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
	if err != nil {
		return err
	}

	err = defragdb(b.db, tmpdb, defragLimit)
	if err != nil {
		tmpdb.Close()
		os.RemoveAll(tmpdb.Path())
		return err
	}

	dbp := b.db.Path()
	tdbp := tmpdb.Path()

	err = b.db.Close()
	if err != nil {
		plog.Fatalf("cannot close database (%s)", err)
	}
	err = tmpdb.Close()
	if err != nil {
		plog.Fatalf("cannot close database (%s)", err)
	}
	err = os.Rename(tdbp, dbp)
	if err != nil {
		plog.Fatalf("cannot rename database (%s)", err)
	}

	b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
	if err != nil {
		plog.Panicf("cannot open database at %s (%v)", dbp, err)
	}
	b.batchTx.tx, err = b.db.Begin(true)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}

	b.readTx.buf.reset()
	b.readTx.tx = b.unsafeBegin(false)
	atomic.StoreInt64(&b.size, b.readTx.tx.Size())

	return nil
}

func defragdb(odb, tmpdb *bolt.DB, limit int) error {
	// open a tx on tmpdb for writes
	tmptx, err := tmpdb.Begin(true)
	if err != nil {
		return err
	}

	// open a tx on old db for read
	tx, err := odb.Begin(false)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	c := tx.Cursor()

	count := 0
	for next, _ := c.First(); next != nil; next, _ = c.Next() {
		b := tx.Bucket(next)
		if b == nil {
			return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
		}

		tmpb, berr := tmptx.CreateBucketIfNotExists(next)
		if berr != nil {
			return berr
		}
		tmpb.FillPercent = 0.9 // for sequential write in the ForEach below

		if err = b.ForEach(func(k, v []byte) error {
			count++
			if count > limit {
				err = tmptx.Commit()
				if err != nil {
					return err
				}
				tmptx, err = tmpdb.Begin(true)
				if err != nil {
					return err
				}
				tmpb = tmptx.Bucket(next)
				tmpb.FillPercent = 0.9 // for sequential write in the ForEach below

				count = 0
			}
			return tmpb.Put(k, v)
		}); err != nil {
			return err
		}
	}

	return tmptx.Commit()
}

func (b *backend) begin(write bool) *bolt.Tx {
	b.mu.RLock()
	tx := b.unsafeBegin(write)
	b.mu.RUnlock()
	atomic.StoreInt64(&b.size, tx.Size())
	return tx
}

func (b *backend) unsafeBegin(write bool) *bolt.Tx {
	tx, err := b.db.Begin(write)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}
	return tx
}

// NewTmpBackend creates a backend implementation for testing.
func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
	dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
	if err != nil {
		plog.Fatal(err)
	}
	tmpPath := filepath.Join(dir, "database")
	bcfg := DefaultBackendConfig()
	bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
	return newBackend(bcfg), tmpPath
}

func NewDefaultTmpBackend() (*backend, string) {
	return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
}
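
// Illustrative test usage (not part of the original file): create a throwaway
// backend and remove its temporary directory when the test finishes.
//
//	b, tmpPath := backend.NewDefaultTmpBackend()
//	defer func() {
//		b.Close()
//		os.RemoveAll(filepath.Dir(tmpPath))
//	}()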

type snapshot struct {
	*bolt.Tx
}

func (s *snapshot) Close() error { return s.Tx.Rollback() }