backend.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import (
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	bolt "github.com/coreos/bbolt"
	"github.com/coreos/pkg/capnslog"
)

var (
	defaultBatchLimit    = 10000
	defaultBatchInterval = 100 * time.Millisecond

	defragLimit = 10000

	// initialMmapSize is the initial size of the mmapped region. Setting this larger than
	// the potential max db size can prevent writers from blocking readers.
	// This only works on linux.
	initialMmapSize = uint64(10 * 1024 * 1024 * 1024)

	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")

	// minSnapshotWarningTimeout is the minimum threshold to trigger a long-running snapshot warning.
	minSnapshotWarningTimeout = time.Duration(30 * time.Second)
)

type Backend interface {
	ReadTx() ReadTx
	BatchTx() BatchTx

	Snapshot() Snapshot
	Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
	// Size returns the current size of the backend physically allocated.
	// The backend can hold DB space that is not utilized at the moment,
	// since it can conduct pre-allocation or spare unused space for recycling.
	// Use SizeInUse() instead for the actual DB size.
	Size() int64
	// SizeInUse returns the current size of the backend logically in use.
	// Since the backend can manage free space in a non-byte unit such as
	// number of pages, the returned value may not be exact in bytes.
	SizeInUse() int64
	Defrag() error
	ForceCommit()
	Close() error
}
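// exampleBackendLifecycle is an illustrative sketch (not part of the original
// file) of how a caller typically drives the Backend interface declared above:
// open a backend, force a commit, compare allocated vs. in-use size, and close.
// Only methods declared on Backend are used; "/tmp/example.db" is a placeholder path.
func exampleBackendLifecycle() {
	b := NewDefaultBackend("/tmp/example.db")
	defer b.Close()

	// Flush any pending batched writes so the reported sizes are up to date.
	b.ForceCommit()

	// Size() reports bytes physically allocated; SizeInUse() excludes free pages,
	// so it is the better estimate of live data.
	plog.Infof("allocated=%d bytes, in use=%d bytes", b.Size(), b.SizeInUse())
}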
type Snapshot interface {
	// Size gets the size of the snapshot.
	Size() int64
	// WriteTo writes the snapshot into the given writer.
	WriteTo(w io.Writer) (n int64, err error)
	// Close closes the snapshot.
	Close() error
}
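// exampleWriteSnapshot sketches how the Snapshot interface above is meant to be
// consumed (not part of the original file): take a snapshot, stream it into a
// file with WriteTo, then Close to release the underlying read transaction.
// The target path is supplied by the caller and is a placeholder here.
func exampleWriteSnapshot(b Backend, path string) error {
	snap := b.Snapshot()
	defer snap.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// WriteTo copies the whole database; Size() reports how many bytes to expect.
	if _, err := snap.WriteTo(f); err != nil {
		return err
	}
	return f.Sync()
}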
type backend struct {
	// size and commits are used with atomic operations so they must be
	// 64-bit aligned, otherwise 32-bit tests will crash

	// size is the number of bytes allocated in the backend
	size int64
	// sizeInUse is the number of bytes actually used in the backend
	sizeInUse int64
	// commits counts number of commits since start
	commits int64

	mu sync.RWMutex
	db *bolt.DB

	batchInterval time.Duration
	batchLimit    int
	batchTx       *batchTxBuffered

	readTx *readTx

	stopc chan struct{}
	donec chan struct{}
}

type BackendConfig struct {
	// Path is the file path to the backend file.
	Path string
	// BatchInterval is the maximum time before flushing the BatchTx.
	BatchInterval time.Duration
	// BatchLimit is the maximum puts before flushing the BatchTx.
	BatchLimit int
	// MmapSize is the number of bytes to mmap for the backend.
	MmapSize uint64
}

func DefaultBackendConfig() BackendConfig {
	return BackendConfig{
		BatchInterval: defaultBatchInterval,
		BatchLimit:    defaultBatchLimit,
		MmapSize:      initialMmapSize,
	}
}
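// exampleCustomConfig is a sketch (not part of the original file) of tuning
// BackendConfig before opening a backend: start from the defaults, then
// override the mmap reservation and the batching knobs. The concrete values
// below are placeholders, not recommendations.
func exampleCustomConfig(path string) Backend {
	bcfg := DefaultBackendConfig()
	bcfg.Path = path
	bcfg.MmapSize = 2 * 1024 * 1024 * 1024     // reserve 2 GB of mmap space
	bcfg.BatchInterval = 50 * time.Millisecond // flush batched writes more often
	bcfg.BatchLimit = 5000                     // or after 5000 puts, whichever comes first
	return New(bcfg)
}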
func New(bcfg BackendConfig) Backend {
	return newBackend(bcfg)
}

func NewDefaultBackend(path string) Backend {
	bcfg := DefaultBackendConfig()
	bcfg.Path = path
	return newBackend(bcfg)
}

func newBackend(bcfg BackendConfig) *backend {
	bopts := &bolt.Options{}
	if boltOpenOptions != nil {
		*bopts = *boltOpenOptions
	}
	bopts.InitialMmapSize = bcfg.mmapSize()

	db, err := bolt.Open(bcfg.Path, 0600, bopts)
	if err != nil {
		plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
	}

	// In the future, we may want to make buffering optional for low-concurrency systems
	// or dynamically swap between buffered/non-buffered depending on workload.
	b := &backend{
		db: db,

		batchInterval: bcfg.BatchInterval,
		batchLimit:    bcfg.BatchLimit,

		readTx: &readTx{
			buf: txReadBuffer{
				txBuffer: txBuffer{make(map[string]*bucketBuffer)},
			},
			buckets: make(map[string]*bolt.Bucket),
		},

		stopc: make(chan struct{}),
		donec: make(chan struct{}),
	}
	b.batchTx = newBatchTxBuffered(b)
	go b.run()
	return b
}

// BatchTx returns the current batch tx in coalescer. The tx can be used for read and
// write operations. The write result can be retrieved within the same tx immediately.
// The write result is isolated from other txs until the current one gets committed.
func (b *backend) BatchTx() BatchTx {
	return b.batchTx
}

func (b *backend) ReadTx() ReadTx { return b.readTx }
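// exampleWriteThenRead sketches the write/read pattern around BatchTx and
// ReadTx (not part of the original file). It assumes the BatchTx/ReadTx method
// sets declared elsewhere in this package (batch_tx.go, read_tx.go): Lock,
// Unlock, UnsafeCreateBucket, UnsafePut and UnsafeRange. Bucket, key, and value
// names are placeholders.
func exampleWriteThenRead(b Backend) {
	bucket, key, val := []byte("example"), []byte("k"), []byte("v")

	// Writes go through the batch tx; the Unsafe* calls require holding its lock.
	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket(bucket)
	tx.UnsafePut(bucket, key, val)
	tx.Unlock()

	// The write becomes visible to other txs once the batch tx commits
	// (ForceCommit here, or automatically after BatchInterval/BatchLimit).
	b.ForceCommit()

	rtx := b.ReadTx()
	rtx.Lock()
	_, vals := rtx.UnsafeRange(bucket, key, nil, 1) // single-key lookup, limit 1
	rtx.Unlock()
	if len(vals) == 1 {
		plog.Infof("read back %q", vals[0])
	}
}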
// ForceCommit forces the current batching tx to commit.
func (b *backend) ForceCommit() {
	b.batchTx.Commit()
}

func (b *backend) Snapshot() Snapshot {
	b.batchTx.Commit()

	b.mu.RLock()
	defer b.mu.RUnlock()
	tx, err := b.db.Begin(false)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}

	stopc, donec := make(chan struct{}), make(chan struct{})
	dbBytes := tx.Size()
	go func() {
		defer close(donec)
		// sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection
		// assuming a min tcp throughput of 100MB/s.
		var sendRateBytes int64 = 100 * 1024 * 1024
		warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
		if warningTimeout < minSnapshotWarningTimeout {
			warningTimeout = minSnapshotWarningTimeout
		}
		start := time.Now()
		ticker := time.NewTicker(warningTimeout)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1024), start)
			case <-stopc:
				snapshotDurations.Observe(time.Since(start).Seconds())
				return
			}
		}
	}()

	return &snapshot{tx, stopc, donec}
}
type IgnoreKey struct {
	Bucket string
	Key    string
}

func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	b.mu.RLock()
	defer b.mu.RUnlock()
	err := b.db.View(func(tx *bolt.Tx) error {
		c := tx.Cursor()
		for next, _ := c.First(); next != nil; next, _ = c.Next() {
			b := tx.Bucket(next)
			if b == nil {
				return fmt.Errorf("cannot get hash of bucket %s", string(next))
			}
			h.Write(next)
			b.ForEach(func(k, v []byte) error {
				bk := IgnoreKey{Bucket: string(next), Key: string(k)}
				if _, ok := ignores[bk]; !ok {
					h.Write(k)
					h.Write(v)
				}
				return nil
			})
		}
		return nil
	})
	if err != nil {
		return 0, err
	}

	return h.Sum32(), nil
}
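// exampleHashIgnoringKey sketches how the ignores set of Hash above is used
// (not part of the original file): any bucket/key pair listed in the map is
// skipped while hashing, which lets callers exclude bookkeeping keys that are
// expected to differ. The "meta"/"consistent_index" names are illustrative only.
func exampleHashIgnoringKey(b Backend) (uint32, error) {
	ignores := map[IgnoreKey]struct{}{
		{Bucket: "meta", Key: "consistent_index"}: {},
	}
	return b.Hash(ignores)
}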
func (b *backend) Size() int64 {
	return atomic.LoadInt64(&b.size)
}

func (b *backend) SizeInUse() int64 {
	return atomic.LoadInt64(&b.sizeInUse)
}

func (b *backend) run() {
	defer close(b.donec)
	t := time.NewTimer(b.batchInterval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
		case <-b.stopc:
			b.batchTx.CommitAndStop()
			return
		}
		b.batchTx.Commit()
		t.Reset(b.batchInterval)
	}
}

func (b *backend) Close() error {
	close(b.stopc)
	<-b.donec
	return b.db.Close()
}
// Commits returns total number of commits since start.
func (b *backend) Commits() int64 {
	return atomic.LoadInt64(&b.commits)
}

func (b *backend) Defrag() error {
	err := b.defrag()
	if err != nil {
		return err
	}

	// commit to update metadata like db.size
	b.batchTx.Commit()

	return nil
}
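// exampleDefrag sketches when Defrag pays off (not part of the original file):
// after heavy deletes, Size() stays large while SizeInUse() shrinks; Defrag
// rewrites the database so the allocated size tracks the in-use size again.
func exampleDefrag(b Backend) error {
	before, beforeInUse := b.Size(), b.SizeInUse()
	if err := b.Defrag(); err != nil {
		return err
	}
	plog.Infof("defrag: size %d -> %d bytes (in use %d -> %d)",
		before, b.Size(), beforeInUse, b.SizeInUse())
	return nil
}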
func (b *backend) defrag() error {
	// TODO: make this non-blocking?
	// lock batchTx to ensure nobody is using previous tx, and then
	// close previous ongoing tx.
	b.batchTx.Lock()
	defer b.batchTx.Unlock()

	// lock database after locking tx to avoid deadlock.
	b.mu.Lock()
	defer b.mu.Unlock()

	// block concurrent read requests while resetting tx
	b.readTx.mu.Lock()
	defer b.readTx.mu.Unlock()

	b.batchTx.unsafeCommit(true)
	b.batchTx.tx = nil

	tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
	if err != nil {
		return err
	}

	err = defragdb(b.db, tmpdb, defragLimit)
	if err != nil {
		tmpdb.Close()
		os.RemoveAll(tmpdb.Path())
		return err
	}

	dbp := b.db.Path()
	tdbp := tmpdb.Path()

	err = b.db.Close()
	if err != nil {
		plog.Fatalf("cannot close database (%s)", err)
	}
	err = tmpdb.Close()
	if err != nil {
		plog.Fatalf("cannot close database (%s)", err)
	}
	err = os.Rename(tdbp, dbp)
	if err != nil {
		plog.Fatalf("cannot rename database (%s)", err)
	}

	b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
	if err != nil {
		plog.Panicf("cannot open database at %s (%v)", dbp, err)
	}
	b.batchTx.tx, err = b.db.Begin(true)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}

	b.readTx.reset()
	b.readTx.tx = b.unsafeBegin(false)

	size := b.readTx.tx.Size()
	db := b.readTx.tx.DB()
	atomic.StoreInt64(&b.size, size)
	atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))

	return nil
}
func defragdb(odb, tmpdb *bolt.DB, limit int) error {
	// open a tx on tmpdb for writes
	tmptx, err := tmpdb.Begin(true)
	if err != nil {
		return err
	}

	// open a tx on old db for read
	tx, err := odb.Begin(false)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	c := tx.Cursor()

	count := 0
	for next, _ := c.First(); next != nil; next, _ = c.Next() {
		b := tx.Bucket(next)
		if b == nil {
			return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
		}

		tmpb, berr := tmptx.CreateBucketIfNotExists(next)
		if berr != nil {
			return berr
		}
		tmpb.FillPercent = 0.9 // for sequential writes in the ForEach below

		if err = b.ForEach(func(k, v []byte) error {
			count++
			if count > limit {
				err = tmptx.Commit()
				if err != nil {
					return err
				}
				tmptx, err = tmpdb.Begin(true)
				if err != nil {
					return err
				}
				tmpb = tmptx.Bucket(next)
				tmpb.FillPercent = 0.9 // for sequential writes in the ForEach below

				count = 0
			}
			return tmpb.Put(k, v)
		}); err != nil {
			return err
		}
	}

	return tmptx.Commit()
}
func (b *backend) begin(write bool) *bolt.Tx {
	b.mu.RLock()
	tx := b.unsafeBegin(write)
	b.mu.RUnlock()

	size := tx.Size()
	db := tx.DB()
	atomic.StoreInt64(&b.size, size)
	atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))

	return tx
}

func (b *backend) unsafeBegin(write bool) *bolt.Tx {
	tx, err := b.db.Begin(write)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}
	return tx
}

// NewTmpBackend creates a backend implementation for testing.
func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
	dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
	if err != nil {
		plog.Fatal(err)
	}
	tmpPath := filepath.Join(dir, "database")
	bcfg := DefaultBackendConfig()
	bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
	return newBackend(bcfg), tmpPath
}

func NewDefaultTmpBackend() (*backend, string) {
	return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
}
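// exampleTmpBackendUsage sketches the intended test usage of the helpers above
// (not part of the original file): create a throwaway backend, and remove both
// the backend and its temporary directory when the test is done.
func exampleTmpBackendUsage() {
	b, tmpPath := NewDefaultTmpBackend()
	defer os.RemoveAll(filepath.Dir(tmpPath)) // delete the temp dir and database file
	defer b.Close()

	// ... exercise b.BatchTx(), b.ReadTx(), b.Snapshot(), etc. ...
}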
type snapshot struct {
	*bolt.Tx
	stopc chan struct{}
	donec chan struct{}
}

func (s *snapshot) Close() error {
	close(s.stopc)
	<-s.donec
	return s.Tx.Rollback()
}