// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"encoding/binary"
	"errors"
	"hash/crc32"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"github.com/coreos/etcd/pkg/schedule"
	"github.com/coreos/pkg/capnslog"

	"golang.org/x/net/context"
)

var (
	keyBucketName  = []byte("key")
	metaBucketName = []byte("meta")

	consistentIndexKeyName  = []byte("consistent_index")
	scheduledCompactKeyName = []byte("scheduledCompactRev")
	finishedCompactKeyName  = []byte("finishedCompactRev")

	ErrCompacted = errors.New("mvcc: required revision has been compacted")
	ErrFutureRev = errors.New("mvcc: required revision is a future revision")
	ErrCanceled  = errors.New("mvcc: watcher is canceled")
	ErrClosed    = errors.New("mvcc: closed")

	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc")

	emptyKeep = make(map[revision]struct{})
)

const (
	// markedRevBytesLen is the byte length of a marked revision.
	// The first `revBytesLen` bytes represent a normal revision. The last
	// byte is the mark.
	markedRevBytesLen = revBytesLen + 1
	markBytePosition  = markedRevBytesLen - 1

	markTombstone byte = 't'
)

var restoreChunkKeys = 10000 // non-const for testing

// ConsistentIndexGetter is an interface that wraps the ConsistentIndex method.
// A consistent index is the offset of an entry in a consistent replicated log.
type ConsistentIndexGetter interface {
	// ConsistentIndex returns the consistent index of the currently executing entry.
	ConsistentIndex() uint64
}
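
// A minimal ConsistentIndexGetter for tests might look like the sketch below.
// The name fakeIndexGetter is hypothetical and not part of this package:
//
//	type fakeIndexGetter uint64
//
//	func (f fakeIndexGetter) ConsistentIndex() uint64 { return uint64(f) }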

type store struct {
	ReadView
	WriteView

	// consistentIndex caches the "consistent_index" key's value. It is
	// accessed through atomics so it must be 64-bit aligned.
	consistentIndex uint64

	// mu is read-locked for txns and write-locked for non-txn store changes.
	mu sync.RWMutex

	ig ConsistentIndexGetter

	b       backend.Backend
	kvindex index

	le lease.Lessor

	// revMu protects currentRev and compactMainRev.
	// It is locked at the end of a write txn and released after the write
	// txn is unlocked. It is locked before locking a read txn and released
	// after locking.
	revMu sync.RWMutex
	// currentRev is the revision of the last completed transaction.
	currentRev int64
	// compactMainRev is the main revision of the last compaction.
	compactMainRev int64

	// bytesBuf8 is a byte slice of length 8
	// to avoid a repetitive allocation in saveIndex.
	bytesBuf8 []byte

	fifoSched schedule.Scheduler

	stopc chan struct{}

	// keepMu protects keep.
	keepMu sync.RWMutex
	// keep contains all revisions <= compactMainRev to be kept for the
	// ongoing compaction; it is nil otherwise.
	keep map[revision]struct{}
}

// NewStore returns a new store. It is intended for creating stores inside
// the mvcc package; external packages should use it only for testing.
func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
	s := &store{
		b:       b,
		ig:      ig,
		kvindex: newTreeIndex(),
		le:      le,

		currentRev:     1,
		compactMainRev: -1,

		bytesBuf8: make([]byte, 8),
		fifoSched: schedule.NewFIFOScheduler(),

		stopc: make(chan struct{}),
	}
	s.ReadView = &readView{s}
	s.WriteView = &writeView{s}
	if s.le != nil {
		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
	}

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket(keyBucketName)
	tx.UnsafeCreateBucket(metaBucketName)
	tx.Unlock()
	s.b.ForceCommit()

	if err := s.restore(); err != nil {
		// TODO: return the error instead of panicking here?
		panic("failed to recover store from backend")
	}

	return s
}
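
// Illustrative use of NewStore (a sketch under assumptions, not part of the
// original file): the backend path "kv.db" is made up, and passing nil for
// the lessor and index getter is reasonable only in tests, since both are
// nil-checked before use.
//
//	b := backend.NewDefaultBackend("kv.db")
//	s := NewStore(b, nil, nil)
//	defer s.Close()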

func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
	if ctx == nil || ctx.Err() != nil {
		s.mu.Lock()
		select {
		case <-s.stopc:
		default:
			// reschedule the barrier so it retries on the next pass
			f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
			s.fifoSched.Schedule(f)
		}
		s.mu.Unlock()
		return
	}
	close(ch)
}

func (s *store) Hash() (hash uint32, revision int64, err error) {
	s.b.ForceCommit()
	h, err := s.b.Hash(DefaultIgnores)
	return h, s.currentRev, err
}

func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
	s.mu.Lock()
	s.revMu.RLock()
	compactRev, currentRev = s.compactMainRev, s.currentRev
	s.revMu.RUnlock()

	if rev > 0 && rev <= compactRev {
		s.mu.Unlock()
		return 0, 0, compactRev, ErrCompacted
	} else if rev > 0 && rev > currentRev {
		s.mu.Unlock()
		return 0, currentRev, 0, ErrFutureRev
	}

	s.keepMu.Lock()
	if s.keep == nil {
		// ForceCommit ensures that txnRead begins after the backend
		// has committed all the changes from the previously completed
		// compaction.
		s.b.ForceCommit()
		s.keep = emptyKeep
	}
	keep := s.keep
	s.keepMu.Unlock()

	tx := s.b.ReadTx()
	tx.Lock()
	defer tx.Unlock()
	s.mu.Unlock()

	if rev == 0 {
		rev = currentRev
	}

	upper := revision{main: rev + 1}
	lower := revision{main: compactRev + 1}
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	h.Write(keyBucketName)
	err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
		kr := bytesToRev(k)
		if !upper.GreaterThan(kr) {
			return nil
		}
		// Skip revisions that are scheduled for deletion by the ongoing
		// compaction; if the keep set is empty, nothing is scheduled for
		// deletion, so skip nothing.
		if lower.GreaterThan(kr) && len(keep) > 0 {
			if _, ok := keep[kr]; !ok {
				return nil
			}
		}
		h.Write(k)
		h.Write(v)
		return nil
	})
	return h.Sum32(), currentRev, compactRev, err
}
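
// Illustrative call (a sketch, not part of the original file): passing 0
// hashes the keyspace at the store's current revision, per the rev == 0
// branch above.
//
//	h, crev, cmprev, err := s.HashByRev(0)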

func (s *store) Compact(rev int64) (<-chan struct{}, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.revMu.Lock()
	defer s.revMu.Unlock()

	if rev <= s.compactMainRev {
		ch := make(chan struct{})
		f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
		s.fifoSched.Schedule(f)
		return ch, ErrCompacted
	}
	if rev > s.currentRev {
		return nil, ErrFutureRev
	}

	start := time.Now()

	s.compactMainRev = rev

	rbytes := newRevBytes()
	revToBytes(revision{main: rev}, rbytes)

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
	tx.Unlock()
	// ensure that the desired compaction is persisted
	s.b.ForceCommit()

	keep := s.kvindex.Compact(rev)

	s.keepMu.Lock()
	s.keep = keep
	s.keepMu.Unlock()

	ch := make(chan struct{})
	j := func(ctx context.Context) {
		if ctx.Err() != nil {
			s.compactBarrier(ctx, ch)
			return
		}
		if !s.scheduleCompaction(rev, keep) {
			s.compactBarrier(nil, ch)
			return
		}
		close(ch)
		s.keepMu.Lock()
		s.keep = nil
		s.keepMu.Unlock()
	}

	s.fifoSched.Schedule(j)

	indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))

	return ch, nil
}
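
// Illustrative use of Compact (a sketch, not part of the original file): the
// returned channel is closed once the scheduled compaction actually finishes,
// so a caller that needs that guarantee can block on it.
//
//	ch, err := s.Compact(rev)
//	if err != nil {
//		// rev was already compacted or is in the future
//	} else {
//		<-ch // wait until the compaction completes
//	}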

// DefaultIgnores is a map of keys to ignore in hash checking.
var DefaultIgnores map[backend.IgnoreKey]struct{}

func init() {
	DefaultIgnores = map[backend.IgnoreKey]struct{}{
		// consistent index might be changed due to v2 internal sync, which
		// is not controllable by the user.
		{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
	}
}

func (s *store) Commit() {
	s.mu.Lock()
	defer s.mu.Unlock()

	tx := s.b.BatchTx()
	tx.Lock()
	s.saveIndex(tx)
	tx.Unlock()
	s.b.ForceCommit()
}

func (s *store) Restore(b backend.Backend) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	close(s.stopc)
	s.fifoSched.Stop()

	atomic.StoreUint64(&s.consistentIndex, 0)
	s.b = b
	s.kvindex = newTreeIndex()
	s.currentRev = 1
	s.compactMainRev = -1
	s.fifoSched = schedule.NewFIFOScheduler()
	s.stopc = make(chan struct{})

	return s.restore()
}

func (s *store) restore() error {
	reportDbTotalSizeInBytesMu.Lock()
	b := s.b
	reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
	reportDbTotalSizeInBytesMu.Unlock()

	min, max := newRevBytes(), newRevBytes()
	revToBytes(revision{main: 1}, min)
	revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)

	keyToLease := make(map[string]lease.LeaseID)

	// restore index
	tx := s.b.BatchTx()
	tx.Lock()

	_, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
	if len(finishedCompactBytes) != 0 {
		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
		plog.Printf("restore compact to %d", s.compactMainRev)
	}
	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
	scheduledCompact := int64(0)
	if len(scheduledCompactBytes) != 0 {
		scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
	}

	// index keys concurrently as they're loaded in from tx
	rkvc, revc := restoreIntoIndex(s.kvindex)
	for {
		keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
		if len(keys) == 0 {
			break
		}
		// rkvc blocks if the total number of pending keys exceeds the
		// restore chunk size, to keep keys from consuming too much memory.
		restoreChunk(rkvc, keys, vals, keyToLease)
		if len(keys) < restoreChunkKeys {
			// partial set implies final set
			break
		}
		// next set begins after where this one ended
		newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
		newMin.sub++
		revToBytes(newMin, min)
	}
	close(rkvc)
	s.currentRev = <-revc

	// Keys in the range [compacted revision - N, compaction revision] might
	// all have been deleted due to compaction. In that case, the correct
	// current revision is the compaction revision, not the largest revision
	// seen.
	if s.currentRev < s.compactMainRev {
		s.currentRev = s.compactMainRev
	}
	if scheduledCompact <= s.compactMainRev {
		scheduledCompact = 0
	}

	for key, lid := range keyToLease {
		if s.le == nil {
			panic("no lessor to attach lease")
		}
		err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
		if err != nil {
			plog.Errorf("unexpected Attach error: %v", err)
		}
	}

	tx.Unlock()

	if scheduledCompact != 0 {
		s.Compact(scheduledCompact)
		plog.Printf("resume scheduled compaction at %d", scheduledCompact)
	}

	return nil
}

type revKeyValue struct {
	key  []byte
	kv   mvccpb.KeyValue
	kstr string
}

func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) {
	rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
	go func() {
		currentRev := int64(1)
		defer func() { revc <- currentRev }()
		// restore the tree index from the stream of unordered key-value pairs
		kiCache := make(map[string]*keyIndex, restoreChunkKeys)
		for rkv := range rkvc {
			ki, ok := kiCache[rkv.kstr]
			// purge kiCache if it holds many keys but this one is still missing
			if !ok && len(kiCache) >= restoreChunkKeys {
				i := 10
				for k := range kiCache {
					delete(kiCache, k)
					if i--; i == 0 {
						break
					}
				}
			}
			// cache miss; fetch from the tree index if present there
			if !ok {
				ki = &keyIndex{key: rkv.kv.Key}
				if idxKey := idx.KeyIndex(ki); idxKey != nil {
					kiCache[rkv.kstr], ki = idxKey, idxKey
					ok = true
				}
			}
			rev := bytesToRev(rkv.key)
			currentRev = rev.main
			if ok {
				if isTombstone(rkv.key) {
					ki.tombstone(rev.main, rev.sub)
					continue
				}
				ki.put(rev.main, rev.sub)
			} else if !isTombstone(rkv.key) {
				ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
				idx.Insert(ki)
				kiCache[rkv.kstr] = ki
			}
		}
	}()
	return rkvc, revc
}

func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
	for i, key := range keys {
		rkv := revKeyValue{key: key}
		if err := rkv.kv.Unmarshal(vals[i]); err != nil {
			plog.Fatalf("cannot unmarshal event: %v", err)
		}
		rkv.kstr = string(rkv.kv.Key)
		if isTombstone(key) {
			delete(keyToLease, rkv.kstr)
		} else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
			keyToLease[rkv.kstr] = lid
		} else {
			delete(keyToLease, rkv.kstr)
		}
		kvc <- rkv
	}
}

func (s *store) Close() error {
	close(s.stopc)
	s.fifoSched.Stop()
	return nil
}

func (s *store) saveIndex(tx backend.BatchTx) {
	if s.ig == nil {
		return
	}
	bs := s.bytesBuf8
	ci := s.ig.ConsistentIndex()
	binary.BigEndian.PutUint64(bs, ci)
	// put the index into the underlying backend;
	// tx has been locked in TxnBegin, so there is no need to lock it again
	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
	atomic.StoreUint64(&s.consistentIndex, ci)
}

func (s *store) ConsistentIndex() uint64 {
	if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
		return ci
	}
	tx := s.b.BatchTx()
	tx.Lock()
	defer tx.Unlock()
	_, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
	if len(vs) == 0 {
		return 0
	}
	v := binary.BigEndian.Uint64(vs[0])
	atomic.StoreUint64(&s.consistentIndex, v)
	return v
}

// appendMarkTombstone appends a tombstone mark to normal revision bytes.
func appendMarkTombstone(b []byte) []byte {
	if len(b) != revBytesLen {
		plog.Panicf("cannot append mark to non normal revision bytes")
	}
	return append(b, markTombstone)
}

// isTombstone checks whether the given revision bytes carry a tombstone mark.
func isTombstone(b []byte) bool {
	return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
}
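
// Illustrative round trip for the tombstone mark (a sketch, not part of the
// original file; newRevBytes and revToBytes are defined alongside the
// revision type in this package):
//
//	rb := newRevBytes()
//	revToBytes(revision{main: 5, sub: 0}, rb)
//	tb := appendMarkTombstone(rb)
//	_ = isTombstone(tb) // true: the trailing byte is markTombstone ('t')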
  444. }