kvstore.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/mvcc/backend"
	"go.etcd.io/etcd/mvcc/mvccpb"
	"go.etcd.io/etcd/pkg/schedule"

	"github.com/coreos/pkg/capnslog"
	"go.uber.org/zap"
)

var (
	keyBucketName  = []byte("key")
	metaBucketName = []byte("meta")

	consistentIndexKeyName  = []byte("consistent_index")
	scheduledCompactKeyName = []byte("scheduledCompactRev")
	finishedCompactKeyName  = []byte("finishedCompactRev")

	ErrCompacted = errors.New("mvcc: required revision has been compacted")
	ErrFutureRev = errors.New("mvcc: required revision is a future revision")
	ErrCanceled  = errors.New("mvcc: watcher is canceled")
	ErrClosed    = errors.New("mvcc: closed")

	plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "mvcc")
)

const (
	// markedRevBytesLen is the byte length of a marked revision.
	// The first `revBytesLen` bytes represent a normal revision. The last
	// byte is the mark.
	markedRevBytesLen      = revBytesLen + 1
	markBytePosition       = markedRevBytesLen - 1
	markTombstone     byte = 't'
)

var restoreChunkKeys = 10000 // non-const for testing
var defaultCompactBatchLimit = 1000

// ConsistentIndexGetter is an interface that wraps the ConsistentIndex method.
// A consistent index is the offset of an entry in a consistent replicated log.
type ConsistentIndexGetter interface {
	// ConsistentIndex returns the consistent index of the currently executing entry.
	ConsistentIndex() uint64
}

// StoreConfig holds the configurable settings for a store.
type StoreConfig struct {
	CompactionBatchLimit int
}

type store struct {
	ReadView
	WriteView

	cfg StoreConfig

	// consistentIndex caches the "consistent_index" key's value. Accessed
	// through atomics so must be 64-bit aligned.
	consistentIndex uint64

	// mu read locks for txns and write locks for non-txn store changes.
	mu sync.RWMutex

	ig ConsistentIndexGetter

	b       backend.Backend
	kvindex index

	le lease.Lessor

	// revMu protects currentRev and compactMainRev.
	// Locked at the end of a write txn and released after the write txn's unlock.
	// Locked before locking a read txn and released after locking.
	revMu sync.RWMutex
	// currentRev is the revision of the last completed transaction.
	currentRev int64
	// compactMainRev is the main revision of the last compaction.
	compactMainRev int64

	// bytesBuf8 is a byte slice of length 8
	// to avoid repetitive allocation in saveIndex.
	bytesBuf8 []byte

	fifoSched schedule.Scheduler

	stopc chan struct{}

	lg *zap.Logger
}

// NewStore returns a new store. It is useful for creating a store inside the
// mvcc package; externally it should be used only for testing.
func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) *store {
	if cfg.CompactionBatchLimit == 0 {
		cfg.CompactionBatchLimit = defaultCompactBatchLimit
	}
	s := &store{
		cfg:     cfg,
		b:       b,
		ig:      ig,
		kvindex: newTreeIndex(lg),

		le: le,

		currentRev:     1,
		compactMainRev: -1,

		bytesBuf8: make([]byte, 8),
		fifoSched: schedule.NewFIFOScheduler(),

		stopc: make(chan struct{}),

		lg: lg,
	}
	s.ReadView = &readView{s}
	s.WriteView = &writeView{s}
	if s.le != nil {
		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
	}

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket(keyBucketName)
	tx.UnsafeCreateBucket(metaBucketName)
	tx.Unlock()
	s.b.ForceCommit()

	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.restore(); err != nil {
		// TODO: return the error instead of panicking here?
		panic("failed to recover store from backend")
	}

	return s
}

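// compactBarrier re-schedules itself on the FIFO scheduler while the given
// context is nil or canceled (unless the store is stopping), and otherwise
// closes ch to release anyone waiting on the pending compaction.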
func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
	if ctx == nil || ctx.Err() != nil {
		s.mu.Lock()
		select {
		case <-s.stopc:
		default:
			f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
			s.fifoSched.Schedule(f)
		}
		s.mu.Unlock()
		return
	}
	close(ch)
}

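// Hash forces a commit of the backend and computes the hash of the whole
// backend database (ignoring keys in DefaultIgnores), returning it together
// with the store's current revision.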
func (s *store) Hash() (hash uint32, revision int64, err error) {
	start := time.Now()

	s.b.ForceCommit()
	h, err := s.b.Hash(DefaultIgnores)

	hashSec.Observe(time.Since(start).Seconds())
	return h, s.currentRev, err
}

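// HashByRev computes the hash of all MVCC key-value revisions up to the given
// revision (or the current revision when rev is 0), skipping revisions that
// are already scheduled away by compaction.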
func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
	start := time.Now()

	s.mu.RLock()
	s.revMu.RLock()
	compactRev, currentRev = s.compactMainRev, s.currentRev
	s.revMu.RUnlock()

	if rev > 0 && rev <= compactRev {
		s.mu.RUnlock()
		return 0, 0, compactRev, ErrCompacted
	} else if rev > 0 && rev > currentRev {
		s.mu.RUnlock()
		return 0, currentRev, 0, ErrFutureRev
	}

	if rev == 0 {
		rev = currentRev
	}
	keep := s.kvindex.Keep(rev)

	tx := s.b.ReadTx()
	tx.RLock()
	defer tx.RUnlock()
	s.mu.RUnlock()

	upper := revision{main: rev + 1}
	lower := revision{main: compactRev + 1}
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	h.Write(keyBucketName)
	err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
		kr := bytesToRev(k)
		if !upper.GreaterThan(kr) {
			return nil
		}
		// skip revisions that are scheduled for deletion
		// due to compaction; skip nothing if the keep set is empty.
		if lower.GreaterThan(kr) && len(keep) > 0 {
			if _, ok := keep[kr]; !ok {
				return nil
			}
		}
		h.Write(k)
		h.Write(v)
		return nil
	})
	hash = h.Sum32()

	hashRevSec.Observe(time.Since(start).Seconds())
	return hash, currentRev, compactRev, err
}

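// updateCompactRev checks rev against the compacted and current revisions,
// records it as the new compactMainRev, and persists the scheduled compaction
// revision to the meta bucket before the compaction is carried out.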
func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) {
	s.revMu.Lock()
	if rev <= s.compactMainRev {
		ch := make(chan struct{})
		f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
		s.fifoSched.Schedule(f)
		s.revMu.Unlock()
		return ch, ErrCompacted
	}
	if rev > s.currentRev {
		s.revMu.Unlock()
		return nil, ErrFutureRev
	}

	s.compactMainRev = rev

	rbytes := newRevBytes()
	revToBytes(revision{main: rev}, rbytes)

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
	tx.Unlock()
	// ensure that desired compaction is persisted
	s.b.ForceCommit()

	s.revMu.Unlock()

	return nil, nil
}

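// compact compacts the in-memory tree index up to rev and schedules the
// physical compaction of the backend on the FIFO scheduler. The returned
// channel is closed once the scheduled compaction finishes.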
func (s *store) compact(rev int64) (<-chan struct{}, error) {
	start := time.Now()
	keep := s.kvindex.Compact(rev)
	ch := make(chan struct{})
	var j = func(ctx context.Context) {
		if ctx.Err() != nil {
			s.compactBarrier(ctx, ch)
			return
		}
		if !s.scheduleCompaction(rev, keep) {
			s.compactBarrier(nil, ch)
			return
		}
		close(ch)
	}

	s.fifoSched.Schedule(j)

	indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
	return ch, nil
}

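// compactLockfree updates the compact revision and schedules the compaction
// without taking s.mu; the caller is expected to hold it already, as restore
// does when resuming a scheduled compaction.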
func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
	ch, err := s.updateCompactRev(rev)
	if err != nil {
		return ch, err
	}

	return s.compact(rev)
}

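// Compact persists rev as the new compact revision and schedules the
// compaction. A caller typically waits on the returned channel, e.g.
// (a sketch; the error handling is illustrative only):
//
//	ch, err := s.Compact(rev)
//	if err != nil {
//		// rev was already compacted (ErrCompacted) or is ahead of the
//		// store (ErrFutureRev)
//	} else {
//		<-ch // closed once the scheduled compaction has finished
//	}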
func (s *store) Compact(rev int64) (<-chan struct{}, error) {
	s.mu.Lock()

	ch, err := s.updateCompactRev(rev)

	if err != nil {
		s.mu.Unlock()
		return ch, err
	}
	s.mu.Unlock()

	return s.compact(rev)
}

// DefaultIgnores is a map of keys to ignore in hash checking.
var DefaultIgnores map[backend.IgnoreKey]struct{}

func init() {
	DefaultIgnores = map[backend.IgnoreKey]struct{}{
		// consistent index might be changed due to v2 internal sync, which
		// is not controllable by the user.
		{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
	}
}

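// Commit saves the consistent index and forces the backend to commit its
// pending writes.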
func (s *store) Commit() {
	s.mu.Lock()
	defer s.mu.Unlock()

	tx := s.b.BatchTx()
	tx.Lock()
	s.saveIndex(tx)
	tx.Unlock()
	s.b.ForceCommit()
}

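// Restore replaces the store's backend with b, resets all in-memory state,
// and rebuilds the store from the new backend.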
func (s *store) Restore(b backend.Backend) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	close(s.stopc)
	s.fifoSched.Stop()

	atomic.StoreUint64(&s.consistentIndex, 0)
	s.b = b
	s.kvindex = newTreeIndex(s.lg)
	s.currentRev = 1
	s.compactMainRev = -1
	s.fifoSched = schedule.NewFIFOScheduler()
	s.stopc = make(chan struct{})

	return s.restore()
}

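// restore rebuilds the in-memory tree index and lease attachments by scanning
// the key bucket in chunks, then resumes any compaction that was scheduled
// but unfinished according to the meta bucket.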
func (s *store) restore() error {
	b := s.b

	reportDbTotalSizeInBytesMu.Lock()
	reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
	reportDbTotalSizeInBytesMu.Unlock()
	reportDbTotalSizeInBytesDebugMu.Lock()
	reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) }
	reportDbTotalSizeInBytesDebugMu.Unlock()
	reportDbTotalSizeInUseInBytesMu.Lock()
	reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
	reportDbTotalSizeInUseInBytesMu.Unlock()
	reportDbOpenReadTxNMu.Lock()
	reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
	reportDbOpenReadTxNMu.Unlock()

	min, max := newRevBytes(), newRevBytes()
	revToBytes(revision{main: 1}, min)
	revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)

	keyToLease := make(map[string]lease.LeaseID)

	// restore index
	tx := s.b.BatchTx()
	tx.Lock()

	_, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
	if len(finishedCompactBytes) != 0 {
		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main

		if s.lg != nil {
			s.lg.Info(
				"restored last compact revision",
				zap.String("meta-bucket-name", string(metaBucketName)),
				zap.String("meta-bucket-name-key", string(finishedCompactKeyName)),
				zap.Int64("restored-compact-revision", s.compactMainRev),
			)
		} else {
			plog.Printf("restore compact to %d", s.compactMainRev)
		}
	}
	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
	scheduledCompact := int64(0)
	if len(scheduledCompactBytes) != 0 {
		scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
	}

	// index keys concurrently as they're loaded in from tx
	keysGauge.Set(0)
	rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
	for {
		keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
		if len(keys) == 0 {
			break
		}
		// rkvc blocks if the total pending keys exceeds the restore
		// chunk size to keep keys from consuming too much memory.
		restoreChunk(s.lg, rkvc, keys, vals, keyToLease)
		if len(keys) < restoreChunkKeys {
			// partial set implies final set
			break
		}
		// next set begins after where this one ended
		newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
		newMin.sub++
		revToBytes(newMin, min)
	}
	close(rkvc)
	s.currentRev = <-revc

	// keys in the range [compacted revision - N, compaction revision] might all
	// have been deleted due to compaction. In that case the correct current
	// revision is the compaction revision, not the largest revision we have seen.
	if s.currentRev < s.compactMainRev {
		s.currentRev = s.compactMainRev
	}
	if scheduledCompact <= s.compactMainRev {
		scheduledCompact = 0
	}

	for key, lid := range keyToLease {
		if s.le == nil {
			panic("no lessor to attach lease")
		}
		err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
		if err != nil {
			if s.lg != nil {
				s.lg.Warn(
					"failed to attach a lease",
					zap.String("lease-id", fmt.Sprintf("%016x", lid)),
					zap.Error(err),
				)
			} else {
				plog.Errorf("unexpected Attach error: %v", err)
			}
		}
	}

	tx.Unlock()

	if scheduledCompact != 0 {
		s.compactLockfree(scheduledCompact)

		if s.lg != nil {
			s.lg.Info(
				"resume scheduled compaction",
				zap.String("meta-bucket-name", string(metaBucketName)),
				zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)),
				zap.Int64("scheduled-compact-revision", scheduledCompact),
			)
		} else {
			plog.Printf("resume scheduled compaction at %d", scheduledCompact)
		}
	}

	return nil
}

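// revKeyValue carries one backend entry during restore: the raw revision key,
// the unmarshaled KeyValue, and the key as a string.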
type revKeyValue struct {
	key  []byte
	kv   mvccpb.KeyValue
	kstr string
}

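// restoreIntoIndex starts a goroutine that replays revKeyValues from the
// returned send channel into idx, caching keyIndexes to limit tree lookups;
// once that channel is closed, the largest main revision seen is delivered on
// the returned revision channel. restore drives it roughly as (a sketch):
//
//	rkvc, revc := restoreIntoIndex(lg, idx)
//	restoreChunk(lg, rkvc, keys, vals, keyToLease) // repeated per chunk
//	close(rkvc)
//	currentRev := <-revc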
func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
	rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
	go func() {
		currentRev := int64(1)
		defer func() { revc <- currentRev }()
		// restore the tree index from streaming the unordered index.
		kiCache := make(map[string]*keyIndex, restoreChunkKeys)
		for rkv := range rkvc {
			ki, ok := kiCache[rkv.kstr]
			// purge kiCache if many keys but still missing in the cache
			if !ok && len(kiCache) >= restoreChunkKeys {
				i := 10
				for k := range kiCache {
					delete(kiCache, k)
					if i--; i == 0 {
						break
					}
				}
			}
			// cache miss, fetch from tree index if there
			if !ok {
				ki = &keyIndex{key: rkv.kv.Key}
				if idxKey := idx.KeyIndex(ki); idxKey != nil {
					kiCache[rkv.kstr], ki = idxKey, idxKey
					ok = true
				}
			}
			rev := bytesToRev(rkv.key)
			currentRev = rev.main
			if ok {
				if isTombstone(rkv.key) {
					ki.tombstone(lg, rev.main, rev.sub)
					continue
				}
				ki.put(lg, rev.main, rev.sub)
			} else if !isTombstone(rkv.key) {
				ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
				idx.Insert(ki)
				kiCache[rkv.kstr] = ki
			}
		}
	}()
	return rkvc, revc
}

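// restoreChunk unmarshals one chunk of keys and values, records the latest
// lease attachment (or detachment) for each key in keyToLease, and forwards
// every entry to kvc.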
func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
	for i, key := range keys {
		rkv := revKeyValue{key: key}
		if err := rkv.kv.Unmarshal(vals[i]); err != nil {
			if lg != nil {
				lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
			} else {
				plog.Fatalf("cannot unmarshal event: %v", err)
			}
		}
		rkv.kstr = string(rkv.kv.Key)
		if isTombstone(key) {
			delete(keyToLease, rkv.kstr)
		} else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
			keyToLease[rkv.kstr] = lid
		} else {
			delete(keyToLease, rkv.kstr)
		}
		kvc <- rkv
	}
}

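// Close signals the store to stop and shuts down the FIFO scheduler.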
func (s *store) Close() error {
	close(s.stopc)
	s.fifoSched.Stop()
	return nil
}

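// saveIndex writes the current consistent index into the meta bucket and
// refreshes the value cached in s.consistentIndex.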
func (s *store) saveIndex(tx backend.BatchTx) {
	if s.ig == nil {
		return
	}
	bs := s.bytesBuf8
	ci := s.ig.ConsistentIndex()
	binary.BigEndian.PutUint64(bs, ci)
	// put the index into the underlying backend
	// tx has been locked in TxnBegin, so there is no need to lock it again
	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
	atomic.StoreUint64(&s.consistentIndex, ci)
}

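// ConsistentIndex returns the cached consistent index, falling back to
// reading (and then caching) it from the meta bucket.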
func (s *store) ConsistentIndex() uint64 {
	if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
		return ci
	}
	tx := s.b.BatchTx()
	tx.Lock()
	defer tx.Unlock()
	_, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
	if len(vs) == 0 {
		return 0
	}
	v := binary.BigEndian.Uint64(vs[0])
	atomic.StoreUint64(&s.consistentIndex, v)
	return v
}

// appendMarkTombstone appends a tombstone mark to normal revision bytes.
func appendMarkTombstone(lg *zap.Logger, b []byte) []byte {
	if len(b) != revBytesLen {
		if lg != nil {
			lg.Panic(
				"cannot append tombstone mark to non-normal revision bytes",
				zap.Int("expected-revision-bytes-size", revBytesLen),
				zap.Int("given-revision-bytes-size", len(b)),
			)
		} else {
			plog.Panicf("cannot append mark to non-normal revision bytes")
		}
	}
	return append(b, markTombstone)
}

// isTombstone checks whether the given revision bytes are a tombstone.
func isTombstone(b []byte) bool {
	return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
}