kvstore.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"errors"
	"log"
	"math"
	"math/rand"
	"sync"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/storage/backend"
	"github.com/coreos/etcd/storage/storagepb"
)

var (
	keyBucketName  = []byte("key")
	metaBucketName = []byte("meta")

	// markedRevBytesLen is the byte length of a marked revision.
	// The first `revBytesLen` bytes represent a normal revision. The last
	// byte is the mark.
	markedRevBytesLen      = revBytesLen + 1
	markBytePosition       = markedRevBytesLen - 1
	markTombstone     byte = 't'

	// NoLease is a special LeaseID that indicates no lease is attached to a key.
	NoLease = lease.LeaseID(0)

	scheduledCompactKeyName = []byte("scheduledCompactRev")
	finishedCompactKeyName  = []byte("finishedCompactRev")

	ErrTxnIDMismatch = errors.New("storage: txn id mismatch")
	ErrCompacted     = errors.New("storage: required revision has been compacted")
	ErrFutureRev     = errors.New("storage: required revision is a future revision")
	ErrCanceled      = errors.New("storage: watcher is canceled")
)

type store struct {
	mu sync.Mutex // guards the following

	b       backend.Backend
	kvindex index

	currentRev revision
	// the main revision of the last compaction
	compactMainRev int64

	tx    backend.BatchTx
	txnID int64 // tracks the current txnID to verify txn operations

	wg    sync.WaitGroup
	stopc chan struct{}
}

// NewStore returns a new store. It is useful for creating a store inside
// the storage pkg; externally it should only be used for testing.
func NewStore(b backend.Backend) *store {
	s := &store{
		b:              b,
		kvindex:        newTreeIndex(),
		currentRev:     revision{},
		compactMainRev: -1,
		stopc:          make(chan struct{}),
	}

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket(keyBucketName)
	tx.UnsafeCreateBucket(metaBucketName)
	tx.Unlock()
	s.b.ForceCommit()

	return s
}

// Rev returns the current main revision of the store.
func (s *store) Rev() int64 {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.currentRev.main
}

// Put saves the key-value pair under a new revision inside a short-lived
// internal txn and returns the revision at which it was applied.
func (s *store) Put(key, value []byte, lease lease.LeaseID) int64 {
	id := s.TxnBegin()
	s.put(key, value, lease)
	s.txnEnd(id)

	putCounter.Inc()

	return int64(s.currentRev.main)
}

// Range reads keys at rangeRev inside a short-lived internal txn;
// a rangeRev <= 0 means the current revision.
func (s *store) Range(key, end []byte, limit, rangeRev int64) (kvs []storagepb.KeyValue, rev int64, err error) {
	id := s.TxnBegin()
	kvs, rev, err = s.rangeKeys(key, end, limit, rangeRev)
	s.txnEnd(id)

	rangeCounter.Inc()

	return kvs, rev, err
}

// DeleteRange deletes the given range inside a short-lived internal txn and
// returns the number of deleted keys and the resulting revision.
func (s *store) DeleteRange(key, end []byte) (n, rev int64) {
	id := s.TxnBegin()
	n = s.deleteRange(key, end)
	s.txnEnd(id)

	deleteCounter.Inc()

	return n, int64(s.currentRev.main)
}

// TxnBegin locks the store for a txn and returns a random txn id that the
// subsequent Txn* calls and TxnEnd must present.
func (s *store) TxnBegin() int64 {
	s.mu.Lock()
	s.currentRev.sub = 0
	s.tx = s.b.BatchTx()
	s.tx.Lock()

	s.txnID = rand.Int63()
	return s.txnID
}

func (s *store) TxnEnd(txnID int64) error {
	err := s.txnEnd(txnID)
	if err != nil {
		return err
	}

	txnCounter.Inc()
	return nil
}

// txnEnd is used for unlocking an internal txn. It does
// not increase the txnCounter.
func (s *store) txnEnd(txnID int64) error {
	if txnID != s.txnID {
		return ErrTxnIDMismatch
	}

	s.tx.Unlock()
	if s.currentRev.sub != 0 {
		s.currentRev.main += 1
	}
	s.currentRev.sub = 0

	dbTotalSize.Set(float64(s.b.Size()))
	s.mu.Unlock()
	return nil
}

func (s *store) TxnRange(txnID int64, key, end []byte, limit, rangeRev int64) (kvs []storagepb.KeyValue, rev int64, err error) {
	if txnID != s.txnID {
		return nil, 0, ErrTxnIDMismatch
	}
	return s.rangeKeys(key, end, limit, rangeRev)
}

func (s *store) TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) {
	if txnID != s.txnID {
		return 0, ErrTxnIDMismatch
	}

	s.put(key, value, lease)
	return int64(s.currentRev.main + 1), nil
}

func (s *store) TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) {
	if txnID != s.txnID {
		return 0, 0, ErrTxnIDMismatch
	}

	n = s.deleteRange(key, end)
	if n != 0 || s.currentRev.sub != 0 {
		rev = int64(s.currentRev.main + 1)
	} else {
		rev = int64(s.currentRev.main)
	}
	return n, rev, nil
}
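
// txnUsageSketch is an illustrative sketch added for this write-up, not part
// of the original file, and the function name is hypothetical. It shows how
// the external txn API above fits together for a single caller: TxnBegin
// locks the store and hands back a random id, every Txn* call must present
// that id (or it fails with ErrTxnIDMismatch), and TxnEnd releases the lock.
func txnUsageSketch(s *store) {
	id := s.TxnBegin()

	// all operations between TxnBegin and TxnEnd share one batch tx
	if _, err := s.TxnPut(id, []byte("foo"), []byte("bar"), NoLease); err != nil {
		log.Printf("storage: txn put failed: %v", err)
	}
	if _, _, err := s.TxnRange(id, []byte("foo"), nil, 0, 0); err != nil {
		log.Printf("storage: txn range failed: %v", err)
	}

	if err := s.TxnEnd(id); err != nil {
		log.Printf("storage: txn end failed: %v", err)
	}
}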

// Compact records rev as the compacted revision, persists the compaction
// intent, and removes superseded revisions up to rev in a background goroutine.
func (s *store) Compact(rev int64) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if rev <= s.compactMainRev {
		return ErrCompacted
	}
	if rev > s.currentRev.main {
		return ErrFutureRev
	}

	start := time.Now()

	s.compactMainRev = rev

	rbytes := newRevBytes()
	revToBytes(revision{main: rev}, rbytes)

	tx := s.b.BatchTx()
	tx.Lock()
	tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
	tx.Unlock()
	// ensure that desired compaction is persisted
	s.b.ForceCommit()

	keep := s.kvindex.Compact(rev)

	s.wg.Add(1)
	go s.scheduleCompaction(rev, keep)

	indexCompactionPauseDurations.Observe(float64(time.Now().Sub(start) / time.Millisecond))
	return nil
}

func (s *store) Hash() (uint32, error) {
	s.b.ForceCommit()
	return s.b.Hash()
}

func (s *store) Snapshot() Snapshot {
	s.b.ForceCommit()
	return s.b.Snapshot()
}

func (s *store) Commit() { s.b.ForceCommit() }

// Restore rebuilds the in-memory index and the current revision from the
// key-value pairs persisted in the backend, and resumes any compaction that
// was scheduled but not finished.
func (s *store) Restore() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	min, max := newRevBytes(), newRevBytes()
	revToBytes(revision{}, min)
	revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)

	// restore index
	tx := s.b.BatchTx()
	tx.Lock()
	_, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
	if len(finishedCompactBytes) != 0 {
		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
		log.Printf("storage: restore compact to %d", s.compactMainRev)
	}

	// TODO: limit N to reduce max memory usage
	keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0)
	for i, key := range keys {
		var kv storagepb.KeyValue
		if err := kv.Unmarshal(vals[i]); err != nil {
			log.Fatalf("storage: cannot unmarshal event: %v", err)
		}

		rev := bytesToRev(key[:revBytesLen])

		// restore index
		switch {
		case isTombstone(key):
			s.kvindex.Tombstone(kv.Key, rev)
		default:
			s.kvindex.Restore(kv.Key, revision{kv.CreateRevision, 0}, rev, kv.Version)
		}

		// update revision
		s.currentRev = rev
	}

	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
	if len(scheduledCompactBytes) != 0 {
		scheduledCompact := bytesToRev(scheduledCompactBytes[0]).main
		if scheduledCompact > s.compactMainRev {
			log.Printf("storage: resume scheduled compaction at %d", scheduledCompact)
			go s.Compact(scheduledCompact)
		}
	}

	tx.Unlock()

	return nil
}

func (s *store) Close() error {
	close(s.stopc)
	s.wg.Wait()
	return nil
}

func (a *store) Equal(b *store) bool {
	if a.currentRev != b.currentRev {
		return false
	}
	if a.compactMainRev != b.compactMainRev {
		return false
	}
	return a.kvindex.Equal(b.kvindex)
}

// range is a keyword in Go, add Keys suffix.
func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64) (kvs []storagepb.KeyValue, rev int64, err error) {
	curRev := int64(s.currentRev.main)
	if s.currentRev.sub > 0 {
		curRev += 1
	}

	if rangeRev > curRev {
		return nil, s.currentRev.main, ErrFutureRev
	}
	if rangeRev <= 0 {
		rev = curRev
	} else {
		rev = rangeRev
	}
	if rev <= s.compactMainRev {
		return nil, 0, ErrCompacted
	}

	_, revpairs := s.kvindex.Range(key, end, int64(rev))
	if len(revpairs) == 0 {
		return nil, rev, nil
	}

	for _, revpair := range revpairs {
		start, end := revBytesRange(revpair)

		_, vs := s.tx.UnsafeRange(keyBucketName, start, end, 0)
		if len(vs) != 1 {
			log.Fatalf("storage: range cannot find rev (%d,%d)", revpair.main, revpair.sub)
		}

		var kv storagepb.KeyValue
		if err := kv.Unmarshal(vs[0]); err != nil {
			log.Fatalf("storage: cannot unmarshal event: %v", err)
		}
		kvs = append(kvs, kv)
		if limit > 0 && len(kvs) >= int(limit) {
			break
		}
	}
	return kvs, rev, nil
}

func (s *store) put(key, value []byte, lease lease.LeaseID) {
	rev := s.currentRev.main + 1
	c := rev

	// if the key existed before, reuse its previous create revision
	_, created, ver, err := s.kvindex.Get(key, rev)
	if err == nil {
		c = created.main
	}

	ibytes := newRevBytes()
	revToBytes(revision{main: rev, sub: s.currentRev.sub}, ibytes)

	ver = ver + 1
	kv := storagepb.KeyValue{
		Key:            key,
		Value:          value,
		CreateRevision: c,
		ModRevision:    rev,
		Version:        ver,
		Lease:          int64(lease),
	}

	d, err := kv.Marshal()
	if err != nil {
		log.Fatalf("storage: cannot marshal event: %v", err)
	}

	s.tx.UnsafePut(keyBucketName, ibytes, d)
	s.kvindex.Put(key, revision{main: rev, sub: s.currentRev.sub})
	s.currentRev.sub += 1
}

func (s *store) deleteRange(key, end []byte) int64 {
	rrev := s.currentRev.main
	if s.currentRev.sub > 0 {
		rrev += 1
	}
	keys, _ := s.kvindex.Range(key, end, rrev)

	if len(keys) == 0 {
		return 0
	}

	for _, key := range keys {
		s.delete(key)
	}
	return int64(len(keys))
}

func (s *store) delete(key []byte) {
	mainrev := s.currentRev.main + 1

	ibytes := newRevBytes()
	revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes)
	ibytes = appendMarkTombstone(ibytes)

	kv := storagepb.KeyValue{
		Key: key,
	}

	d, err := kv.Marshal()
	if err != nil {
		log.Fatalf("storage: cannot marshal event: %v", err)
	}

	s.tx.UnsafePut(keyBucketName, ibytes, d)
	err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub})
	if err != nil {
		log.Fatalf("storage: cannot tombstone an existing key (%s): %v", string(key), err)
	}
	s.currentRev.sub += 1
}

// appendMarkTombstone appends the tombstone mark to normal revision bytes.
func appendMarkTombstone(b []byte) []byte {
	if len(b) != revBytesLen {
		log.Panicf("cannot append mark to non normal revision bytes")
	}
	return append(b, markTombstone)
}

// isTombstone checks whether the revision bytes represent a tombstone.
func isTombstone(b []byte) bool {
	return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
}

// revBytesRange returns the range of revision bytes at
// the given revision.
func revBytesRange(rev revision) (start, end []byte) {
	start = newRevBytes()
	revToBytes(rev, start)

	end = newRevBytes()
	endRev := revision{main: rev.main, sub: rev.sub + 1}
	revToBytes(endRev, end)

	return start, end
}
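
// storeUsageSketch is an illustrative sketch added for this write-up, not
// part of the original file, and the function name is hypothetical. It walks
// through the non-txn API: NewStore creates the key and meta buckets,
// Put/Range/DeleteRange each run inside their own short-lived internal txn,
// and Compact schedules removal of superseded revisions in the background.
// Constructing the backend.Backend is left to the backend package and is
// assumed to have happened elsewhere.
func storeUsageSketch(b backend.Backend) {
	s := NewStore(b)
	defer s.Close()

	// Put returns the revision at which the write was applied.
	rev := s.Put([]byte("foo"), []byte("bar"), NoLease)

	// Range at that revision; limit 0 means no limit.
	kvs, rev, err := s.Range([]byte("foo"), nil, 0, rev)
	if err != nil {
		log.Printf("storage: range failed: %v", err)
		return
	}
	log.Printf("storage: got %d kvs at rev %d", len(kvs), rev)

	// Compact up to the current revision; later reads at a revision at or
	// below it fail with ErrCompacted.
	if err := s.Compact(rev); err != nil {
		log.Printf("storage: compact failed: %v", err)
	}
}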