kvstore.go 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412
  1. package storage
  2. import (
  3. "errors"
  4. "io"
  5. "log"
  6. "math"
  7. "math/rand"
  8. "sync"
  9. "time"
  10. "github.com/coreos/etcd/storage/backend"
  11. "github.com/coreos/etcd/storage/storagepb"
  12. )
// Package-level tunables, bucket/key names, and the sentinel errors
// returned by transaction and range operations.
var (
	// batch settings handed to backend.New — presumably commit size/interval;
	// confirm semantics in the backend package.
	batchLimit    = 10000
	batchInterval = 100 * time.Millisecond

	keyBucketName  = []byte("key")  // bucket holding reversion -> event records
	metaBucketName = []byte("meta") // bucket holding compaction bookkeeping

	scheduledCompactKeyName = []byte("scheduledCompactRev") // compaction requested
	finishedCompactKeyName  = []byte("finishedCompactRev")  // compaction completed

	// ErrTnxIDMismatch is returned by Tnx* methods when the supplied id does
	// not match the transaction opened by TnxBegin.
	ErrTnxIDMismatch = errors.New("storage: tnx id mismatch")
	// ErrCompacted is returned when a requested reversion has been compacted away.
	ErrCompacted = errors.New("storage: required reversion has been compacted")
	// ErrFutureRev is returned when a requested reversion is newer than the store.
	ErrFutureRev = errors.New("storage: required reversion is a future reversion")
)
// store is a disk-backed, multi-version key-value store: events are
// persisted in the backend keyed by reversion, and an in-memory index
// (kvindex) maps user keys to reversions.
type store struct {
	// mu is taken by TnxBegin and released by TnxEnd, serializing whole
	// transactions; Compact and Restore also take it.
	mu sync.RWMutex

	b       backend.Backend
	kvindex index

	// currentRev tracks the store's reversion; main advances once per
	// committed transaction that wrote, sub counts writes within one.
	currentRev reversion
	// the main reversion of the last compaction
	compactMainRev int64

	tmu   sync.Mutex // protect the tnxID field
	tnxID int64      // tracks the current tnxID to verify tnx operations

	wg    sync.WaitGroup // tracks background compaction goroutines (see Compact/Close)
	stopc chan struct{}  // closed by Close to signal shutdown
}
  36. func New(path string) KV {
  37. return newStore(path)
  38. }
  39. func newStore(path string) *store {
  40. s := &store{
  41. b: backend.New(path, batchInterval, batchLimit),
  42. kvindex: newTreeIndex(),
  43. currentRev: reversion{},
  44. compactMainRev: -1,
  45. stopc: make(chan struct{}),
  46. }
  47. tx := s.b.BatchTx()
  48. tx.Lock()
  49. tx.UnsafeCreateBucket(keyBucketName)
  50. tx.UnsafeCreateBucket(metaBucketName)
  51. tx.Unlock()
  52. s.b.ForceCommit()
  53. return s
  54. }
  55. func (s *store) Put(key, value []byte) int64 {
  56. id := s.TnxBegin()
  57. s.put(key, value, s.currentRev.main+1)
  58. s.TnxEnd(id)
  59. return int64(s.currentRev.main)
  60. }
  61. func (s *store) Range(key, end []byte, limit, rangeRev int64) (kvs []storagepb.KeyValue, rev int64, err error) {
  62. id := s.TnxBegin()
  63. kvs, rev, err = s.rangeKeys(key, end, limit, rangeRev)
  64. s.TnxEnd(id)
  65. return kvs, rev, err
  66. }
  67. func (s *store) DeleteRange(key, end []byte) (n, rev int64) {
  68. id := s.TnxBegin()
  69. n = s.deleteRange(key, end, s.currentRev.main+1)
  70. s.TnxEnd(id)
  71. return n, int64(s.currentRev.main)
  72. }
  73. func (s *store) TnxBegin() int64 {
  74. s.mu.Lock()
  75. s.currentRev.sub = 0
  76. s.tmu.Lock()
  77. defer s.tmu.Unlock()
  78. s.tnxID = rand.Int63()
  79. return s.tnxID
  80. }
// TnxEnd closes the transaction identified by tnxID. If the transaction
// recorded any write (currentRev.sub != 0), the main reversion advances by
// one. On success it releases the store lock taken by TnxBegin.
//
// NOTE(review): on an id mismatch this returns with s.mu still held, so a
// caller presenting a stale id leaves the store locked — confirm intended.
func (s *store) TnxEnd(tnxID int64) error {
	s.tmu.Lock()
	defer s.tmu.Unlock()
	if tnxID != s.tnxID {
		return ErrTnxIDMismatch
	}
	// a non-zero sub-reversion means at least one put/delete happened
	if s.currentRev.sub != 0 {
		s.currentRev.main += 1
	}
	s.currentRev.sub = 0
	s.mu.Unlock()
	return nil
}
  94. func (s *store) TnxRange(tnxID int64, key, end []byte, limit, rangeRev int64) (kvs []storagepb.KeyValue, rev int64, err error) {
  95. s.tmu.Lock()
  96. defer s.tmu.Unlock()
  97. if tnxID != s.tnxID {
  98. return nil, 0, ErrTnxIDMismatch
  99. }
  100. return s.rangeKeys(key, end, limit, rangeRev)
  101. }
  102. func (s *store) TnxPut(tnxID int64, key, value []byte) (rev int64, err error) {
  103. s.tmu.Lock()
  104. defer s.tmu.Unlock()
  105. if tnxID != s.tnxID {
  106. return 0, ErrTnxIDMismatch
  107. }
  108. s.put(key, value, s.currentRev.main+1)
  109. return int64(s.currentRev.main + 1), nil
  110. }
  111. func (s *store) TnxDeleteRange(tnxID int64, key, end []byte) (n, rev int64, err error) {
  112. s.tmu.Lock()
  113. defer s.tmu.Unlock()
  114. if tnxID != s.tnxID {
  115. return 0, 0, ErrTnxIDMismatch
  116. }
  117. n = s.deleteRange(key, end, s.currentRev.main+1)
  118. if n != 0 || s.currentRev.sub != 0 {
  119. rev = int64(s.currentRev.main + 1)
  120. }
  121. return n, rev, nil
  122. }
  123. func (s *store) Compact(rev int64) error {
  124. s.mu.Lock()
  125. defer s.mu.Unlock()
  126. if rev <= s.compactMainRev {
  127. return ErrCompacted
  128. }
  129. s.compactMainRev = rev
  130. rbytes := newRevBytes()
  131. revToBytes(reversion{main: rev}, rbytes)
  132. tx := s.b.BatchTx()
  133. tx.Lock()
  134. tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
  135. tx.Unlock()
  136. keep := s.kvindex.Compact(rev)
  137. s.wg.Add(1)
  138. go s.scheduleCompaction(rev, keep)
  139. return nil
  140. }
  141. func (s *store) Snapshot(w io.Writer) (int64, error) {
  142. s.b.ForceCommit()
  143. return s.b.Snapshot(w)
  144. }
// Restore rebuilds the in-memory state (kvindex, currentRev,
// compactMainRev) from the events persisted in the key bucket, and resumes
// a compaction that was scheduled but not finished before shutdown.
func (s *store) Restore() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// scan the entire reversion keyspace: [zero reversion, max reversion]
	min, max := newRevBytes(), newRevBytes()
	revToBytes(reversion{}, min)
	revToBytes(reversion{main: math.MaxInt64, sub: math.MaxInt64}, max)

	// restore index
	tx := s.b.BatchTx()
	tx.Lock()
	_, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
	if len(finishedCompactBytes) != 0 {
		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
		log.Printf("storage: restore compact to %d", s.compactMainRev)
	}

	// TODO: limit N to reduce max memory usage
	keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0)
	for i, key := range keys {
		e := &storagepb.Event{}
		if err := e.Unmarshal(vals[i]); err != nil {
			log.Fatalf("storage: cannot unmarshal event: %v", err)
		}
		rev := bytesToRev(key)

		// restore index
		switch e.Type {
		case storagepb.PUT:
			s.kvindex.Restore(e.Kv.Key, reversion{e.Kv.CreateIndex, 0}, rev, e.Kv.Version)
		case storagepb.DELETE:
			s.kvindex.Tombstone(e.Kv.Key, rev)
		default:
			log.Panicf("storage: unexpected event type %s", e.Type)
		}

		// update reversion
		s.currentRev = rev
	}

	// a scheduled compaction reversion newer than the finished one means the
	// previous sweep never completed — resume it
	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
	if len(scheduledCompactBytes) != 0 {
		scheduledCompact := bytesToRev(scheduledCompactBytes[0]).main
		if scheduledCompact > s.compactMainRev {
			log.Printf("storage: resume scheduled compaction at %d", scheduledCompact)
			// NOTE(review): unlike Compact's internal goroutine, this one is
			// not tracked by s.wg, so Close may not wait for it — confirm.
			go s.Compact(scheduledCompact)
		}
	}
	tx.Unlock()

	return nil
}
  190. func (s *store) Close() error {
  191. close(s.stopc)
  192. s.wg.Wait()
  193. return s.b.Close()
  194. }
  195. func (a *store) Equal(b *store) bool {
  196. if a.currentRev != b.currentRev {
  197. return false
  198. }
  199. if a.compactMainRev != b.compactMainRev {
  200. return false
  201. }
  202. return a.kvindex.Equal(b.kvindex)
  203. }
// range is a keyword in Go, add Keys suffix.

// rangeKeys returns the live (PUT) key-values in [key, end) visible at
// rangeRev — or at the current reversion when rangeRev <= 0 — together with
// the reversion the read was served at. At most limit entries are returned
// when limit > 0. Reads beyond the current reversion fail with ErrFutureRev;
// reads at or below the compacted reversion fail with ErrCompacted.
func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64) (kvs []storagepb.KeyValue, rev int64, err error) {
	if rangeRev > s.currentRev.main {
		return nil, s.currentRev.main, ErrFutureRev
	}
	if rangeRev <= 0 {
		rev = int64(s.currentRev.main)
		// pending writes in the open transaction will commit at main+1
		if s.currentRev.sub > 0 {
			rev += 1
		}
	} else {
		rev = rangeRev
	}
	if rev <= s.compactMainRev {
		return nil, 0, ErrCompacted
	}

	_, revpairs := s.kvindex.Range(key, end, int64(rev))
	if len(revpairs) == 0 {
		return nil, rev, nil
	}

	tx := s.b.BatchTx()
	tx.Lock()
	defer tx.Unlock()
	for _, revpair := range revpairs {
		revbytes := newRevBytes()
		revToBytes(revpair, revbytes)
		_, vs := tx.UnsafeRange(keyBucketName, revbytes, nil, 0)
		if len(vs) != 1 {
			// index/backend disagreement is unrecoverable corruption
			log.Fatalf("storage: range cannot find rev (%d,%d)", revpair.main, revpair.sub)
		}

		e := &storagepb.Event{}
		if err := e.Unmarshal(vs[0]); err != nil {
			log.Fatalf("storage: cannot unmarshal event: %v", err)
		}
		// tombstones (DELETE events) are filtered out of the result
		if e.Type == storagepb.PUT {
			kvs = append(kvs, *e.Kv)
		}
		if limit > 0 && len(kvs) >= int(limit) {
			break
		}
	}
	return kvs, rev, nil
}
// put records a PUT event for key/value at main reversion rev and the
// current sub-reversion, persists it in the key bucket, updates the index,
// and advances currentRev.sub. Callers compute rev as currentRev.main+1.
func (s *store) put(key, value []byte, rev int64) {
	c := rev

	// if the key exists before, use its previous created
	_, created, ver, err := s.kvindex.Get(key, rev)
	if err == nil {
		c = created.main
	}

	ibytes := newRevBytes()
	revToBytes(reversion{main: rev, sub: s.currentRev.sub}, ibytes)

	// bump the per-key version for this write (assumes Get leaves ver at its
	// zero value when the key is new — TODO confirm against the index impl)
	ver = ver + 1
	event := storagepb.Event{
		Type: storagepb.PUT,
		Kv: &storagepb.KeyValue{
			Key:         key,
			Value:       value,
			CreateIndex: c,
			ModIndex:    rev,
			Version:     ver,
		},
	}

	d, err := event.Marshal()
	if err != nil {
		log.Fatalf("storage: cannot marshal event: %v", err)
	}

	tx := s.b.BatchTx()
	tx.Lock()
	defer tx.Unlock()
	tx.UnsafePut(keyBucketName, ibytes, d)
	s.kvindex.Put(key, reversion{main: rev, sub: s.currentRev.sub})
	s.currentRev.sub += 1
}
  278. func (s *store) deleteRange(key, end []byte, rev int64) int64 {
  279. var n int64
  280. rrev := rev
  281. if s.currentRev.sub > 0 {
  282. rrev += 1
  283. }
  284. keys, _ := s.kvindex.Range(key, end, rrev)
  285. if len(keys) == 0 {
  286. return 0
  287. }
  288. for _, key := range keys {
  289. ok := s.delete(key, rev)
  290. if ok {
  291. n++
  292. }
  293. }
  294. return n
  295. }
// delete writes a DELETE (tombstone) event for key at main reversion
// mainrev. It returns false when the key does not exist at the reversion
// being read or is already a tombstone, true when a tombstone was recorded.
func (s *store) delete(key []byte, mainrev int64) bool {
	grev := mainrev
	// pending writes in the open transaction are visible at mainrev+1
	if s.currentRev.sub > 0 {
		grev += 1
	}
	rev, _, _, err := s.kvindex.Get(key, grev)
	if err != nil {
		// key not exist
		return false
	}

	tx := s.b.BatchTx()
	tx.Lock()
	defer tx.Unlock()

	revbytes := newRevBytes()
	revToBytes(rev, revbytes)
	_, vs := tx.UnsafeRange(keyBucketName, revbytes, nil, 0)
	if len(vs) != 1 {
		// index/backend disagreement is unrecoverable corruption
		log.Fatalf("storage: delete cannot find rev (%d,%d)", rev.main, rev.sub)
	}

	e := &storagepb.Event{}
	if err := e.Unmarshal(vs[0]); err != nil {
		log.Fatalf("storage: cannot unmarshal event: %v", err)
	}
	// latest record is already a tombstone: nothing to delete
	if e.Type == storagepb.DELETE {
		return false
	}

	ibytes := newRevBytes()
	revToBytes(reversion{main: mainrev, sub: s.currentRev.sub}, ibytes)
	event := storagepb.Event{
		Type: storagepb.DELETE,
		Kv: &storagepb.KeyValue{
			Key: key,
		},
	}

	d, err := event.Marshal()
	if err != nil {
		log.Fatalf("storage: cannot marshal event: %v", err)
	}

	tx.UnsafePut(keyBucketName, ibytes, d)
	err = s.kvindex.Tombstone(key, reversion{main: mainrev, sub: s.currentRev.sub})
	if err != nil {
		log.Fatalf("storage: cannot tombstone an existing key (%s): %v", string(key), err)
	}
	s.currentRev.sub += 1
	return true
}