backend_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import (
	"fmt"
	"io/ioutil"
	"os"
	"reflect"
	"testing"
	"time"

	bolt "go.etcd.io/bbolt"
)

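// TestBackendClose ensures the backend can be closed and that Close returns
// within a bounded amount of time.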
func TestBackendClose(t *testing.T) {
	b, tmpPath := NewTmpBackend(time.Hour, 10000)
	defer os.Remove(tmpPath)

	// check close could work
	done := make(chan struct{})
	go func() {
		err := b.Close()
		if err != nil {
			t.Errorf("close error = %v, want nil", err)
		}
		done <- struct{}{}
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Errorf("failed to close database in 10s")
	}
}

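// TestBackendSnapshot writes a key through a batch transaction, snapshots the
// backend to a file, and verifies that a new backend bootstrapped from that
// snapshot can read the key back.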
func TestBackendSnapshot(t *testing.T) {
	b, tmpPath := NewTmpBackend(time.Hour, 10000)
	defer cleanup(b, tmpPath)

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("test"))
	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
	tx.Unlock()
	b.ForceCommit()

	// write snapshot to a new file
	f, err := ioutil.TempFile(os.TempDir(), "etcd_backend_test")
	if err != nil {
		t.Fatal(err)
	}
	snap := b.Snapshot()
	defer snap.Close()
	if _, err := snap.WriteTo(f); err != nil {
		t.Fatal(err)
	}
	f.Close()

	// bootstrap new backend from the snapshot
	bcfg := DefaultBackendConfig()
	bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = f.Name(), time.Hour, 10000
	nb := New(bcfg)
	defer cleanup(nb, f.Name())

	newTx := nb.BatchTx()
	newTx.Lock()
	ks, _ := newTx.UnsafeRange([]byte("test"), []byte("foo"), []byte("goo"), 0)
	if len(ks) != 1 {
		t.Errorf("len(ks) = %d, want 1", len(ks))
	}
	newTx.Unlock()
}

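// TestBackendBatchIntervalCommit ensures that pending writes are committed by
// the batch-interval loop without an explicit ForceCommit.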
func TestBackendBatchIntervalCommit(t *testing.T) {
	// start backend with a super short batch interval so
	// we do not need to wait long for a commit to happen.
	b, tmpPath := NewTmpBackend(time.Nanosecond, 10000)
	defer cleanup(b, tmpPath)

	pc := b.Commits()

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("test"))
	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
	tx.Unlock()

	for i := 0; i < 10; i++ {
		if b.Commits() >= pc+1 {
			break
		}
		time.Sleep(time.Duration(i*100) * time.Millisecond)
	}

	// check whether the put happened via a direct db view
	b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte("test"))
		if bucket == nil {
			t.Errorf("bucket test does not exist")
			return nil
		}
		v := bucket.Get([]byte("foo"))
		if v == nil {
			t.Errorf("foo key failed to be written in backend")
		}
		return nil
	})
}

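// TestBackendDefrag verifies that defragmentation reclaims space freed by
// deletes, preserves the backend hash, and leaves the backend writable.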
func TestBackendDefrag(t *testing.T) {
	b, tmpPath := NewDefaultTmpBackend()
	defer cleanup(b, tmpPath)

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("test"))
	for i := 0; i < defragLimit+100; i++ {
		tx.UnsafePut([]byte("test"), []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
	}
	tx.Unlock()
	b.ForceCommit()

	// remove some keys to ensure the disk space will be reclaimed after defrag
	tx = b.BatchTx()
	tx.Lock()
	for i := 0; i < 50; i++ {
		tx.UnsafeDelete([]byte("test"), []byte(fmt.Sprintf("foo_%d", i)))
	}
	tx.Unlock()
	b.ForceCommit()

	size := b.Size()

	// shrink and check hash
	oh, err := b.Hash(nil)
	if err != nil {
		t.Fatal(err)
	}

	err = b.Defrag()
	if err != nil {
		t.Fatal(err)
	}

	nh, err := b.Hash(nil)
	if err != nil {
		t.Fatal(err)
	}
	if oh != nh {
		t.Errorf("hash = %v, want %v", nh, oh)
	}

	nsize := b.Size()
	if nsize >= size {
		t.Errorf("new size = %v, want < %d", nsize, size)
	}

	// try to put more keys after the shrink
	tx = b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("test"))
	tx.UnsafePut([]byte("test"), []byte("more"), []byte("bar"))
	tx.Unlock()
	b.ForceCommit()
}

// TestBackendWriteback ensures writes are stored to the read txn on write txn unlock.
func TestBackendWriteback(t *testing.T) {
	b, tmpPath := NewDefaultTmpBackend()
	defer cleanup(b, tmpPath)

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("key"))
	tx.UnsafePut([]byte("key"), []byte("abc"), []byte("bar"))
	tx.UnsafePut([]byte("key"), []byte("def"), []byte("baz"))
	tx.UnsafePut([]byte("key"), []byte("overwrite"), []byte("1"))
	tx.Unlock()

	// overwrites should be propagated too
	tx.Lock()
	tx.UnsafePut([]byte("key"), []byte("overwrite"), []byte("2"))
	tx.Unlock()

	keys := []struct {
		key   []byte
		end   []byte
		limit int64
		wkey  [][]byte
		wval  [][]byte
	}{
		{
			key:  []byte("abc"),
			end:  nil,
			wkey: [][]byte{[]byte("abc")},
			wval: [][]byte{[]byte("bar")},
		},
		{
			key:  []byte("abc"),
			end:  []byte("def"),
			wkey: [][]byte{[]byte("abc")},
			wval: [][]byte{[]byte("bar")},
		},
		{
			key:  []byte("abc"),
			end:  []byte("deg"),
			wkey: [][]byte{[]byte("abc"), []byte("def")},
			wval: [][]byte{[]byte("bar"), []byte("baz")},
		},
		{
			key:   []byte("abc"),
			end:   []byte("\xff"),
			limit: 1,
			wkey:  [][]byte{[]byte("abc")},
			wval:  [][]byte{[]byte("bar")},
		},
		{
			key:  []byte("abc"),
			end:  []byte("\xff"),
			wkey: [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")},
			wval: [][]byte{[]byte("bar"), []byte("baz"), []byte("2")},
		},
	}
	rtx := b.ReadTx()
	for i, tt := range keys {
		rtx.RLock()
		k, v := rtx.UnsafeRange([]byte("key"), tt.key, tt.end, tt.limit)
		rtx.RUnlock()
		if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
			t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
		}
	}
}

// TestConcurrentReadTx ensures that a concurrent read transaction can see all
// prior writes stored in the read buffer.
func TestConcurrentReadTx(t *testing.T) {
	b, tmpPath := NewTmpBackend(time.Hour, 10000)
	defer cleanup(b, tmpPath)

	wtx1 := b.BatchTx()
	wtx1.Lock()
	wtx1.UnsafeCreateBucket([]byte("key"))
	wtx1.UnsafePut([]byte("key"), []byte("abc"), []byte("ABC"))
	wtx1.UnsafePut([]byte("key"), []byte("overwrite"), []byte("1"))
	wtx1.Unlock()

	wtx2 := b.BatchTx()
	wtx2.Lock()
	wtx2.UnsafePut([]byte("key"), []byte("def"), []byte("DEF"))
	wtx2.UnsafePut([]byte("key"), []byte("overwrite"), []byte("2"))
	wtx2.Unlock()

	rtx := b.ConcurrentReadTx()
	rtx.RLock() // no-op
	k, v := rtx.UnsafeRange([]byte("key"), []byte("abc"), []byte("\xff"), 0)
	rtx.RUnlock()
	wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}
	wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")}
	if !reflect.DeepEqual(wKey, k) || !reflect.DeepEqual(wVal, v) {
		t.Errorf("want k=%+v, v=%+v; got k=%+v, v=%+v", wKey, wVal, k, v)
	}
}

// TestBackendWritebackForEach checks that partially written / buffered
// data is visited in the same order as fully committed data.
func TestBackendWritebackForEach(t *testing.T) {
	b, tmpPath := NewTmpBackend(time.Hour, 10000)
	defer cleanup(b, tmpPath)

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("key"))
	for i := 0; i < 5; i++ {
		k := []byte(fmt.Sprintf("%04d", i))
		tx.UnsafePut([]byte("key"), k, []byte("bar"))
	}
	tx.Unlock()

	// writeback
	b.ForceCommit()

	tx.Lock()
	tx.UnsafeCreateBucket([]byte("key"))
	for i := 5; i < 20; i++ {
		k := []byte(fmt.Sprintf("%04d", i))
		tx.UnsafePut([]byte("key"), k, []byte("bar"))
	}
	tx.Unlock()

	seq := ""
	getSeq := func(k, v []byte) error {
		seq += string(k)
		return nil
	}
	rtx := b.ReadTx()
	rtx.RLock()
	rtx.UnsafeForEach([]byte("key"), getSeq)
	rtx.RUnlock()

	partialSeq := seq

	seq = ""
	b.ForceCommit()

	tx.Lock()
	tx.UnsafeForEach([]byte("key"), getSeq)
	tx.Unlock()

	if seq != partialSeq {
		t.Fatalf("expected %q, got %q", seq, partialSeq)
	}
}

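// cleanup closes the backend and removes its backing file.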
func cleanup(b Backend, path string) {
	b.Close()
	os.Remove(path)
}