v3_snapshot.go

// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package snapshot

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"hash/crc32"
	"io"
	"math"
	"os"
	"path/filepath"
	"reflect"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/internal/lease"
	"github.com/coreos/etcd/internal/mvcc"
	"github.com/coreos/etcd/internal/mvcc/backend"
	"github.com/coreos/etcd/internal/raftsnap"
	"github.com/coreos/etcd/internal/store"
	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/logger"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	bolt "github.com/coreos/bbolt"
)

// Manager defines snapshot methods.
type Manager interface {
	// Save fetches snapshot from remote etcd server and saves data to target path.
	// If the context "ctx" is canceled or timed out, snapshot save stream will error out
	// (e.g. context.Canceled, context.DeadlineExceeded).
	Save(ctx context.Context, dbPath string) error

	// Status returns the snapshot file information.
	Status(dbPath string) (Status, error)

	// Restore restores a new etcd data directory from given snapshot file.
	// It returns an error if specified data directory already exists, to
	// prevent unintended data directory overwrites.
	Restore(dbPath string, cfg RestoreConfig) error
}

// Status is the snapshot file status.
type Status struct {
	Hash      uint32 `json:"hash"`
	Revision  int64  `json:"revision"`
	TotalKey  int    `json:"totalKey"`
	TotalSize int64  `json:"totalSize"`
}

// RestoreConfig configures snapshot restore operation.
type RestoreConfig struct {
	// Name is the human-readable name of this member.
	Name string

	// OutputDataDir is the target data directory to save restored data.
	// OutputDataDir should not conflict with existing etcd data directory.
	// If OutputDataDir already exists, it will return an error to prevent
	// unintended data directory overwrites.
	// Defaults to "[Name].etcd" if not given.
	OutputDataDir string
	// OutputWALDir is the target WAL data directory.
	// Defaults to "[OutputDataDir]/member/wal" if not given.
	OutputWALDir string

	// InitialCluster is the initial cluster configuration for restore bootstrap.
	InitialCluster types.URLsMap
	// InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap.
	InitialClusterToken string
	// PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster.
	PeerURLs types.URLs

	// SkipHashCheck is "true" to ignore snapshot integrity hash value
	// (required if copied from data directory).
	SkipHashCheck bool
}

// NewV3 returns a new snapshot Manager for v3.x snapshot.
// "*clientv3.Client" is only used for "Save" method.
// Otherwise, pass "nil".
func NewV3(cli *clientv3.Client, lg logger.Logger) Manager {
	if lg == nil {
		lg = logger.NewDiscardLogger()
	}
	return &v3Manager{cli: cli, logger: lg}
}

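// Illustrative usage sketch: the endpoint, member name, and URLs below are
// placeholders, and error handling is elided for brevity.
//
//	cli, _ := clientv3.New(clientv3.Config{Endpoints: []string{"http://localhost:2379"}})
//	sp := NewV3(cli, nil)
//
//	// fetch a snapshot from the cluster and inspect it
//	_ = sp.Save(context.Background(), "backup.db")
//	st, _ := sp.Status("backup.db")
//	fmt.Printf("hash=%x revision=%d keys=%d\n", st.Hash, st.Revision, st.TotalKey)
//
//	// restore a single-member cluster from the snapshot
//	urls, _ := types.NewURLsMap("m1=http://localhost:2380")
//	_ = sp.Restore("backup.db", RestoreConfig{
//		Name:                "m1",
//		InitialCluster:      urls,
//		InitialClusterToken: "etcd-cluster",
//		PeerURLs:            types.MustNewURLs([]string{"http://localhost:2380"}),
//	})
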
type v3Manager struct {
	cli           *clientv3.Client
	name          string
	dbPath        string
	walDir        string
	snapDir       string
	cl            *membership.RaftCluster
	skipHashCheck bool
	logger        logger.Logger
}

func (s *v3Manager) Save(ctx context.Context, dbPath string) error {
	partpath := dbPath + ".part"
	f, err := os.Create(partpath)
	if err != nil {
		os.RemoveAll(partpath)
		return fmt.Errorf("could not open %s (%v)", partpath, err)
	}
	s.logger.Infof("created temporary db file %q", partpath)

	var rd io.ReadCloser
	rd, err = s.cli.Snapshot(ctx)
	if err != nil {
		os.RemoveAll(partpath)
		return err
	}
	s.logger.Infof("copying from snapshot stream")
	if _, err = io.Copy(f, rd); err != nil {
		os.RemoveAll(partpath)
		return err
	}
	if err = fileutil.Fsync(f); err != nil {
		os.RemoveAll(partpath)
		return err
	}
	if err = f.Close(); err != nil {
		os.RemoveAll(partpath)
		return err
	}

	s.logger.Infof("renaming from %q to %q", partpath, dbPath)
	if err = os.Rename(partpath, dbPath); err != nil {
		os.RemoveAll(partpath)
		return fmt.Errorf("could not rename %s to %s (%v)", partpath, dbPath, err)
	}
	return nil
}

func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
	if _, err = os.Stat(dbPath); err != nil {
		return ds, err
	}

	db, err := bolt.Open(dbPath, 0400, &bolt.Options{ReadOnly: true})
	if err != nil {
		return ds, err
	}
	defer db.Close()

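	// Checksum every bucket name, key, and value with crc32 (Castagnoli);
	// the revision of the last key iterated in the "key" bucket is reported
	// as Revision.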
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	if err = db.View(func(tx *bolt.Tx) error {
		ds.TotalSize = tx.Size()
		c := tx.Cursor()
		for next, _ := c.First(); next != nil; next, _ = c.Next() {
			b := tx.Bucket(next)
			if b == nil {
				return fmt.Errorf("cannot get hash of bucket %s", string(next))
			}
			h.Write(next)
			iskeyb := (string(next) == "key")
			b.ForEach(func(k, v []byte) error {
				h.Write(k)
				h.Write(v)
				if iskeyb {
					rev := bytesToRev(k)
					ds.Revision = rev.main
				}
				ds.TotalKey++
				return nil
			})
		}
		return nil
	}); err != nil {
		return ds, err
	}

	ds.Hash = h.Sum32()
	return ds, nil
}

func (s *v3Manager) Restore(dbPath string, cfg RestoreConfig) error {
	srv := etcdserver.ServerConfig{
		Name:                cfg.Name,
		InitialClusterToken: cfg.InitialClusterToken,
		InitialPeerURLsMap:  cfg.InitialCluster,
		PeerURLs:            cfg.PeerURLs,
	}
	if err := srv.VerifyBootstrap(); err != nil {
		return err
	}

	var err error
	s.cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialCluster)
	if err != nil {
		return err
	}

	dataDir := cfg.OutputDataDir
	if dataDir == "" {
		dataDir = cfg.Name + ".etcd"
	}
	if _, err = os.Stat(dataDir); err == nil {
		return fmt.Errorf("data-dir %q exists", dataDir)
	}
	walDir := cfg.OutputWALDir
	if walDir == "" {
		walDir = filepath.Join(dataDir, "member", "wal")
	} else if _, err = os.Stat(walDir); err == nil {
		return fmt.Errorf("wal-dir %q exists", walDir)
	}
	s.logger.Infof("restoring snapshot file %q to data-dir %q, wal-dir %q", dbPath, dataDir, walDir)

	s.name = cfg.Name
	s.dbPath = dbPath
	s.walDir = walDir
	s.snapDir = filepath.Join(dataDir, "member", "snap")
	s.skipHashCheck = cfg.SkipHashCheck

	s.logger.Infof("writing snapshot directory %q", s.snapDir)
	if err = s.saveDB(); err != nil {
		return err
	}
	s.logger.Infof("writing WAL directory %q and raft snapshot to %q", s.walDir, s.snapDir)
	err = s.saveWALAndSnap()
	if err == nil {
		s.logger.Infof("finished restore %q to data directory %q, wal directory %q", dbPath, dataDir, walDir)
	}
	return err
}

// saveDB copies the database snapshot to the snapshot directory
func (s *v3Manager) saveDB() error {
	f, ferr := os.OpenFile(s.dbPath, os.O_RDONLY, 0600)
	if ferr != nil {
		return ferr
	}
	defer f.Close()

	// get snapshot integrity hash
	if _, err := f.Seek(-sha256.Size, io.SeekEnd); err != nil {
		return err
	}
	sha := make([]byte, sha256.Size)
	if _, err := f.Read(sha); err != nil {
		return err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return err
	}

	if err := fileutil.CreateDirAll(s.snapDir); err != nil {
		return err
	}

	dbpath := filepath.Join(s.snapDir, "db")
	db, dberr := os.OpenFile(dbpath, os.O_RDWR|os.O_CREATE, 0600)
	if dberr != nil {
		return dberr
	}
	if _, err := io.Copy(db, f); err != nil {
		return err
	}

	// truncate away integrity hash, if any.
	off, serr := db.Seek(0, io.SeekEnd)
	if serr != nil {
		return serr
	}

	hasHash := (off % 512) == sha256.Size
	if hasHash {
		if err := db.Truncate(off - sha256.Size); err != nil {
			return err
		}
	}
	if !hasHash && !s.skipHashCheck {
		return fmt.Errorf("snapshot missing hash but --skip-hash-check=false")
	}

	if hasHash && !s.skipHashCheck {
		// check for match
		if _, err := db.Seek(0, io.SeekStart); err != nil {
			return err
		}
		h := sha256.New()
		if _, err := io.Copy(h, db); err != nil {
			return err
		}
		dbsha := h.Sum(nil)
		if !reflect.DeepEqual(sha, dbsha) {
			return fmt.Errorf("expected sha256 %v, got %v", sha, dbsha)
		}
	}

	// db hash is OK, can now modify DB so it can be part of a new cluster
	db.Close()

	commit := len(s.cl.Members())

	// update consistentIndex so applies go through on etcdserver despite
	// having a new raft instance
	be := backend.NewDefaultBackend(dbpath)

	// a lessor that never times out leases
	lessor := lease.NewLessor(be, math.MaxInt64)

	mvs := mvcc.NewStore(be, lessor, (*initIndex)(&commit))
	txn := mvs.Write()
	btx := be.BatchTx()
	del := func(k, v []byte) error {
		txn.DeleteRange(k, nil)
		return nil
	}

	// delete stored members from old cluster since using new members
	btx.UnsafeForEach([]byte("members"), del)

	// TODO: add back new members when we start to deprecate old snap file.
	btx.UnsafeForEach([]byte("members_removed"), del)

	// trigger write-out of new consistent index
	txn.End()

	mvs.Commit()
	mvs.Close()
	be.Close()

	return nil
}

// saveWALAndSnap creates a WAL for the initial cluster
func (s *v3Manager) saveWALAndSnap() error {
	if err := fileutil.CreateDirAll(s.walDir); err != nil {
		return err
	}

	// add members again to persist them to the store we create.
	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	s.cl.SetStore(st)
	for _, m := range s.cl.Members() {
		s.cl.AddMember(m)
	}

	m := s.cl.MemberByName(s.name)
	md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())}
	metadata, merr := md.Marshal()
	if merr != nil {
		return merr
	}

	w, walerr := wal.Create(s.walDir, metadata)
	if walerr != nil {
		return walerr
	}
	defer w.Close()

	peers := make([]raft.Peer, len(s.cl.MemberIDs()))
	for i, id := range s.cl.MemberIDs() {
		ctx, err := json.Marshal((*s.cl).Member(id))
		if err != nil {
			return err
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}

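	// Seed the new WAL with one ConfChangeAddNode entry per member, all at
	// term 1 and all marked committed in the HardState, so the restored node
	// boots with the new cluster membership already applied.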
	ents := make([]raftpb.Entry, len(peers))
	nodeIDs := make([]uint64, len(peers))
	for i, p := range peers {
		nodeIDs[i] = p.ID
		cc := raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  p.ID,
			Context: p.Context,
		}
		d, err := cc.Marshal()
		if err != nil {
			return err
		}
		ents[i] = raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Term:  1,
			Index: uint64(i + 1),
			Data:  d,
		}
	}

	commit, term := uint64(len(ents)), uint64(1)
	if err := w.Save(raftpb.HardState{
		Term:   term,
		Vote:   peers[0].ID,
		Commit: commit,
	}, ents); err != nil {
		return err
	}

	b, berr := st.Save()
	if berr != nil {
		return berr
	}
	raftSnap := raftpb.Snapshot{
		Data: b,
		Metadata: raftpb.SnapshotMetadata{
			Index: commit,
			Term:  term,
			ConfState: raftpb.ConfState{
				Nodes: nodeIDs,
			},
		},
	}
	sn := raftsnap.New(s.snapDir)
	if err := sn.SaveSnap(raftSnap); err != nil {
		return err
	}

	err := w.SaveSnapshot(walpb.Snapshot{Index: commit, Term: term})
	if err == nil {
		s.logger.Infof("wrote WAL snapshot to %q", s.walDir)
	}
	return err
}