snapshot_command.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461
  1. // Copyright 2016 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package command
  15. import (
  16. "context"
  17. "crypto/sha256"
  18. "encoding/binary"
  19. "encoding/json"
  20. "fmt"
  21. "hash/crc32"
  22. "io"
  23. "math"
  24. "os"
  25. "path/filepath"
  26. "reflect"
  27. "strings"
  28. "github.com/coreos/etcd/etcdserver"
  29. "github.com/coreos/etcd/etcdserver/etcdserverpb"
  30. "github.com/coreos/etcd/etcdserver/membership"
  31. "github.com/coreos/etcd/lease"
  32. "github.com/coreos/etcd/mvcc"
  33. "github.com/coreos/etcd/mvcc/backend"
  34. "github.com/coreos/etcd/pkg/fileutil"
  35. "github.com/coreos/etcd/pkg/types"
  36. "github.com/coreos/etcd/raft"
  37. "github.com/coreos/etcd/raft/raftpb"
  38. "github.com/coreos/etcd/snap"
  39. "github.com/coreos/etcd/snapshot"
  40. "github.com/coreos/etcd/store"
  41. "github.com/coreos/etcd/wal"
  42. "github.com/coreos/etcd/wal/walpb"
  43. bolt "github.com/coreos/bbolt"
  44. "github.com/spf13/cobra"
  45. )
const (
	// defaultName is the member name used when --name is not given.
	defaultName = "default"
	// defaultInitialAdvertisePeerURLs is the peer URL advertised when
	// --initial-advertise-peer-urls is not given.
	defaultInitialAdvertisePeerURLs = "http://localhost:2380"
)

// Flag values for "snapshot restore"; bound in NewSnapshotRestoreCommand.
var (
	restoreCluster      string // --initial-cluster
	restoreClusterToken string // --initial-cluster-token
	restoreDataDir      string // --data-dir
	restoreWalDir       string // --wal-dir
	restorePeerURLs     string // --initial-advertise-peer-urls
	restoreName         string // --name
	skipHashCheck       bool   // --skip-hash-check
)
  59. // NewSnapshotCommand returns the cobra command for "snapshot".
  60. func NewSnapshotCommand() *cobra.Command {
  61. cmd := &cobra.Command{
  62. Use: "snapshot <subcommand>",
  63. Short: "Manages etcd node snapshots",
  64. }
  65. cmd.AddCommand(NewSnapshotSaveCommand())
  66. cmd.AddCommand(NewSnapshotRestoreCommand())
  67. cmd.AddCommand(newSnapshotStatusCommand())
  68. return cmd
  69. }
  70. func NewSnapshotSaveCommand() *cobra.Command {
  71. return &cobra.Command{
  72. Use: "save <filename>",
  73. Short: "Stores an etcd node backend snapshot to a given file",
  74. Run: snapshotSaveCommandFunc,
  75. }
  76. }
  77. func newSnapshotStatusCommand() *cobra.Command {
  78. return &cobra.Command{
  79. Use: "status <filename>",
  80. Short: "Gets backend snapshot status of a given file",
  81. Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint.
  82. The items in the lists are hash, revision, total keys, total size.
  83. `,
  84. Run: snapshotStatusCommandFunc,
  85. }
  86. }
  87. func NewSnapshotRestoreCommand() *cobra.Command {
  88. cmd := &cobra.Command{
  89. Use: "restore <filename> [options]",
  90. Short: "Restores an etcd member snapshot to an etcd directory",
  91. Run: snapshotRestoreCommandFunc,
  92. }
  93. cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the data directory")
  94. cmd.Flags().StringVar(&restoreWalDir, "wal-dir", "", "Path to the WAL directory (use --data-dir if none given)")
  95. cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "Initial cluster configuration for restore bootstrap")
  96. cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "Initial cluster token for the etcd cluster during restore bootstrap")
  97. cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster")
  98. cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
  99. cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
  100. return cmd
  101. }
  102. func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
  103. if len(args) != 1 {
  104. err := fmt.Errorf("snapshot save expects one argument")
  105. ExitWithError(ExitBadArgs, err)
  106. }
  107. path := args[0]
  108. partpath := path + ".part"
  109. f, err := os.Create(partpath)
  110. if err != nil {
  111. exiterr := fmt.Errorf("could not open %s (%v)", partpath, err)
  112. ExitWithError(ExitBadArgs, exiterr)
  113. }
  114. c := mustClientFromCmd(cmd)
  115. r, serr := c.Snapshot(context.TODO())
  116. if serr != nil {
  117. os.RemoveAll(partpath)
  118. ExitWithError(ExitInterrupted, serr)
  119. }
  120. if _, rerr := io.Copy(f, r); rerr != nil {
  121. os.RemoveAll(partpath)
  122. ExitWithError(ExitInterrupted, rerr)
  123. }
  124. fileutil.Fsync(f)
  125. f.Close()
  126. if rerr := os.Rename(partpath, path); rerr != nil {
  127. exiterr := fmt.Errorf("could not rename %s to %s (%v)", partpath, path, rerr)
  128. ExitWithError(ExitIO, exiterr)
  129. }
  130. fmt.Printf("Snapshot saved at %s\n", path)
  131. }
  132. func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
  133. if len(args) != 1 {
  134. err := fmt.Errorf("snapshot status requires exactly one argument")
  135. ExitWithError(ExitBadArgs, err)
  136. }
  137. initDisplayFromCmd(cmd)
  138. ds := dbStatus(args[0])
  139. display.DBStatus(ds)
  140. }
  141. func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
  142. if len(args) != 1 {
  143. err := fmt.Errorf("snapshot restore requires exactly one argument")
  144. ExitWithError(ExitBadArgs, err)
  145. }
  146. urlmap, uerr := types.NewURLsMap(restoreCluster)
  147. if uerr != nil {
  148. ExitWithError(ExitBadArgs, uerr)
  149. }
  150. cfg := etcdserver.ServerConfig{
  151. InitialClusterToken: restoreClusterToken,
  152. InitialPeerURLsMap: urlmap,
  153. PeerURLs: types.MustNewURLs(strings.Split(restorePeerURLs, ",")),
  154. Name: restoreName,
  155. }
  156. if err := cfg.VerifyBootstrap(); err != nil {
  157. ExitWithError(ExitBadArgs, err)
  158. }
  159. cl, cerr := membership.NewClusterFromURLsMap(restoreClusterToken, urlmap)
  160. if cerr != nil {
  161. ExitWithError(ExitBadArgs, cerr)
  162. }
  163. basedir := restoreDataDir
  164. if basedir == "" {
  165. basedir = restoreName + ".etcd"
  166. }
  167. waldir := restoreWalDir
  168. if waldir == "" {
  169. waldir = filepath.Join(basedir, "member", "wal")
  170. }
  171. snapdir := filepath.Join(basedir, "member", "snap")
  172. if _, err := os.Stat(basedir); err == nil {
  173. ExitWithError(ExitInvalidInput, fmt.Errorf("data-dir %q exists", basedir))
  174. }
  175. makeDB(snapdir, args[0], len(cl.Members()))
  176. makeWALAndSnap(waldir, snapdir, cl)
  177. }
  178. func initialClusterFromName(name string) string {
  179. n := name
  180. if name == "" {
  181. n = defaultName
  182. }
  183. return fmt.Sprintf("%s=http://localhost:2380", n)
  184. }
  185. // makeWAL creates a WAL for the initial cluster
  186. func makeWALAndSnap(waldir, snapdir string, cl *membership.RaftCluster) {
  187. if err := fileutil.CreateDirAll(waldir); err != nil {
  188. ExitWithError(ExitIO, err)
  189. }
  190. // add members again to persist them to the store we create.
  191. st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
  192. cl.SetStore(st)
  193. for _, m := range cl.Members() {
  194. cl.AddMember(m)
  195. }
  196. m := cl.MemberByName(restoreName)
  197. md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(cl.ID())}
  198. metadata, merr := md.Marshal()
  199. if merr != nil {
  200. ExitWithError(ExitInvalidInput, merr)
  201. }
  202. w, walerr := wal.Create(waldir, metadata)
  203. if walerr != nil {
  204. ExitWithError(ExitIO, walerr)
  205. }
  206. defer w.Close()
  207. peers := make([]raft.Peer, len(cl.MemberIDs()))
  208. for i, id := range cl.MemberIDs() {
  209. ctx, err := json.Marshal((*cl).Member(id))
  210. if err != nil {
  211. ExitWithError(ExitInvalidInput, err)
  212. }
  213. peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
  214. }
  215. ents := make([]raftpb.Entry, len(peers))
  216. nodeIDs := make([]uint64, len(peers))
  217. for i, p := range peers {
  218. nodeIDs[i] = p.ID
  219. cc := raftpb.ConfChange{
  220. Type: raftpb.ConfChangeAddNode,
  221. NodeID: p.ID,
  222. Context: p.Context}
  223. d, err := cc.Marshal()
  224. if err != nil {
  225. ExitWithError(ExitInvalidInput, err)
  226. }
  227. e := raftpb.Entry{
  228. Type: raftpb.EntryConfChange,
  229. Term: 1,
  230. Index: uint64(i + 1),
  231. Data: d,
  232. }
  233. ents[i] = e
  234. }
  235. commit, term := uint64(len(ents)), uint64(1)
  236. if err := w.Save(raftpb.HardState{
  237. Term: term,
  238. Vote: peers[0].ID,
  239. Commit: commit}, ents); err != nil {
  240. ExitWithError(ExitIO, err)
  241. }
  242. b, berr := st.Save()
  243. if berr != nil {
  244. ExitWithError(ExitError, berr)
  245. }
  246. raftSnap := raftpb.Snapshot{
  247. Data: b,
  248. Metadata: raftpb.SnapshotMetadata{
  249. Index: commit,
  250. Term: term,
  251. ConfState: raftpb.ConfState{
  252. Nodes: nodeIDs,
  253. },
  254. },
  255. }
  256. snapshotter := snap.New(snapdir)
  257. if err := snapshotter.SaveSnap(raftSnap); err != nil {
  258. panic(err)
  259. }
  260. if err := w.SaveSnapshot(walpb.Snapshot{Index: commit, Term: term}); err != nil {
  261. ExitWithError(ExitIO, err)
  262. }
  263. }
  264. // initIndex implements ConsistentIndexGetter so the snapshot won't block
  265. // the new raft instance by waiting for a future raft index.
  266. type initIndex int
  267. func (i *initIndex) ConsistentIndex() uint64 { return uint64(*i) }
// makeDB copies the database snapshot to the snapshot directory:
// it verifies the trailing sha256 integrity hash (unless --skip-hash-check),
// truncates the hash away, clears the old cluster's membership buckets, and
// persists a consistent index of `commit` so applies go through on the
// restored server.
func makeDB(snapdir, dbfile string, commit int) {
	f, ferr := os.OpenFile(dbfile, os.O_RDONLY, 0600)
	if ferr != nil {
		ExitWithError(ExitInvalidInput, ferr)
	}
	defer f.Close()

	// get snapshot integrity hash: "snapshot save" appends a sha256 of the
	// db contents as the final sha256.Size bytes of the file.
	if _, err := f.Seek(-sha256.Size, io.SeekEnd); err != nil {
		ExitWithError(ExitIO, err)
	}
	sha := make([]byte, sha256.Size)
	if _, err := f.Read(sha); err != nil {
		ExitWithError(ExitIO, err)
	}
	// rewind so the copy below starts from the beginning of the file.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		ExitWithError(ExitIO, err)
	}

	if err := fileutil.CreateDirAll(snapdir); err != nil {
		ExitWithError(ExitIO, err)
	}

	dbpath := filepath.Join(snapdir, "db")
	db, dberr := os.OpenFile(dbpath, os.O_RDWR|os.O_CREATE, 0600)
	if dberr != nil {
		ExitWithError(ExitIO, dberr)
	}
	if _, err := io.Copy(db, f); err != nil {
		ExitWithError(ExitIO, err)
	}

	// truncate away integrity hash, if any.
	// NOTE(review): this assumes the bare db length is a multiple of 512, so
	// a length that is sha256.Size past a 512-byte boundary implies the hash
	// trailer is present — confirm against the backend's page alignment.
	off, serr := db.Seek(0, io.SeekEnd)
	if serr != nil {
		ExitWithError(ExitIO, serr)
	}
	hasHash := (off % 512) == sha256.Size
	if hasHash {
		if err := db.Truncate(off - sha256.Size); err != nil {
			ExitWithError(ExitIO, err)
		}
	}

	// a file without the trailer (e.g. copied straight out of a data dir)
	// can only be restored when the user explicitly opts out of verification.
	if !hasHash && !skipHashCheck {
		err := fmt.Errorf("snapshot missing hash but --skip-hash-check=false")
		ExitWithError(ExitBadArgs, err)
	}

	if hasHash && !skipHashCheck {
		// check for match: hash the truncated copy and compare with the
		// trailer read earlier.
		if _, err := db.Seek(0, io.SeekStart); err != nil {
			ExitWithError(ExitIO, err)
		}
		h := sha256.New()
		if _, err := io.Copy(h, db); err != nil {
			ExitWithError(ExitIO, err)
		}
		dbsha := h.Sum(nil)
		if !reflect.DeepEqual(sha, dbsha) {
			err := fmt.Errorf("expected sha256 %v, got %v", sha, dbsha)
			ExitWithError(ExitInvalidInput, err)
		}
	}

	// db hash is OK, can now modify DB so it can be part of a new cluster
	db.Close()

	// update consistentIndex so applies go through on etcdserver despite
	// having a new raft instance
	be := backend.NewDefaultBackend(dbpath)
	// a lessor never timeouts leases
	lessor := lease.NewLessor(be, math.MaxInt64)
	s := mvcc.NewStore(be, lessor, (*initIndex)(&commit))
	txn := s.Write()
	btx := be.BatchTx()
	// v is intentionally unused: deletion only needs the key.
	del := func(k, v []byte) error {
		txn.DeleteRange(k, nil)
		return nil
	}

	// delete stored members from old cluster since using new members
	btx.UnsafeForEach([]byte("members"), del)
	// todo: add back new members when we start to deprecate old snap file.
	btx.UnsafeForEach([]byte("members_removed"), del)

	// trigger write-out of new consistent index
	txn.End()
	s.Commit()
	s.Close()
	be.Close()
}
  351. func dbStatus(p string) snapshot.Status {
  352. if _, err := os.Stat(p); err != nil {
  353. ExitWithError(ExitError, err)
  354. }
  355. ds := snapshot.Status{}
  356. db, err := bolt.Open(p, 0400, &bolt.Options{ReadOnly: true})
  357. if err != nil {
  358. ExitWithError(ExitError, err)
  359. }
  360. defer db.Close()
  361. h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
  362. err = db.View(func(tx *bolt.Tx) error {
  363. ds.TotalSize = tx.Size()
  364. c := tx.Cursor()
  365. for next, _ := c.First(); next != nil; next, _ = c.Next() {
  366. b := tx.Bucket(next)
  367. if b == nil {
  368. return fmt.Errorf("cannot get hash of bucket %s", string(next))
  369. }
  370. h.Write(next)
  371. iskeyb := (string(next) == "key")
  372. b.ForEach(func(k, v []byte) error {
  373. h.Write(k)
  374. h.Write(v)
  375. if iskeyb {
  376. rev := bytesToRev(k)
  377. ds.Revision = rev.main
  378. }
  379. ds.TotalKey++
  380. return nil
  381. })
  382. }
  383. return nil
  384. })
  385. if err != nil {
  386. ExitWithError(ExitError, err)
  387. }
  388. ds.Hash = h.Sum32()
  389. return ds
  390. }
  391. type revision struct {
  392. main int64
  393. sub int64
  394. }
  395. func bytesToRev(bytes []byte) revision {
  396. return revision{
  397. main: int64(binary.BigEndian.Uint64(bytes[0:8])),
  398. sub: int64(binary.BigEndian.Uint64(bytes[9:])),
  399. }
  400. }