snapshot_command.go 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356
  1. // Copyright 2016 CoreOS, Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package command
  15. import (
  16. "encoding/binary"
  17. "encoding/json"
  18. "fmt"
  19. "hash/crc32"
  20. "io"
  21. "os"
  22. "path"
  23. "strings"
  24. "github.com/boltdb/bolt"
  25. "github.com/coreos/etcd/etcdserver"
  26. "github.com/coreos/etcd/etcdserver/etcdserverpb"
  27. "github.com/coreos/etcd/etcdserver/membership"
  28. "github.com/coreos/etcd/pkg/types"
  29. "github.com/coreos/etcd/raft"
  30. "github.com/coreos/etcd/raft/raftpb"
  31. "github.com/coreos/etcd/storage"
  32. "github.com/coreos/etcd/storage/backend"
  33. "github.com/coreos/etcd/wal"
  34. "github.com/spf13/cobra"
  35. "golang.org/x/net/context"
  36. )
// Defaults applied when the restore flags are not set by the user.
const (
	// defaultName is the member name used when --name is not given.
	defaultName = "default"
	// defaultInitialAdvertisePeerURLs is the default --initial-advertise-peer-urls value.
	defaultInitialAdvertisePeerURLs = "http://localhost:2380,http://localhost:7001"
)
// Flag destinations for the "snapshot restore" subcommand.
var (
	restoreCluster      string // --initial-cluster
	restoreClusterToken string // --initial-cluster-token
	restoreDataDir      string // --data-dir
	restorePeerURLs     string // --initial-advertise-peer-urls
	restoreName         string // --name
)
  48. // NewSnapshotCommand returns the cobra command for "snapshot".
  49. func NewSnapshotCommand() *cobra.Command {
  50. cmd := &cobra.Command{
  51. Use: "snapshot",
  52. Short: "snapshot manages etcd node snapshots.",
  53. }
  54. cmd.AddCommand(NewSnapshotSaveCommand())
  55. cmd.AddCommand(NewSnapshotRestoreCommand())
  56. cmd.AddCommand(newSnapshotStatusCommand())
  57. return cmd
  58. }
  59. func NewSnapshotSaveCommand() *cobra.Command {
  60. return &cobra.Command{
  61. Use: "save <filename>",
  62. Short: "save stores an etcd node backend snapshot to a given file.",
  63. Run: snapshotSaveCommandFunc,
  64. }
  65. }
  66. func newSnapshotStatusCommand() *cobra.Command {
  67. return &cobra.Command{
  68. Use: "status <filename>",
  69. Short: "status gets backend snapshot status of a given file.",
  70. Run: snapshotStatusCommandFunc,
  71. }
  72. }
  73. func NewSnapshotRestoreCommand() *cobra.Command {
  74. cmd := &cobra.Command{
  75. Use: "restore <filename>",
  76. Short: "restore an etcd member snapshot to an etcd directory",
  77. Run: snapshotRestoreCommandFunc,
  78. }
  79. cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the data directory.")
  80. cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "Initial cluster configuration for restore bootstrap.")
  81. cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "Initial cluster token for the etcd cluster during restore bootstrap.")
  82. cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster.")
  83. cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member.")
  84. return cmd
  85. }
  86. func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
  87. if len(args) != 1 {
  88. err := fmt.Errorf("snapshot save expects one argument")
  89. ExitWithError(ExitBadArgs, err)
  90. }
  91. path := args[0]
  92. partpath := path + ".part"
  93. f, err := os.Create(partpath)
  94. defer f.Close()
  95. if err != nil {
  96. exiterr := fmt.Errorf("could not open %s (%v)", partpath, err)
  97. ExitWithError(ExitBadArgs, exiterr)
  98. }
  99. c := mustClientFromCmd(cmd)
  100. r, serr := c.Snapshot(context.TODO())
  101. if serr != nil {
  102. os.RemoveAll(partpath)
  103. ExitWithError(ExitInterrupted, serr)
  104. }
  105. if _, rerr := io.Copy(f, r); rerr != nil {
  106. os.RemoveAll(partpath)
  107. ExitWithError(ExitInterrupted, rerr)
  108. }
  109. f.Sync()
  110. if rerr := os.Rename(partpath, path); rerr != nil {
  111. exiterr := fmt.Errorf("could not rename %s to %s (%v)", partpath, path, rerr)
  112. ExitWithError(ExitIO, exiterr)
  113. }
  114. fmt.Printf("Snapshot saved at %s\n", path)
  115. }
  116. func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
  117. if len(args) != 1 {
  118. err := fmt.Errorf("snapshot status requires exactly one argument")
  119. ExitWithError(ExitBadArgs, err)
  120. }
  121. initDisplayFromCmd(cmd)
  122. ds := dbStatus(args[0])
  123. display.DBStatus(ds)
  124. }
  125. func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
  126. if len(args) != 1 {
  127. err := fmt.Errorf("snapshot restore requires exactly one argument")
  128. ExitWithError(ExitBadArgs, err)
  129. }
  130. urlmap, uerr := types.NewURLsMap(restoreCluster)
  131. if uerr != nil {
  132. ExitWithError(ExitBadArgs, uerr)
  133. }
  134. cfg := etcdserver.ServerConfig{
  135. InitialClusterToken: restoreClusterToken,
  136. InitialPeerURLsMap: urlmap,
  137. PeerURLs: types.MustNewURLs(strings.Split(restorePeerURLs, ",")),
  138. Name: restoreName,
  139. }
  140. if err := cfg.VerifyBootstrap(); err != nil {
  141. ExitWithError(ExitBadArgs, err)
  142. }
  143. cl, cerr := membership.NewClusterFromURLsMap(restoreClusterToken, urlmap)
  144. if cerr != nil {
  145. ExitWithError(ExitBadArgs, cerr)
  146. }
  147. basedir := restoreDataDir
  148. if basedir == "" {
  149. basedir = restoreName + ".etcd"
  150. }
  151. waldir := path.Join(basedir, "member", "wal")
  152. snapdir := path.Join(basedir, "member", "snap")
  153. if _, err := os.Stat(basedir); err == nil {
  154. ExitWithError(ExitInvalidInput, fmt.Errorf("data-dir %q exists", basedir))
  155. }
  156. makeDB(snapdir, args[0])
  157. makeWAL(waldir, cl)
  158. }
  159. func initialClusterFromName(name string) string {
  160. n := name
  161. if name == "" {
  162. n = defaultName
  163. }
  164. return fmt.Sprintf("%s=http://localhost:2380,%s=http://localhost:7001", n, n)
  165. }
  166. // makeWAL creates a WAL for the initial cluster
  167. func makeWAL(waldir string, cl *membership.RaftCluster) {
  168. if err := os.MkdirAll(waldir, 0755); err != nil {
  169. ExitWithError(ExitIO, err)
  170. }
  171. m := cl.MemberByName(restoreName)
  172. md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(cl.ID())}
  173. metadata, merr := md.Marshal()
  174. if merr != nil {
  175. ExitWithError(ExitInvalidInput, merr)
  176. }
  177. w, walerr := wal.Create(waldir, metadata)
  178. if walerr != nil {
  179. ExitWithError(ExitIO, walerr)
  180. }
  181. defer w.Close()
  182. peers := make([]raft.Peer, len(cl.MemberIDs()))
  183. for i, id := range cl.MemberIDs() {
  184. ctx, err := json.Marshal((*cl).Member(id))
  185. if err != nil {
  186. ExitWithError(ExitInvalidInput, err)
  187. }
  188. peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
  189. }
  190. ents := make([]raftpb.Entry, len(peers))
  191. for i, p := range peers {
  192. cc := raftpb.ConfChange{
  193. Type: raftpb.ConfChangeAddNode,
  194. NodeID: p.ID,
  195. Context: p.Context}
  196. d, err := cc.Marshal()
  197. if err != nil {
  198. ExitWithError(ExitInvalidInput, err)
  199. }
  200. e := raftpb.Entry{
  201. Type: raftpb.EntryConfChange,
  202. Term: 1,
  203. Index: uint64(i + 1),
  204. Data: d,
  205. }
  206. ents[i] = e
  207. }
  208. w.Save(raftpb.HardState{
  209. Term: 1,
  210. Vote: peers[0].ID,
  211. Commit: uint64(len(ents))}, ents)
  212. }
// initIndex implements ConsistentIndexGetter so the snapshot won't block
// the new raft instance by waiting for a future raft index.
type initIndex struct{}

// ConsistentIndex always reports a fixed index of 1.
func (*initIndex) ConsistentIndex() uint64 { return 1 }
  217. // makeDB copies the database snapshot to the snapshot directory
  218. func makeDB(snapdir, dbfile string) {
  219. f, ferr := os.OpenFile(dbfile, os.O_RDONLY, 0600)
  220. if ferr != nil {
  221. ExitWithError(ExitInvalidInput, ferr)
  222. }
  223. defer f.Close()
  224. if err := os.MkdirAll(snapdir, 0755); err != nil {
  225. ExitWithError(ExitIO, err)
  226. }
  227. dbpath := path.Join(snapdir, "db")
  228. db, dberr := os.OpenFile(dbpath, os.O_WRONLY|os.O_CREATE, 0600)
  229. if dberr != nil {
  230. ExitWithError(ExitIO, dberr)
  231. }
  232. if _, err := io.Copy(db, f); err != nil {
  233. ExitWithError(ExitIO, err)
  234. }
  235. db.Close()
  236. // update consistentIndex so applies go through on etcdserver despite
  237. // having a new raft instance
  238. be := backend.NewDefaultBackend(dbpath)
  239. s := storage.NewStore(be, nil, &initIndex{})
  240. id := s.TxnBegin()
  241. btx := be.BatchTx()
  242. del := func(k, v []byte) error {
  243. _, _, err := s.TxnDeleteRange(id, k, nil)
  244. return err
  245. }
  246. // delete stored members from old cluster since using new members
  247. btx.UnsafeForEach([]byte("members"), del)
  248. btx.UnsafeForEach([]byte("members_removed"), del)
  249. // trigger write-out of new consistent index
  250. s.TxnEnd(id)
  251. s.Commit()
  252. s.Close()
  253. }
// dbstatus summarizes a backend snapshot file for "snapshot status".
type dbstatus struct {
	// Hash is a crc32c checksum over every bucket name, key and value.
	Hash uint32 `json:"hash"`
	// Revision is the latest main revision seen in the "key" bucket.
	Revision int64 `json:"revision"`
	// TotalKey is the total number of key/value pairs across all buckets.
	TotalKey int `json:"totalKey"`
	// TotalSize is the database size in bytes as reported by bolt.
	TotalSize int64 `json:"totalSize"`
}
  260. func dbStatus(p string) dbstatus {
  261. if _, err := os.Stat(p); err != nil {
  262. ExitWithError(ExitError, err)
  263. }
  264. ds := dbstatus{}
  265. db, err := bolt.Open(p, 0600, nil)
  266. if err != nil {
  267. ExitWithError(ExitError, err)
  268. }
  269. h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
  270. err = db.View(func(tx *bolt.Tx) error {
  271. ds.TotalSize = tx.Size()
  272. c := tx.Cursor()
  273. for next, _ := c.First(); next != nil; next, _ = c.Next() {
  274. b := tx.Bucket(next)
  275. if b == nil {
  276. return fmt.Errorf("cannot get hash of bucket %s", string(next))
  277. }
  278. h.Write(next)
  279. iskeyb := (string(next) == "key")
  280. b.ForEach(func(k, v []byte) error {
  281. h.Write(k)
  282. h.Write(v)
  283. if iskeyb {
  284. rev := bytesToRev(k)
  285. ds.Revision = rev.main
  286. }
  287. ds.TotalKey++
  288. return nil
  289. })
  290. }
  291. return nil
  292. })
  293. if err != nil {
  294. ExitWithError(ExitError, err)
  295. }
  296. ds.Hash = h.Sum32()
  297. return ds
  298. }
  299. type revision struct {
  300. main int64
  301. sub int64
  302. }
  303. func bytesToRev(bytes []byte) revision {
  304. return revision{
  305. main: int64(binary.BigEndian.Uint64(bytes[0:8])),
  306. sub: int64(binary.BigEndian.Uint64(bytes[9:])),
  307. }
  308. }