/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcdserver

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
	"os"
	"path"
	"regexp"
	"sort"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/discovery"
	"github.com/coreos/etcd/etcdserver/etcdhttp/httptypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/idutil"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/timeutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/pkg/wait"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"

	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second

	StoreAdminPrefix = "/0"
	StoreKeysPrefix  = "/1"

	purgeFileInterval = 30 * time.Second
)

var (
	storeMembersPrefix        = path.Join(StoreAdminPrefix, "members")
	storeRemovedMembersPrefix = path.Join(StoreAdminPrefix, "removed_members")

	storeMemberAttributeRegexp = regexp.MustCompile(path.Join(storeMembersPrefix, "[[:xdigit:]]{1,16}", attributesSuffix))
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// ID returns the ID of the Server.
	ID() types.ID
	// Leader returns the ID of the leader Server.
	Leader() types.ID
	// Do takes a request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if the member ID has been removed from the cluster, or
	// ErrIDExists if the member ID already exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if the member ID has been removed from the cluster,
	// or ErrIDNotFound if the member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
	// UpdateMember attempts to update an existing member in the cluster. It
	// will return ErrIDNotFound if the member ID does not exist.
	UpdateMember(ctx context.Context, updateMemb Member) error
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	cfg        *ServerConfig
	w          wait.Wait
	stop       chan struct{}
	done       chan struct{}
	errorc     chan error
	id         types.ID
	attributes Attributes

	Cluster *Cluster

	node        raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage

	store store.Store

	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter

	Ticker     <-chan time.Time
	SyncTicker <-chan time.Time

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64

	raftLead uint64

	reqIDGen *idutil.Generator
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
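//
// A minimal lifecycle sketch (assumption: cfg is a fully populated
// ServerConfig; error handling elided):
//
//	srv, err := NewServer(cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv.Start()
//	defer srv.Stop()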
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID

	walVersion, err := wal.DetectVersion(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if walVersion == wal.WALUnknown {
		return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
	}
	haveWAL := walVersion != wal.WALNotExist
	ss := snap.New(cfg.SnapDir())
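
	// Three bootstrap paths: join an existing cluster (no WAL, NewCluster
	// unset), start a brand-new cluster (no WAL, NewCluster set), or
	// restart from an existing WAL.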
	switch {
	case !haveWAL && !cfg.NewCluster:
		us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
		existingCluster, err := GetClusterFromPeers(us)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		cfg.Cluster.SetID(existingCluster.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if isBootstrapped(cfg.Cluster, cfg.Name) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
				return nil, err
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if walVersion != wal.WALv0_5 {
			if err := upgradeWAL(cfg, walVersion); err != nil {
				return nil, err
			}
		}
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				log.Panicf("etcdserver: recovered store from snapshot error: %v", err)
			}
			log.Printf("etcdserver: recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded cluster information from store: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())

	srv := &EtcdServer{
		cfg:         cfg,
		errorc:      make(chan error, 1),
		store:       st,
		node:        n,
		raftStorage: s,
		id:          id,
		attributes:  Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:     cfg.Cluster,
		storage:     NewStorage(w, ss),
		stats:       sstats,
		lstats:      lstats,
		Ticker:      time.Tick(100 * time.Millisecond),
		SyncTicker:  time.Tick(500 * time.Millisecond),
		snapCount:   cfg.SnapCount,
		reqIDGen:    idutil.NewGenerator(uint8(id), time.Now()),
	}

	tr := rafthttp.NewTransporter(cfg.Transport, id, cfg.Cluster.ID(), srv, srv.errorc, sstats, lstats)
	// add all the remote members into sendhub
	for _, m := range cfg.Cluster.Members() {
		if m.Name != cfg.Name {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.transport = tr
	return srv, nil
}

// Start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
	go s.purgeFile()
}

// start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// This function is just used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	s.stop = make(chan struct{})
	s.stats.Initialize()
	// TODO: if this is an empty log, write all peer infos
	// into the first entry
	go s.run()
}
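
// purgeFile starts the snapshot- and WAL-purging goroutines (when the
// corresponding limits are configured) and blocks until one of them reports
// an error or the server is done.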
func (s *EtcdServer) purgeFile() {
	var serrc, werrc <-chan error
	if s.cfg.MaxSnapFiles > 0 {
		serrc = fileutil.PurgeFile(s.cfg.SnapDir(), "snap", s.cfg.MaxSnapFiles, purgeFileInterval, s.done)
	}
	if s.cfg.MaxWALFiles > 0 {
		werrc = fileutil.PurgeFile(s.cfg.WALDir(), "wal", s.cfg.MaxWALFiles, purgeFileInterval, s.done)
	}
	select {
	case e := <-werrc:
		log.Fatalf("etcdserver: failed to purge wal file %v", e)
	case e := <-serrc:
		log.Fatalf("etcdserver: failed to purge snap file %v", e)
	case <-s.done:
		return
	}
}

func (s *EtcdServer) ID() types.ID { return s.id }

func (s *EtcdServer) RaftHandler() http.Handler { return s.transport.Handler() }

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.Cluster.IsIDRemoved(types.ID(m.From)) {
		log.Printf("etcdserver: reject message from removed member %s", types.ID(m.From).String())
		return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
	}
	if m.Type == raftpb.MsgApp {
		s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
	}
	return s.node.Step(ctx, m)
}

func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	var shouldstop bool

	// load initial state from raft storage
	snap, err := s.raftStorage.Snapshot()
	if err != nil {
		log.Panicf("etcdserver: get snapshot from raft storage error: %v", err)
	}
	// snapi indicates the index of the last submitted snapshot request
	snapi := snap.Metadata.Index
	appliedi := snap.Metadata.Index
	confState := snap.Metadata.ConfState

	defer func() {
		s.node.Stop()
		s.transport.Stop()
		if err := s.storage.Close(); err != nil {
			log.Panicf("etcdserver: close storage error: %v", err)
		}
		close(s.done)
	}()
	for {
		select {
		case <-s.Ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				atomic.StoreUint64(&s.raftLead, rd.SoftState.Lead)
				if rd.RaftState == raft.StateLeader {
					syncC = s.SyncTicker
					// TODO: remove the nil checking
					// current test utility does not provide the stats
					if s.stats != nil {
						s.stats.BecomeLeader()
					}
				} else {
					syncC = nil
				}
			}

			// save the incoming snapshot to storage if it is newer than the
			// current snapi
			if !raft.IsEmptySnap(rd.Snapshot) && rd.Snapshot.Metadata.Index > snapi {
				if err := s.storage.SaveSnap(rd.Snapshot); err != nil {
					log.Fatalf("etcdserver: save snapshot error: %v", err)
				}
				s.raftStorage.ApplySnapshot(rd.Snapshot)
				snapi = rd.Snapshot.Metadata.Index
				log.Printf("etcdserver: saved incoming snapshot at index %d", snapi)
			}

			if err := s.storage.Save(rd.HardState, rd.Entries); err != nil {
				log.Fatalf("etcdserver: save state and entries error: %v", err)
			}
			s.raftStorage.Append(rd.Entries)
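
			// At this point the hard state and entries of this Ready have
			// been persisted; raft requires that messages are sent only
			// after the state they reference is stable on disk.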
			s.send(rd.Messages)

			// recover from the incoming snapshot if it is newer than the
			// current applied index
			if !raft.IsEmptySnap(rd.Snapshot) && rd.Snapshot.Metadata.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					log.Panicf("recovery store error: %v", err)
				}
				s.Cluster.Recover()
				appliedi = rd.Snapshot.Metadata.Index
				log.Printf("etcdserver: recovered from incoming snapshot at index %d", appliedi)
			}

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			if len(rd.CommittedEntries) != 0 {
				firsti := rd.CommittedEntries[0].Index
				if firsti > appliedi+1 {
					log.Panicf("etcdserver: first index of committed entry[%d] should be <= appliedi[%d] + 1", firsti, appliedi)
				}
				var ents []raftpb.Entry
				if appliedi+1-firsti < uint64(len(rd.CommittedEntries)) {
					ents = rd.CommittedEntries[appliedi+1-firsti:]
				}
				if len(ents) > 0 {
					if appliedi, shouldstop = s.apply(ents, &confState); shouldstop {
						go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
					}
				}
			}

			s.node.Advance()

			if appliedi-snapi > s.snapCount {
				log.Printf("etcdserver: start to snapshot (applied: %d, lastsnap: %d)", appliedi, snapi)
				s.snapshot(appliedi, &confState)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case err := <-s.errorc:
			log.Printf("etcdserver: %s", err)
			log.Printf("etcdserver: the data-dir used by this member must be removed.")
			return
		case <-s.stop:
			return
		}
	}
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
func (s *EtcdServer) Stop() {
	select {
	case s.stop <- struct{}{}:
	case <-s.done:
		return
	}
	<-s.done
}

func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
	time.Sleep(d)
	select {
	case s.errorc <- err:
	default:
	}
}

// StopNotify returns a channel that receives an empty struct
// when the server is stopped.
func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
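//
// For illustration, a linearizable read could be issued as follows (the key
// path "/1/foo" is hypothetical):
//
//	resp, err := s.Do(ctx, pb.Request{Method: "GET", Path: "/1/foo", Quorum: true})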
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	r.ID = s.reqIDGen.Next()
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}

func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }

func (s *EtcdServer) LeaderStats() []byte {
	lead := atomic.LoadUint64(&s.raftLead)
	if lead != uint64(s.id) {
		return nil
	}
	return s.lstats.JSON()
}

func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) UpdateMember(ctx context.Context, memb Member) error {
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeUpdateNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

// Implement the RaftTimer interface
func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.raftIndex) }

func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.raftTerm) }

// Only for testing purposes
// TODO: add Raft server interface to expose raft related info:
// Index, Term, Lead, Committed, Applied, LastIndex, etc.
func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.raftLead) }

func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	cc.ID = s.reqIDGen.Next()
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("return type should always be error")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return parseCtxErr(ctx.Err())
	case <-s.done:
		return ErrStopped
	}
}

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     s.reqIDGen.Next(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no guarantee that the node has a leader when the SYNC request
	// is proposed, and Propose may block in that case, so propose from a
	// separate goroutine.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to cluster %s", s.attributes, s.Cluster.ID())
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}
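
// send hands raft messages to the transport. Messages addressed to members
// that have been removed from the cluster have To reset to 0 so the
// transport ignores them instead of contacting a removed peer.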
func (s *EtcdServer) send(ms []raftpb.Message) {
	// iterate by index: ranging over copies would discard the To mutation
	for i := range ms {
		if s.Cluster.IsIDRemoved(types.ID(ms[i].To)) {
			ms[i].To = 0
		}
	}
	s.transport.Send(ms)
}

// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
	var applied uint64
	var shouldstop bool
	var err error
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			shouldstop, err = s.applyConfChange(cc, confState)
			s.w.Trigger(cc.ID, err)
		default:
			log.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied, shouldstop
}

// applyRequest interprets r as a call to store.X and returns a Response
// interpreted from store.Event
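//
// For example, a PUT with PrevExist set to true maps to store.Update, a PUT
// with PrevExist set to false maps to store.Create, and a PUT that carries
// PrevIndex or PrevValue becomes a store.CompareAndSwap.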
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := timeutil.UnixNanoToTime(r.Expiration)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := pbutil.GetBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			if storeMemberAttributeRegexp.MatchString(r.Path) {
				id := mustParseMemberIDFromKey(path.Dir(r.Path))
				var attr Attributes
				if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
					log.Panicf("unmarshal %s should never fail: %v", r.Val, err)
				}
				s.Cluster.UpdateAttributes(id, attr)
			}
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}

// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
	if err := s.Cluster.ValidateConfigurationChange(cc); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return false, err
	}
	*confState = *s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			log.Panicf("unmarshal member should never fail: %v", err)
		}
		if cc.NodeID != uint64(m.ID) {
			log.Panicf("nodeID should always be equal to member ID")
		}
		s.Cluster.AddMember(m)
		if m.ID == s.id {
			log.Printf("etcdserver: added local member %s %v to cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		} else {
			s.transport.AddPeer(m.ID, m.PeerURLs)
			log.Printf("etcdserver: added member %s %v to cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		}
	case raftpb.ConfChangeRemoveNode:
		id := types.ID(cc.NodeID)
		s.Cluster.RemoveMember(id)
		if id == s.id {
			return true, nil
		} else {
			s.transport.RemovePeer(id)
			log.Printf("etcdserver: removed member %s from cluster %s", id, s.Cluster.ID())
		}
	case raftpb.ConfChangeUpdateNode:
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			log.Panicf("unmarshal member should never fail: %v", err)
		}
		if cc.NodeID != uint64(m.ID) {
			log.Panicf("nodeID should always be equal to member ID")
		}
		s.Cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
		if m.ID == s.id {
			log.Printf("etcdserver: updated local member %s %v in cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		} else {
			s.transport.UpdatePeer(m.ID, m.PeerURLs)
			log.Printf("etcdserver: updated member %s %v in cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		}
	}
	return false, nil
}
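
// snapshot serializes the current store, compacts the raft log through snapi,
// rotates the WAL, and persists the resulting snapshot to disk.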
// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, confState *raftpb.ConfState) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		log.Panicf("etcdserver: store save should never fail: %v", err)
	}
	err = s.raftStorage.Compact(snapi, confState, d)
	if err != nil {
		// the snapshot was done asynchronously with the progress of raft.
		// raft might have already got a newer snapshot and called compact.
		if err == raft.ErrCompacted {
			return
		}
		log.Panicf("etcdserver: unexpected compaction error %v", err)
	}
	log.Printf("etcdserver: compacted log at index %d", snapi)

	if err := s.storage.Cut(); err != nil {
		log.Panicf("etcdserver: rotate wal file should never fail: %v", err)
	}
	snap, err := s.raftStorage.Snapshot()
	if err != nil {
		log.Panicf("etcdserver: snapshot error: %v", err)
	}
	if err := s.storage.SaveSnap(snap); err != nil {
		log.Fatalf("etcdserver: save snapshot error: %v", err)
	}
	log.Printf("etcdserver: saved snapshot at index %d", snap.Metadata.Index)
}

// for testing
func (s *EtcdServer) PauseSending() {
	p := s.transport.(rafthttp.Pausable)
	p.Pause()
}

func (s *EtcdServer) ResumeSending() {
	p := s.transport.(rafthttp.Pausable)
	p.Resume()
}
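
// startNode bootstraps a fresh raft node: it records the node and cluster IDs
// in a new WAL and starts raft with the given peer IDs. ids is nil when
// joining an existing cluster, in which case membership is learned from the
// cluster itself rather than seeded locally.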
func startNode(cfg *ServerConfig, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cfg.Cluster.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatalf("etcdserver: create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start member %s in cluster %s", id, cfg.Cluster.ID())
	election := cfg.ElectionTimeoutTicks
	if election == 0 {
		election = 10
	}
	s = raft.NewMemoryStorage()
	n = raft.StartNode(uint64(id), peers, election, 1, s)
	return
}
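
// restartNode rebuilds raft state for an existing member: it replays the WAL
// (from the given snapshot onward, if any) into a fresh MemoryStorage and
// restarts the raft node from that state.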
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
	cfg.Cluster.SetID(cid)

	log.Printf("etcdserver: restart member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	election := cfg.ElectionTimeoutTicks
	if election == 0 {
		election = 10
	}
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	n := raft.RestartNode(uint64(id), election, 1, s)
	return id, n, s, w
}

// isBootstrapped tries to check if the given member has been bootstrapped
// in the given cluster.
func isBootstrapped(cl *Cluster, member string) bool {
	us := getOtherPeerURLs(cl, member)
	rcl, err := getClusterFromPeers(us, false)
	if err != nil {
		return false
	}
	id := cl.MemberByName(member).ID
	m := rcl.Member(id)
	if m == nil {
		return false
	}
	return len(m.ClientURLs) > 0
}

// GetClusterFromPeers takes a set of URLs representing etcd peers, and
// attempts to construct a Cluster by accessing the members endpoint on one of
// these URLs. The first URL to provide a response is used. If no URLs provide
// a response, or a Cluster cannot be successfully created from a received
// response, an error is returned.
func GetClusterFromPeers(urls []string) (*Cluster, error) {
	return getClusterFromPeers(urls, true)
}

// If logerr is true, it prints out more error messages.
func getClusterFromPeers(urls []string, logerr bool) (*Cluster, error) {
	cc := &http.Client{
		Transport: &http.Transport{
			ResponseHeaderTimeout: 500 * time.Millisecond,
		},
		Timeout: time.Second,
	}
	for _, u := range urls {
		resp, err := cc.Get(u + "/members")
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not get cluster response from %s: %v", u, err)
			}
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not read the body of cluster response: %v", err)
			}
			continue
		}
		var membs []*Member
		if err := json.Unmarshal(b, &membs); err != nil {
			if logerr {
				log.Printf("etcdserver: could not unmarshal cluster response: %v", err)
			}
			continue
		}
		id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not parse the cluster ID from cluster res: %v", err)
			}
			continue
		}
		return NewClusterFromMembers("", id, membs), nil
	}
	return nil, fmt.Errorf("etcdserver: could not retrieve cluster information from the given urls")
}

// getOtherPeerURLs returns the peer URLs of the other members in the cluster.
// The returned list is sorted in ascending lexicographical order.
func getOtherPeerURLs(cl ClusterInfo, self string) []string {
	us := make([]string, 0)
	for _, m := range cl.Members() {
		if m.Name == self {
			continue
		}
		us = append(us, m.PeerURLs...)
	}
	sort.Strings(us)
	return us
}