server.go

/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcdserver

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
	"os"
	"path"
	"regexp"
	"sort"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
	"github.com/coreos/etcd/discovery"
	"github.com/coreos/etcd/etcdserver/etcdhttp/httptypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/migrate"
	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/pkg/wait"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/wal"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second

	StoreAdminPrefix = "/0"
	StoreKeysPrefix  = "/1"

	purgeFileInterval = 30 * time.Second
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
	ErrIDRemoved     = errors.New("etcdserver: ID removed")
	ErrIDExists      = errors.New("etcdserver: ID exists")
	ErrIDNotFound    = errors.New("etcdserver: ID not found")
	ErrPeerURLexists = errors.New("etcdserver: peerURL exists")
	ErrCanceled      = errors.New("etcdserver: request cancelled")
	ErrTimeout       = errors.New("etcdserver: request timed out")

	storeMembersPrefix        = path.Join(StoreAdminPrefix, "members")
	storeRemovedMembersPrefix = path.Join(StoreAdminPrefix, "removed_members")

	storeMemberAttributeRegexp = regexp.MustCompile(path.Join(storeMembersPrefix, "[[:xdigit:]]{1,16}", attributesSuffix))
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}

type Storage interface {
	// Save saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry) error
	// SaveSnap saves the snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot) error
	// TODO: WAL should be able to control cut itself. After implementing
	// self-controlled cut, remove Cut from this interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
	// Close closes the Storage and performs finalization.
	Close() error
}
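
// nopStorage is an illustrative sketch, not part of the original file: a
// minimal no-op Storage useful as a test stand-in. The real implementation
// (see NewServer) composes *wal.WAL and *snap.Snapshotter into one value.
type nopStorage struct{}

func (nopStorage) Save(st raftpb.HardState, ents []raftpb.Entry) error { return nil }
func (nopStorage) SaveSnap(snap raftpb.Snapshot) error                 { return nil }
func (nopStorage) Cut() error                                          { return nil }
func (nopStorage) Close() error                                        { return nil }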

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// ID returns the ID of the Server.
	ID() types.ID
	// Do takes a request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if the member ID has been removed from the cluster, or
	// ErrIDExists if the member ID already exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if the member ID has already been removed from the
	// cluster, or ErrIDNotFound if the member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
	// UpdateMember attempts to update an existing member in the cluster. It
	// will return ErrIDNotFound if the member ID does not exist.
	UpdateMember(ctx context.Context, updateMemb Member) error
}
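
// A typical Server lifecycle looks like the sketch below. This is
// illustrative only and not part of the original file; error handling is
// elided and the config value is an assumption.
//
//	srv, err := NewServer(cfg) // cfg is a populated *ServerConfig
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv.Start()
//	defer srv.Stop()
//	resp, _ := srv.Do(context.TODO(), pb.Request{ID: GenID(), Method: "GET", Path: "/"})
//	_ = resp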

type Stats interface {
	// SelfStats returns the struct representing statistics of this server
	SelfStats() []byte
	// LeaderStats returns the statistics of all followers in the cluster
	// if this server is leader. Otherwise, nil is returned.
	LeaderStats() []byte
	// StoreStats returns statistics of the store backing this EtcdServer
	StoreStats() []byte
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	cfg        *ServerConfig
	w          wait.Wait
	done       chan struct{}
	stop       chan struct{}
	id         types.ID
	attributes Attributes

	Cluster *Cluster

	node        raft.Node
	raftStorage *raft.MemoryStorage
	store       store.Store

	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	// sendhub specifies the sender used to send msgs to members. Sending
	// msgs MUST NOT block. It is okay to drop messages, since clients
	// should timeout and reissue their messages. If sendhub is nil, the
	// server will panic.
	sendhub SendHub

	storage Storage

	Ticker     <-chan time.Time
	SyncTicker <-chan time.Time

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
	raftLead  uint64
}

// UpgradeWAL converts an older version of the EtcdServer data to the newest version.
// It must ensure that, after upgrading, the most recent version is present.
func UpgradeWAL(cfg *ServerConfig, ver wal.WalVersion) error {
	if ver == wal.WALv0_4 {
		log.Print("Converting v0.4 log to v0.5")
		err := migrate.Migrate4To5(cfg.DataDir, cfg.Name)
		if err != nil {
			log.Fatalf("Failed migrating data-dir: %v", err)
			return err
		}
	}
	return nil
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID
	walVersion, err := wal.DetectVersion(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if walVersion == wal.WALUnknown {
		return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
	}
	haveWAL := walVersion != wal.WALNotExist
	if haveWAL && walVersion != wal.WALv0_5 {
		err := UpgradeWAL(cfg, walVersion)
		if err != nil {
			return nil, err
		}
	}
	ss := snap.New(cfg.SnapDir())
	switch {
	case !haveWAL && !cfg.NewCluster:
		us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
		existingCluster, err := GetClusterFromPeers(us)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		cfg.Cluster.SetID(existingCluster.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		if err := checkClientURLsEmptyFromPeers(cfg.Cluster, cfg.Name); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if cfg.ShouldDiscover() {
			s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
				return nil, err
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				log.Panicf("etcdserver: recovered store from snapshot error: %v", err)
			}
			log.Printf("etcdserver: recovered store from snapshot at index %d", snapshot.Metadata.Index)
			index = snapshot.Metadata.Index
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded cluster information from store: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, s, w = restartNode(cfg, index+1, snapshot)
		} else {
			id, n, s, w = restartAsStandaloneNode(cfg, index+1, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}
	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())
	srv := &EtcdServer{
		cfg:         cfg,
		store:       st,
		node:        n,
		raftStorage: s,
		id:          id,
		attributes:  Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:     cfg.Cluster,
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		stats:      sstats,
		lstats:     lstats,
		Ticker:     time.Tick(100 * time.Millisecond),
		SyncTicker: time.Tick(500 * time.Millisecond),
		snapCount:  cfg.SnapCount,
	}
	srv.sendhub = newSendHub(cfg.Transport, cfg.Cluster, srv, sstats, lstats)
	for _, m := range getOtherMembers(cfg.Cluster, cfg.Name) {
		srv.sendhub.Add(m)
	}
	return srv, nil
}

// Start prepares and starts the server in a new goroutine. It is no longer
// safe to modify a server's fields after it has been sent to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
	go s.purgeFile()
}

// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify a server's fields after it has been sent to start.
// This function is only used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	s.stop = make(chan struct{})
	s.stats.Initialize()
	// TODO: if this is an empty log, write all peer infos
	// into the first entry
	go s.run()
}

func (s *EtcdServer) purgeFile() {
	var serrc, werrc <-chan error
	if s.cfg.MaxSnapFiles > 0 {
		serrc = fileutil.PurgeFile(s.cfg.SnapDir(), "snap", s.cfg.MaxSnapFiles, purgeFileInterval, s.done)
	}
	if s.cfg.MaxWALFiles > 0 {
		werrc = fileutil.PurgeFile(s.cfg.WALDir(), "wal", s.cfg.MaxWALFiles, purgeFileInterval, s.done)
	}
	select {
	case e := <-werrc:
		log.Fatalf("etcdserver: failed to purge wal file %v", e)
	case e := <-serrc:
		log.Fatalf("etcdserver: failed to purge snap file %v", e)
	case <-s.done:
		return
	}
}

func (s *EtcdServer) ID() types.ID { return s.id }

func (s *EtcdServer) SenderFinder() rafthttp.SenderFinder { return s.sendhub }

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.Cluster.IsIDRemoved(types.ID(m.From)) {
		log.Printf("etcdserver: reject message from removed member %s", types.ID(m.From).String())
		return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
	}
	if m.Type == raftpb.MsgApp {
		s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
	}
	return s.node.Step(ctx, m)
}

func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	var shouldstop bool
	shouldstopC := s.sendhub.ShouldStopNotify()

	// load initial state from raft storage
	snap, err := s.raftStorage.Snapshot()
	if err != nil {
		log.Panicf("etcdserver: get snapshot from raft storage error: %v", err)
	}
	// snapi indicates the index of the last submitted snapshot request
	snapi := snap.Metadata.Index
	appliedi := snap.Metadata.Index
	confState := snap.Metadata.ConfState

	defer func() {
		s.node.Stop()
		s.sendhub.Stop()
		if err := s.storage.Close(); err != nil {
			log.Panicf("etcdserver: close storage error: %v", err)
		}
		close(s.done)
	}()
	for {
		select {
		case <-s.Ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				atomic.StoreUint64(&s.raftLead, rd.SoftState.Lead)
				if rd.RaftState == raft.StateLeader {
					syncC = s.SyncTicker
				} else {
					syncC = nil
				}
			}

			// apply snapshot to storage if it is more recent than the current snapi
			if !raft.IsEmptySnap(rd.Snapshot) && rd.Snapshot.Metadata.Index > snapi {
				if err := s.storage.SaveSnap(rd.Snapshot); err != nil {
					log.Fatalf("etcdserver: save snapshot error: %v", err)
				}
				s.raftStorage.ApplySnapshot(rd.Snapshot)
				snapi = rd.Snapshot.Metadata.Index
				log.Printf("etcdserver: saved incoming snapshot at index %d", snapi)
			}

			if err := s.storage.Save(rd.HardState, rd.Entries); err != nil {
				log.Fatalf("etcdserver: save state and entries error: %v", err)
			}
			s.raftStorage.Append(rd.Entries)
			s.sendhub.Send(rd.Messages)

			// recover from snapshot if it is more recent than the currently applied index
			if !raft.IsEmptySnap(rd.Snapshot) && rd.Snapshot.Metadata.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					log.Panicf("recovery store error: %v", err)
				}
				s.Cluster.Recover()
				appliedi = rd.Snapshot.Metadata.Index
				log.Printf("etcdserver: recovered from incoming snapshot at index %d", appliedi)
			}

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			if len(rd.CommittedEntries) != 0 {
				firsti := rd.CommittedEntries[0].Index
				if firsti > appliedi+1 {
					log.Panicf("etcdserver: first index of committed entry[%d] should be <= appliedi[%d] + 1", firsti, appliedi)
				}
				var ents []raftpb.Entry
				if appliedi+1-firsti < uint64(len(rd.CommittedEntries)) {
					ents = rd.CommittedEntries[appliedi+1-firsti:]
				}
				if len(ents) > 0 {
					if appliedi, shouldstop = s.apply(ents, &confState); shouldstop {
						return
					}
				}
			}

			s.node.Advance()

			if appliedi-snapi > s.snapCount {
				log.Printf("etcdserver: start to snapshot (applied: %d, lastsnap: %d)", appliedi, snapi)
				s.snapshot(appliedi, &confState)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-shouldstopC:
			return
		case <-s.stop:
			return
		}
	}
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after Start; otherwise it will block forever.
func (s *EtcdServer) Stop() {
	select {
	case s.stop <- struct{}{}:
	case <-s.done:
		return
	}
	<-s.done
}

// StopNotify returns a channel that receives an empty struct
// when the server is stopped.
func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
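
// As an illustrative sketch (not part of the original file), a caller can
// use StopNotify to block until the server has fully shut down, since the
// done channel is closed by run's deferred cleanup:
//
//	go srv.Stop()
//	<-srv.StopNotify()
//	// all server goroutines have exited at this point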

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		log.Panicf("request ID should never be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}
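
// quorumGet is an illustrative helper (not part of the original file) that
// shows how a caller drives Do: setting Quorum routes the read through
// consensus, and a timeout on ctx surfaces as ErrTimeout via parseCtxErr.
func quorumGet(ctx context.Context, s *EtcdServer, key string) (*store.Event, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	resp, err := s.Do(ctx, pb.Request{
		ID:     GenID(), // Do panics on a zero request ID
		Method: "GET",
		Path:   key,
		Quorum: true, // rewritten to "QGET" and proposed through raft
	})
	if err != nil {
		return nil, err
	}
	return resp.Event, nil
}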

func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }

func (s *EtcdServer) LeaderStats() []byte {
	// TODO(jonboulle): need to lock access to lstats, set it to nil when not leader, ...
	return s.lstats.JSON()
}

func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) UpdateMember(ctx context.Context, memb Member) error {
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeUpdateNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

// Index and Term implement the RaftTimer interface.
func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.raftIndex) }

func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.raftTerm) }

// Lead is only for testing purposes.
// TODO: add Raft server interface to expose raft related info:
// Index, Term, Lead, Committed, Applied, LastIndex, etc.
func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.raftLead) }

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("return type should always be error")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return parseCtxErr(ctx.Err())
	case <-s.done:
		return ErrStopped
	}
}
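
// Illustrative sketch (not part of the original file) of a conf-change round
// trip: AddMember builds a raftpb.ConfChange, configure proposes it through
// raft, and the loop in run applies it and triggers the registered wait:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	err := srv.AddMember(ctx, memb) // memb is a hypothetical Member value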

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no guarantee that the node has a leader when the SYNC
	// request is made, so propose from a goroutine to avoid blocking.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to cluster %s", s.attributes, s.Cluster.ID())
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}

func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}
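
// Illustrative note (not part of the original file): r.Expiration carries
// absolute nanoseconds since the Unix epoch; 0 means no expiration and
// yields the zero time.Time:
//
//	r := pb.Request{Expiration: time.Now().Add(time.Minute).UnixNano()}
//	_ = getExpirationTime(&r) // roughly one minute from now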

// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			shouldstop, err := s.applyConfChange(cc, confState)
			s.w.Trigger(cc.ID, err)
			if shouldstop {
				return applied, true
			}
		default:
			log.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied, false
}

// applyRequest interprets r as a call to store.X and returns a Response interpreted
// from store.Event
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			if storeMemberAttributeRegexp.MatchString(r.Path) {
				id := mustParseMemberIDFromKey(path.Dir(r.Path))
				var attr Attributes
				if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
					log.Panicf("unmarshal %s should never fail: %v", r.Val, err)
				}
				s.Cluster.UpdateMemberAttributes(id, attr)
			}
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}

// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft.
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
	if err := s.Cluster.ValidateConfigurationChange(cc); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return false, err
	}
	*confState = *s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			log.Panicf("unmarshal member should never fail: %v", err)
		}
		if cc.NodeID != uint64(m.ID) {
			log.Panicf("nodeID should always be equal to member ID")
		}
		s.Cluster.AddMember(m)
		if m.ID == s.id {
			log.Printf("etcdserver: added local member %s %v to cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		} else {
			s.sendhub.Add(m)
			log.Printf("etcdserver: added member %s %v to cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		}
	case raftpb.ConfChangeRemoveNode:
		id := types.ID(cc.NodeID)
		s.Cluster.RemoveMember(id)
		if id == s.id {
			log.Printf("etcdserver: removed local member %s from cluster %s", id, s.Cluster.ID())
			log.Println("etcdserver: the data-dir used by this member must be removed so that this host can be re-added with a new member ID")
			return true, nil
		} else {
			s.sendhub.Remove(id)
			log.Printf("etcdserver: removed member %s from cluster %s", id, s.Cluster.ID())
		}
	case raftpb.ConfChangeUpdateNode:
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			log.Panicf("unmarshal member should never fail: %v", err)
		}
		if cc.NodeID != uint64(m.ID) {
			log.Panicf("nodeID should always be equal to member ID")
		}
		s.Cluster.UpdateMember(m)
		if m.ID == s.id {
			log.Printf("etcdserver: updated local member %s %v in cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		} else {
			s.sendhub.Update(m)
			log.Printf("etcdserver: updated member %s %v in cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		}
	}
	return false, nil
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, confState *raftpb.ConfState) {
	d, err := s.store.Save()
	// TODO: the current store will never fail to do a snapshot.
	// What should we do if the store might fail?
	if err != nil {
		log.Panicf("etcdserver: store save should never fail: %v", err)
	}
	err = s.raftStorage.Compact(snapi, confState, d)
	if err != nil {
		// the snapshot was done asynchronously with the progress of raft.
		// raft might have already got a newer snapshot and called compact.
		if err == raft.ErrCompacted {
			return
		}
		log.Panicf("etcdserver: unexpected compaction error %v", err)
	}
	log.Printf("etcdserver: compacted log at index %d", snapi)

	if err := s.storage.Cut(); err != nil {
		log.Panicf("etcdserver: rotate wal file should never fail: %v", err)
	}
	snap, err := s.raftStorage.Snapshot()
	if err != nil {
		log.Panicf("etcdserver: snapshot error: %v", err)
	}
	if err := s.storage.SaveSnap(snap); err != nil {
		log.Fatalf("etcdserver: save snapshot error: %v", err)
	}
	log.Printf("etcdserver: saved snapshot at index %d", snap.Metadata.Index)
}

// for testing
func (s *EtcdServer) PauseSending() {
	hub := s.sendhub.(*sendHub)
	hub.pause()
}

func (s *EtcdServer) ResumeSending() {
	hub := s.sendhub.(*sendHub)
	hub.resume()
}

// checkClientURLsEmptyFromPeers does its best to get the cluster from peers,
// and if this succeeds, checks that the member with the given name exists in
// the cluster and that its ClientURLs are empty.
func checkClientURLsEmptyFromPeers(cl *Cluster, name string) error {
	us := getOtherPeerURLs(cl, name)
	rcl, err := getClusterFromPeers(us, false)
	if err != nil {
		return nil
	}
	id := cl.MemberByName(name).ID
	m := rcl.Member(id)
	if m == nil {
		return nil
	}
	if len(m.ClientURLs) > 0 {
		return fmt.Errorf("etcdserver: member with id %s has started and registered its client urls", id)
	}
	return nil
}

// GetClusterFromPeers takes a set of URLs representing etcd peers, and
// attempts to construct a Cluster by accessing the members endpoint on one of
// these URLs. The first URL to provide a response is used. If no URLs provide
// a response, or a Cluster cannot be successfully created from a received
// response, an error is returned.
func GetClusterFromPeers(urls []string) (*Cluster, error) {
	return getClusterFromPeers(urls, true)
}
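
// As an illustrative sketch (not part of the original file), a joining node
// can discover the existing cluster through any reachable peer; the peer URL
// below is an assumption:
//
//	cl, err := GetClusterFromPeers([]string{"http://10.0.0.1:2380"})
//	if err != nil {
//		log.Fatalf("no peer answered the /members query: %v", err)
//	}
//	log.Printf("joining cluster %s", cl.ID())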

// If logerr is true, it prints out more error messages.
func getClusterFromPeers(urls []string, logerr bool) (*Cluster, error) {
	cc := &http.Client{
		Transport: &http.Transport{
			ResponseHeaderTimeout: 500 * time.Millisecond,
		},
		Timeout: time.Second,
	}
	for _, u := range urls {
		resp, err := cc.Get(u + "/members")
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not get cluster response from %s: %v", u, err)
			}
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not read the body of cluster response: %v", err)
			}
			continue
		}
		var membs []*Member
		if err := json.Unmarshal(b, &membs); err != nil {
			if logerr {
				log.Printf("etcdserver: could not unmarshal cluster response: %v", err)
			}
			continue
		}
		id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not parse the cluster ID from cluster res: %v", err)
			}
			continue
		}
		return NewClusterFromMembers("", id, membs), nil
	}
	return nil, fmt.Errorf("etcdserver: could not retrieve cluster information from the given urls")
}

func startNode(cfg *ServerConfig, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cfg.Cluster.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatalf("etcdserver: create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start member %s in cluster %s", id, cfg.Cluster.ID())
	s = raft.NewMemoryStorage()
	n = raft.StartNode(uint64(id), peers, 10, 1, s)
	return
}

func getOtherMembers(cl ClusterInfo, self string) []*Member {
	var ms []*Member
	for _, m := range cl.Members() {
		if m.Name != self {
			ms = append(ms, m)
		}
	}
	return ms
}

// getOtherPeerURLs returns the peer URLs of all other members in the cluster.
// The returned list is sorted in ascending lexicographical order.
func getOtherPeerURLs(cl ClusterInfo, self string) []string {
	us := make([]string, 0)
	for _, m := range cl.Members() {
		if m.Name == self {
			continue
		}
		us = append(us, m.PeerURLs...)
	}
	sort.Strings(us)
	return us
}

func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	w, id, cid, st, ents := readWAL(cfg.WALDir(), index)
	cfg.Cluster.SetID(cid)
	log.Printf("etcdserver: restart member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	n := raft.RestartNode(uint64(id), 10, 1, s)
	return id, n, s, w
}

func readWAL(waldir string, index uint64) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
	var err error
	if w, err = wal.OpenAtIndex(waldir, index); err != nil {
		log.Fatalf("etcdserver: open wal error: %v", err)
	}
	var wmetadata []byte
	if wmetadata, st, ents, err = w.ReadAll(); err != nil {
		log.Fatalf("etcdserver: read wal error: %v", err)
	}
	var metadata pb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	id = types.ID(metadata.NodeID)
	cid = types.ID(metadata.ClusterID)
	return
}

// TODO: move the function to /id pkg maybe?
// GenID generates a random id that is not equal to 0.
func GenID() (n uint64) {
	for n == 0 {
		n = uint64(rand.Int63())
	}
	return
}
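
// Illustrative note (not part of the original file): GenID rerolls until the
// result is non-zero because a zero ID is reserved as "unset" — Do panics on
// r.ID == 0, and the wait registry keys pending requests by this ID:
//
//	id := GenID() // always > 0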

func parseCtxErr(err error) error {
	switch err {
	case context.Canceled:
		return ErrCanceled
	case context.DeadlineExceeded:
		return ErrTimeout
	default:
		return err
	}
}

func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}
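
// Illustrative sketch (not part of the original file): getBool decodes the
// tri-state *bool used by PrevExist in applyRequest — unset, false, and true
// select Set, Create, and Update respectively:
//
//	var unset *bool
//	t := true
//	getBool(unset) // (false, false): falls through to plain Set
//	getBool(&t)    // (true, true):   Update of an existing key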

func containsUint64(a []uint64, x uint64) bool {
	for _, v := range a {
		if v == x {
			return true
		}
	}
	return false
}