server.go

/*
Copyright 2014 CoreOS, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package etcdserver

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
	"os"
	"path"
	"regexp"
	"sort"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/code.google.com/p/go.net/context"
	"github.com/coreos/etcd/discovery"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/pkg/wait"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/wal"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second

	StoreAdminPrefix = "/0"
	StoreKeysPrefix  = "/1"
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
	ErrRemoved       = errors.New("etcdserver: server removed")
	ErrIDRemoved     = errors.New("etcdserver: ID removed")
	ErrIDExists      = errors.New("etcdserver: ID exists")
	ErrIDNotFound    = errors.New("etcdserver: ID not found")
	ErrPeerURLexists = errors.New("etcdserver: peerURL exists")
	ErrCanceled      = errors.New("etcdserver: request cancelled")
	ErrTimeout       = errors.New("etcdserver: request timed out")

	storeMembersPrefix        = path.Join(StoreAdminPrefix, "members")
	storeRemovedMembersPrefix = path.Join(StoreAdminPrefix, "removed_members")

	storeMemberAttributeRegexp = regexp.MustCompile(path.Join(storeMembersPrefix, "[[:xdigit:]]{1,16}", attributesSuffix))
)

func init() {
	rand.Seed(time.Now().UnixNano())
}
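
// Response holds the result of applying a Request: the store Event or
// Watcher it produced, along with any error.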
type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}
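
// Sender is the interface the server uses to send raft messages to its peers
// and to keep its set of peers up to date as members are added, updated, and
// removed.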
type Sender interface {
	Send(m []raftpb.Message)
	Add(m *Member)
	Remove(id types.ID)
	Update(m *Member)
	Stop()
	ShouldStopNotify() <-chan struct{}
}

type Storage interface {
	// Save saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry) error
	// SaveSnap saves the snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot) error

	// TODO: WAL should be able to control cut itself. After implementing
	// self-controlled cut, remove this method from the interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
}

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// ID returns the ID of the Server.
	ID() types.ID
	// Do takes a request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member to the cluster. It will return
	// ErrIDRemoved if the member ID has been removed from the cluster, or
	// ErrIDExists if the member ID already exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if the member ID has been removed from the cluster,
	// or ErrIDNotFound if the member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
	// UpdateMember attempts to update an existing member in the cluster. It
	// will return ErrIDNotFound if the member ID does not exist.
	UpdateMember(ctx context.Context, updateMemb Member) error
}

type Stats interface {
	// SelfStats returns the struct representing the statistics of this server.
	SelfStats() []byte
	// LeaderStats returns the statistics of all followers in the cluster
	// if this server is the leader. Otherwise, nil is returned.
	LeaderStats() []byte
	// StoreStats returns the statistics of the store backing this EtcdServer.
	StoreStats() []byte
	// UpdateRecvApp updates the underlying statistics in response to receiving
	// an Append request.
	UpdateRecvApp(from types.ID, length int64)
}
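
// RaftTimer reports the latest raft index and term the server has seen.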
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	w          wait.Wait
	done       chan struct{}
	stop       chan struct{}
	id         types.ID
	attributes Attributes

	Cluster *Cluster

	node  raft.Node
	store store.Store

	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	// sender specifies the sender used to send messages to members. Sending
	// messages MUST NOT block. It is okay to drop messages, since clients
	// should time out and reissue their messages. If sender is nil, the
	// server will panic.
	sender Sender

	storage Storage

	Ticker     <-chan time.Time
	SyncTicker <-chan time.Time

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		return nil, fmt.Errorf("cannot create snapshot directory: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var id types.ID
	haveWAL := wal.Exist(cfg.WALDir())
	switch {
	case !haveWAL && !cfg.NewCluster:
		us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
		cl, err := GetClusterFromPeers(us)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := cfg.Cluster.ValidateAndAssignIDs(cl.Members()); err != nil {
			return nil, fmt.Errorf("error validating IDs from cluster %s: %v", cl, err)
		}
		cfg.Cluster.SetID(cl.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		if err := checkClientURLsEmptyFromPeers(cfg.Cluster, cfg.Name); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if cfg.ShouldDiscover() {
			s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
				return nil, err
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: warn: ignoring discovery: etcd has already been initialized and has a valid log in %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			log.Printf("etcdserver: recovering from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded peers from snapshot: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, w = restartNode(cfg, index, snapshot)
		} else {
			id, n, w = restartAsStandaloneNode(cfg, index, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}
	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())
	shub := newSendHub(cfg.Transport, cfg.Cluster, sstats, lstats)
	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         id,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:    cfg.Cluster,
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		stats:      sstats,
		lstats:     lstats,
		sender:     shub,
		Ticker:     time.Tick(100 * time.Millisecond),
		SyncTicker: time.Tick(500 * time.Millisecond),
		snapCount:  cfg.SnapCount,
	}
	return s, nil
}

// Start prepares and starts the server in a new goroutine. It is no longer
// safe to modify a server's fields after it has been sent to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}

// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify a server's fields after it has been sent to Start.
// This function is just used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	s.stop = make(chan struct{})
	s.stats.Initialize()
	// TODO: if this is an empty log, writes all peer infos
	// into the first entry
	go s.run()
}

func (s *EtcdServer) ID() types.ID { return s.id }

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.Cluster.IsIDRemoved(types.ID(m.From)) {
		return ErrRemoved
	}
	return s.node.Step(ctx, m)
}

func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi uint64
	var nodes []uint64
	var shouldstop bool
	shouldstopC := s.sender.ShouldStopNotify()

	defer func() {
		s.node.Stop()
		s.sender.Stop()
		close(s.done)
	}()
	for {
		select {
		case <-s.Ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				if rd.RaftState == raft.StateLeader {
					syncC = s.SyncTicker
				} else {
					syncC = nil
				}
			}

			if err := s.storage.Save(rd.HardState, rd.Entries); err != nil {
				log.Fatalf("etcdserver: save state and entries error: %v", err)
			}
			if err := s.storage.SaveSnap(rd.Snapshot); err != nil {
				log.Fatalf("etcdserver: create snapshot error: %v", err)
			}
			s.sender.Send(rd.Messages)

			// recover from snapshot if it is more recent than the currently applied index
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					log.Panicf("recovery store error: %v", err)
				}
				s.Cluster.Recover()
				appliedi = rd.Snapshot.Index
			}
			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			if len(rd.CommittedEntries) != 0 {
				firsti := rd.CommittedEntries[0].Index
				if appliedi == 0 {
					appliedi = firsti - 1
				}
				if firsti > appliedi+1 {
					log.Panicf("etcdserver: first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, appliedi)
				}
				var ents []raftpb.Entry
				if appliedi+1-firsti < uint64(len(rd.CommittedEntries)) {
					ents = rd.CommittedEntries[appliedi+1-firsti:]
				}
				if appliedi, shouldstop = s.apply(ents); shouldstop {
					return
				}
			}

			s.node.Advance()

			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}
			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-shouldstopC:
			return
		case <-s.stop:
			return
		}
	}
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after Start(s); otherwise it will block forever.
func (s *EtcdServer) Stop() {
	select {
	case s.stop <- struct{}{}:
	case <-s.done:
		return
	}
	<-s.done
}

// StopNotify returns a channel that receives an empty struct
// when the server is stopped.
func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		log.Panicf("request ID should never be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}

func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }

func (s *EtcdServer) LeaderStats() []byte {
	// TODO(jonboulle): need to lock access to lstats, set it to nil when not leader, ...
	return s.lstats.JSON()
}

func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }

func (s *EtcdServer) UpdateRecvApp(from types.ID, length int64) {
	s.stats.RecvAppendReq(from.String(), int(length))
}

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) UpdateMember(ctx context.Context, memb Member) error {
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeUpdateNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

// Implement the RaftTimer interface
func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.raftIndex) }

func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.raftTerm) }

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("return type should always be error")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return parseCtxErr(ctx.Err())
	case <-s.done:
		return ErrStopped
	}
}

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no promise that the node has a leader when it makes the SYNC
	// request, so the proposal is made in a goroutine to avoid blocking.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to cluster %s", s.attributes, s.Cluster.ID())
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}

func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}

// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
func (s *EtcdServer) apply(es []raftpb.Entry) (uint64, bool) {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			shouldstop, err := s.applyConfChange(cc)
			s.w.Trigger(cc.ID, err)
			if shouldstop {
				return applied, true
			}
		default:
			log.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied, false
}

// applyRequest interprets r as a call to store.X and returns a Response interpreted
// from store.Event
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			if storeMemberAttributeRegexp.MatchString(r.Path) {
				id := mustParseMemberIDFromKey(path.Dir(r.Path))
				var attr Attributes
				if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
					log.Panicf("unmarshal %s should never fail: %v", r.Val, err)
				}
				s.Cluster.UpdateMemberAttributes(id, attr)
			}
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}

// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft.
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange) (bool, error) {
	if err := s.Cluster.ValidateConfigurationChange(cc); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return false, err
	}
	s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			log.Panicf("unmarshal member should never fail: %v", err)
		}
		if cc.NodeID != uint64(m.ID) {
			log.Panicf("nodeID should always be equal to member ID")
		}
		s.Cluster.AddMember(m)
		if m.ID == s.id {
			log.Printf("etcdserver: added local member %s %v to cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		} else {
			s.sender.Add(m)
			log.Printf("etcdserver: added member %s %v to cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		}
	case raftpb.ConfChangeRemoveNode:
		id := types.ID(cc.NodeID)
		s.Cluster.RemoveMember(id)
		if id == s.id {
			log.Printf("etcdserver: removed local member %s from cluster %s", id, s.Cluster.ID())
			log.Println("etcdserver: the data-dir used by this member must be removed so that this host can be re-added with a new member ID")
			return true, nil
		} else {
			s.sender.Remove(id)
			log.Printf("etcdserver: removed member %s from cluster %s", id, s.Cluster.ID())
		}
	case raftpb.ConfChangeUpdateNode:
		m := new(Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			log.Panicf("unmarshal member should never fail: %v", err)
		}
		if cc.NodeID != uint64(m.ID) {
			log.Panicf("nodeID should always be equal to member ID")
		}
		s.Cluster.UpdateMember(m)
		if m.ID == s.id {
			log.Printf("etcdserver: update local member %s %v in cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		} else {
			s.sender.Update(m)
			log.Printf("etcdserver: update member %s %v in cluster %s", m.ID, m.PeerURLs, s.Cluster.ID())
		}
	}
	return false, nil
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		log.Panicf("store save should never fail: %v", err)
	}
	s.node.Compact(snapi, snapnodes, d)
	if err := s.storage.Cut(); err != nil {
		log.Panicf("rotate wal file should never fail: %v", err)
	}
}

// checkClientURLsEmptyFromPeers does its best to fetch the cluster from peers.
// If this succeeds, it checks that the member with the given name exists in
// that cluster and that its ClientURLs are empty.
func checkClientURLsEmptyFromPeers(cl *Cluster, name string) error {
	us := getOtherPeerURLs(cl, name)
	rcl, err := getClusterFromPeers(us, false)
	if err != nil {
		return nil
	}
	id := cl.MemberByName(name).ID
	m := rcl.Member(id)
	if m == nil {
		return nil
	}
	if len(m.ClientURLs) > 0 {
		return fmt.Errorf("etcdserver: member with id %s has started and registered its client urls", id)
	}
	return nil
}

// GetClusterFromPeers takes a set of URLs representing etcd peers, and
// attempts to construct a Cluster by accessing the members endpoint on one of
// these URLs. The first URL to provide a response is used. If no URLs provide
// a response, or a Cluster cannot be successfully created from a received
// response, an error is returned.
func GetClusterFromPeers(urls []string) (*Cluster, error) {
	return getClusterFromPeers(urls, true)
}

// If logerr is true, it prints out more error messages.
func getClusterFromPeers(urls []string, logerr bool) (*Cluster, error) {
	cc := &http.Client{
		Transport: &http.Transport{
			ResponseHeaderTimeout: 500 * time.Millisecond,
		},
		Timeout: time.Second,
	}
	for _, u := range urls {
		resp, err := cc.Get(u + "/members")
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not get cluster response from %s: %v", u, err)
			}
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not read the body of cluster response: %v", err)
			}
			continue
		}
		var membs []*Member
		if err := json.Unmarshal(b, &membs); err != nil {
			if logerr {
				log.Printf("etcdserver: could not unmarshal cluster response: %v", err)
			}
			continue
		}
		id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not parse the cluster ID from cluster res: %v", err)
			}
			continue
		}
		return NewClusterFromMembers("", id, membs), nil
	}
	return nil, fmt.Errorf("etcdserver: could not retrieve cluster information from the given urls")
}
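
// startNode creates a new WAL tagged with this member's ID and cluster ID,
// and starts a fresh raft node with the given member IDs as its initial peers.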
func startNode(cfg *ServerConfig, ids []types.ID) (id types.ID, n raft.Node, w *wal.WAL) {
	var err error
	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cfg.Cluster.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatalf("etcdserver: create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start member %s in cluster %s", id, cfg.Cluster.ID())
	n = raft.StartNode(uint64(id), peers, 10, 1)
	return
}

// getOtherPeerURLs returns peer urls of other members in the cluster. The
// returned list is sorted in ascending lexicographical order.
func getOtherPeerURLs(cl ClusterInfo, self string) []string {
	us := make([]string, 0)
	for _, m := range cl.Members() {
		if m.Name == self {
			continue
		}
		us = append(us, m.PeerURLs...)
	}
	sort.Strings(us)
	return us
}
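
// restartNode reopens the existing WAL at the given index and restarts the
// raft node from the hard state, entries, and snapshot recorded there.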
func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *wal.WAL) {
	w, id, cid, st, ents := readWAL(cfg.WALDir(), index)
	cfg.Cluster.SetID(cid)
	log.Printf("etcdserver: restart member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	n := raft.RestartNode(uint64(id), 10, 1, snapshot, st, ents)
	return id, n, w
}
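
// readWAL opens the WAL at the given index and returns the node ID, cluster
// ID, hard state, and entries it contains.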
func readWAL(waldir string, index uint64) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
	var err error
	if w, err = wal.OpenAtIndex(waldir, index); err != nil {
		log.Fatalf("etcdserver: open wal error: %v", err)
	}
	var wmetadata []byte
	if wmetadata, st, ents, err = w.ReadAll(); err != nil {
		log.Fatalf("etcdserver: read wal error: %v", err)
	}
	var metadata pb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	id = types.ID(metadata.NodeID)
	cid = types.ID(metadata.ClusterID)
	return
}

// TODO: move the function to /id pkg maybe?
// GenID generates a random id that is not equal to 0.
func GenID() (n uint64) {
	for n == 0 {
		n = uint64(rand.Int63())
	}
	return
}
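
// parseCtxErr maps a context error onto the corresponding etcdserver error.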
func parseCtxErr(err error) error {
	switch err {
	case context.Canceled:
		return ErrCanceled
	case context.DeadlineExceeded:
		return ErrTimeout
	default:
		return err
	}
}
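
// getBool dereferences v, also reporting whether it was set at all.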
func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}
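
// containsUint64 reports whether x is present in a.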
func containsUint64(a []uint64, x uint64) bool {
	for _, v := range a {
		if v == x {
			return true
		}
	}
	return false
}