package etcdserver

import (
	"encoding/json"
	"errors"
	"log"
	"math/rand"
	"os"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/code.google.com/p/go.net/context"
	"github.com/coreos/etcd/discovery"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/wait"
	"github.com/coreos/etcd/wal"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
	ErrIDRemoved     = errors.New("etcdserver: ID removed")
	ErrIDExists      = errors.New("etcdserver: ID exists")
	ErrIDNotFound    = errors.New("etcdserver: ID not found")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

type sendFunc func(m []raftpb.Message)

type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}

type Storage interface {
	// Save saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry)
	// SaveSnap saves the snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot)
	// TODO: WAL should be able to control cut itself. After implementing
	// self-controlled cut, remove Cut from this interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
}

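// Editorial sketch (not part of the original file): a minimal no-op
// implementation of the Storage interface above, the kind of stand-in a test
// might use. The name nopStorage is hypothetical; the real server composes
// *wal.WAL and *snap.Snapshotter into a Storage in NewServer below.
type nopStorage struct{}

// Save discards the hard state and entries instead of persisting them.
func (nopStorage) Save(st raftpb.HardState, ents []raftpb.Entry) {}

// SaveSnap discards the snapshot instead of persisting it.
func (nopStorage) SaveSnap(snap raftpb.Snapshot) {}

// Cut is a no-op since there is no WAL file to roll over.
func (nopStorage) Cut() error { return nil }
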
type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// Do takes a request and attempts to fulfil it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if member ID is removed from the cluster, or return
	// ErrIDExists if member ID exists in the cluster.
	AddMember(ctx context.Context, memb Member) error
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if member ID is removed from the cluster, or return
	// ErrIDNotFound if member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) error
}

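// Editorial sketch (not part of the original file): a hypothetical caller of
// Server.Do illustrating the contract documented above. The method, path, and
// value are made up for illustration; the request ID must be non-zero, so
// GenID (defined later in this file) is used.
func exampleDo(ctx context.Context, srv Server) (Response, error) {
	// Bound the call with a timeout so Do cannot block indefinitely while
	// the request goes through consensus.
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	return srv.Do(ctx, pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   "/example-key",
		Val:    "example-value",
	})
}
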
type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	w          wait.Wait
	done       chan struct{}
	id         uint64
	attributes Attributes

	ClusterStore ClusterStore

	node  raft.Node
	store store.Store

	// send specifies the send function for sending msgs to members. send
	// MUST NOT block. It is okay to drop messages, since clients should
	// timeout and reissue their messages. If send is nil, server will
	// panic.
	send sendFunc

	storage Storage

	ticker     <-chan time.Time
	syncTicker <-chan time.Time

	snapCount uint64 // number of entries to trigger a snapshot

	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	if err := cfg.Verify(); err != nil {
		log.Fatalln(err)
	}
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	if !wal.Exist(cfg.WALDir()) {
		if !cfg.IsBootstrap() {
			log.Fatalf("etcdserver: initial cluster state unset and no wal or discovery URL found")
		}
		if cfg.ShouldDiscover() {
			d, err := discovery.New(cfg.DiscoveryURL, cfg.ID(), cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcdserver: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
			if err = cfg.Cluster.Set(s); err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
		}
		n, w = startNode(cfg)
	} else {
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: warn: ignoring discovery: etcd has already been initialized and has a valid log in %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: recovering from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		n, w = restartNode(cfg, index, snapshot)
	}
	cls := &clusterStore{Store: st}
	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         cfg.ID(),
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		send:         Sender(cfg.Transport, cls),
		ticker:       time.Tick(100 * time.Millisecond),
		syncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}

// Start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after it has been passed to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}

// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after it has been passed to start.
// This function is primarily used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	// TODO: if this is an empty log, write all peer infos into the first entry
	go s.run()
}

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	return s.node.Step(ctx, m)
}

func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi uint64
	var nodes, removedNodes []uint64
	for {
		select {
		case <-s.ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				removedNodes = rd.SoftState.RemovedNodes
				if rd.RaftState == raft.StateLeader {
					syncC = s.syncTicker
				} else {
					syncC = nil
				}
				if rd.SoftState.ShouldStop {
					s.Stop()
					return
				}
			}
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)
			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			if len(rd.CommittedEntries) != 0 {
				appliedi = s.apply(rd.CommittedEntries, nodes, removedNodes)
			}
			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}
			// recover from the snapshot if it is more recent than the
			// currently applied index
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}
			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			return
		}
	}
}

// Stop stops the server and shuts down the running goroutine. Stop should
// only be called after Start; otherwise it will block forever.
func (s *EtcdServer) Stop() {
	s.node.Stop()
	close(s.done)
}

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		panic("r.ID cannot be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, ctx.Err()
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	default:
		return Response{}, ErrUnknownMethod
	}
}

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  memb.ID,
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

// Implement the RaftTimer interface
func (s *EtcdServer) Index() uint64 {
	return atomic.LoadUint64(&s.raftIndex)
}

func (s *EtcdServer) Term() uint64 {
	return atomic.LoadUint64(&s.raftTerm)
}

// configure sends a configuration change through consensus and then performs
// it. It will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		log.Printf("configure error: %v", err)
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("unexpected return type")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no guarantee that the node has a leader when the SYNC request
	// is made, so propose in a goroutine to avoid blocking the caller.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   Member{ID: s.id}.storeKey() + attributesSuffix,
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to the cluster", s.attributes)
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}

func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}

func (s *EtcdServer) apply(es []raftpb.Entry, nodes, removedNodes []uint64) uint64 {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			s.w.Trigger(cc.ID, s.applyConfChange(cc, nodes, removedNodes))
		default:
			panic("unexpected entry type")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied
}

// applyRequest interprets r as a call to store.X and returns a Response interpreted
// from store.Event
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}

func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, nodes, removedNodes []uint64) error {
	if err := checkConfChange(cc, nodes, removedNodes); err != nil {
		cc.NodeID = raft.None
		s.node.ApplyConfChange(cc)
		return err
	}
	s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		var m Member
		if err := json.Unmarshal(cc.Context, &m); err != nil {
			panic("unexpected unmarshal error")
		}
		if cc.NodeID != m.ID {
			panic("unexpected nodeID mismatch")
		}
		s.ClusterStore.Add(m)
	case raftpb.ConfChangeRemoveNode:
		s.ClusterStore.Remove(cc.NodeID)
	}
	return nil
}

func checkConfChange(cc raftpb.ConfChange, nodes, removedNodes []uint64) error {
	if containsUint64(removedNodes, cc.NodeID) {
		return ErrIDRemoved
	}
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		if containsUint64(nodes, cc.NodeID) {
			return ErrIDExists
		}
	case raftpb.ConfChangeRemoveNode:
		if !containsUint64(nodes, cc.NodeID) {
			return ErrIDNotFound
		}
	default:
		panic("unexpected ConfChange type")
	}
	return nil
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		panic("TODO: this is bad, what do we do about it?")
	}
	s.node.Compact(snapi, snapnodes, d)
	s.storage.Cut()
}

func startNode(cfg *ServerConfig) (n raft.Node, w *wal.WAL) {
	var err error
	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: cfg.ID()})
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatal(err)
	}
	ids := cfg.Cluster.IDs()
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster)[id])
		if err != nil {
			log.Fatal(err)
		}
		peers[i] = raft.Peer{ID: id, Context: ctx}
	}
	log.Printf("etcdserver: start node %d", cfg.ID())
	n = raft.StartNode(cfg.ID(), peers, 10, 1)
	return
}

func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (n raft.Node, w *wal.WAL) {
	var err error
	// restart a node from previous wal
	if w, err = wal.OpenAtIndex(cfg.WALDir(), index); err != nil {
		log.Fatal(err)
	}
	wmetadata, st, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata pb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	log.Printf("etcdserver: restart node %d at commit index %d", metadata.NodeID, st.Commit)
	n = raft.RestartNode(metadata.NodeID, 10, 1, snapshot, st, ents)
	return
}

// TODO: move the function to /id pkg maybe?
// GenID generates a random id that is not equal to 0.
func GenID() (n uint64) {
	for n == 0 {
		n = uint64(rand.Int63())
	}
	return
}

func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}

func containsUint64(a []uint64, x uint64) bool {
	for _, v := range a {
		if v == x {
			return true
		}
	}
	return false
}