server.go

package etcdserver

import (
	"encoding/json"
	"errors"
	"log"
	"math/rand"
	"os"
	"path"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/discovery"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/third_party/code.google.com/p/go.net/context"
	"github.com/coreos/etcd/wait"
	"github.com/coreos/etcd/wal"
)
const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
)
func init() {
	rand.Seed(time.Now().UnixNano())
}

type sendFunc func(m []raftpb.Message)

type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}
type Storage interface {
	// Save function saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry)
	// SaveSnap function saves snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot)
	// TODO: WAL should be able to control cutting itself. After implementing
	// self-controlled cutting, remove Cut from this interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
}
type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// Do takes a request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
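// A minimal sketch of bringing one up, assuming cfg is an already-populated
// ServerConfig:
//
//	s := NewServer(cfg)
//	s.Start()
//	defer s.Stop()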
func NewServer(cfg *ServerConfig) *EtcdServer {
	err := cfg.Verify()
	if err != nil {
		log.Fatalln(err)
	}
	snapdir := path.Join(cfg.DataDir, "snap")
	if err := os.MkdirAll(snapdir, privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(snapdir)
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	m := cfg.Cluster.FindName(cfg.Name)
	waldir := path.Join(cfg.DataDir, "wal")
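	// A fresh start (no WAL on disk) bootstraps raft with the full peer
	// list, optionally resolving it through discovery first; a restart
	// recovers the store from the latest snapshot and replays the WAL
	// entries recorded after it.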
	if !wal.Exist(waldir) {
		if cfg.DiscoveryURL != "" {
			d, err := discovery.New(cfg.DiscoveryURL, m.ID, cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcd: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcd: %v", err)
			}
			if err = cfg.Cluster.Set(s); err != nil {
				log.Fatalf("etcd: %v", err)
			}
		} else if cfg.ClusterState != ClusterStateValueNew {
			log.Fatalf("etcd: initial cluster state unset and no wal or discovery URL found")
		}
		i := pb.Info{ID: m.ID}
		b, err := i.Marshal()
		if err != nil {
			log.Fatal(err)
		}
		if w, err = wal.Create(waldir, b); err != nil {
			log.Fatal(err)
		}
		ids := cfg.Cluster.IDs()
		peers := make([]raft.Peer, len(ids))
		for i, id := range ids {
			ctx, err := json.Marshal((*cfg.Cluster)[id])
			if err != nil {
				log.Fatal(err)
			}
			peers[i] = raft.Peer{ID: id, Context: ctx}
		}
		n = raft.StartNode(m.ID, peers, 10, 1)
	} else {
		if cfg.DiscoveryURL != "" {
			log.Printf("etcd: warn: ignoring discovery URL: etcd has already been initialized and has a valid log in %q", waldir)
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: restart from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		// restart a node from the previous wal
		if w, err = wal.OpenAtIndex(waldir, index); err != nil {
			log.Fatal(err)
		}
		// hardState is named to avoid shadowing the store st above
		md, hardState, ents, err := w.ReadAll()
		if err != nil {
			log.Fatal(err)
		}
		var info pb.Info
		if err := info.Unmarshal(md); err != nil {
			log.Fatal(err)
		}
		// TODO(xiangli): save/recovery nodeID?
		if info.ID != m.ID {
			log.Fatalf("unexpected nodeid %x, want %x: nodeid should always be the same until we support name/peerURLs update or dynamic configuration", info.ID, m.ID)
		}
		n = raft.RestartNode(m.ID, 10, 1, snapshot, hardState, ents)
	}
	cls := &clusterStore{Store: st}
	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         m.ID,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		send:         Sender(cfg.Transport, cls),
		ticker:       time.Tick(100 * time.Millisecond),
		syncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}
// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	// w matches in-flight proposals with the results of applying them
	w wait.Wait
	// done is closed by Stop to halt the run loop
	done         chan struct{}
	id           uint64
	attributes   Attributes
	ClusterStore ClusterStore
	node         raft.Node
	store        store.Store
	// send specifies the send function for sending msgs to members. send
	// MUST NOT block. It is okay to drop messages, since clients should
	// timeout and reissue their messages. If send is nil, server will
	// panic.
	send       sendFunc
	storage    Storage
	ticker     <-chan time.Time
	syncTicker <-chan time.Time
	snapCount  uint64 // number of entries to trigger a snapshot
	// Cache of the latest raft index and raft term the server has seen
	raftIndex uint64
	raftTerm  uint64
}
// Start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after it has been passed to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}
// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify the server's fields after it has been passed to start.
// This function is just used for testing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	// TODO: if this is an empty log, write all peer infos
	// into the first entry
	go s.run()
}

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	return s.node.Step(ctx, m)
}
func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi uint64
	var nodes []uint64
	for {
		select {
		case <-s.ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
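			// Per the raft contract, entries and hard state must be
			// on stable storage before the corresponding messages
			// are sent to peers, hence Save/SaveSnap come before send.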
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)
			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			for _, e := range rd.CommittedEntries {
				switch e.Type {
				case raftpb.EntryNormal:
					var r pb.Request
					if err := r.Unmarshal(e.Data); err != nil {
						panic("TODO: this is bad, what do we do about it?")
					}
					s.w.Trigger(r.ID, s.applyRequest(r))
				case raftpb.EntryConfChange:
					var cc raftpb.ConfChange
					if err := cc.Unmarshal(e.Data); err != nil {
						panic("TODO: this is bad, what do we do about it?")
					}
					s.applyConfChange(cc)
					s.w.Trigger(cc.ID, nil)
				default:
					panic("unexpected entry type")
				}
				atomic.StoreUint64(&s.raftIndex, e.Index)
				atomic.StoreUint64(&s.raftTerm, e.Term)
				appliedi = e.Index
			}
			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				if rd.RaftState == raft.StateLeader {
					syncC = s.syncTicker
				} else {
					syncC = nil
				}
				if rd.SoftState.ShouldStop {
					s.Stop()
					return
				}
			}
			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}
			// recover from snapshot if it is newer than the currently
			// applied index
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}
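			// Trigger a snapshot once snapCount entries have been
			// applied since the last snapshot index.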
			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			return
		}
	}
}
// Stop stops the server, and shuts down the running goroutine. Stop should
// only be called after Start; otherwise it will block forever.
func (s *EtcdServer) Stop() {
	s.node.Stop()
	close(s.done)
}
// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
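// A typical quorum write looks like (sketch; GenID supplies the required
// non-zero request ID):
//
//	resp, err := s.Do(ctx, pb.Request{Method: "PUT", ID: GenID(), Path: "/foo", Val: "bar"})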
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		panic("r.ID cannot be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
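		// The wait channel is keyed by the request ID; run's apply
		// loop triggers it with the Response once the proposal
		// commits and is applied to the store.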
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, ctx.Err()
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	default:
		return Response{}, ErrUnknownMethod
	}
}
func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  memb.ID,
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}
// Index and Term implement the RaftTimer interface.
func (s *EtcdServer) Index() uint64 {
	return atomic.LoadUint64(&s.raftIndex)
}

func (s *EtcdServer) Term() uint64 {
	return atomic.LoadUint64(&s.raftTerm)
}
// configure sends a configuration change through consensus and then performs
// it. It will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
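	// Same wait pattern as Do: the apply loop triggers cc.ID once the
	// conf change has been applied.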
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		log.Printf("configure error: %v", err)
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}
// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data, err := req.Marshal()
	if err != nil {
		log.Printf("marshal request %#v error: %v", req, err)
		return
	}
	// There is no guarantee that the node has a leader when the SYNC
	// request is made, and Propose may block until one exists, so propose
	// from a goroutine to keep the caller non-blocking.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   Member{ID: s.id}.storeKey() + attributesSuffix,
		Val:    string(b),
	}
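	// The same request, including its generated ID, is reused across
	// retries; only the timeout context is fresh on each attempt.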
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to the cluster", s.attributes)
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}
func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}
// applyRequest interprets r as a call to store.X and returns a Response
// built from the resulting store.Event.
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange) {
	s.node.ApplyConfChange(cc)
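	// The change has already been committed through raft; apply it to the
	// local raft node, then mirror it in the cluster store that the
	// sender was built from.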
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		var m Member
		if err := json.Unmarshal(cc.Context, &m); err != nil {
			panic("unexpected unmarshal error")
		}
		if cc.NodeID != m.ID {
			panic("unexpected nodeID mismatch")
		}
		s.ClusterStore.Add(m)
	case raftpb.ConfChangeRemoveNode:
		s.ClusterStore.Remove(cc.NodeID)
	default:
		panic("unexpected ConfChange type")
	}
}
// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		panic("TODO: this is bad, what do we do about it?")
	}
	s.node.Compact(snapi, snapnodes, d)
	s.storage.Cut()
}
// TODO: move the function to /id pkg maybe?
// GenID generates a random id that is not equal to 0.
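// Zero is reserved: Do panics on a request with ID 0, so GenID loops until
// it draws a non-zero value.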
func GenID() (n uint64) {
	for n == 0 {
		n = uint64(rand.Int63())
	}
	return
}

func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}