package etcdserver

import (
	"encoding/json"
	"errors"
	"log"
	"math/rand"
	"os"
	"path"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/discovery"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/snap"
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/third_party/code.google.com/p/go.net/context"
	"github.com/coreos/etcd/wait"
	"github.com/coreos/etcd/wal"
)

const (
	// owner can make/remove files inside the directory
	privateDirMode = 0700

	defaultSyncTimeout = time.Second
	DefaultSnapCount   = 10000
	// TODO: calculate based on heartbeat interval
	defaultPublishRetryInterval = 5 * time.Second
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped       = errors.New("etcdserver: server stopped")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

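// sendFunc delivers raft messages to the other members of the cluster; see
// the documentation of the EtcdServer.send field for the contract an
// implementation must satisfy.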
type sendFunc func(m []raftpb.Message)

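// Response is the result of a request: an Event for reads and writes that
// went through the store, or a Watcher for watch requests.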
type Response struct {
	Event   *store.Event
	Watcher store.Watcher
	err     error
}

type Storage interface {
	// Save function saves ents and state to the underlying stable storage.
	// Save MUST block until st and ents are on stable storage.
	Save(st raftpb.HardState, ents []raftpb.Entry)
	// SaveSnap function saves snapshot to the underlying stable storage.
	SaveSnap(snap raftpb.Snapshot)

	// TODO: WAL should be able to control cut itself. After implementing a
	// self-controlled cut, remove Cut from this interface.
	// Cut cuts out a new wal file for saving new state and entries.
	Cut() error
}

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// Do takes a request and attempts to fulfil it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error
}

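// As a rough usage sketch (not part of this file; it assumes a populated
// *ServerConfig named cfg and a context ctx), a caller drives a Server
// like this:
//
//	srv := NewServer(cfg)
//	srv.Start()
//	defer srv.Stop()
//	resp, err := srv.Do(ctx, pb.Request{Method: "GET", ID: GenID(), Path: "/foo"})

// RaftTimer reports the latest raft index and term the server has seen.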
type RaftTimer interface {
	Index() int64
	Term() int64
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	err := cfg.Verify()
	if err != nil {
		log.Fatalln(err)
	}
	snapdir := path.Join(cfg.DataDir, "snap")
	if err := os.MkdirAll(snapdir, privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(snapdir)
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	m := cfg.Cluster.FindName(cfg.Name)
	waldir := path.Join(cfg.DataDir, "wal")
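	// No WAL on disk means this member has never run before: establish the
	// cluster membership (via the discovery service when one is configured)
	// and start a fresh raft node.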
	if !wal.Exist(waldir) {
		if cfg.DiscoveryURL != "" {
			d, err := discovery.New(cfg.DiscoveryURL, m.ID, cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcd: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcd: %v", err)
			}
			if err = cfg.Cluster.Set(s); err != nil {
				log.Fatalf("etcd: %v", err)
			}
		} else if cfg.ClusterState != ClusterStateValueNew {
			log.Fatalf("etcd: initial cluster state unset and no wal or discovery URL found")
		}
		if w, err = wal.Create(waldir); err != nil {
			log.Fatal(err)
		}
		// TODO: add context for PeerURLs
		n = raft.StartNode(m.ID, cfg.Cluster.IDs(), 10, 1)
	} else {
		if cfg.DiscoveryURL != "" {
			log.Printf("etcd: warn: ignoring discovery URL: etcd has already been initialized and has a valid log in %q", waldir)
		}
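		// A WAL exists, so recover the previous state: restore the store
		// from the latest snapshot (if any), then replay the WAL entries
		// recorded after that snapshot's index.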
		var index int64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: restart from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		// restart a node from previous wal
		if w, err = wal.OpenAtIndex(waldir, index); err != nil {
			log.Fatal(err)
		}
		// hardState is the last persisted raft HardState; naming it avoids
		// shadowing the store st declared above.
		wid, hardState, ents, err := w.ReadAll()
		if err != nil {
			log.Fatal(err)
		}
		// TODO(xiangli): save/recovery nodeID?
		if wid != 0 {
			log.Fatalf("unexpected nodeid %d: nodeid should always be zero until we save nodeid into wal", wid)
		}
		n = raft.RestartNode(m.ID, cfg.Cluster.IDs(), 10, 1, snapshot, hardState, ents)
	}
	cls := NewClusterStore(st, *cfg.Cluster)
	s := &EtcdServer{
		store: st,
		node:  n,
		name:  cfg.Name,
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		send:         Sender(cfg.Transport, cls),
		clientURLs:   cfg.ClientURLs,
		ticker:       time.Tick(100 * time.Millisecond),
		syncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}

// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
	w            wait.Wait
	done         chan struct{}
	name         string
	clientURLs   types.URLs
	ClusterStore ClusterStore
	node         raft.Node
	store        store.Store
	// send specifies the send function for sending msgs to members. send
	// MUST NOT block. It is okay to drop messages, since clients should
	// timeout and reissue their messages. If send is nil, server will
	// panic.
	send       sendFunc
	storage    Storage
	ticker     <-chan time.Time
	syncTicker <-chan time.Time
	snapCount  int64 // number of entries to trigger a snapshot
	// Cache of the latest raft index and raft term the server has seen
	raftIndex int64
	raftTerm  int64
}

// Start prepares and starts server in a new goroutine. It is no longer safe
// to modify a server's fields after it has been passed to Start.
// It also starts a goroutine to publish its server information.
func (s *EtcdServer) Start() {
	s.start()
	go s.publish(defaultPublishRetryInterval)
}

// start prepares and starts server in a new goroutine. It is no longer safe
// to modify a server's fields after it has been passed to start.
// It is split from Start so that tests can run a server without publishing.
func (s *EtcdServer) start() {
	if s.snapCount == 0 {
		log.Printf("etcdserver: set snapshot count to default %d", DefaultSnapCount)
		s.snapCount = DefaultSnapCount
	}
	s.w = wait.New()
	s.done = make(chan struct{})
	// TODO: if this is an empty log, writes all peer infos
	// into the first entry
	go s.run()
}

func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	return s.node.Step(ctx, m)
}

func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi int64
	var nodes []int64
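	// Each raft Ready is handled in order: first persist the hard state and
	// entries, then relay outgoing messages, then apply the committed
	// entries to the store.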
	for {
		select {
		case <-s.ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			for _, e := range rd.CommittedEntries {
				switch e.Type {
				case raftpb.EntryNormal:
					var r pb.Request
					if err := r.Unmarshal(e.Data); err != nil {
						panic("TODO: this is bad, what do we do about it?")
					}
					s.w.Trigger(r.ID, s.applyRequest(r))
				case raftpb.EntryConfChange:
					var cc raftpb.ConfChange
					if err := cc.Unmarshal(e.Data); err != nil {
						panic("TODO: this is bad, what do we do about it?")
					}
					s.applyConfChange(cc)
					s.w.Trigger(cc.ID, nil)
				default:
					panic("unexpected entry type")
				}
				atomic.StoreInt64(&s.raftIndex, e.Index)
				atomic.StoreInt64(&s.raftTerm, e.Term)
				appliedi = e.Index
			}

			if rd.SoftState != nil {
				nodes = rd.SoftState.Nodes
				if rd.RaftState == raft.StateLeader {
					syncC = s.syncTicker
				} else {
					syncC = nil
				}
				if rd.SoftState.ShouldStop {
					s.Stop()
					return
				}
			}
			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}

			// recover from the snapshot if it is newer than the currently
			// applied index
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}

			if appliedi-snapi > s.snapCount {
				s.snapshot(appliedi, nodes)
				snapi = appliedi
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			return
		}
	}
}

// Stop stops the server and shuts down the running goroutine. Stop should
// only be called after Start; otherwise it will block forever.
func (s *EtcdServer) Stop() {
	s.node.Stop()
	close(s.done)
}

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there
// is an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		panic("r.ID cannot be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
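		// Park the caller on a channel keyed by the request ID; run()
		// triggers it with the apply result once the proposed entry
		// commits.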
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, ctx.Err()
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	default:
		return Response{}, ErrUnknownMethod
	}
}

func (s *EtcdServer) AddMember(ctx context.Context, memb Member) error {
	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return err
	}
	cc := raftpb.ConfChange{
		ID:      GenID(),
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  memb.ID,
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id int64) error {
	cc := raftpb.ConfChange{
		ID:     GenID(),
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

// Index implements the RaftTimer interface.
func (s *EtcdServer) Index() int64 {
	return atomic.LoadInt64(&s.raftIndex)
}

// Term implements the RaftTimer interface.
func (s *EtcdServer) Term() int64 {
	return atomic.LoadInt64(&s.raftTerm)
}

// configure sends a configuration change through consensus and then applies
// it. It blocks until the change is performed or an error occurs.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
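	// As in Do, the caller waits on a channel keyed by the proposal ID;
	// run() triggers it once the ConfChange entry has been applied.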
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		log.Printf("configure error: %v", err)
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be cancelled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	req := pb.Request{
		Method: "SYNC",
		ID:     GenID(),
		Time:   time.Now().UnixNano(),
	}
	data, err := req.Marshal()
	if err != nil {
		log.Printf("marshal request %#v error: %v", req, err)
		return
	}
	// There is no guarantee that the node has a leader when it makes a SYNC
	// request, so the proposal is made in a goroutine to avoid blocking.
	go func() {
		s.node.Propose(ctx, data)
		cancel()
	}()
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
// TODO: take care of info fetched from the cluster store after reconfiguration.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	m := *s.ClusterStore.Get().FindName(s.name)
	m.ClientURLs = s.clientURLs.StringSlice()
	b, err := json.Marshal(m)
	if err != nil {
		log.Printf("etcdserver: json marshal error: %v", err)
		return
	}
	req := pb.Request{
		ID:     GenID(),
		Method: "PUT",
		Path:   m.storeKey(),
		Val:    string(b),
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			log.Printf("etcdserver: published %+v to the cluster", m)
			return
		case ErrStopped:
			log.Printf("etcdserver: aborting publish because server is stopped")
			return
		default:
			log.Printf("etcdserver: publish error: %v", err)
		}
	}
}

func getExpirationTime(r *pb.Request) time.Time {
	var t time.Time
	if r.Expiration != 0 {
		t = time.Unix(0, r.Expiration)
	}
	return t
}

// applyRequest interprets r as a call to store.X and returns a Response
// interpreted from store.Event
func (s *EtcdServer) applyRequest(r pb.Request) Response {
	f := func(ev *store.Event, err error) Response {
		return Response{Event: ev, err: err}
	}
	expr := getExpirationTime(&r)
	switch r.Method {
	case "POST":
		return f(s.store.Create(r.Path, r.Dir, r.Val, true, expr))
	case "PUT":
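		// PUT has four variants: update-only when PrevExist is explicitly
		// true, create-only when it is explicitly false, compare-and-swap
		// when PrevIndex or PrevValue is given, and a plain set otherwise.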
		exists, existsSet := getBool(r.PrevExist)
		switch {
		case existsSet:
			if exists {
				return f(s.store.Update(r.Path, r.Val, expr))
			}
			return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
		default:
			return f(s.store.Set(r.Path, r.Dir, r.Val, expr))
		}
	case "DELETE":
		switch {
		case r.PrevIndex > 0 || r.PrevValue != "":
			return f(s.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
		default:
			return f(s.store.Delete(r.Path, r.Dir, r.Recursive))
		}
	case "QGET":
		return f(s.store.Get(r.Path, r.Recursive, r.Sorted))
	case "SYNC":
		s.store.DeleteExpiredKeys(time.Unix(0, r.Time))
		return Response{}
	default:
		// This should never be reached, but just in case:
		return Response{err: ErrUnknownMethod}
	}
}

func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange) {
	s.node.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		// TODO(yichengq): this is a hack and should be removed SOON.
		// Bootstrap writes addNode entries into the log without setting the
		// Context value. They don't need to be applied here because we now
		// apply them explicitly before the server starts. This hack makes
		// etcd work, and will be removed in a following PR.
		if cc.Context == nil {
			break
		}
		var m Member
		if err := json.Unmarshal(cc.Context, &m); err != nil {
			panic("unexpected unmarshal error")
		}
		if cc.NodeID != m.ID {
			panic("unexpected nodeID mismatch")
		}
		s.ClusterStore.Add(m)
	case raftpb.ConfChangeRemoveNode:
		s.ClusterStore.Remove(cc.NodeID)
	default:
		panic("unexpected ConfChange type")
	}
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi int64, snapnodes []int64) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		panic("TODO: this is bad, what do we do about it?")
	}
	s.node.Compact(snapi, snapnodes, d)
	s.storage.Cut()
}

// TODO: move the function to /id pkg maybe?
// GenID generates a random id that is not equal to 0.
func GenID() (n int64) {
	for n == 0 {
		n = rand.Int63()
	}
	return
}

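// getBool safely dereferences a *bool, additionally reporting whether the
// pointer was set at all.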
func getBool(v *bool) (vv bool, set bool) {
	if v == nil {
		return false, false
	}
	return *v, true
}