server.go

  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package etcdserver
  15. import (
  16. "encoding/json"
  17. "expvar"
  18. "fmt"
  19. "math"
  20. "math/rand"
  21. "net/http"
  22. "os"
  23. "path"
  24. "path/filepath"
  25. "regexp"
  26. "sync"
  27. "sync/atomic"
  28. "time"
  29. "github.com/coreos/etcd/alarm"
  30. "github.com/coreos/etcd/auth"
  31. "github.com/coreos/etcd/compactor"
  32. "github.com/coreos/etcd/discovery"
  33. "github.com/coreos/etcd/etcdserver/api"
  34. "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
  35. pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
  36. "github.com/coreos/etcd/etcdserver/membership"
  37. "github.com/coreos/etcd/etcdserver/stats"
  38. "github.com/coreos/etcd/lease"
  39. "github.com/coreos/etcd/mvcc"
  40. "github.com/coreos/etcd/mvcc/backend"
  41. "github.com/coreos/etcd/pkg/fileutil"
  42. "github.com/coreos/etcd/pkg/idutil"
  43. "github.com/coreos/etcd/pkg/pbutil"
  44. "github.com/coreos/etcd/pkg/runtime"
  45. "github.com/coreos/etcd/pkg/schedule"
  46. "github.com/coreos/etcd/pkg/types"
  47. "github.com/coreos/etcd/pkg/wait"
  48. "github.com/coreos/etcd/raft"
  49. "github.com/coreos/etcd/raft/raftpb"
  50. "github.com/coreos/etcd/rafthttp"
  51. "github.com/coreos/etcd/snap"
  52. "github.com/coreos/etcd/store"
  53. "github.com/coreos/etcd/version"
  54. "github.com/coreos/etcd/wal"
  55. "github.com/coreos/go-semver/semver"
  56. "github.com/coreos/pkg/capnslog"
  57. "golang.org/x/net/context"
  58. )
  59. const (
  60. DefaultSnapCount = 100000
  61. StoreClusterPrefix = "/0"
  62. StoreKeysPrefix = "/1"
  63. // HealthInterval is the minimum time the cluster should be healthy
  64. // before accepting add member requests.
  65. HealthInterval = 5 * time.Second
  66. purgeFileInterval = 30 * time.Second
  67. // monitorVersionInterval should be smaller than the timeout
  68. // on the connection; otherwise we will not be able to reuse the connection
  69. // (since it will have timed out).
  70. monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
  71. databaseFilename = "db"
  72. // maxInFlightMsgSnap is the maximum number of in-flight snapshot messages etcdserver allows.
  73. // This number is more than enough for most clusters with 5 machines.
  74. maxInFlightMsgSnap = 16
  75. releaseDelayAfterSnapshot = 30 * time.Second
  76. // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
  77. maxPendingRevokes = 16
  78. )
  79. var (
  80. plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
  81. storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
  82. )
  83. func init() {
  84. rand.Seed(time.Now().UnixNano())
  85. expvar.Publish(
  86. "file_descriptor_limit",
  87. expvar.Func(
  88. func() interface{} {
  89. n, _ := runtime.FDLimit()
  90. return n
  91. },
  92. ),
  93. )
  94. }
  95. type Response struct {
  96. Event *store.Event
  97. Watcher store.Watcher
  98. err error
  99. }
  100. type Server interface {
  101. // Start performs any initialization of the Server necessary for it to
  102. // begin serving requests. It must be called before Do or Process.
  103. // Start must be non-blocking; any long-running server functionality
  104. // should be implemented in goroutines.
  105. Start()
  106. // Stop terminates the Server and performs any necessary finalization.
  107. // Do and Process cannot be called after Stop has been invoked.
  108. Stop()
  109. // ID returns the ID of the Server.
  110. ID() types.ID
  111. // Leader returns the ID of the leader Server.
  112. Leader() types.ID
  113. // Do takes a request and attempts to fulfill it, returning a Response.
  114. Do(ctx context.Context, r pb.Request) (Response, error)
  115. // Process takes a raft message and applies it to the server's raft state
  116. // machine, respecting any timeout of the given context.
  117. Process(ctx context.Context, m raftpb.Message) error
  118. // AddMember attempts to add a member into the cluster. It will return
  119. // ErrIDRemoved if member ID is removed from the cluster, or return
  120. // ErrIDExists if member ID exists in the cluster.
  121. AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
  122. // RemoveMember attempts to remove a member from the cluster. It will
  123. // return ErrIDRemoved if member ID is removed from the cluster, or return
  124. // ErrIDNotFound if member ID is not in the cluster.
  125. RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
  126. // UpdateMember attempts to update an existing member in the cluster. It will
  127. // return ErrIDNotFound if the member ID does not exist.
  128. UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
  129. // ClusterVersion is the cluster-wide minimum major.minor version.
  130. // Cluster version is set to the min version that an etcd member is
  131. // compatible with when it first bootstraps.
  132. //
  133. // ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
  134. //
  135. // During a rolling upgrade, the ClusterVersion will be updated
  136. // automatically after a sync (every 5 seconds by default).
  137. //
  138. // The API/raft component can utilize ClusterVersion to determine if
  139. // it can accept a client request or a raft RPC.
  140. // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
  141. // the leader is etcd 2.0. An etcd 2.0 leader will not update the clusterVersion since
  142. // this feature was introduced after 2.0.
  143. ClusterVersion() *semver.Version
  144. }
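
// exampleServerUsage is an illustrative sketch (hypothetical; not part of etcd)
// of how a caller is expected to drive the Server interface: Start before
// issuing requests, Do for v2-style requests, Stop on shutdown.
func exampleServerUsage(ctx context.Context, srv Server) (Response, error) {
    srv.Start()
    defer srv.Stop()
    // "GET /foo" is a made-up request, purely for illustration.
    return srv.Do(ctx, pb.Request{Method: "GET", Path: "/foo"})
}
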
  145. // EtcdServer is the production implementation of the Server interface
  146. type EtcdServer struct {
  147. // inflightSnapshots holds the number of snapshots currently in flight.
  148. inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
  149. appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
  150. committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
  151. // consistIndex holds the offset of the currently executing entry.
  152. // It is initialized to 0 before any entry is executed.
  153. consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
  154. Cfg *ServerConfig
  155. readych chan struct{}
  156. r raftNode
  157. snapCount uint64
  158. w wait.Wait
  159. readMu sync.RWMutex
  160. // the read routine notifies the etcd server that it is waiting to read by sending an empty struct to
  161. // readwaitc
  162. readwaitc chan struct{}
  163. // readNotifier is used to notify the read routine that it can process the request
  164. // when there is no error
  165. readNotifier *notifier
  166. // stop signals the run goroutine should shutdown.
  167. stop chan struct{}
  168. // stopping is closed by run goroutine on shutdown.
  169. stopping chan struct{}
  170. // done is closed when all goroutines from start() complete.
  171. done chan struct{}
  172. errorc chan error
  173. id types.ID
  174. attributes membership.Attributes
  175. cluster *membership.RaftCluster
  176. store store.Store
  177. applyV2 ApplierV2
  178. // applyV3 is the applier with auth and quotas
  179. applyV3 applierV3
  180. // applyV3Base is the core applier without auth or quotas
  181. applyV3Base applierV3
  182. applyWait wait.WaitTime
  183. kv mvcc.ConsistentWatchableKV
  184. lessor lease.Lessor
  185. bemu sync.Mutex
  186. be backend.Backend
  187. authStore auth.AuthStore
  188. alarmStore *alarm.AlarmStore
  189. stats *stats.ServerStats
  190. lstats *stats.LeaderStats
  191. SyncTicker *time.Ticker
  192. // compactor is used to auto-compact the KV.
  193. compactor *compactor.Periodic
  194. // peerRt is used to send requests (version, lease) to peers.
  195. peerRt http.RoundTripper
  196. reqIDGen *idutil.Generator
  197. // forceVersionC is used to force the version monitor loop
  198. // to detect the cluster version immediately.
  199. forceVersionC chan struct{}
  200. // wgMu blocks concurrent waitgroup mutation while the server is stopping.
  201. wgMu sync.RWMutex
  202. // wg is used to wait for the goroutines that depend on the server state
  203. // to exit when stopping the server.
  204. wg sync.WaitGroup
  205. // ctx is used for etcd-initiated requests that may need to be canceled
  206. // on etcd server shutdown.
  207. ctx context.Context
  208. cancel context.CancelFunc
  209. leadTimeMu sync.RWMutex
  210. leadElectedTime time.Time
  211. }
  212. // NewServer creates a new EtcdServer from the supplied configuration. The
  213. // configuration is considered static for the lifetime of the EtcdServer.
  214. func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
  215. st := store.New(StoreClusterPrefix, StoreKeysPrefix)
  216. var (
  217. w *wal.WAL
  218. n raft.Node
  219. s *raft.MemoryStorage
  220. id types.ID
  221. cl *membership.RaftCluster
  222. )
  223. if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
  224. return nil, fmt.Errorf("cannot access data directory: %v", terr)
  225. }
  226. haveWAL := wal.Exist(cfg.WALDir())
  227. if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
  228. plog.Fatalf("create snapshot directory error: %v", err)
  229. }
  230. ss := snap.New(cfg.SnapDir())
  231. bepath := filepath.Join(cfg.SnapDir(), databaseFilename)
  232. beExist := fileutil.Exist(bepath)
  233. var be backend.Backend
  234. beOpened := make(chan struct{})
  235. go func() {
  236. be = newBackend(bepath, cfg.QuotaBackendBytes)
  237. beOpened <- struct{}{}
  238. }()
  239. select {
  240. case <-beOpened:
  241. case <-time.After(time.Second):
  242. plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
  243. plog.Warningf("waiting for it to exit before starting...")
  244. <-beOpened
  245. }
  246. defer func() {
  247. if err != nil {
  248. be.Close()
  249. }
  250. }()
  251. prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
  252. if err != nil {
  253. return nil, err
  254. }
  255. var (
  256. remotes []*membership.Member
  257. snapshot *raftpb.Snapshot
  258. )
  259. switch {
  260. case !haveWAL && !cfg.NewCluster:
  261. if err = cfg.VerifyJoinExisting(); err != nil {
  262. return nil, err
  263. }
  264. cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
  265. if err != nil {
  266. return nil, err
  267. }
  268. existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
  269. if gerr != nil {
  270. return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
  271. }
  272. if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
  273. return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
  274. }
  275. if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
  276. return nil, fmt.Errorf("incompatible with current running cluster")
  277. }
  278. remotes = existingCluster.Members()
  279. cl.SetID(existingCluster.ID())
  280. cl.SetStore(st)
  281. cl.SetBackend(be)
  282. cfg.Print()
  283. id, n, s, w = startNode(cfg, cl, nil)
  284. case !haveWAL && cfg.NewCluster:
  285. if err = cfg.VerifyBootstrap(); err != nil {
  286. return nil, err
  287. }
  288. cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
  289. if err != nil {
  290. return nil, err
  291. }
  292. m := cl.MemberByName(cfg.Name)
  293. if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
  294. return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
  295. }
  296. if cfg.ShouldDiscover() {
  297. var str string
  298. str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
  299. if err != nil {
  300. return nil, &DiscoveryError{Op: "join", Err: err}
  301. }
  302. var urlsmap types.URLsMap
  303. urlsmap, err = types.NewURLsMap(str)
  304. if err != nil {
  305. return nil, err
  306. }
  307. if checkDuplicateURL(urlsmap) {
  308. return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
  309. }
  310. if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
  311. return nil, err
  312. }
  313. }
  314. cl.SetStore(st)
  315. cl.SetBackend(be)
  316. cfg.PrintWithInitial()
  317. id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
  318. case haveWAL:
  319. if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
  320. return nil, fmt.Errorf("cannot write to member directory: %v", err)
  321. }
  322. if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
  323. return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
  324. }
  325. if cfg.ShouldDiscover() {
  326. plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
  327. }
  328. snapshot, err = ss.Load()
  329. if err != nil && err != snap.ErrNoSnapshot {
  330. return nil, err
  331. }
  332. if snapshot != nil {
  333. if err = st.Recovery(snapshot.Data); err != nil {
  334. plog.Panicf("recovered store from snapshot error: %v", err)
  335. }
  336. plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
  337. }
  338. cfg.Print()
  339. if !cfg.ForceNewCluster {
  340. id, cl, n, s, w = restartNode(cfg, snapshot)
  341. } else {
  342. id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
  343. }
  344. cl.SetStore(st)
  345. cl.SetBackend(be)
  346. cl.Recover(api.UpdateCapability)
  347. if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
  348. os.RemoveAll(bepath)
  349. return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
  350. }
  351. default:
  352. return nil, fmt.Errorf("unsupported bootstrap config")
  353. }
  354. if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
  355. return nil, fmt.Errorf("cannot access member directory: %v", terr)
  356. }
  357. sstats := &stats.ServerStats{
  358. Name: cfg.Name,
  359. ID: id.String(),
  360. }
  361. sstats.Initialize()
  362. lstats := stats.NewLeaderStats(id.String())
  363. heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
  364. srv = &EtcdServer{
  365. readych: make(chan struct{}),
  366. Cfg: cfg,
  367. snapCount: cfg.SnapCount,
  368. errorc: make(chan error, 1),
  369. store: st,
  370. r: *newRaftNode(
  371. raftNodeConfig{
  372. isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
  373. Node: n,
  374. heartbeat: heartbeat,
  375. raftStorage: s,
  376. storage: NewStorage(w, ss),
  377. },
  378. ),
  379. id: id,
  380. attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
  381. cluster: cl,
  382. stats: sstats,
  383. lstats: lstats,
  384. SyncTicker: time.NewTicker(500 * time.Millisecond),
  385. peerRt: prt,
  386. reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
  387. forceVersionC: make(chan struct{}),
  388. }
  389. srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
  390. srv.be = be
  391. minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
  392. // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
  393. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
  394. srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
  395. srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
  396. if beExist {
  397. kvindex := srv.kv.ConsistentIndex()
  398. // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
  399. // etcd from pre-3.0 release.
  400. if snapshot != nil && kvindex < snapshot.Metadata.Index {
  401. if kvindex != 0 {
  402. return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
  403. }
  404. plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
  405. }
  406. }
  407. srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
  408. tp, err := auth.NewTokenProvider(cfg.AuthToken,
  409. func(index uint64) <-chan struct{} {
  410. return srv.applyWait.Wait(index)
  411. },
  412. )
  413. if err != nil {
  414. plog.Errorf("failed to create token provider: %s", err)
  415. return nil, err
  416. }
  417. srv.authStore = auth.NewAuthStore(srv.be, tp)
  418. if h := cfg.AutoCompactionRetention; h != 0 {
  419. srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
  420. srv.compactor.Run()
  421. }
  422. srv.applyV3Base = &applierV3backend{srv}
  423. if err = srv.restoreAlarms(); err != nil {
  424. return nil, err
  425. }
  426. // TODO: move transport initialization near the definition of remote
  427. tr := &rafthttp.Transport{
  428. TLSInfo: cfg.PeerTLSInfo,
  429. DialTimeout: cfg.peerDialTimeout(),
  430. ID: id,
  431. URLs: cfg.PeerURLs,
  432. ClusterID: cl.ID(),
  433. Raft: srv,
  434. Snapshotter: ss,
  435. ServerStats: sstats,
  436. LeaderStats: lstats,
  437. ErrorC: srv.errorc,
  438. }
  439. if err = tr.Start(); err != nil {
  440. return nil, err
  441. }
  442. // add all remotes into transport
  443. for _, m := range remotes {
  444. if m.ID != id {
  445. tr.AddRemote(m.ID, m.PeerURLs)
  446. }
  447. }
  448. for _, m := range cl.Members() {
  449. if m.ID != id {
  450. tr.AddPeer(m.ID, m.PeerURLs)
  451. }
  452. }
  453. srv.r.transport = tr
  454. return srv, nil
  455. }
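
// exampleBootstrap is a minimal, hypothetical sketch of bringing up an
// EtcdServer from an already-populated ServerConfig; real callers (e.g. the
// embed package) fill in peer/client URLs, cluster membership, and timeouts
// before reaching this point.
func exampleBootstrap(cfg *ServerConfig) (*EtcdServer, error) {
    srv, err := NewServer(cfg)
    if err != nil {
        return nil, err
    }
    // Start is non-blocking; long-running work runs on goroutines tracked
    // through goAttach and is torn down by Stop.
    srv.Start()
    return srv, nil
}
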
  456. // Start prepares and starts the server in a new goroutine. It is no longer safe
  457. // to modify the server's fields after it has been passed to Start.
  458. // It also starts a goroutine to publish its server information.
  459. func (s *EtcdServer) Start() {
  460. s.start()
  461. s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
  462. s.goAttach(s.purgeFile)
  463. s.goAttach(func() { monitorFileDescriptor(s.stopping) })
  464. s.goAttach(s.monitorVersions)
  465. s.goAttach(s.linearizableReadLoop)
  466. }
  467. // start prepares and starts the server in a new goroutine. It is no longer safe
  468. // to modify the server's fields after it has been passed to Start.
  469. // This function is just used for testing.
  470. func (s *EtcdServer) start() {
  471. if s.snapCount == 0 {
  472. plog.Infof("set snapshot count to default %d", DefaultSnapCount)
  473. s.snapCount = DefaultSnapCount
  474. }
  475. s.w = wait.New()
  476. s.applyWait = wait.NewTimeList()
  477. s.done = make(chan struct{})
  478. s.stop = make(chan struct{})
  479. s.stopping = make(chan struct{})
  480. s.ctx, s.cancel = context.WithCancel(context.Background())
  481. s.readwaitc = make(chan struct{}, 1)
  482. s.readNotifier = newNotifier()
  483. if s.ClusterVersion() != nil {
  484. plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
  485. } else {
  486. plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
  487. }
  488. // TODO: if this is an empty log, writes all peer infos
  489. // into the first entry
  490. go s.run()
  491. }
  492. func (s *EtcdServer) purgeFile() {
  493. var serrc, werrc <-chan error
  494. if s.Cfg.MaxSnapFiles > 0 {
  495. serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
  496. }
  497. if s.Cfg.MaxWALFiles > 0 {
  498. werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
  499. }
  500. select {
  501. case e := <-werrc:
  502. plog.Fatalf("failed to purge wal file %v", e)
  503. case e := <-serrc:
  504. plog.Fatalf("failed to purge snap file %v", e)
  505. case <-s.stopping:
  506. return
  507. }
  508. }
  509. func (s *EtcdServer) ID() types.ID { return s.id }
  510. func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster }
  511. func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
  512. func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor }
  513. func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
  514. func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
  515. if s.cluster.IsIDRemoved(types.ID(m.From)) {
  516. plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
  517. return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
  518. }
  519. if m.Type == raftpb.MsgApp {
  520. s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
  521. }
  522. return s.r.Step(ctx, m)
  523. }
  524. func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
  525. func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
  526. // ReportSnapshot reports snapshot sent status to the raft state machine,
  527. // and clears the used snapshot from the snapshot store.
  528. func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
  529. s.r.ReportSnapshot(id, status)
  530. }
  531. type etcdProgress struct {
  532. confState raftpb.ConfState
  533. snapi uint64
  534. appliedt uint64
  535. appliedi uint64
  536. }
  537. // raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
  538. // and helps decouple state machine logic from Raft algorithms.
  539. // TODO: add a state machine interface to apply the commit entries and do snapshot/recover
  540. type raftReadyHandler struct {
  541. updateLeadership func(newLeader bool)
  542. updateCommittedIndex func(uint64)
  543. }
  544. func (s *EtcdServer) run() {
  545. sn, err := s.r.raftStorage.Snapshot()
  546. if err != nil {
  547. plog.Panicf("get snapshot from raft storage error: %v", err)
  548. }
  549. // asynchronously accept apply packets, dispatch progress in-order
  550. sched := schedule.NewFIFOScheduler()
  551. var (
  552. smu sync.RWMutex
  553. syncC <-chan time.Time
  554. )
  555. setSyncC := func(ch <-chan time.Time) {
  556. smu.Lock()
  557. syncC = ch
  558. smu.Unlock()
  559. }
  560. getSyncC := func() (ch <-chan time.Time) {
  561. smu.RLock()
  562. ch = syncC
  563. smu.RUnlock()
  564. return
  565. }
  566. rh := &raftReadyHandler{
  567. updateLeadership: func(newLeader bool) {
  568. if !s.isLeader() {
  569. if s.lessor != nil {
  570. s.lessor.Demote()
  571. }
  572. if s.compactor != nil {
  573. s.compactor.Pause()
  574. }
  575. setSyncC(nil)
  576. } else {
  577. if newLeader {
  578. t := time.Now()
  579. s.leadTimeMu.Lock()
  580. s.leadElectedTime = t
  581. s.leadTimeMu.Unlock()
  582. }
  583. setSyncC(s.SyncTicker.C)
  584. if s.compactor != nil {
  585. s.compactor.Resume()
  586. }
  587. }
  588. // TODO: remove the nil checking
  589. // current test utility does not provide the stats
  590. if s.stats != nil {
  591. s.stats.BecomeLeader()
  592. }
  593. },
  594. updateCommittedIndex: func(ci uint64) {
  595. cci := s.getCommittedIndex()
  596. if ci > cci {
  597. s.setCommittedIndex(ci)
  598. }
  599. },
  600. }
  601. s.r.start(rh)
  602. ep := etcdProgress{
  603. confState: sn.Metadata.ConfState,
  604. snapi: sn.Metadata.Index,
  605. appliedt: sn.Metadata.Term,
  606. appliedi: sn.Metadata.Index,
  607. }
  608. defer func() {
  609. s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
  610. close(s.stopping)
  611. s.wgMu.Unlock()
  612. s.cancel()
  613. sched.Stop()
  614. // wait for goroutines before closing raft so wal stays open
  615. s.wg.Wait()
  616. s.SyncTicker.Stop()
  617. // must stop raft after the scheduler; etcdserver can leak rafthttp pipelines
  618. // by adding a peer after raft stops the transport
  619. s.r.stop()
  620. // kv, lessor and backend can be nil if running without v3 enabled
  621. // or running unit tests.
  622. if s.lessor != nil {
  623. s.lessor.Stop()
  624. }
  625. if s.kv != nil {
  626. s.kv.Close()
  627. }
  628. if s.authStore != nil {
  629. s.authStore.Close()
  630. }
  631. if s.be != nil {
  632. s.be.Close()
  633. }
  634. if s.compactor != nil {
  635. s.compactor.Stop()
  636. }
  637. close(s.done)
  638. }()
  639. var expiredLeaseC <-chan []*lease.Lease
  640. if s.lessor != nil {
  641. expiredLeaseC = s.lessor.ExpiredLeasesC()
  642. }
  643. for {
  644. select {
  645. case ap := <-s.r.apply():
  646. f := func(context.Context) { s.applyAll(&ep, &ap) }
  647. sched.Schedule(f)
  648. case leases := <-expiredLeaseC:
  649. s.goAttach(func() {
  650. // Increase the throughput of expired lease deletion through parallelization
  651. c := make(chan struct{}, maxPendingRevokes)
  652. for _, lease := range leases {
  653. select {
  654. case c <- struct{}{}:
  655. case <-s.stopping:
  656. return
  657. }
  658. lid := lease.ID
  659. s.goAttach(func() {
  660. s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
  661. <-c
  662. })
  663. }
  664. })
  665. case err := <-s.errorc:
  666. plog.Errorf("%s", err)
  667. plog.Infof("the data-dir used by this member must be removed.")
  668. return
  669. case <-getSyncC():
  670. if s.store.HasTTLKeys() {
  671. s.sync(s.Cfg.ReqTimeout())
  672. }
  673. case <-s.stop:
  674. return
  675. }
  676. }
  677. }
  678. func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
  679. s.applySnapshot(ep, apply)
  680. st := time.Now()
  681. s.applyEntries(ep, apply)
  682. d := time.Since(st)
  683. entriesNum := len(apply.entries)
  684. if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration {
  685. plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries))
  686. plog.Warningf("avoid queries with large range/delete range!")
  687. }
  688. proposalsApplied.Set(float64(ep.appliedi))
  689. s.applyWait.Trigger(ep.appliedi)
  690. // wait for the raft routine to finish the disk writes before triggering a
  691. // snapshot. Otherwise the applied index might be greater than the last index in raft
  692. // storage, since the raft routine might be slower than the apply routine.
  693. <-apply.raftDone
  694. s.triggerSnapshot(ep)
  695. select {
  696. // snapshot requested via send()
  697. case m := <-s.r.msgSnapC:
  698. merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
  699. s.sendMergedSnap(merged)
  700. default:
  701. }
  702. }
  703. func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
  704. if raft.IsEmptySnap(apply.snapshot) {
  705. return
  706. }
  707. plog.Infof("applying snapshot at index %d...", ep.snapi)
  708. defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
  709. if apply.snapshot.Metadata.Index <= ep.appliedi {
  710. plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
  711. apply.snapshot.Metadata.Index, ep.appliedi)
  712. }
  713. snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index)
  714. if err != nil {
  715. plog.Panicf("get database snapshot file path error: %v", err)
  716. }
  717. fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename)
  718. if err := os.Rename(snapfn, fn); err != nil {
  719. plog.Panicf("rename snapshot file error: %v", err)
  720. }
  721. newbe := newBackend(fn, s.Cfg.QuotaBackendBytes)
  722. // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
  723. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
  724. if s.lessor != nil {
  725. plog.Info("recovering lessor...")
  726. s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
  727. plog.Info("finished recovering lessor")
  728. }
  729. plog.Info("restoring mvcc store...")
  730. if err := s.kv.Restore(newbe); err != nil {
  731. plog.Panicf("restore KV error: %v", err)
  732. }
  733. s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
  734. plog.Info("finished restoring mvcc store")
  735. // Closing old backend might block until all the txns
  736. // on the backend are finished.
  737. // We do not want to wait on closing the old backend.
  738. s.bemu.Lock()
  739. oldbe := s.be
  740. go func() {
  741. plog.Info("closing old backend...")
  742. defer plog.Info("finished closing old backend")
  743. if err := oldbe.Close(); err != nil {
  744. plog.Panicf("close backend error: %v", err)
  745. }
  746. }()
  747. s.be = newbe
  748. s.bemu.Unlock()
  749. plog.Info("recovering alarms...")
  750. if err := s.restoreAlarms(); err != nil {
  751. plog.Panicf("restore alarms error: %v", err)
  752. }
  753. plog.Info("finished recovering alarms")
  754. if s.authStore != nil {
  755. plog.Info("recovering auth store...")
  756. s.authStore.Recover(newbe)
  757. plog.Info("finished recovering auth store")
  758. }
  759. plog.Info("recovering store v2...")
  760. if err := s.store.Recovery(apply.snapshot.Data); err != nil {
  761. plog.Panicf("recovery store error: %v", err)
  762. }
  763. plog.Info("finished recovering store v2")
  764. s.cluster.SetBackend(s.be)
  765. plog.Info("recovering cluster configuration...")
  766. s.cluster.Recover(api.UpdateCapability)
  767. plog.Info("finished recovering cluster configuration")
  768. plog.Info("removing old peers from network...")
  769. // recover raft transport
  770. s.r.transport.RemoveAllPeers()
  771. plog.Info("finished removing old peers from network")
  772. plog.Info("adding peers from new cluster configuration into network...")
  773. for _, m := range s.cluster.Members() {
  774. if m.ID == s.ID() {
  775. continue
  776. }
  777. s.r.transport.AddPeer(m.ID, m.PeerURLs)
  778. }
  779. plog.Info("finished adding peers from new cluster configuration into network...")
  780. ep.appliedt = apply.snapshot.Metadata.Term
  781. ep.appliedi = apply.snapshot.Metadata.Index
  782. ep.snapi = ep.appliedi
  783. ep.confState = apply.snapshot.Metadata.ConfState
  784. }
  785. func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
  786. if len(apply.entries) == 0 {
  787. return
  788. }
  789. firsti := apply.entries[0].Index
  790. if firsti > ep.appliedi+1 {
  791. plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
  792. }
  793. var ents []raftpb.Entry
  794. if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
  795. ents = apply.entries[ep.appliedi+1-firsti:]
  796. }
  797. if len(ents) == 0 {
  798. return
  799. }
  800. var shouldstop bool
  801. if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
  802. go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
  803. }
  804. }
  805. func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
  806. if ep.appliedi-ep.snapi <= s.snapCount {
  807. return
  808. }
  809. plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
  810. s.snapshot(ep.appliedi, ep.confState)
  811. ep.snapi = ep.appliedi
  812. }
  813. func (s *EtcdServer) isMultiNode() bool {
  814. return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
  815. }
  816. func (s *EtcdServer) isLeader() bool {
  817. return uint64(s.ID()) == s.Lead()
  818. }
  819. // transferLeadership transfers the leader to the given transferee.
  820. // TODO: maybe expose to client?
  821. func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error {
  822. now := time.Now()
  823. interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
  824. plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
  825. s.r.TransferLeadership(ctx, lead, transferee)
  826. for s.Lead() != transferee {
  827. select {
  828. case <-ctx.Done(): // time out
  829. return ErrTimeoutLeaderTransfer
  830. case <-time.After(interval):
  831. }
  832. }
  833. // TODO: drain all requests, or drop all messages to the old leader
  834. plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
  835. return nil
  836. }
  837. // TransferLeadership transfers the leader to the chosen transferee.
  838. func (s *EtcdServer) TransferLeadership() error {
  839. if !s.isLeader() {
  840. plog.Printf("skipped leadership transfer for stopping non-leader member")
  841. return nil
  842. }
  843. if !s.isMultiNode() {
  844. plog.Printf("skipped leadership transfer for single member cluster")
  845. return nil
  846. }
  847. transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
  848. if !ok {
  849. return ErrUnhealthy
  850. }
  851. tm := s.Cfg.ReqTimeout()
  852. ctx, cancel := context.WithTimeout(s.ctx, tm)
  853. err := s.transferLeadership(ctx, s.Lead(), uint64(transferee))
  854. cancel()
  855. return err
  856. }
  857. // HardStop stops the server without coordination with other members in the cluster.
  858. func (s *EtcdServer) HardStop() {
  859. select {
  860. case s.stop <- struct{}{}:
  861. case <-s.done:
  862. return
  863. }
  864. <-s.done
  865. }
  866. // Stop stops the server gracefully, and shuts down the running goroutine.
  867. // Stop should be called after a Start(s), otherwise it will block forever.
  868. // When stopping leader, Stop transfers its leadership to one of its peers
  869. // before stopping the server.
  870. func (s *EtcdServer) Stop() {
  871. if err := s.TransferLeadership(); err != nil {
  872. plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
  873. }
  874. s.HardStop()
  875. }
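
// exampleShutdown is a hypothetical sketch of a graceful shutdown: Stop first
// tries to hand leadership to a peer, then stops the server, and StopNotify
// reports when the run goroutine has fully exited.
func exampleShutdown(s *EtcdServer) {
    s.Stop()
    <-s.StopNotify()
}
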
  876. // ReadyNotify returns a channel that will be closed when the server
  877. // is ready to serve client requests
  878. func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
  879. func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
  880. select {
  881. case <-time.After(d):
  882. case <-s.done:
  883. }
  884. select {
  885. case s.errorc <- err:
  886. default:
  887. }
  888. }
  889. // StopNotify returns a channel that receives an empty struct
  890. // when the server is stopped.
  891. func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
  892. func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
  893. func (s *EtcdServer) LeaderStats() []byte {
  894. lead := atomic.LoadUint64(&s.r.lead)
  895. if lead != uint64(s.id) {
  896. return nil
  897. }
  898. return s.lstats.JSON()
  899. }
  900. func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
  901. func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
  902. if s.authStore == nil {
  903. // In the context of ordinary etcd process, s.authStore will never be nil.
  904. // This branch is for handling cases in server_test.go
  905. return nil
  906. }
  907. // Note that this permission check is done in the API layer,
  908. // so a TOCTOU problem could potentially arise with a schedule like this:
  909. // update membership with user A -> revoke root role of A -> apply membership change
  910. // in the state machine layer
  911. // However, both membership changes and role management require the root privilege,
  912. // so careful operation by admins can prevent the problem.
  913. authInfo, err := s.AuthInfoFromCtx(ctx)
  914. if err != nil {
  915. return err
  916. }
  917. return s.AuthStore().IsAdminPermitted(authInfo)
  918. }
  919. func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
  920. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  921. return nil, err
  922. }
  923. if s.Cfg.StrictReconfigCheck {
  924. // by default StrictReconfigCheck is enabled; reject new members if unhealthy
  925. if !s.cluster.IsReadyToAddNewMember() {
  926. plog.Warningf("not enough started members, rejecting member add %+v", memb)
  927. return nil, ErrNotEnoughStartedMembers
  928. }
  929. if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
  930. plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
  931. return nil, ErrUnhealthy
  932. }
  933. }
  934. // TODO: move Member to protobuf type
  935. b, err := json.Marshal(memb)
  936. if err != nil {
  937. return nil, err
  938. }
  939. cc := raftpb.ConfChange{
  940. Type: raftpb.ConfChangeAddNode,
  941. NodeID: uint64(memb.ID),
  942. Context: b,
  943. }
  944. return s.configure(ctx, cc)
  945. }
  946. func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
  947. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  948. return nil, err
  949. }
  950. // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss
  951. if err := s.mayRemoveMember(types.ID(id)); err != nil {
  952. return nil, err
  953. }
  954. cc := raftpb.ConfChange{
  955. Type: raftpb.ConfChangeRemoveNode,
  956. NodeID: id,
  957. }
  958. return s.configure(ctx, cc)
  959. }
  960. func (s *EtcdServer) mayRemoveMember(id types.ID) error {
  961. if !s.Cfg.StrictReconfigCheck {
  962. return nil
  963. }
  964. if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
  965. plog.Warningf("not enough started members, rejecting remove member %s", id)
  966. return ErrNotEnoughStartedMembers
  967. }
  968. // downed member is safe to remove since it's not part of the active quorum
  969. if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
  970. return nil
  971. }
  972. // protect quorum if some members are down
  973. m := s.cluster.Members()
  974. active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
  975. if (active - 1) < 1+((len(m)-1)/2) {
  976. plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
  977. return ErrUnhealthy
  978. }
  979. return nil
  980. }
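
// exampleQuorumAfterRemoval restates the arithmetic used by mayRemoveMember
// above as a standalone sketch (hypothetical helper, not used by etcd):
// removing one member is only allowed if the remaining active members still
// form a majority of the shrunken cluster.
func exampleQuorumAfterRemoval(active, clusterSize int) bool {
    return (active - 1) >= 1+((clusterSize-1)/2)
}
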
  981. func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
  982. b, merr := json.Marshal(memb)
  983. if merr != nil {
  984. return nil, merr
  985. }
  986. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  987. return nil, err
  988. }
  989. cc := raftpb.ConfChange{
  990. Type: raftpb.ConfChangeUpdateNode,
  991. NodeID: uint64(memb.ID),
  992. Context: b,
  993. }
  994. return s.configure(ctx, cc)
  995. }
  996. // Implement the RaftTimer interface
  997. func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) }
  998. func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) }
  999. // Lead is only for testing purposes.
  1000. // TODO: add Raft server interface to expose raft related info:
  1001. // Index, Term, Lead, Committed, Applied, LastIndex, etc.
  1002. func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
  1003. func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
  1004. type confChangeResponse struct {
  1005. membs []*membership.Member
  1006. err error
  1007. }
  1008. // configure sends a configuration change through consensus and
  1009. // then waits for it to be applied to the server. It
  1010. // will block until the change is performed or there is an error.
  1011. func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
  1012. cc.ID = s.reqIDGen.Next()
  1013. ch := s.w.Register(cc.ID)
  1014. start := time.Now()
  1015. if err := s.r.ProposeConfChange(ctx, cc); err != nil {
  1016. s.w.Trigger(cc.ID, nil)
  1017. return nil, err
  1018. }
  1019. select {
  1020. case x := <-ch:
  1021. if x == nil {
  1022. plog.Panicf("configure trigger value should never be nil")
  1023. }
  1024. resp := x.(*confChangeResponse)
  1025. return resp.membs, resp.err
  1026. case <-ctx.Done():
  1027. s.w.Trigger(cc.ID, nil) // GC wait
  1028. return nil, s.parseProposeCtxErr(ctx.Err(), start)
  1029. case <-s.stopping:
  1030. return nil, ErrStopped
  1031. }
  1032. }
  1033. // sync proposes a SYNC request and is non-blocking.
  1034. // This makes no guarantee that the request will be proposed or performed.
  1035. // The request will be canceled after the given timeout.
  1036. func (s *EtcdServer) sync(timeout time.Duration) {
  1037. req := pb.Request{
  1038. Method: "SYNC",
  1039. ID: s.reqIDGen.Next(),
  1040. Time: time.Now().UnixNano(),
  1041. }
  1042. data := pbutil.MustMarshal(&req)
  1043. // There is no guarantee that the node has a leader when issuing the SYNC request,
  1044. // so the proposal is made from a goroutine.
  1045. ctx, cancel := context.WithTimeout(s.ctx, timeout)
  1046. s.goAttach(func() {
  1047. s.r.Propose(ctx, data)
  1048. cancel()
  1049. })
  1050. }
  1051. // publish registers server information into the cluster. The information
  1052. // is the JSON representation of this server's member struct, updated with the
  1053. // static clientURLs of the server.
  1054. // The function keeps attempting to register until it succeeds,
  1055. // or its server is stopped.
  1056. func (s *EtcdServer) publish(timeout time.Duration) {
  1057. b, err := json.Marshal(s.attributes)
  1058. if err != nil {
  1059. plog.Panicf("json marshal error: %v", err)
  1060. return
  1061. }
  1062. req := pb.Request{
  1063. Method: "PUT",
  1064. Path: membership.MemberAttributesStorePath(s.id),
  1065. Val: string(b),
  1066. }
  1067. for {
  1068. ctx, cancel := context.WithTimeout(s.ctx, timeout)
  1069. _, err := s.Do(ctx, req)
  1070. cancel()
  1071. switch err {
  1072. case nil:
  1073. close(s.readych)
  1074. plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
  1075. return
  1076. case ErrStopped:
  1077. plog.Infof("aborting publish because server is stopped")
  1078. return
  1079. default:
  1080. plog.Errorf("publish error: %v", err)
  1081. }
  1082. }
  1083. }
  1084. func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
  1085. atomic.AddInt64(&s.inflightSnapshots, 1)
  1086. s.r.transport.SendSnapshot(merged)
  1087. s.goAttach(func() {
  1088. select {
  1089. case ok := <-merged.CloseNotify():
  1090. // delay releasing inflight snapshot for another 30 seconds to
  1091. // block log compaction.
  1092. // If the follower still fails to catch up, it is probably just too slow
  1093. // to catch up. We cannot avoid the snapshot cycle anyway.
  1094. if ok {
  1095. select {
  1096. case <-time.After(releaseDelayAfterSnapshot):
  1097. case <-s.stopping:
  1098. }
  1099. }
  1100. atomic.AddInt64(&s.inflightSnapshots, -1)
  1101. case <-s.stopping:
  1102. return
  1103. }
  1104. })
  1105. }
  1106. // apply takes entries received from Raft (after they have been committed) and
  1107. // applies them to the current state of the EtcdServer.
  1108. // The given entries should not be empty.
  1109. func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
  1110. for i := range es {
  1111. e := es[i]
  1112. switch e.Type {
  1113. case raftpb.EntryNormal:
  1114. s.applyEntryNormal(&e)
  1115. case raftpb.EntryConfChange:
  1116. var cc raftpb.ConfChange
  1117. pbutil.MustUnmarshal(&cc, e.Data)
  1118. removedSelf, err := s.applyConfChange(cc, confState)
  1119. shouldStop = shouldStop || removedSelf
  1120. s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
  1121. default:
  1122. plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
  1123. }
  1124. atomic.StoreUint64(&s.r.index, e.Index)
  1125. atomic.StoreUint64(&s.r.term, e.Term)
  1126. appliedt = e.Term
  1127. appliedi = e.Index
  1128. }
  1129. return appliedt, appliedi, shouldStop
  1130. }
  1131. // applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
  1132. func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
  1133. shouldApplyV3 := false
  1134. if e.Index > s.consistIndex.ConsistentIndex() {
  1135. // set the consistent index of current executing entry
  1136. s.consistIndex.setConsistentIndex(e.Index)
  1137. shouldApplyV3 = true
  1138. }
  1139. defer s.setAppliedIndex(e.Index)
  1140. // the raft state machine may generate a noop entry during leader confirmation.
  1141. // skip it in advance to avoid potential bugs in the future
  1142. if len(e.Data) == 0 {
  1143. select {
  1144. case s.forceVersionC <- struct{}{}:
  1145. default:
  1146. }
  1147. // promote lessor when the local member is leader and finished
  1148. // applying all entries from the last term.
  1149. if s.isLeader() {
  1150. s.lessor.Promote(s.Cfg.electionTimeout())
  1151. }
  1152. return
  1153. }
  1154. var raftReq pb.InternalRaftRequest
  1155. if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
  1156. var r pb.Request
  1157. pbutil.MustUnmarshal(&r, e.Data)
  1158. s.w.Trigger(r.ID, s.applyV2Request(&r))
  1159. return
  1160. }
  1161. if raftReq.V2 != nil {
  1162. req := raftReq.V2
  1163. s.w.Trigger(req.ID, s.applyV2Request(req))
  1164. return
  1165. }
  1166. // do not re-apply applied entries.
  1167. if !shouldApplyV3 {
  1168. return
  1169. }
  1170. id := raftReq.ID
  1171. if id == 0 {
  1172. id = raftReq.Header.ID
  1173. }
  1174. var ar *applyResult
  1175. needResult := s.w.IsRegistered(id)
  1176. if needResult || !noSideEffect(&raftReq) {
  1177. if !needResult && raftReq.Txn != nil {
  1178. removeNeedlessRangeReqs(raftReq.Txn)
  1179. }
  1180. ar = s.applyV3.Apply(&raftReq)
  1181. }
  1182. if ar == nil {
  1183. return
  1184. }
  1185. if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
  1186. s.w.Trigger(id, ar)
  1187. return
  1188. }
  1189. plog.Errorf("applying raft message exceeded backend quota")
  1190. s.goAttach(func() {
  1191. a := &pb.AlarmRequest{
  1192. MemberID: uint64(s.ID()),
  1193. Action: pb.AlarmRequest_ACTIVATE,
  1194. Alarm: pb.AlarmType_NOSPACE,
  1195. }
  1196. r := pb.InternalRaftRequest{Alarm: a}
  1197. s.processInternalRaftRequest(s.ctx, r)
  1198. s.w.Trigger(id, ar)
  1199. })
  1200. }
  1201. // applyConfChange applies a ConfChange to the server. It is only
  1202. // invoked with a ConfChange that has already passed through Raft
  1203. func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
  1204. if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
  1205. cc.NodeID = raft.None
  1206. s.r.ApplyConfChange(cc)
  1207. return false, err
  1208. }
  1209. *confState = *s.r.ApplyConfChange(cc)
  1210. switch cc.Type {
  1211. case raftpb.ConfChangeAddNode:
  1212. m := new(membership.Member)
  1213. if err := json.Unmarshal(cc.Context, m); err != nil {
  1214. plog.Panicf("unmarshal member should never fail: %v", err)
  1215. }
  1216. if cc.NodeID != uint64(m.ID) {
  1217. plog.Panicf("nodeID should always be equal to member ID")
  1218. }
  1219. s.cluster.AddMember(m)
  1220. if m.ID != s.id {
  1221. s.r.transport.AddPeer(m.ID, m.PeerURLs)
  1222. }
  1223. case raftpb.ConfChangeRemoveNode:
  1224. id := types.ID(cc.NodeID)
  1225. s.cluster.RemoveMember(id)
  1226. if id == s.id {
  1227. return true, nil
  1228. }
  1229. s.r.transport.RemovePeer(id)
  1230. case raftpb.ConfChangeUpdateNode:
  1231. m := new(membership.Member)
  1232. if err := json.Unmarshal(cc.Context, m); err != nil {
  1233. plog.Panicf("unmarshal member should never fail: %v", err)
  1234. }
  1235. if cc.NodeID != uint64(m.ID) {
  1236. plog.Panicf("nodeID should always be equal to member ID")
  1237. }
  1238. s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
  1239. if m.ID != s.id {
  1240. s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
  1241. }
  1242. }
  1243. return false, nil
  1244. }
  1245. // TODO: non-blocking snapshot
  1246. func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
  1247. clone := s.store.Clone()
  1248. // commit kv to write metadata (for example: consistent index) to disk.
  1249. // KV().commit() updates the consistent index in backend.
  1250. // All operations that update consistent index must be called sequentially
  1251. // from applyAll function.
  1252. // So KV().Commit() cannot run in parallel with apply. It has to be called outside
  1253. // the goroutine created below.
  1254. s.KV().Commit()
  1255. s.goAttach(func() {
  1256. d, err := clone.SaveNoCopy()
  1257. // TODO: current store will never fail to do a snapshot
  1258. // what should we do if the store might fail?
  1259. if err != nil {
  1260. plog.Panicf("store save should never fail: %v", err)
  1261. }
  1262. snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
  1263. if err != nil {
  1264. // the snapshot was done asynchronously with the progress of raft.
  1265. // raft might already have a newer snapshot.
  1266. if err == raft.ErrSnapOutOfDate {
  1267. return
  1268. }
  1269. plog.Panicf("unexpected create snapshot error %v", err)
  1270. }
  1271. // SaveSnap saves the snapshot and releases the locked wal files
  1272. // to the snapshot index.
  1273. if err = s.r.storage.SaveSnap(snap); err != nil {
  1274. plog.Fatalf("save snapshot error: %v", err)
  1275. }
  1276. plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
  1277. // When sending a snapshot, etcd will pause compaction.
  1278. // After receiving a snapshot, the slow follower needs to get all the entries right after
  1279. // the sent snapshot in order to catch up. If we do not pause compaction, the log entries right after
  1280. // the sent snapshot might already be compacted. This happens when the snapshot takes a long time
  1281. // to send and save. Pausing compaction avoids triggering a snapshot sending cycle.
  1282. if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
  1283. plog.Infof("skip compaction since there is an inflight snapshot")
  1284. return
  1285. }
  1286. // keep some in memory log entries for slow followers.
  1287. compacti := uint64(1)
  1288. if snapi > numberOfCatchUpEntries {
  1289. compacti = snapi - numberOfCatchUpEntries
  1290. }
  1291. err = s.r.raftStorage.Compact(compacti)
  1292. if err != nil {
  1293. // the compaction was done asynchronously with the progress of raft.
  1294. // the raft log might already have been compacted.
  1295. if err == raft.ErrCompacted {
  1296. return
  1297. }
  1298. plog.Panicf("unexpected compaction error %v", err)
  1299. }
  1300. plog.Infof("compacted raft log at %d", compacti)
  1301. })
  1302. }
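
// exampleCompactIndex restates the compaction arithmetic above as a sketch
// (hypothetical helper, not used by etcd): keep numberOfCatchUpEntries
// in-memory entries for slow followers and compact everything before that.
func exampleCompactIndex(snapi uint64) uint64 {
    if snapi > numberOfCatchUpEntries {
        return snapi - numberOfCatchUpEntries
    }
    return 1
}
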
  1303. // CutPeer drops messages to the specified peer.
  1304. func (s *EtcdServer) CutPeer(id types.ID) {
  1305. tr, ok := s.r.transport.(*rafthttp.Transport)
  1306. if ok {
  1307. tr.CutPeer(id)
  1308. }
  1309. }
  1310. // MendPeer recovers the message dropping behavior of the given peer.
  1311. func (s *EtcdServer) MendPeer(id types.ID) {
  1312. tr, ok := s.r.transport.(*rafthttp.Transport)
  1313. if ok {
  1314. tr.MendPeer(id)
  1315. }
  1316. }
  1317. func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
  1318. func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
  1319. func (s *EtcdServer) ClusterVersion() *semver.Version {
  1320. if s.cluster == nil {
  1321. return nil
  1322. }
  1323. return s.cluster.Version()
  1324. }
  1325. // monitorVersions checks the member's version every monitorVersionInterval.
  1326. // It updates the cluster version if all members agree on a higher one.
  1327. // It logs a message if there is a member with a higher version than the
  1328. // local version.
  1329. func (s *EtcdServer) monitorVersions() {
  1330. for {
  1331. select {
  1332. case <-s.forceVersionC:
  1333. case <-time.After(monitorVersionInterval):
  1334. case <-s.stopping:
  1335. return
  1336. }
  1337. if s.Leader() != s.ID() {
  1338. continue
  1339. }
  1340. v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt))
  1341. if v != nil {
  1342. // only keep major.minor version for comparison
  1343. v = &semver.Version{
  1344. Major: v.Major,
  1345. Minor: v.Minor,
  1346. }
  1347. }
  1348. // if the current version is nil:
  1349. // 1. use the decided version if possible
  1350. // 2. or use the min cluster version
  1351. if s.cluster.Version() == nil {
  1352. verStr := version.MinClusterVersion
  1353. if v != nil {
  1354. verStr = v.String()
  1355. }
  1356. s.goAttach(func() { s.updateClusterVersion(verStr) })
  1357. continue
  1358. }
  1359. // update cluster version only if the decided version is greater than
  1360. // the current cluster version
  1361. if v != nil && s.cluster.Version().LessThan(*v) {
  1362. s.goAttach(func() { s.updateClusterVersion(v.String()) })
  1363. }
  1364. }
  1365. }
  1366. func (s *EtcdServer) updateClusterVersion(ver string) {
  1367. if s.cluster.Version() == nil {
  1368. plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
  1369. } else {
  1370. plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
  1371. }
  1372. req := pb.Request{
  1373. Method: "PUT",
  1374. Path: membership.StoreClusterVersionKey(),
  1375. Val: ver,
  1376. }
  1377. ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
  1378. _, err := s.Do(ctx, req)
  1379. cancel()
  1380. switch err {
  1381. case nil:
  1382. return
  1383. case ErrStopped:
  1384. plog.Infof("aborting update cluster version because server is stopped")
  1385. return
  1386. default:
  1387. plog.Errorf("error updating cluster version (%v)", err)
  1388. }
  1389. }
  1390. func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
  1391. switch err {
  1392. case context.Canceled:
  1393. return ErrCanceled
  1394. case context.DeadlineExceeded:
  1395. s.leadTimeMu.RLock()
  1396. curLeadElected := s.leadElectedTime
  1397. s.leadTimeMu.RUnlock()
  1398. prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
  1399. if start.After(prevLeadLost) && start.Before(curLeadElected) {
  1400. return ErrTimeoutDueToLeaderFail
  1401. }
  1402. lead := types.ID(atomic.LoadUint64(&s.r.lead))
  1403. switch lead {
  1404. case types.ID(raft.None):
  1405. // TODO: return error to specify it happens because the cluster does not have leader now
  1406. case s.ID():
  1407. if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
  1408. return ErrTimeoutDueToConnectionLost
  1409. }
  1410. default:
  1411. if !isConnectedSince(s.r.transport, start, lead) {
  1412. return ErrTimeoutDueToConnectionLost
  1413. }
  1414. }
  1415. return ErrTimeout
  1416. default:
  1417. return err
  1418. }
  1419. }
  1420. func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }
  1421. func (s *EtcdServer) Backend() backend.Backend {
  1422. s.bemu.Lock()
  1423. defer s.bemu.Unlock()
  1424. return s.be
  1425. }
  1426. func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
  1427. func (s *EtcdServer) restoreAlarms() error {
  1428. s.applyV3 = s.newApplierV3()
  1429. as, err := alarm.NewAlarmStore(s)
  1430. if err != nil {
  1431. return err
  1432. }
  1433. s.alarmStore = as
  1434. if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
  1435. s.applyV3 = newApplierV3Capped(s.applyV3)
  1436. }
  1437. return nil
  1438. }
  1439. func (s *EtcdServer) getAppliedIndex() uint64 {
  1440. return atomic.LoadUint64(&s.appliedIndex)
  1441. }
  1442. func (s *EtcdServer) setAppliedIndex(v uint64) {
  1443. atomic.StoreUint64(&s.appliedIndex, v)
  1444. }
  1445. func (s *EtcdServer) getCommittedIndex() uint64 {
  1446. return atomic.LoadUint64(&s.committedIndex)
  1447. }
  1448. func (s *EtcdServer) setCommittedIndex(v uint64) {
  1449. atomic.StoreUint64(&s.committedIndex, v)
  1450. }
  1451. // goAttach creates a goroutine on a given function and tracks it using
  1452. // the etcdserver waitgroup.
  1453. func (s *EtcdServer) goAttach(f func()) {
  1454. s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
  1455. defer s.wgMu.RUnlock()
  1456. select {
  1457. case <-s.stopping:
  1458. plog.Warning("server has stopped (skipping goAttach)")
  1459. return
  1460. default:
  1461. }
  1462. // now safe to add since waitgroup wait has not started yet
  1463. s.wg.Add(1)
  1464. go func() {
  1465. defer s.wg.Done()
  1466. f()
  1467. }()
  1468. }
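
// exampleBackgroundTask is a hypothetical sketch of how server background
// work is attached: the function runs on its own goroutine, is counted in
// s.wg, and must watch s.stopping so shutdown does not block on it.
func exampleBackgroundTask(s *EtcdServer) {
    s.goAttach(func() {
        select {
        case <-time.After(time.Second):
            // periodic work would go here
        case <-s.stopping:
        }
    })
}
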
  1469. func newBackend(path string, quotaBytes int64) backend.Backend {
  1470. bcfg := backend.DefaultBackendConfig()
  1471. bcfg.Path = path
  1472. if quotaBytes > 0 && quotaBytes != DefaultQuotaBytes {
  1473. // permit 10% excess over quota for disarm
  1474. bcfg.MmapSize = uint64(quotaBytes + quotaBytes/10)
  1475. }
  1476. return backend.New(bcfg)
  1477. }
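
// exampleOpenBackend illustrates the quota sizing above with made-up values:
// with an 8 GB quota the mmap size is grown by 10% so the backend still has
// headroom to disarm a NOSPACE alarm. Not used by etcd itself.
func exampleOpenBackend(snapDir string) backend.Backend {
    return newBackend(filepath.Join(snapDir, databaseFilename), 8*1024*1024*1024)
}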