server.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "context"
    "encoding/json"
    "expvar"
    "fmt"
    "math"
    "math/rand"
    "net/http"
    "os"
    "path"
    "regexp"
    "sync"
    "sync/atomic"
    "time"

    "go.etcd.io/etcd/auth"
    "go.etcd.io/etcd/etcdserver/api"
    "go.etcd.io/etcd/etcdserver/api/membership"
    "go.etcd.io/etcd/etcdserver/api/rafthttp"
    "go.etcd.io/etcd/etcdserver/api/snap"
    "go.etcd.io/etcd/etcdserver/api/v2discovery"
    "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
    stats "go.etcd.io/etcd/etcdserver/api/v2stats"
    "go.etcd.io/etcd/etcdserver/api/v2store"
    "go.etcd.io/etcd/etcdserver/api/v3alarm"
    "go.etcd.io/etcd/etcdserver/api/v3compactor"
    pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
    "go.etcd.io/etcd/lease"
    "go.etcd.io/etcd/lease/leasehttp"
    "go.etcd.io/etcd/mvcc"
    "go.etcd.io/etcd/mvcc/backend"
    "go.etcd.io/etcd/pkg/fileutil"
    "go.etcd.io/etcd/pkg/idutil"
    "go.etcd.io/etcd/pkg/pbutil"
    "go.etcd.io/etcd/pkg/runtime"
    "go.etcd.io/etcd/pkg/schedule"
    "go.etcd.io/etcd/pkg/types"
    "go.etcd.io/etcd/pkg/wait"
    "go.etcd.io/etcd/raft"
    "go.etcd.io/etcd/raft/raftpb"
    "go.etcd.io/etcd/version"
    "go.etcd.io/etcd/wal"

    "github.com/coreos/go-semver/semver"
    "github.com/coreos/pkg/capnslog"
    humanize "github.com/dustin/go-humanize"
    "github.com/prometheus/client_golang/prometheus"
    "go.uber.org/zap"
)
const (
    DefaultSnapshotCount = 100000

    // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower
    // to catch up after compacting the raft storage entries.
    // We expect the follower to have millisecond-level latency with the leader.
    // The max throughput is around 10K entries/sec, so keeping 5K entries covers
    // roughly 500ms of lag, which is enough for a follower to catch up.
    DefaultSnapshotCatchUpEntries uint64 = 5000

    StoreClusterPrefix = "/0"
    StoreKeysPrefix    = "/1"

    // HealthInterval is the minimum time the cluster should be healthy
    // before accepting add-member requests.
    HealthInterval = 5 * time.Second

    purgeFileInterval = 30 * time.Second

    // monitorVersionInterval should be smaller than the timeout
    // on the connection; otherwise we will not be able to reuse the connection
    // (since it will time out).
    monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second

    // maxInFlightMsgSnap is the maximum number of in-flight snapshot messages
    // the etcd server allows. This number is more than enough for most clusters
    // with 5 machines.
    maxInFlightMsgSnap = 16

    releaseDelayAfterSnapshot = 30 * time.Second

    // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
    maxPendingRevokes = 16

    recommendedMaxRequestBytes = 10 * 1024 * 1024

    readyPercent = 0.9
)
var (
    plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver")

    storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
)

func init() {
    rand.Seed(time.Now().UnixNano())

    expvar.Publish(
        "file_descriptor_limit",
        expvar.Func(
            func() interface{} {
                n, _ := runtime.FDLimit()
                return n
            },
        ),
    )
}
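
// Note: expvar publishes variables on the /debug/vars endpoint of
// http.DefaultServeMux, so the published limit is readable there whenever that
// mux is served. A quick check (the port below is illustrative; whether
// /debug/vars is actually exposed depends on how the HTTP handlers are wired up):
//
//     curl -s http://127.0.0.1:2379/debug/vars | grep file_descriptor_limit
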
type Response struct {
    Term    uint64
    Index   uint64
    Event   *v2store.Event
    Watcher v2store.Watcher
    Err     error
}

type ServerV2 interface {
    Server
    Leader() types.ID

    // Do takes a V2 request and attempts to fulfill it, returning a Response.
    Do(ctx context.Context, r pb.Request) (Response, error)
    stats.Stats
    ClientCertAuthEnabled() bool
}

type ServerV3 interface {
    Server
    RaftStatusGetter
}

func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }

type Server interface {
    // AddMember attempts to add a member into the cluster. It will return
    // ErrIDRemoved if the member ID has been removed from the cluster, or
    // ErrIDExists if the member ID already exists in the cluster.
    AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
    // RemoveMember attempts to remove a member from the cluster. It will
    // return ErrIDRemoved if the member ID has been removed from the cluster, or
    // ErrIDNotFound if the member ID is not in the cluster.
    RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
    // UpdateMember attempts to update an existing member in the cluster. It will
    // return ErrIDNotFound if the member ID does not exist.
    UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
    // PromoteMember attempts to promote a non-voting node to a voting node. It will
    // return ErrIDNotFound if the member ID does not exist,
    // ErrLearnerNotReady if the member is not ready, or
    // ErrMemberNotLearner if the member is not a learner.
    PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error)

    // ClusterVersion is the cluster-wide minimum major.minor version.
    // The cluster version is set to the minimum version that an etcd member is
    // compatible with when it first bootstraps.
    //
    // ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
    //
    // During a rolling upgrade, the ClusterVersion is updated
    // automatically after a sync (every 5 seconds by default).
    //
    // The API/raft component can use ClusterVersion to determine whether
    // it can accept a client request or a raft RPC.
    // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
    // the leader is etcd 2.0. An etcd 2.0 leader will not update the clusterVersion
    // since this feature was introduced after 2.0.
    ClusterVersion() *semver.Version
    Cluster() api.Cluster
    Alarms() []*pb.AlarmMember
}
// EtcdServer is the production implementation of the Server interface.
type EtcdServer struct {
    // inflightSnapshots holds the number of snapshots currently in flight.
    inflightSnapshots int64  // must use atomic operations to access; keep 64-bit aligned.
    appliedIndex      uint64 // must use atomic operations to access; keep 64-bit aligned.
    committedIndex    uint64 // must use atomic operations to access; keep 64-bit aligned.
    term              uint64 // must use atomic operations to access; keep 64-bit aligned.
    lead              uint64 // must use atomic operations to access; keep 64-bit aligned.

    // consistIndex is used to hold the index of the currently executing entry.
    // It is initialized to 0 before executing any entry.
    consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
    r            raftNode        // uses 64-bit atomics; keep 64-bit aligned.
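    // Placement note (inferred from the alignment comments above): on 32-bit
    // platforms, 64-bit sync/atomic operations require 64-bit-aligned operands,
    // and Go only guarantees that alignment for the first word of an allocated
    // struct. Keeping the atomically accessed fields at the top preserves it.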
    readych chan struct{}
    Cfg     ServerConfig

    lgMu *sync.RWMutex
    lg   *zap.Logger

    w wait.Wait

    readMu sync.RWMutex
    // the read routine notifies the etcd server that it is waiting to read
    // by sending an empty struct to readwaitc
    readwaitc chan struct{}
    // readNotifier is used to notify the read routine that it can process the
    // request when there is no error
    readNotifier *notifier

    // stop signals the run goroutine should shutdown.
    stop chan struct{}
    // stopping is closed by run goroutine on shutdown.
    stopping chan struct{}
    // done is closed when all goroutines from start() complete.
    done chan struct{}
    // leaderChanged is used to notify the linearizable read loop to drop the old read requests.
    leaderChanged   chan struct{}
    leaderChangedMu sync.RWMutex

    errorc     chan error
    id         types.ID
    attributes membership.Attributes

    cluster *membership.RaftCluster

    v2store     v2store.Store
    snapshotter *snap.Snapshotter

    applyV2 ApplierV2

    // applyV3 is the applier with auth and quotas
    applyV3 applierV3
    // applyV3Base is the core applier without auth or quotas
    applyV3Base applierV3
    applyWait   wait.WaitTime

    kv         mvcc.ConsistentWatchableKV
    lessor     lease.Lessor
    bemu       sync.Mutex
    be         backend.Backend
    authStore  auth.AuthStore
    alarmStore *v3alarm.AlarmStore

    stats  *stats.ServerStats
    lstats *stats.LeaderStats

    SyncTicker *time.Ticker
    // compactor is used to auto-compact the KV.
    compactor v3compactor.Compactor

    // peerRt is used to send requests (version, lease) to peers.
    peerRt   http.RoundTripper
    reqIDGen *idutil.Generator

    // forceVersionC is used to force the version monitor loop
    // to detect the cluster version immediately.
    forceVersionC chan struct{}

    // wgMu blocks concurrent waitgroup mutation while the server is stopping
    wgMu sync.RWMutex
    // wg is used to wait for the goroutines that depend on the server state
    // to exit when stopping the server.
    wg sync.WaitGroup

    // ctx is used for etcd-initiated requests that may need to be canceled
    // on etcd server shutdown.
    ctx    context.Context
    cancel context.CancelFunc

    leadTimeMu      sync.RWMutex
    leadElectedTime time.Time

    *AccessController
}
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
    st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)

    var (
        w  *wal.WAL
        n  raft.Node
        s  *raft.MemoryStorage
        id types.ID
        cl *membership.RaftCluster
    )

    if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
        if cfg.Logger != nil {
            cfg.Logger.Warn(
                "exceeded recommended request limit",
                zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
                zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
                zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
                zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))),
            )
        } else {
            plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
        }
    }

    if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
        return nil, fmt.Errorf("cannot access data directory: %v", terr)
    }

    haveWAL := wal.Exist(cfg.WALDir())

    if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
        if cfg.Logger != nil {
            cfg.Logger.Fatal(
                "failed to create snapshot directory",
                zap.String("path", cfg.SnapDir()),
                zap.Error(err),
            )
        } else {
            plog.Fatalf("create snapshot directory error: %v", err)
        }
    }
    ss := snap.New(cfg.Logger, cfg.SnapDir())

    bepath := cfg.backendPath()
    beExist := fileutil.Exist(bepath)
    be := openBackend(cfg)

    defer func() {
        if err != nil {
            be.Close()
        }
    }()

    prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
    if err != nil {
        return nil, err
    }
    var (
        remotes  []*membership.Member
        snapshot *raftpb.Snapshot
    )

    switch {
    case !haveWAL && !cfg.NewCluster:
        if err = cfg.VerifyJoinExisting(); err != nil {
            return nil, err
        }
        cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
        if err != nil {
            return nil, err
        }
        existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
        if gerr != nil {
            return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
        }
        if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
            return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
        }
        if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
            return nil, fmt.Errorf("incompatible with current running cluster")
        }

        remotes = existingCluster.Members()
        cl.SetID(types.ID(0), existingCluster.ID())
        cl.SetStore(st)
        cl.SetBackend(be)
        id, n, s, w = startNode(cfg, cl, nil)
        cl.SetID(id, existingCluster.ID())

    case !haveWAL && cfg.NewCluster:
        if err = cfg.VerifyBootstrap(); err != nil {
            return nil, err
        }
        cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
        if err != nil {
            return nil, err
        }
        m := cl.MemberByName(cfg.Name)
        if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
            return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
        }
        if cfg.ShouldDiscover() {
            var str string
            str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
            if err != nil {
                return nil, &DiscoveryError{Op: "join", Err: err}
            }
            var urlsmap types.URLsMap
            urlsmap, err = types.NewURLsMap(str)
            if err != nil {
                return nil, err
            }
            if checkDuplicateURL(urlsmap) {
                return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
            }
            if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
                return nil, err
            }
        }
        cl.SetStore(st)
        cl.SetBackend(be)
        id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
        cl.SetID(id, cl.ID())

    case haveWAL:
        if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
            return nil, fmt.Errorf("cannot write to member directory: %v", err)
        }
        if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
            return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
        }

        if cfg.ShouldDiscover() {
            if cfg.Logger != nil {
                cfg.Logger.Warn(
                    "discovery token is ignored since cluster already initialized; valid logs are found",
                    zap.String("wal-dir", cfg.WALDir()),
                )
            } else {
                plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
            }
        }
        snapshot, err = ss.Load()
        if err != nil && err != snap.ErrNoSnapshot {
            return nil, err
        }
        if snapshot != nil {
            if err = st.Recovery(snapshot.Data); err != nil {
                if cfg.Logger != nil {
                    cfg.Logger.Panic("failed to recover from snapshot")
                } else {
                    plog.Panicf("recovered store from snapshot error: %v", err)
                }
            }

            if cfg.Logger != nil {
                cfg.Logger.Info(
                    "recovered v2 store from snapshot",
                    zap.Uint64("snapshot-index", snapshot.Metadata.Index),
                    zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
                )
            } else {
                plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
            }

            if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
                if cfg.Logger != nil {
                    cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
                } else {
                    plog.Panicf("recovering backend from snapshot error: %v", err)
                }
            }
            if cfg.Logger != nil {
                s1, s2 := be.Size(), be.SizeInUse()
                cfg.Logger.Info(
                    "recovered v3 backend from snapshot",
                    zap.Int64("backend-size-bytes", s1),
                    zap.String("backend-size", humanize.Bytes(uint64(s1))),
                    zap.Int64("backend-size-in-use-bytes", s2),
                    zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
                )
            }
        }

        if !cfg.ForceNewCluster {
            id, cl, n, s, w = restartNode(cfg, snapshot)
        } else {
            id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
        }

        cl.SetStore(st)
        cl.SetBackend(be)
        cl.Recover(api.UpdateCapability)
        if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
            os.RemoveAll(bepath)
            return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
        }

    default:
        return nil, fmt.Errorf("unsupported bootstrap config")
    }

    if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
        return nil, fmt.Errorf("cannot access member directory: %v", terr)
    }

    sstats := stats.NewServerStats(cfg.Name, id.String())
    lstats := stats.NewLeaderStats(id.String())

    heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
    srv = &EtcdServer{
        readych:     make(chan struct{}),
        Cfg:         cfg,
        lgMu:        new(sync.RWMutex),
        lg:          cfg.Logger,
        errorc:      make(chan error, 1),
        v2store:     st,
        snapshotter: ss,
        r: *newRaftNode(
            raftNodeConfig{
                lg:          cfg.Logger,
                isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
                Node:        n,
                heartbeat:   heartbeat,
                raftStorage: s,
                storage:     NewStorage(w, ss),
            },
        ),
        id:               id,
        attributes:       membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
        cluster:          cl,
        stats:            sstats,
        lstats:           lstats,
        SyncTicker:       time.NewTicker(500 * time.Millisecond),
        peerRt:           prt,
        reqIDGen:         idutil.NewGenerator(uint16(id), time.Now()),
        forceVersionC:    make(chan struct{}),
        AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
    }
    serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)

    srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}

    srv.be = be
    minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
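    // minTTL works out to 1.5x the election timeout ((3/2) * ElectionTicks *
    // heartbeat). The likely intent, inferred from the arithmetic rather than
    // stated here: a lease must never expire faster than a leader election can
    // complete, so a lease granted just before a leader change cannot be
    // revoked prematurely.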
    // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
    // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
    srv.lessor = lease.NewLessor(
        srv.getLogger(),
        srv.be,
        lease.LessorConfig{
            MinLeaseTTL:                int64(math.Ceil(minTTL.Seconds())),
            CheckpointInterval:         cfg.LeaseCheckpointInterval,
            ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
        })
    srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
    if beExist {
        kvindex := srv.kv.ConsistentIndex()
        // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
        // etcd from pre-3.0 release.
        if snapshot != nil && kvindex < snapshot.Metadata.Index {
            if kvindex != 0 {
                return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index)
            }
            if cfg.Logger != nil {
                cfg.Logger.Warn(
                    "consistent index was never saved",
                    zap.Uint64("snapshot-index", snapshot.Metadata.Index),
                )
            } else {
                plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
            }
        }
    }
    newSrv := srv // since srv == nil in defer if srv is returned as nil
    defer func() {
        // closing backend without first closing kv can cause
        // resumed compactions to fail with closed tx errors
        if err != nil {
            newSrv.kv.Close()
        }
    }()

    srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
    tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken,
        func(index uint64) <-chan struct{} {
            return srv.applyWait.Wait(index)
        },
    )
    if err != nil {
        if cfg.Logger != nil {
            cfg.Logger.Warn("failed to create token provider", zap.Error(err))
        } else {
            plog.Errorf("failed to create token provider: %s", err)
        }
        return nil, err
    }
    srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost))
    if num := cfg.AutoCompactionRetention; num != 0 {
        srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
        if err != nil {
            return nil, err
        }
        srv.compactor.Run()
    }

    srv.applyV3Base = srv.newApplierV3Backend()
    if err = srv.restoreAlarms(); err != nil {
        return nil, err
    }

    if srv.Cfg.EnableLeaseCheckpoint {
        // setting checkpointer enables lease checkpoint feature.
        srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
            srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
        })
    }
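    // Background on the checkpointer (an inference; not documented in this
    // file): routing LeaseCheckpoint requests through raft persists the
    // remaining TTL of leases cluster-wide, so a newly elected leader resumes
    // leases from their checkpointed TTLs instead of resetting them to the
    // full duration.
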
    // TODO: move transport initialization near the definition of remote
    tr := &rafthttp.Transport{
        Logger:      cfg.Logger,
        TLSInfo:     cfg.PeerTLSInfo,
        DialTimeout: cfg.peerDialTimeout(),
        ID:          id,
        URLs:        cfg.PeerURLs,
        ClusterID:   cl.ID(),
        Raft:        srv,
        Snapshotter: ss,
        ServerStats: sstats,
        LeaderStats: lstats,
        ErrorC:      srv.errorc,
    }
    if err = tr.Start(); err != nil {
        return nil, err
    }
    // add all remotes into transport
    for _, m := range remotes {
        if m.ID != id {
            tr.AddRemote(m.ID, m.PeerURLs)
        }
    }
    for _, m := range cl.Members() {
        if m.ID != id {
            tr.AddPeer(m.ID, m.PeerURLs)
        }
    }
    srv.r.transport = tr

    return srv, nil
}
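
// A minimal usage sketch for NewServer (illustrative only; it assumes a
// ServerConfig populated elsewhere, e.g. by the embed package):
//
//     srv, err := NewServer(cfg)
//     if err != nil {
//         log.Fatal(err)
//     }
//     srv.Start()      // non-blocking; spawns the run loop and monitors
//     defer srv.Stop() // graceful stop; transfers leadership when leader
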
func (s *EtcdServer) getLogger() *zap.Logger {
    s.lgMu.RLock()
    l := s.lg
    s.lgMu.RUnlock()
    return l
}

func tickToDur(ticks int, tickMs uint) string {
    return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
}

func (s *EtcdServer) adjustTicks() {
    lg := s.getLogger()
    clusterN := len(s.cluster.Members())

    // single-node fresh start, or single node recovering from a snapshot
    if clusterN == 1 {
        ticks := s.Cfg.ElectionTicks - 1
        if lg != nil {
            lg.Info(
                "started as single-node; fast-forwarding election ticks",
                zap.String("local-member-id", s.ID().String()),
                zap.Int("forward-ticks", ticks),
                zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
                zap.Int("election-ticks", s.Cfg.ElectionTicks),
                zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
            )
        } else {
            plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
        }
        s.r.advanceTicks(ticks)
        return
    }

    if !s.Cfg.InitialElectionTickAdvance {
        if lg != nil {
            lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
        }
        return
    }
    if lg != nil {
        lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
    }

    // Retry for up to "rafthttp.ConnReadTimeout" (5s) until a peer connection
    // reports activity; otherwise (all connections failed, no active peers, or
    // a restarted single node with no snapshot), do nothing, because advancing
    // ticks would have no effect.
    waitTime := rafthttp.ConnReadTimeout
    itv := 50 * time.Millisecond
    for i := int64(0); i < int64(waitTime/itv); i++ {
        select {
        case <-time.After(itv):
        case <-s.stopping:
            return
        }

        peerN := s.r.transport.ActivePeers()
        if peerN > 1 {
            // multi-node cluster received peer connection reports;
            // adjust ticks in case leader messages are received slowly
            ticks := s.Cfg.ElectionTicks - 2
            if lg != nil {
                lg.Info(
                    "initialized peer connections; fast-forwarding election ticks",
                    zap.String("local-member-id", s.ID().String()),
                    zap.Int("forward-ticks", ticks),
                    zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
                    zap.Int("election-ticks", s.Cfg.ElectionTicks),
                    zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
                    zap.Int("active-remote-members", peerN),
                )
            } else {
                plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
            }

            s.r.advanceTicks(ticks)
            return
        }
    }
}

// Start performs any initialization of the Server necessary for it to
// begin serving requests. It must be called before Do or Process.
// Start must be non-blocking; any long-running server functionality
// should be implemented in goroutines.
func (s *EtcdServer) Start() {
    s.start()
    s.goAttach(func() { s.adjustTicks() })
    s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
    s.goAttach(s.purgeFile)
    s.goAttach(func() { monitorFileDescriptor(s.getLogger(), s.stopping) })
    s.goAttach(s.monitorVersions)
    s.goAttach(s.linearizableReadLoop)
    s.goAttach(s.monitorKVHash)
}
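
// Each helper above runs under goAttach (defined later in this file), which
// registers the goroutine with s.wg and rejects new goroutines once s.stopping
// is closed, so the shutdown path in run() can wait for all of them to exit
// before closing the WAL and backend.
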
// start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// This function is just used for testing.
func (s *EtcdServer) start() {
    lg := s.getLogger()

    if s.Cfg.SnapshotCount == 0 {
        if lg != nil {
            lg.Info(
                "updating snapshot-count to default",
                zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
                zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
            )
        } else {
            plog.Infof("set snapshot count to default %d", DefaultSnapshotCount)
        }
        s.Cfg.SnapshotCount = DefaultSnapshotCount
    }
    if s.Cfg.SnapshotCatchUpEntries == 0 {
        if lg != nil {
            lg.Info(
                "updating snapshot catch-up entries to default",
                zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
                zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
            )
        }
        s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
    }

    s.w = wait.New()
    s.applyWait = wait.NewTimeList()
    s.done = make(chan struct{})
    s.stop = make(chan struct{})
    s.stopping = make(chan struct{})
    s.ctx, s.cancel = context.WithCancel(context.Background())
    s.readwaitc = make(chan struct{}, 1)
    s.readNotifier = newNotifier()
    s.leaderChanged = make(chan struct{})

    if s.ClusterVersion() != nil {
        if lg != nil {
            lg.Info(
                "starting etcd server",
                zap.String("local-member-id", s.ID().String()),
                zap.String("local-server-version", version.Version),
                zap.String("cluster-id", s.Cluster().ID().String()),
                zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
            )
        } else {
            plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
        }
        membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": s.ClusterVersion().String()}).Set(1)
    } else {
        if lg != nil {
            lg.Info(
                "starting etcd server",
                zap.String("local-member-id", s.ID().String()),
                zap.String("local-server-version", version.Version),
                zap.String("cluster-version", "to_be_decided"),
            )
        } else {
            plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
        }
    }

    // TODO: if this is an empty log, writes all peer infos
    // into the first entry
    go s.run()
}
func (s *EtcdServer) purgeFile() {
    var dberrc, serrc, werrc <-chan error
    if s.Cfg.MaxSnapFiles > 0 {
        dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
        serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
    }
    if s.Cfg.MaxWALFiles > 0 {
        werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
    }

    lg := s.getLogger()
    select {
    case e := <-dberrc:
        if lg != nil {
            lg.Fatal("failed to purge snap db file", zap.Error(e))
        } else {
            plog.Fatalf("failed to purge snap db file %v", e)
        }
    case e := <-serrc:
        if lg != nil {
            lg.Fatal("failed to purge snap file", zap.Error(e))
        } else {
            plog.Fatalf("failed to purge snap file %v", e)
        }
    case e := <-werrc:
        if lg != nil {
            lg.Fatal("failed to purge wal file", zap.Error(e))
        } else {
            plog.Fatalf("failed to purge wal file %v", e)
        }
    case <-s.stopping:
        return
    }
}
func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }

func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }

type ServerPeer interface {
    ServerV2
    RaftHandler() http.Handler
    LeaseHandler() http.Handler
}

func (s *EtcdServer) LeaseHandler() http.Handler {
    if s.lessor == nil {
        return nil
    }
    return leasehttp.NewHandler(s.lessor, s.ApplyWait)
}

func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }

// Process takes a raft message and applies it to the server's raft state
// machine, respecting any timeout of the given context.
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
    if s.cluster.IsIDRemoved(types.ID(m.From)) {
        if lg := s.getLogger(); lg != nil {
            lg.Warn(
                "rejected Raft message from removed member",
                zap.String("local-member-id", s.ID().String()),
                zap.String("removed-member-id", types.ID(m.From).String()),
            )
        } else {
            plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
        }
        return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
    }
    if m.Type == raftpb.MsgApp {
        s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
    }
    return s.r.Step(ctx, m)
}

func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }

func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }

// ReportSnapshot reports snapshot sent status to the raft state machine,
// and clears the used snapshot from the snapshot store.
func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
    s.r.ReportSnapshot(id, status)
}

type etcdProgress struct {
    confState raftpb.ConfState
    snapi     uint64
    appliedt  uint64
    appliedi  uint64
}

// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
// and helps decouple state machine logic from Raft algorithms.
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
type raftReadyHandler struct {
    getLead              func() (lead uint64)
    updateLead           func(lead uint64)
    updateLeadership     func(newLeader bool)
    updateCommittedIndex func(uint64)
}
func (s *EtcdServer) run() {
    lg := s.getLogger()

    sn, err := s.r.raftStorage.Snapshot()
    if err != nil {
        if lg != nil {
            lg.Panic("failed to get snapshot from Raft storage", zap.Error(err))
        } else {
            plog.Panicf("get snapshot from raft storage error: %v", err)
        }
    }

    // asynchronously accept apply packets, dispatch progress in-order
    sched := schedule.NewFIFOScheduler()

    var (
        smu   sync.RWMutex
        syncC <-chan time.Time
    )
    setSyncC := func(ch <-chan time.Time) {
        smu.Lock()
        syncC = ch
        smu.Unlock()
    }
    getSyncC := func() (ch <-chan time.Time) {
        smu.RLock()
        ch = syncC
        smu.RUnlock()
        return
    }
    rh := &raftReadyHandler{
        getLead:    func() (lead uint64) { return s.getLead() },
        updateLead: func(lead uint64) { s.setLead(lead) },
        updateLeadership: func(newLeader bool) {
            if !s.isLeader() {
                if s.lessor != nil {
                    s.lessor.Demote()
                }
                if s.compactor != nil {
                    s.compactor.Pause()
                }
                setSyncC(nil)
            } else {
                if newLeader {
                    t := time.Now()
                    s.leadTimeMu.Lock()
                    s.leadElectedTime = t
                    s.leadTimeMu.Unlock()
                }
                setSyncC(s.SyncTicker.C)
                if s.compactor != nil {
                    s.compactor.Resume()
                }
            }
            if newLeader {
                s.leaderChangedMu.Lock()
                lc := s.leaderChanged
                s.leaderChanged = make(chan struct{})
                close(lc)
                s.leaderChangedMu.Unlock()
            }
            // TODO: remove the nil checking
            // current test utility does not provide the stats
            if s.stats != nil {
                s.stats.BecomeLeader()
            }
        },
        updateCommittedIndex: func(ci uint64) {
            cci := s.getCommittedIndex()
            if ci > cci {
                s.setCommittedIndex(ci)
            }
        },
    }
    s.r.start(rh)

    ep := etcdProgress{
        confState: sn.Metadata.ConfState,
        snapi:     sn.Metadata.Index,
        appliedt:  sn.Metadata.Term,
        appliedi:  sn.Metadata.Index,
    }

    defer func() {
        s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
        close(s.stopping)
        s.wgMu.Unlock()
        s.cancel()

        sched.Stop()

        // wait for goroutines before closing raft so wal stays open
        s.wg.Wait()

        s.SyncTicker.Stop()

        // must stop raft after scheduler; etcdserver can leak rafthttp pipelines
        // by adding a peer after raft stops the transport
        s.r.stop()

        // kv, lessor and backend can be nil if running without v3 enabled
        // or running unit tests.
        if s.lessor != nil {
            s.lessor.Stop()
        }
        if s.kv != nil {
            s.kv.Close()
        }
        if s.authStore != nil {
            s.authStore.Close()
        }
        if s.be != nil {
            s.be.Close()
        }
        if s.compactor != nil {
            s.compactor.Stop()
        }
        close(s.done)
    }()
    var expiredLeaseC <-chan []*lease.Lease
    if s.lessor != nil {
        expiredLeaseC = s.lessor.ExpiredLeasesC()
    }

    for {
        select {
        case ap := <-s.r.apply():
            f := func(context.Context) { s.applyAll(&ep, &ap) }
            sched.Schedule(f)
        case leases := <-expiredLeaseC:
            s.goAttach(func() {
                // increase throughput of expired-lease deletion through parallelization
                c := make(chan struct{}, maxPendingRevokes)
                for _, lease := range leases {
                    select {
                    case c <- struct{}{}:
                    case <-s.stopping:
                        return
                    }
                    lid := lease.ID
                    s.goAttach(func() {
                        ctx := s.authStore.WithRoot(s.ctx)
                        _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
                        if lerr == nil {
                            leaseExpired.Inc()
                        } else {
                            if lg != nil {
                                lg.Warn(
                                    "failed to revoke lease",
                                    zap.String("lease-id", fmt.Sprintf("%016x", lid)),
                                    zap.Error(lerr),
                                )
                            } else {
                                plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
                            }
                        }
                        <-c
                    })
                }
            })
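            // The buffered channel c above acts as a counting semaphore: the
            // loop acquires a slot by sending into c before spawning each
            // revocation goroutine, and each goroutine releases its slot with
            // <-c when it finishes, capping concurrency at maxPendingRevokes.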
        case err := <-s.errorc:
            if lg != nil {
                lg.Warn("server error", zap.Error(err))
                lg.Warn("data-dir used by this member must be removed")
            } else {
                plog.Errorf("%s", err)
                plog.Infof("the data-dir used by this member must be removed.")
            }
            return
        case <-getSyncC():
            if s.v2store.HasTTLKeys() {
                s.sync(s.Cfg.ReqTimeout())
            }
        case <-s.stop:
            return
        }
    }
}

func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
    s.applySnapshot(ep, apply)
    s.applyEntries(ep, apply)

    proposalsApplied.Set(float64(ep.appliedi))
    s.applyWait.Trigger(ep.appliedi)

    // wait for the raft routine to finish the disk writes before triggering a
    // snapshot; otherwise the applied index might be greater than the last index
    // in raft storage, since the raft routine might be slower than the apply routine.
    <-apply.notifyc

    s.triggerSnapshot(ep)
    select {
    // snapshot requested via send()
    case m := <-s.r.msgSnapC:
        merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
        s.sendMergedSnap(merged)
    default:
    }
}
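
// Ordering note: applySnapshot runs before applyEntries because a snapshot,
// when present, supersedes entries at or below its index. applyWait.Trigger
// then releases callers blocked in applyWait.Wait(index) up to the new applied
// index; the auth token provider created in NewServer relies on this.
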
func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
    if raft.IsEmptySnap(apply.snapshot) {
        return
    }
    applySnapshotInProgress.Inc()

    lg := s.getLogger()
    if lg != nil {
        lg.Info(
            "applying snapshot",
            zap.Uint64("current-snapshot-index", ep.snapi),
            zap.Uint64("current-applied-index", ep.appliedi),
            zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
            zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
        )
    } else {
        plog.Infof("applying snapshot at index %d...", ep.snapi)
    }
    defer func() {
        if lg != nil {
            lg.Info(
                "applied snapshot",
                zap.Uint64("current-snapshot-index", ep.snapi),
                zap.Uint64("current-applied-index", ep.appliedi),
                zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
                zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
            )
        } else {
            plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
        }
        applySnapshotInProgress.Dec()
    }()

    if apply.snapshot.Metadata.Index <= ep.appliedi {
        if lg != nil {
            lg.Panic(
                "unexpected leader snapshot from outdated index",
                zap.Uint64("current-snapshot-index", ep.snapi),
                zap.Uint64("current-applied-index", ep.appliedi),
                zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
                zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
            )
        } else {
            plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
                apply.snapshot.Metadata.Index, ep.appliedi)
        }
    }

    // wait for raftNode to persist snapshot onto the disk
    <-apply.notifyc

    newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
    if err != nil {
        if lg != nil {
            lg.Panic("failed to open snapshot backend", zap.Error(err))
        } else {
            plog.Panic(err)
        }
    }

    // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
    // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
    if s.lessor != nil {
        if lg != nil {
            lg.Info("restoring lease store")
        } else {
            plog.Info("recovering lessor...")
        }

        s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })

        if lg != nil {
            lg.Info("restored lease store")
        } else {
            plog.Info("finished recovering lessor")
        }
    }

    if lg != nil {
        lg.Info("restoring mvcc store")
    } else {
        plog.Info("restoring mvcc store...")
    }

    if err := s.kv.Restore(newbe); err != nil {
        if lg != nil {
            lg.Panic("failed to restore mvcc store", zap.Error(err))
        } else {
            plog.Panicf("restore KV error: %v", err)
        }
    }
    s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())

    if lg != nil {
        lg.Info("restored mvcc store")
    } else {
        plog.Info("finished restoring mvcc store")
    }

    // Closing old backend might block until all the txns
    // on the backend are finished.
    // We do not want to wait on closing the old backend.
    s.bemu.Lock()
    oldbe := s.be
    go func() {
        if lg != nil {
            lg.Info("closing old backend file")
        } else {
            plog.Info("closing old backend...")
        }
        defer func() {
            if lg != nil {
                lg.Info("closed old backend file")
            } else {
                plog.Info("finished closing old backend")
            }
        }()
        if err := oldbe.Close(); err != nil {
            if lg != nil {
                lg.Panic("failed to close old backend", zap.Error(err))
            } else {
                plog.Panicf("close backend error: %v", err)
            }
        }
    }()

    s.be = newbe
    s.bemu.Unlock()

    if lg != nil {
        lg.Info("restoring alarm store")
    } else {
        plog.Info("recovering alarms...")
    }

    if err := s.restoreAlarms(); err != nil {
        if lg != nil {
            lg.Panic("failed to restore alarm store", zap.Error(err))
        } else {
            plog.Panicf("restore alarms error: %v", err)
        }
    }

    if lg != nil {
        lg.Info("restored alarm store")
    } else {
        plog.Info("finished recovering alarms")
    }

    if s.authStore != nil {
        if lg != nil {
            lg.Info("restoring auth store")
        } else {
            plog.Info("recovering auth store...")
        }

        s.authStore.Recover(newbe)

        if lg != nil {
            lg.Info("restored auth store")
        } else {
            plog.Info("finished recovering auth store")
        }
    }

    if lg != nil {
        lg.Info("restoring v2 store")
    } else {
        plog.Info("recovering store v2...")
    }

    if err := s.v2store.Recovery(apply.snapshot.Data); err != nil {
        if lg != nil {
            lg.Panic("failed to restore v2 store", zap.Error(err))
        } else {
            plog.Panicf("recovery store error: %v", err)
        }
    }

    if lg != nil {
        lg.Info("restored v2 store")
    } else {
        plog.Info("finished recovering store v2")
    }

    s.cluster.SetBackend(newbe)

    if lg != nil {
        lg.Info("restoring cluster configuration")
    } else {
        plog.Info("recovering cluster configuration...")
    }

    s.cluster.Recover(api.UpdateCapability)

    if lg != nil {
        lg.Info("restored cluster configuration")
        lg.Info("removing old peers from network")
    } else {
        plog.Info("finished recovering cluster configuration")
        plog.Info("removing old peers from network...")
    }

    // recover raft transport
    s.r.transport.RemoveAllPeers()

    if lg != nil {
        lg.Info("removed old peers from network")
        lg.Info("adding peers from new cluster configuration")
    } else {
        plog.Info("finished removing old peers from network")
        plog.Info("adding peers from new cluster configuration into network...")
    }

    for _, m := range s.cluster.Members() {
        if m.ID == s.ID() {
            continue
        }
        s.r.transport.AddPeer(m.ID, m.PeerURLs)
    }

    if lg != nil {
        lg.Info("added peers from new cluster configuration")
    } else {
        plog.Info("finished adding peers from new cluster configuration into network...")
    }

    ep.appliedt = apply.snapshot.Metadata.Term
    ep.appliedi = apply.snapshot.Metadata.Index
    ep.snapi = ep.appliedi
    ep.confState = apply.snapshot.Metadata.ConfState
}
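
// Recovery order in applySnapshot, for reference: lessor -> mvcc store ->
// backend swap (old backend closed asynchronously) -> alarms -> auth store ->
// v2 store -> cluster configuration -> raft transport peers. As noted above,
// the lessor must come first so the restored mvcc.KV reattaches keys to the
// correct leases.
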
func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
    if len(apply.entries) == 0 {
        return
    }
    firsti := apply.entries[0].Index
    if firsti > ep.appliedi+1 {
        if lg := s.getLogger(); lg != nil {
            lg.Panic(
                "unexpected committed entry index",
                zap.Uint64("current-applied-index", ep.appliedi),
                zap.Uint64("first-committed-entry-index", firsti),
            )
        } else {
            plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
        }
    }
    var ents []raftpb.Entry
    if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
        ents = apply.entries[ep.appliedi+1-firsti:]
    }
    if len(ents) == 0 {
        return
    }
    var shouldstop bool
    if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
        go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
    }
}
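
// Two details worth noting (inferred from the code above): the slice expression
// apply.entries[ep.appliedi+1-firsti:] drops entries that were already applied,
// and 10*100*time.Millisecond is a one-second delay, presumably to let the
// response for the removal request reach the client before this member shuts
// itself down.
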
func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
    if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount {
        return
    }

    if lg := s.getLogger(); lg != nil {
        lg.Info(
            "triggering snapshot",
            zap.String("local-member-id", s.ID().String()),
            zap.Uint64("local-member-applied-index", ep.appliedi),
            zap.Uint64("local-member-snapshot-index", ep.snapi),
            zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
        )
    } else {
        plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
    }

    s.snapshot(ep.appliedi, ep.confState)
    ep.snapi = ep.appliedi
}

func (s *EtcdServer) hasMultipleVotingMembers() bool {
    return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
}

func (s *EtcdServer) isLeader() bool {
    return uint64(s.ID()) == s.Lead()
}

// MoveLeader transfers the leader to the given transferee.
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
	if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner {
		return ErrBadLeaderTransferee
	}

	now := time.Now()
	interval := time.Duration(s.Cfg.TickMs) * time.Millisecond

	if lg := s.getLogger(); lg != nil {
		lg.Info(
			"leadership transfer starting",
			zap.String("local-member-id", s.ID().String()),
			zap.String("current-leader-member-id", types.ID(lead).String()),
			zap.String("transferee-member-id", types.ID(transferee).String()),
		)
	} else {
		plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
	}

	s.r.TransferLeadership(ctx, lead, transferee)
	for s.Lead() != transferee {
		select {
		case <-ctx.Done(): // time out
			return ErrTimeoutLeaderTransfer
		case <-time.After(interval):
		}
	}

	// TODO: drain all requests, or drop all messages to the old leader
	if lg := s.getLogger(); lg != nil {
		lg.Info(
			"leadership transfer finished",
			zap.String("local-member-id", s.ID().String()),
			zap.String("old-leader-member-id", types.ID(lead).String()),
			zap.String("new-leader-member-id", types.ID(transferee).String()),
			zap.Duration("took", time.Since(now)),
		)
	} else {
		plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
	}
	return nil
}
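
// Illustrative call site (a sketch; targetID is a hypothetical member ID chosen
// by the caller, and error handling is abbreviated):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := s.MoveLeader(ctx, s.Lead(), uint64(targetID)); err != nil {
//		// ErrBadLeaderTransferee: target is missing or a learner;
//		// ErrTimeoutLeaderTransfer: ctx expired before the transferee led.
//	}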

// TransferLeadership transfers the leader to the chosen transferee.
func (s *EtcdServer) TransferLeadership() error {
	if !s.isLeader() {
		if lg := s.getLogger(); lg != nil {
			lg.Info(
				"skipped leadership transfer; local server is not leader",
				zap.String("local-member-id", s.ID().String()),
				zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
			)
		} else {
			plog.Printf("skipped leadership transfer for stopping non-leader member")
		}
		return nil
	}

	if !s.hasMultipleVotingMembers() {
		if lg := s.getLogger(); lg != nil {
			lg.Info(
				"skipped leadership transfer for single voting member cluster",
				zap.String("local-member-id", s.ID().String()),
				zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
			)
		} else {
			plog.Printf("skipped leadership transfer for single voting member cluster")
		}
		return nil
	}

	transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
	if !ok {
		return ErrUnhealthy
	}

	tm := s.Cfg.ReqTimeout()
	ctx, cancel := context.WithTimeout(s.ctx, tm)
	err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
	cancel()
	return err
}
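
// The transferee above is picked by longestConnected, which favors the voting
// member that has been continuously connected to the local member the longest,
// as a simple heuristic for a healthy successor; if no such peer exists the
// transfer is refused with ErrUnhealthy rather than handing leadership to an
// unreachable node.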

// HardStop stops the server without coordination with other members in the cluster.
func (s *EtcdServer) HardStop() {
	select {
	case s.stop <- struct{}{}:
	case <-s.done:
		return
	}
	<-s.done
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after a Start(s), otherwise it will block forever.
// When stopping the leader, Stop transfers its leadership to one of its peers
// before stopping the server.
// Stop terminates the Server and performs any necessary finalization.
// Do and Process cannot be called after Stop has been invoked.
func (s *EtcdServer) Stop() {
	if err := s.TransferLeadership(); err != nil {
		if lg := s.getLogger(); lg != nil {
			lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err))
		} else {
			plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
		}
	}
	s.HardStop()
}

// ReadyNotify returns a channel that will be closed when the server
// is ready to serve client requests
func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }

func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
	select {
	case <-time.After(d):
	case <-s.done:
	}
	select {
	case s.errorc <- err:
	default:
	}
}

// StopNotify returns a channel that receives an empty struct
// when the server is stopped.
func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }

func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }

func (s *EtcdServer) LeaderStats() []byte {
	lead := s.getLead()
	if lead != uint64(s.id) {
		return nil
	}
	return s.lstats.JSON()
}

func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() }

func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
	if s.authStore == nil {
		// In the context of an ordinary etcd process, s.authStore will never be nil.
		// This branch is for handling cases in server_test.go
		return nil
	}

	// Note that this permission check is done in the API layer,
	// so a TOCTOU problem can potentially arise with a schedule like this:
	// update membership with user A -> revoke root role of A -> apply membership
	// change in the state machine layer.
	// However, both membership changes and role management require the root
	// privilege, so careful operation by admins can prevent the problem.
	authInfo, err := s.AuthInfoFromCtx(ctx)
	if err != nil {
		return err
	}

	return s.AuthStore().IsAdminPermitted(authInfo)
}

func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}

	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return nil, err
	}

	// by default StrictReconfigCheck is enabled; reject new members if unhealthy.
	if err := s.mayAddMember(memb); err != nil {
		return nil, err
	}

	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}

	if memb.IsLearner {
		cc.Type = raftpb.ConfChangeAddLearnerNode
	}

	return s.configure(ctx, cc)
}
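
// Illustrative flow (a sketch; in etcd proper the Member value is built by the
// member-add API handler, and newID/peerURLs here are hypothetical):
//
//	m := membership.Member{
//		ID:             newID,
//		RaftAttributes: membership.RaftAttributes{PeerURLs: peerURLs, IsLearner: true},
//	}
//	membs, err := s.AddMember(ctx, m) // proposes ConfChangeAddLearnerNode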

func (s *EtcdServer) mayAddMember(memb membership.Member) error {
	if !s.Cfg.StrictReconfigCheck {
		return nil
	}

	// protect quorum when adding voting member
	if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member add request; not enough healthy members",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
				zap.Error(ErrNotEnoughStartedMembers),
			)
		} else {
			plog.Warningf("not enough started members, rejecting member add %+v", memb)
		}
		return ErrNotEnoughStartedMembers
	}

	if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
				zap.Error(ErrUnhealthy),
			)
		} else {
			plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
		}
		return ErrUnhealthy
	}

	return nil
}

func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}

	// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
	if err := s.mayRemoveMember(types.ID(id)); err != nil {
		return nil, err
	}

	cc := raftpb.ConfChange{
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

// PromoteMember promotes a learner node to a voting node.
func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
	// Only the raft leader knows whether the to-be-promoted learner node is
	// ready. If the promoteMember call fails with ErrNotLeader, forward the
	// request to the leader node via HTTP. If it fails with any error other
	// than ErrNotLeader, return the error.
	resp, err := s.promoteMember(ctx, id)
	if err == nil {
		learnerPromoteSucceed.Inc()
		return resp, nil
	}
	if err != ErrNotLeader {
		learnerPromoteFailed.WithLabelValues(err.Error()).Inc()
		return resp, err
	}

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()
	// forward to leader
	for cctx.Err() == nil {
		leader, err := s.waitLeader(cctx)
		if err != nil {
			return nil, err
		}
		for _, url := range leader.PeerURLs {
			resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt)
			if err == nil {
				return resp, nil
			}
			// If member promotion failed, return early. Otherwise keep retrying.
			if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner {
				return nil, err
			}
		}
	}

	if cctx.Err() == context.DeadlineExceeded {
		return nil, ErrTimeout
	}
	return nil, ErrCanceled
}

// promoteMember checks whether the to-be-promoted learner node is ready before sending the promote
// request to raft.
// The function returns ErrNotLeader if the local node is not the raft leader (and therefore does not
// have enough information to determine if the learner node is ready), and returns ErrLearnerNotReady
// if the local node is the leader (and therefore has enough information) but decided the learner node
// is not ready to be promoted.
func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}

	// check if we can promote this learner.
	if err := s.mayPromoteMember(types.ID(id)); err != nil {
		return nil, err
	}

	// build the context for the promote confChange. mark IsLearner to false and IsPromote to true.
	promoteChangeContext := membership.ConfigChangeContext{
		Member: membership.Member{
			ID: types.ID(id),
		},
		IsPromote: true,
	}

	b, err := json.Marshal(promoteChangeContext)
	if err != nil {
		return nil, err
	}

	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  id,
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) mayPromoteMember(id types.ID) error {
	err := s.isLearnerReady(uint64(id))
	if err != nil {
		return err
	}

	if !s.Cfg.StrictReconfigCheck {
		return nil
	}
	if !s.cluster.IsReadyToPromoteMember(uint64(id)) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member promote request; not enough healthy members",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-promote-id", id.String()),
				zap.Error(ErrNotEnoughStartedMembers),
			)
		} else {
			plog.Warningf("not enough started members, rejecting promote member %s", id)
		}
		return ErrNotEnoughStartedMembers
	}

	return nil
}

// isLearnerReady checks whether the learner has caught up with the leader.
// Note: it returns nil if the member is not found in the cluster or if the
// member is not a learner. These two conditions are checked again later,
// before the apply phase.
func (s *EtcdServer) isLearnerReady(id uint64) error {
	rs := s.raftStatus()

	// leader's raftStatus.Progress is not nil
	if rs.Progress == nil {
		return ErrNotLeader
	}

	var learnerMatch uint64
	isFound := false
	leaderID := rs.ID
	for memberID, progress := range rs.Progress {
		if id == memberID {
			// check its status
			learnerMatch = progress.Match
			isFound = true
			break
		}
	}

	if isFound {
		leaderMatch := rs.Progress[leaderID].Match
		// the learner's Match has not caught up with the leader yet
		if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
			return ErrLearnerNotReady
		}
	}

	return nil
}
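
// For illustration, assuming readyPercent is 0.9: with the leader's Match at
// index 1000, a learner whose Match is 950 is considered caught up, while one
// at 850 (below 900) is rejected with ErrLearnerNotReady.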

func (s *EtcdServer) mayRemoveMember(id types.ID) error {
	if !s.Cfg.StrictReconfigCheck {
		return nil
	}

	isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner
	// no need to check quorum when removing a non-voting member
	if isLearner {
		return nil
	}

	if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member remove request; not enough healthy members",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-remove-id", id.String()),
				zap.Error(ErrNotEnoughStartedMembers),
			)
		} else {
			plog.Warningf("not enough started members, rejecting remove member %s", id)
		}
		return ErrNotEnoughStartedMembers
	}

	// a downed member is safe to remove since it's not part of the active quorum
	if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
		return nil
	}

	// protect quorum if some members are down
	m := s.cluster.VotingMembers()
	active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
	if (active - 1) < 1+((len(m)-1)/2) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-remove", id.String()),
				zap.Int("active-peers", active),
				zap.Error(ErrUnhealthy),
			)
		} else {
			plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
		}
		return ErrUnhealthy
	}

	return nil
}
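
// Worked example of the final quorum check: with 5 voting members the
// post-removal quorum is 1+((5-1)/2) = 3. If 4 of the 5 are actively
// connected, (active - 1) = 3 is not below 3 and the removal proceeds; if only
// 3 are connected, (active - 1) = 2 < 3 and the request fails with ErrUnhealthy.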

func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
	b, merr := json.Marshal(memb)
	if merr != nil {
		return nil, merr
	}

	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}
	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeUpdateNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) setCommittedIndex(v uint64) {
	atomic.StoreUint64(&s.committedIndex, v)
}

func (s *EtcdServer) getCommittedIndex() uint64 {
	return atomic.LoadUint64(&s.committedIndex)
}

func (s *EtcdServer) setAppliedIndex(v uint64) {
	atomic.StoreUint64(&s.appliedIndex, v)
}

func (s *EtcdServer) getAppliedIndex() uint64 {
	return atomic.LoadUint64(&s.appliedIndex)
}

func (s *EtcdServer) setTerm(v uint64) {
	atomic.StoreUint64(&s.term, v)
}

func (s *EtcdServer) getTerm() uint64 {
	return atomic.LoadUint64(&s.term)
}

func (s *EtcdServer) setLead(v uint64) {
	atomic.StoreUint64(&s.lead, v)
}

func (s *EtcdServer) getLead() uint64 {
	return atomic.LoadUint64(&s.lead)
}

func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
	s.leaderChangedMu.RLock()
	defer s.leaderChangedMu.RUnlock()
	return s.leaderChanged
}

// RaftStatusGetter represents etcd server and Raft progress.
type RaftStatusGetter interface {
	ID() types.ID
	Leader() types.ID
	CommittedIndex() uint64
	AppliedIndex() uint64
	Term() uint64
}

func (s *EtcdServer) ID() types.ID { return s.id }

func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }

func (s *EtcdServer) Lead() uint64 { return s.getLead() }

func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() }

func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }

func (s *EtcdServer) Term() uint64 { return s.getTerm() }

type confChangeResponse struct {
	membs []*membership.Member
	err   error
}

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
	cc.ID = s.reqIDGen.Next()
	ch := s.w.Register(cc.ID)

	start := time.Now()
	if err := s.r.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return nil, err
	}

	select {
	case x := <-ch:
		if x == nil {
			if lg := s.getLogger(); lg != nil {
				lg.Panic("failed to configure")
			} else {
				plog.Panicf("configure trigger value should never be nil")
			}
		}
		resp := x.(*confChangeResponse)
		if lg := s.getLogger(); lg != nil {
			lg.Info(
				"applied a configuration change through raft",
				zap.String("local-member-id", s.ID().String()),
				zap.String("raft-conf-change", cc.Type.String()),
				zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
			)
		}
		return resp.membs, resp.err

	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return nil, s.parseProposeCtxErr(ctx.Err(), start)

	case <-s.stopping:
		return nil, ErrStopped
	}
}
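
// configure relies on the same wait-registry handshake as normal proposals:
// the conf change ID is registered before proposing, the apply loop answers it
// via s.w.Trigger(cc.ID, &confChangeResponse{...}) once the entry is applied
// (see apply below), and the nil Trigger calls on the error paths exist only
// to unblock and garbage-collect the registration.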

// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be canceled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	req := pb.Request{
		Method: "SYNC",
		ID:     s.reqIDGen.Next(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no guarantee that the node has a leader when it makes the SYNC
	// request, so a goroutine is used to propose instead of blocking the caller.
	ctx, cancel := context.WithTimeout(s.ctx, timeout)
	s.goAttach(func() {
		s.r.Propose(ctx, data)
		cancel()
	})
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
//
// Member attributes are encoded in the v2 store and applied through Raft,
// but they do not go through the v2 API endpoint. This means that even with
// the v2 client handler disabled (e.g. --enable-v2=false), the cluster can
// still process publish requests through rafthttp.
//
// TODO: Deprecate v2 store
func (s *EtcdServer) publish(timeout time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		if lg := s.getLogger(); lg != nil {
			lg.Panic("failed to marshal JSON", zap.Error(err))
		} else {
			plog.Panicf("json marshal error: %v", err)
		}
		return
	}

	req := pb.Request{
		Method: "PUT",
		Path:   membership.MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(s.ctx, timeout)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			close(s.readych)
			if lg := s.getLogger(); lg != nil {
				lg.Info(
					"published local member to cluster through raft",
					zap.String("local-member-id", s.ID().String()),
					zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
					zap.String("request-path", req.Path),
					zap.String("cluster-id", s.cluster.ID().String()),
					zap.Duration("publish-timeout", timeout),
				)
			} else {
				plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
			}
			return

		case ErrStopped:
			if lg := s.getLogger(); lg != nil {
				lg.Warn(
					"stopped publish because server is stopped",
					zap.String("local-member-id", s.ID().String()),
					zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
					zap.Duration("publish-timeout", timeout),
					zap.Error(err),
				)
			} else {
				plog.Infof("aborting publish because server is stopped")
			}
			return

		default:
			if lg := s.getLogger(); lg != nil {
				lg.Warn(
					"failed to publish local member to cluster through raft",
					zap.String("local-member-id", s.ID().String()),
					zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
					zap.String("request-path", req.Path),
					zap.Duration("publish-timeout", timeout),
					zap.Error(err),
				)
			} else {
				plog.Errorf("publish error: %v", err)
			}
		}
	}
}
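
// Note that the successful case above is also what makes ReadyNotify fire:
// the first publish that goes through closes s.readych, so callers gated on
// ReadyNotify() only see the server after it has registered itself with the
// cluster.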

func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
	atomic.AddInt64(&s.inflightSnapshots, 1)

	lg := s.getLogger()
	fields := []zap.Field{
		zap.String("from", s.ID().String()),
		zap.String("to", types.ID(merged.To).String()),
		zap.Int64("bytes", merged.TotalSize),
		zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
	}

	now := time.Now()
	s.r.transport.SendSnapshot(merged)
	if lg != nil {
		lg.Info("sending merged snapshot", fields...)
	}

	s.goAttach(func() {
		select {
		case ok := <-merged.CloseNotify():
			// delay releasing inflight snapshot for another 30 seconds to
			// block log compaction.
			// If the follower still fails to catch up, it is probably just too slow
			// to catch up. We cannot avoid the snapshot cycle anyway.
			if ok {
				select {
				case <-time.After(releaseDelayAfterSnapshot):
				case <-s.stopping:
				}
			}

			atomic.AddInt64(&s.inflightSnapshots, -1)

			if lg != nil {
				lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...)
			}

		case <-s.stopping:
			if lg != nil {
				lg.Warn("canceled sending merged snapshot; server stopping", fields...)
			}
			return
		}
	})
}

// apply takes entries received from Raft (after they have been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(
	es []raftpb.Entry,
	confState *raftpb.ConfState,
) (appliedt uint64, appliedi uint64, shouldStop bool) {
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			s.applyEntryNormal(&e)
			s.setAppliedIndex(e.Index)
			s.setTerm(e.Term)

		case raftpb.EntryConfChange:
			// set the consistent index of the currently executing entry
			if e.Index > s.consistIndex.ConsistentIndex() {
				s.consistIndex.setConsistentIndex(e.Index)
			}
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			removedSelf, err := s.applyConfChange(cc, confState)
			s.setAppliedIndex(e.Index)
			s.setTerm(e.Term)
			shouldStop = shouldStop || removedSelf
			s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})

		default:
			if lg := s.getLogger(); lg != nil {
				lg.Panic(
					"unknown entry type; must be either EntryNormal or EntryConfChange",
					zap.String("type", e.Type.String()),
				)
			} else {
				plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
			}
		}
		appliedi, appliedt = e.Index, e.Term
	}
	return appliedt, appliedi, shouldStop
}

// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
	shouldApplyV3 := false
	if e.Index > s.consistIndex.ConsistentIndex() {
		// set the consistent index of the currently executing entry
		s.consistIndex.setConsistentIndex(e.Index)
		shouldApplyV3 = true
	}

	// the raft state machine may generate a noop entry on leader confirmation.
	// skip it in advance to avoid some potential bug in the future
	if len(e.Data) == 0 {
		select {
		case s.forceVersionC <- struct{}{}:
		default:
		}
		// promote lessor when the local member is leader and finished
		// applying all entries from the last term.
		if s.isLeader() {
			s.lessor.Promote(s.Cfg.electionTimeout())
		}
		return
	}

	var raftReq pb.InternalRaftRequest
	if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
		var r pb.Request
		rp := &r
		pbutil.MustUnmarshal(rp, e.Data)
		s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
		return
	}
	if raftReq.V2 != nil {
		req := (*RequestV2)(raftReq.V2)
		s.w.Trigger(req.ID, s.applyV2Request(req))
		return
	}

	// do not re-apply applied entries.
	if !shouldApplyV3 {
		return
	}

	id := raftReq.ID
	if id == 0 {
		id = raftReq.Header.ID
	}

	var ar *applyResult
	needResult := s.w.IsRegistered(id)
	if needResult || !noSideEffect(&raftReq) {
		if !needResult && raftReq.Txn != nil {
			removeNeedlessRangeReqs(raftReq.Txn)
		}
		ar = s.applyV3.Apply(&raftReq)
	}

	if ar == nil {
		return
	}

	if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
		s.w.Trigger(id, ar)
		return
	}

	if lg := s.getLogger(); lg != nil {
		lg.Warn(
			"message exceeded backend quota; raising alarm",
			zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
			zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
			zap.Error(ar.err),
		)
	} else {
		plog.Errorf("applying raft message exceeded backend quota")
	}

	s.goAttach(func() {
		a := &pb.AlarmRequest{
			MemberID: uint64(s.ID()),
			Action:   pb.AlarmRequest_ACTIVATE,
			Alarm:    pb.AlarmType_NOSPACE,
		}
		s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
		s.w.Trigger(id, ar)
	})
}
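
// The tail of applyEntryNormal handles the out-of-space path: when an apply
// fails with ErrNoSpace and no NOSPACE alarm is active yet, the server first
// proposes an AlarmRequest_ACTIVATE through raft and only answers the waiter
// afterwards; once the alarm is applied, writes are served by the capped
// applier (the same wrapping restoreAlarms re-creates on restart).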

// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
	if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
		cc.NodeID = raft.None
		s.r.ApplyConfChange(cc)
		return false, err
	}

	lg := s.getLogger()
	*confState = *s.r.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
		confChangeContext := new(membership.ConfigChangeContext)
		if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
			if lg != nil {
				lg.Panic("failed to unmarshal member", zap.Error(err))
			} else {
				plog.Panicf("unmarshal member should never fail: %v", err)
			}
		}
		if cc.NodeID != uint64(confChangeContext.Member.ID) {
			if lg != nil {
				lg.Panic(
					"got different member ID",
					zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
					zap.String("member-id-from-message", confChangeContext.Member.ID.String()),
				)
			} else {
				plog.Panicf("nodeID should always be equal to member ID")
			}
		}
		if confChangeContext.IsPromote {
			s.cluster.PromoteMember(confChangeContext.Member.ID)
		} else {
			s.cluster.AddMember(&confChangeContext.Member)

			if confChangeContext.Member.ID != s.id {
				s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
			}
		}

		// update the isLearner metric when this server id is equal to the id in raft member confChange
		if confChangeContext.Member.ID == s.id {
			if cc.Type == raftpb.ConfChangeAddLearnerNode {
				isLearner.Set(1)
			} else {
				isLearner.Set(0)
			}
		}

	case raftpb.ConfChangeRemoveNode:
		id := types.ID(cc.NodeID)
		s.cluster.RemoveMember(id)
		if id == s.id {
			return true, nil
		}
		s.r.transport.RemovePeer(id)

	case raftpb.ConfChangeUpdateNode:
		m := new(membership.Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			if lg != nil {
				lg.Panic("failed to unmarshal member", zap.Error(err))
			} else {
				plog.Panicf("unmarshal member should never fail: %v", err)
			}
		}
		if cc.NodeID != uint64(m.ID) {
			if lg != nil {
				lg.Panic(
					"got different member ID",
					zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
					zap.String("member-id-from-message", m.ID.String()),
				)
			} else {
				plog.Panicf("nodeID should always be equal to member ID")
			}
		}
		s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
		if m.ID != s.id {
			s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
		}
	}

	return false, nil
}

// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
	clone := s.v2store.Clone()
	// commit kv to write metadata (for example: consistent index) to disk.
	// KV().commit() updates the consistent index in backend.
	// All operations that update the consistent index must be called sequentially
	// from the applyAll function.
	// So KV().Commit() cannot run in parallel with apply. It has to be called outside
	// the goroutine created below.
	s.KV().Commit()

	s.goAttach(func() {
		lg := s.getLogger()

		d, err := clone.SaveNoCopy()
		// TODO: current store will never fail to do a snapshot
		// what should we do if the store might fail?
		if err != nil {
			if lg != nil {
				lg.Panic("failed to save v2 store", zap.Error(err))
			} else {
				plog.Panicf("store save should never fail: %v", err)
			}
		}

		snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
		if err != nil {
			// the snapshot was done asynchronously with the progress of raft.
			// raft might already have a newer snapshot.
			if err == raft.ErrSnapOutOfDate {
				return
			}
			if lg != nil {
				lg.Panic("failed to create snapshot", zap.Error(err))
			} else {
				plog.Panicf("unexpected create snapshot error %v", err)
			}
		}

		// SaveSnap saves the snapshot and releases the locked wal files
		// to the snapshot index.
		if err = s.r.storage.SaveSnap(snap); err != nil {
			if lg != nil {
				lg.Panic("failed to save snapshot", zap.Error(err))
			} else {
				plog.Fatalf("save snapshot error: %v", err)
			}
		}
		if lg != nil {
			lg.Info(
				"saved snapshot",
				zap.Uint64("snapshot-index", snap.Metadata.Index),
			)
		} else {
			plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
		}

		// When sending a snapshot, etcd will pause compaction.
		// After receiving a snapshot, the slow follower needs to get all the entries
		// right after the sent snapshot in order to catch up. If we do not pause
		// compaction, the log entries right after the sent snapshot might already be
		// compacted. This happens when the snapshot takes a long time to send and save.
		// Pausing compaction avoids triggering a snapshot sending cycle.
		if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
			if lg != nil {
				lg.Info("skip compaction since there is an inflight snapshot")
			} else {
				plog.Infof("skip compaction since there is an inflight snapshot")
			}
			return
		}

		// keep some in-memory log entries for slow followers.
		compacti := uint64(1)
		if snapi > s.Cfg.SnapshotCatchUpEntries {
			compacti = snapi - s.Cfg.SnapshotCatchUpEntries
		}

		err = s.r.raftStorage.Compact(compacti)
		if err != nil {
			// the compaction was done asynchronously with the progress of raft.
			// the raft log might already have been compacted.
			if err == raft.ErrCompacted {
				return
			}
			if lg != nil {
				lg.Panic("failed to compact", zap.Error(err))
			} else {
				plog.Panicf("unexpected compaction error %v", err)
			}
		}
		if lg != nil {
			lg.Info(
				"compacted Raft logs",
				zap.Uint64("compact-index", compacti),
			)
		} else {
			plog.Infof("compacted raft log at %d", compacti)
		}
	})
}
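
// Worked example of the compaction arithmetic above, assuming
// SnapshotCatchUpEntries is 5000: a snapshot at snapi = 100000 compacts the
// in-memory raft log up to index 95000, leaving the newest 5000 entries for
// slow followers; at snapi = 3000 nothing is trimmed (compacti stays 1).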

// CutPeer drops messages to the specified peer.
func (s *EtcdServer) CutPeer(id types.ID) {
	tr, ok := s.r.transport.(*rafthttp.Transport)
	if ok {
		tr.CutPeer(id)
	}
}

// MendPeer recovers the message dropping behavior of the given peer.
func (s *EtcdServer) MendPeer(id types.ID) {
	tr, ok := s.r.transport.(*rafthttp.Transport)
	if ok {
		tr.MendPeer(id)
	}
}

func (s *EtcdServer) PauseSending() { s.r.pauseSending() }

func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }

func (s *EtcdServer) ClusterVersion() *semver.Version {
	if s.cluster == nil {
		return nil
	}
	return s.cluster.Version()
}

// monitorVersions checks the members' versions every monitorVersionInterval.
// It updates the cluster version if all members agree on a higher one.
// It prints out a log entry if there is a member with a higher version than
// the local version.
func (s *EtcdServer) monitorVersions() {
	for {
		select {
		case <-s.forceVersionC:
		case <-time.After(monitorVersionInterval):
		case <-s.stopping:
			return
		}

		if s.Leader() != s.ID() {
			continue
		}

		v := decideClusterVersion(s.getLogger(), getVersions(s.getLogger(), s.cluster, s.id, s.peerRt))
		if v != nil {
			// only keep major.minor version for comparison
			v = &semver.Version{
				Major: v.Major,
				Minor: v.Minor,
			}
		}

		// if the current version is nil:
		// 1. use the decided version if possible
		// 2. or use the min cluster version
		if s.cluster.Version() == nil {
			verStr := version.MinClusterVersion
			if v != nil {
				verStr = v.String()
			}
			s.goAttach(func() { s.updateClusterVersion(verStr) })
			continue
		}

		// update cluster version only if the decided version is greater than
		// the current cluster version
		if v != nil && s.cluster.Version().LessThan(*v) {
			s.goAttach(func() { s.updateClusterVersion(v.String()) })
		}
	}
}
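
// For example, during a rolling upgrade the decided version stays at the old
// major.minor until every member reports the new binary; only then does the
// leader (the sole node running this loop's update path) propose raising the
// cluster version. Patch releases never trigger an update, since only
// major.minor is compared.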

func (s *EtcdServer) updateClusterVersion(ver string) {
	lg := s.getLogger()

	if s.cluster.Version() == nil {
		if lg != nil {
			lg.Info(
				"setting up initial cluster version",
				zap.String("cluster-version", version.Cluster(ver)),
			)
		} else {
			plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
		}
	} else {
		if lg != nil {
			lg.Info(
				"updating cluster version",
				zap.String("from", version.Cluster(s.cluster.Version().String())),
				zap.String("to", version.Cluster(ver)),
			)
		} else {
			plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
		}
	}

	req := pb.Request{
		Method: "PUT",
		Path:   membership.StoreClusterVersionKey(),
		Val:    ver,
	}

	ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
	_, err := s.Do(ctx, req)
	cancel()

	switch err {
	case nil:
		if lg != nil {
			lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver)))
		}
		return

	case ErrStopped:
		if lg != nil {
			lg.Warn("aborting cluster version update; server is stopped", zap.Error(err))
		} else {
			plog.Infof("aborting update cluster version because server is stopped")
		}
		return

	default:
		if lg != nil {
			lg.Warn("failed to update cluster version", zap.Error(err))
		} else {
			plog.Errorf("error updating cluster version (%v)", err)
		}
	}
}

func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
	switch err {
	case context.Canceled:
		return ErrCanceled

	case context.DeadlineExceeded:
		s.leadTimeMu.RLock()
		curLeadElected := s.leadElectedTime
		s.leadTimeMu.RUnlock()
		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
		if start.After(prevLeadLost) && start.Before(curLeadElected) {
			return ErrTimeoutDueToLeaderFail
		}

		lead := types.ID(s.getLead())
		switch lead {
		case types.ID(raft.None):
			// TODO: return an error that specifies this happens because the cluster has no leader now
		case s.ID():
			if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
				return ErrTimeoutDueToConnectionLost
			}
		default:
			if !isConnectedSince(s.r.transport, start, lead) {
				return ErrTimeoutDueToConnectionLost
			}
		}
		return ErrTimeout

	default:
		return err
	}
}
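
// For illustration, with TickMs = 100 and ElectionTicks = 10 (common defaults),
// prevLeadLost is leadElectedTime minus 2*10*100ms = 2s: a proposal started
// inside that 2s window before the current leader took over is classified as
// ErrTimeoutDueToLeaderFail instead of a generic ErrTimeout.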

func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }

func (s *EtcdServer) Backend() backend.Backend {
	s.bemu.Lock()
	defer s.bemu.Unlock()
	return s.be
}

func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }

func (s *EtcdServer) restoreAlarms() error {
	s.applyV3 = s.newApplierV3()

	as, err := v3alarm.NewAlarmStore(s)
	if err != nil {
		return err
	}
	s.alarmStore = as

	if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
		s.applyV3 = newApplierV3Capped(s.applyV3)
	}
	if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
		s.applyV3 = newApplierV3Corrupt(s.applyV3)
	}
	return nil
}

// goAttach creates a goroutine on a given function and tracks it using
// the etcdserver waitgroup.
func (s *EtcdServer) goAttach(f func()) {
	s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
	defer s.wgMu.RUnlock()

	select {
	case <-s.stopping:
		if lg := s.getLogger(); lg != nil {
			lg.Warn("server has stopped; skipping goAttach")
		} else {
			plog.Warning("server has stopped (skipping goAttach)")
		}
		return
	default:
	}

	// now safe to add since waitgroup wait has not started yet
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
}
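
// Illustrative usage (a sketch): work launched via goAttach is tracked by the
// server waitgroup, so shutdown waits for it, unlike a bare go statement:
//
//	s.goAttach(func() {
//		// long-running work; should watch s.stopping to exit promptly
//	})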

func (s *EtcdServer) Alarms() []*pb.AlarmMember {
	return s.alarmStore.Get(pb.AlarmType_NONE)
}

func (s *EtcdServer) Logger() *zap.Logger {
	return s.lg
}

// IsLearner reports whether the local member is a raft learner.
func (s *EtcdServer) IsLearner() bool {
	return s.cluster.IsLocalMemberLearner()
}

// IsMemberExist reports whether the member with the given id exists in the cluster.
func (s *EtcdServer) IsMemberExist(id types.ID) bool {
	return s.cluster.IsMemberExist(id)
}

// raftStatus returns the raft status of this etcd node.
func (s *EtcdServer) raftStatus() raft.Status {
	return s.r.Node.Status()
}